xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision dd5f5c1afa4ab969b68717be955752f19527fb17)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hif.h"
21 #include "hif_io32.h"
22 #include "ce_api.h"
23 #include "ce_main.h"
24 #include "ce_internal.h"
25 #include "ce_reg.h"
26 #include "qdf_lock.h"
27 #include "regtable.h"
28 #include "hif_main.h"
29 #include "hif_debug.h"
30 #include "hif_napi.h"
31 #include "qdf_module.h"
32 #include <qdf_tracepoint.h>
33 
34 #ifdef IPA_OFFLOAD
35 #ifdef QCA_WIFI_3_0
36 #define CE_IPA_RING_INIT(ce_desc)                       \
37 	do {                                            \
38 		ce_desc->gather = 0;                    \
39 		ce_desc->enable_11h = 0;                \
40 		ce_desc->meta_data_low = 0;             \
41 		ce_desc->packet_result_offset = 64;     \
42 		ce_desc->toeplitz_hash_enable = 0;      \
43 		ce_desc->addr_y_search_disable = 0;     \
44 		ce_desc->addr_x_search_disable = 0;     \
45 		ce_desc->misc_int_disable = 0;          \
46 		ce_desc->target_int_disable = 0;        \
47 		ce_desc->host_int_disable = 0;          \
48 		ce_desc->dest_byte_swap = 0;            \
49 		ce_desc->byte_swap = 0;                 \
50 		ce_desc->type = 2;                      \
51 		ce_desc->tx_classify = 1;               \
52 		ce_desc->buffer_addr_hi = 0;            \
53 		ce_desc->meta_data = 0;                 \
54 		ce_desc->nbytes = 128;                  \
55 	} while (0)
56 #else
57 #define CE_IPA_RING_INIT(ce_desc)                       \
58 	do {                                            \
59 		ce_desc->byte_swap = 0;                 \
60 		ce_desc->nbytes = 60;                   \
61 		ce_desc->gather = 0;                    \
62 	} while (0)
63 #endif /* QCA_WIFI_3_0 */
64 #endif /* IPA_OFFLOAD */
65 
66 static int war1_allow_sleep;
67 /* io32 write workaround */
68 static int hif_ce_war1;
69 
70 /**
71  * hif_ce_war_disable() - disable ce war globally
72  */
73 void hif_ce_war_disable(void)
74 {
75 	hif_ce_war1 = 0;
76 }
77 
78 /**
79  * hif_ce_war_enable() - enable ce war globally
80  */
81 void hif_ce_war_enable(void)
82 {
83 	hif_ce_war1 = 1;
84 }
85 
86 /*
87  * Note: for MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) check needs
88  * to be kept here
89  */
90 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
91 
92 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
93 #define CE_DEBUG_DATA_PER_ROW 16
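
/*
 * Worked example: hex_dump_to_buffer() with a group size of 1 emits two
 * hex digits plus a separating space per byte, with no trailing space,
 * so one full row of CE_DEBUG_DATA_PER_ROW (16) bytes occupies
 * CE_DEBUG_PRINT_BUF_SIZE(16) = 16 * 3 - 1 = 47 characters of the
 * output buffer.
 */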
94 
95 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
96 
97 int get_next_record_index(qdf_atomic_t *table_index, int array_size)
98 {
99 	int record_index = qdf_atomic_inc_return(table_index);
100 
101 	if (record_index == array_size)
102 		qdf_atomic_sub(array_size, table_index);
103 
104 	while (record_index >= array_size)
105 		record_index -= array_size;
106 
107 	return record_index;
108 }
109 
110 qdf_export_symbol(get_next_record_index);
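
/*
 * Usage sketch (illustrative only; the record type and table below are
 * hypothetical). get_next_record_index() wraps around array_size, so a
 * fixed-size table indexed by it behaves as a circular history buffer:
 *
 *	struct my_record table[MY_HIST_MAX];
 *	qdf_atomic_t table_index;
 *
 *	int idx = get_next_record_index(&table_index, MY_HIST_MAX);
 *
 *	table[idx].time = qdf_get_log_timestamp();
 *	table[idx].type = event_type;
 */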
111 
112 #ifdef HIF_CE_DEBUG_DATA_BUF
113 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
114 {
115 	uint8_t *data = NULL;
116 
117 	if (!event->data) {
118 		hif_err_rl("No ce debug memory allocated");
119 		return;
120 	}
121 
122 	if (event->memory && len > 0)
123 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
124 
125 	event->actual_data_len = 0;
126 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
127 
128 	if (data && len > 0) {
129 		qdf_mem_copy(event->data, data,
130 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
131 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
132 		event->actual_data_len = len;
133 	}
134 }
135 
136 qdf_export_symbol(hif_ce_desc_data_record);
137 
138 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
139 {
140 	qdf_mem_zero(event,
141 		     offsetof(struct hif_ce_desc_event, data));
142 }
143 
144 qdf_export_symbol(hif_clear_ce_desc_debug_data);
145 #else
146 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
147 {
148 	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
149 }
150 
151 qdf_export_symbol(hif_clear_ce_desc_debug_data);
152 #endif /* HIF_CE_DEBUG_DATA_BUF */
153 
154 #if defined(HIF_RECORD_PADDR)
155 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
156 				 struct hif_ce_desc_event *event,
157 				 qdf_nbuf_t memory)
158 {
159 	if (memory) {
160 		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
161 		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
162 					scn->qdf_dev,
163 					event->dma_addr);
164 
165 		event->virt_to_phy =
166 			virt_to_phys(qdf_nbuf_data(memory));
167 	}
168 }
169 #endif /* HIF_RECORD_PADDR */
170 
171 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx)
172 {
173 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
174 	struct ce_desc_hist *ce_hist;
175 	struct latest_evt_history *evt;
176 	int i;
177 
178 	if (!scn)
179 		return;
180 
181 	ce_hist = &scn->hif_ce_desc_hist;
182 
183 	for (i = 0; i < HIF_CE_MAX_LATEST_HIST; i++) {
184 		if (!ce_hist->enable[i + HIF_CE_MAX_LATEST_HIST])
185 			continue;
186 
187 		evt = &ce_hist->latest_evt[i];
188 		hif_info_high("CE_id:%d cpu_id:%d irq_entry:0x%llx tasklet_entry:0x%llx tasklet_resched:0x%llx tasklet_exit:0x%llx ce_work:0x%llx hp:%x tp:%x",
189 			      (i + HIF_CE_MAX_LATEST_HIST), evt->cpu_id,
190 			      evt->irq_entry_ts, evt->bh_entry_ts,
191 			      evt->bh_resched_ts, evt->bh_exit_ts,
192 			      evt->bh_work_ts, evt->ring_hp, evt->ring_tp);
193 	}
194 }
195 
196 void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
197 			   uint8_t type,
198 			   int ce_id, uint64_t time,
199 			   uint32_t hp, uint32_t tp)
200 {
201 	struct latest_evt_history *latest_evt;
202 
203 	if (ce_id != 2 && ce_id != 3)
204 		return;
205 
206 	latest_evt = &ce_hist->latest_evt[ce_id - HIF_CE_MAX_LATEST_HIST];
207 
208 	switch (type) {
209 	case HIF_IRQ_EVENT:
210 		latest_evt->irq_entry_ts = time;
211 		latest_evt->cpu_id = qdf_get_cpu();
212 		break;
213 	case HIF_CE_TASKLET_ENTRY:
214 		latest_evt->bh_entry_ts = time;
215 		break;
216 	case HIF_CE_TASKLET_RESCHEDULE:
217 		latest_evt->bh_resched_ts = time;
218 		break;
219 	case HIF_CE_TASKLET_EXIT:
220 		latest_evt->bh_exit_ts = time;
221 		break;
222 	case HIF_TX_DESC_COMPLETION:
223 	case HIF_CE_DEST_STATUS_RING_REAP:
224 		latest_evt->bh_work_ts = time;
225 		latest_evt->ring_hp = hp;
226 		latest_evt->ring_tp = tp;
227 		break;
228 	default:
229 		break;
230 	}
231 }
232 
233 /**
234  * hif_record_ce_desc_event() - record ce descriptor events
235  * @scn: hif_softc
236  * @ce_id: which ce is the event occurring on
237  * @type: what happened
238  * @descriptor: pointer to the descriptor posted/completed
239  * @memory: virtual address of buffer related to the descriptor
240  * @index: index that the descriptor was/will be at.
241  * @len: length in bytes of the data associated with the descriptor
242  */
243 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
244 				enum hif_ce_event_type type,
245 				union ce_desc *descriptor,
246 				void *memory, int index,
247 				int len)
248 {
249 	int record_index;
250 	struct hif_ce_desc_event *event;
251 
252 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
253 	struct hif_ce_desc_event *hist_ev = NULL;
254 
255 	if (ce_id < CE_COUNT_MAX)
256 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
257 	else
258 		return;
259 
260 	if (ce_id >= CE_COUNT_MAX)
261 		return;
262 
263 	if (!ce_hist->enable[ce_id])
264 		return;
265 
266 	if (!hist_ev)
267 		return;
268 
269 	record_index = get_next_record_index(
270 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
271 
272 	event = &hist_ev[record_index];
273 
274 	hif_clear_ce_desc_debug_data(event);
275 
276 	event->type = type;
277 	event->time = qdf_get_log_timestamp();
278 	event->cpu_id = qdf_get_cpu();
279 
280 	if (descriptor)
281 		qdf_mem_copy(&event->descriptor, descriptor,
282 			     sizeof(union ce_desc));
283 
284 	event->memory = memory;
285 	event->index = index;
286 
287 	if (event->type == HIF_RX_DESC_POST ||
288 	    event->type == HIF_RX_DESC_COMPLETION)
289 		hif_ce_desc_record_rx_paddr(scn, event, memory);
290 
291 	if (ce_hist->data_enable[ce_id])
292 		hif_ce_desc_data_record(event, len);
293 
294 	hif_record_latest_evt(ce_hist, type, ce_id, event->time, 0, 0);
295 }
296 qdf_export_symbol(hif_record_ce_desc_event);
297 
298 /**
299  * ce_init_ce_desc_event_log() - initialize the ce event log
300  * @scn: HIF context
301  * @ce_id: copy engine id for which we are initializing the log
302  * @size: size of array to dedicate
303  *
304  * Currently the passed size is ignored in favor of a precompiled value.
305  */
306 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
307 {
308 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
309 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
310 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
311 }
312 
313 /**
314  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
315  * @scn: HIF context
316  * @ce_id: copy engine id for which we are deinitializing the log
317  *
318  */
319 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
320 {
321 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
322 
323 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
324 }
325 
326 #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
327 void hif_record_ce_desc_event(struct hif_softc *scn,
328 		int ce_id, enum hif_ce_event_type type,
329 		union ce_desc *descriptor, void *memory,
330 		int index, int len)
331 {
332 }
333 qdf_export_symbol(hif_record_ce_desc_event);
334 
335 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
336 					int size)
337 {
338 }
339 
340 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
341 {
342 }
343 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
344 
345 #ifdef NAPI_YIELD_BUDGET_BASED
346 bool hif_ce_service_should_yield(struct hif_softc *scn,
347 				 struct CE_state *ce_state)
348 {
349 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
350 
351 	/* Set receive_count to MAX_NUM_OF_RECEIVES when it goes beyond
352 	 * MAX_NUM_OF_RECEIVES to avoid a NAPI budget calculation issue.
353 	 * This can happen in fast path handling because processing is
354 	 * done in batches.
355 	 */
356 	if (yield)
357 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
358 
359 	return yield;
360 }
361 #else
362 /**
363  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
364  * @scn: hif context
365  * @ce_state: context of the copy engine being serviced
366  *
367  * Return: true if the service should yield
368  */
369 bool hif_ce_service_should_yield(struct hif_softc *scn,
370 				 struct CE_state *ce_state)
371 {
372 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
373 
374 	time_limit_reached = qdf_time_sched_clock() >
375 					ce_state->ce_service_yield_time ? 1 : 0;
376 
377 	if (!time_limit_reached)
378 		rxpkt_thresh_reached = hif_max_num_receives_reached
379 					(scn, ce_state->receive_count);
380 
381 	/* Set receive_count to MAX_NUM_OF_RECEIVES when it goes beyond
382 	 * MAX_NUM_OF_RECEIVES to avoid a NAPI budget calculation issue.
383 	 * This can happen in fast path handling because processing is
384 	 * done in batches.
385 	 */
386 	if (rxpkt_thresh_reached)
387 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
388 
389 	yield =  time_limit_reached || rxpkt_thresh_reached;
390 
391 	if (yield &&
392 	    ce_state->htt_rx_data &&
393 	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
394 		hif_napi_update_yield_stats(ce_state,
395 					    time_limit_reached,
396 					    rxpkt_thresh_reached);
397 	}
398 
399 	return yield;
400 }
401 qdf_export_symbol(hif_ce_service_should_yield);
402 #endif
403 
404 void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush)
405 {
406 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
407 	struct CE_ring_state *src_ring = ce_state->src_ring;
408 	struct hif_softc *scn = ce_state->scn;
409 
410 	if (force_flush)
411 		ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);
412 
413 	if (ce_ring_get_clear_event(src_ring, CE_RING_FLUSH_EVENT)) {
414 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
415 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
416 					  src_ring->write_index);
417 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
418 
419 		src_ring->last_flush_ts = qdf_get_log_timestamp();
420 		hif_debug("flushed");
421 	}
422 }
423 
424 /* Make sure this wrapper is called under ce_index_lock */
425 void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
426 					 bool flush)
427 {
428 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
429 	struct CE_ring_state *src_ring = ce_state->src_ring;
430 	struct hif_softc *scn = ce_state->scn;
431 
432 	if (flush)
433 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
434 					  src_ring->write_index);
435 	else
436 		ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);
437 }
438 
439 /*
440  * war_ce_src_ring_write_idx_set() - set the source ring write index,
441  * applying the io32 write workaround (hif_ce_war1) when it is enabled.
442  */
443 
444 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
445 				   u32 ctrl_addr, unsigned int write_index)
446 {
447 	if (hif_ce_war1) {
448 		void __iomem *indicator_addr;
449 
450 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
451 
452 		if (!war1_allow_sleep
453 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
454 			hif_write32_mb(scn, indicator_addr,
455 				       (CDC_WAR_MAGIC_STR | write_index));
456 		} else {
457 			unsigned long irq_flags;
458 
459 			local_irq_save(irq_flags);
460 			hif_write32_mb(scn, indicator_addr, 1);
461 
462 			/*
463 			 * PCIE write waits for ACK in IPQ8K, there is no
464 			 * need to read back value.
465 			 */
466 			(void)hif_read32_mb(scn, indicator_addr);
467 			/* conservative */
468 			(void)hif_read32_mb(scn, indicator_addr);
469 
470 			CE_SRC_RING_WRITE_IDX_SET(scn,
471 						  ctrl_addr, write_index);
472 
473 			hif_write32_mb(scn, indicator_addr, 0);
474 			local_irq_restore(irq_flags);
475 		}
476 	} else {
477 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
478 	}
479 }
480 
481 qdf_export_symbol(war_ce_src_ring_write_idx_set);
482 
483 QDF_STATUS
484 ce_send(struct CE_handle *copyeng,
485 		void *per_transfer_context,
486 		qdf_dma_addr_t buffer,
487 		uint32_t nbytes,
488 		uint32_t transfer_id,
489 		uint32_t flags,
490 		uint32_t user_flag)
491 {
492 	struct CE_state *CE_state = (struct CE_state *)copyeng;
493 	QDF_STATUS status;
494 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
495 
496 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
497 	status = hif_state->ce_services->ce_send_nolock(copyeng,
498 			per_transfer_context, buffer, nbytes,
499 			transfer_id, flags, user_flag);
500 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
501 
502 	return status;
503 }
504 qdf_export_symbol(ce_send);
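
/*
 * Usage sketch for ce_send() (illustrative only; the nbuf and transfer
 * identifiers below are hypothetical). The caller passes the DMA address
 * of an already-mapped buffer; ce_send() takes the CE index lock itself,
 * so no external locking is required around this call:
 *
 *	qdf_dma_addr_t paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *	QDF_STATUS status;
 *
 *	status = ce_send(ce_hdl, nbuf, paddr, qdf_nbuf_len(nbuf),
 *			 transfer_id, 0, 0);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		hif_err("CE ring full, drop or retry the frame");
 */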
505 
506 unsigned int ce_sendlist_sizeof(void)
507 {
508 	return sizeof(struct ce_sendlist);
509 }
510 
511 void ce_sendlist_init(struct ce_sendlist *sendlist)
512 {
513 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
514 
515 	sl->num_items = 0;
516 }
517 
518 QDF_STATUS
519 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
520 					qdf_dma_addr_t buffer,
521 					uint32_t nbytes,
522 					uint32_t flags,
523 					uint32_t user_flags)
524 {
525 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
526 	unsigned int num_items = sl->num_items;
527 	struct ce_sendlist_item *item;
528 
529 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
530 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
531 		return QDF_STATUS_E_RESOURCES;
532 	}
533 
534 	item = &sl->item[num_items];
535 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
536 	item->data = buffer;
537 	item->u.nbytes = nbytes;
538 	item->flags = flags;
539 	item->user_flags = user_flags;
540 	sl->num_items = num_items + 1;
541 	return QDF_STATUS_SUCCESS;
542 }
543 
544 QDF_STATUS
545 ce_sendlist_send(struct CE_handle *copyeng,
546 		 void *per_transfer_context,
547 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
548 {
549 	struct CE_state *CE_state = (struct CE_state *)copyeng;
550 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
551 
552 	return hif_state->ce_services->ce_sendlist_send(copyeng,
553 			per_transfer_context, sendlist, transfer_id);
554 }
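
/*
 * Usage sketch for the sendlist API (illustrative only; the fragment
 * addresses, lengths and contexts below are hypothetical). A caller
 * builds a sendlist on the stack, adds one or more DMA-mapped buffers
 * and then hands the whole list to the copy engine in a single call:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, payload_paddr, payload_len, 0, 0);
 *	ce_sendlist_send(ce_hdl, per_transfer_ctx, &sendlist, transfer_id);
 *
 * Each *_buf_add() and the final send return a QDF_STATUS that should be
 * checked; *_buf_add() fails once CE_SENDLIST_ITEMS_MAX items are queued.
 */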
555 
556 #ifndef AH_NEED_TX_DATA_SWAP
557 #define AH_NEED_TX_DATA_SWAP 0
558 #endif
559 
560 /**
561  * ce_batch_send() - send a bunch of msdus at once
562  * @ce_tx_hdl : pointer to CE handle
563  * @msdu : list of msdus to be sent
564  * @transfer_id : transfer id
565  * @len : Downloaded length
566  * @sendhead : sendhead
567  *
568  * Assumption: called with a linked list of MSDUs
569  * Function:
570  * For each msdu in the list:
571  * 1. Send the msdu
572  * 2. Increment the write index accordingly.
573  *
574  * Return: list of msdus not sent
575  */
576 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
577 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
578 {
579 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
580 	struct hif_softc *scn = ce_state->scn;
581 	struct CE_ring_state *src_ring = ce_state->src_ring;
582 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
583 	/*  A_target_id_t targid = TARGID(scn);*/
584 
585 	uint32_t nentries_mask = src_ring->nentries_mask;
586 	uint32_t sw_index, write_index;
587 
588 	struct CE_src_desc *src_desc_base =
589 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
590 	uint32_t *src_desc;
591 
592 	struct CE_src_desc lsrc_desc = {0};
593 	int deltacount = 0;
594 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
595 
596 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
597 	sw_index = src_ring->sw_index;
598 	write_index = src_ring->write_index;
599 
600 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
601 
602 	while (msdu) {
603 		tempnext = qdf_nbuf_next(msdu);
604 
605 		if (deltacount < 2) {
606 			if (sendhead)
607 				return msdu;
608 			hif_err("Out of descriptors");
609 			src_ring->write_index = write_index;
610 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
611 					write_index);
612 
613 			sw_index = src_ring->sw_index;
614 			write_index = src_ring->write_index;
615 
616 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
617 					sw_index-1);
618 			if (!freelist) {
619 				freelist = msdu;
620 				hfreelist = msdu;
621 			} else {
622 				qdf_nbuf_set_next(freelist, msdu);
623 				freelist = msdu;
624 			}
625 			qdf_nbuf_set_next(msdu, NULL);
626 			msdu = tempnext;
627 			continue;
628 		}
629 
630 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
631 				write_index);
632 
633 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
634 
635 		lsrc_desc.meta_data = transfer_id;
636 		if (len  > msdu->len)
637 			len =  msdu->len;
638 		lsrc_desc.nbytes = len;
639 		/*  Data packet is a byte stream, so disable byte swap */
640 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
641 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
642 
643 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
644 
645 
646 		src_ring->per_transfer_context[write_index] = msdu;
647 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
648 
649 		if (sendhead)
650 			break;
651 		qdf_nbuf_set_next(msdu, NULL);
652 		msdu = tempnext;
653 
654 	}
655 
656 
657 	src_ring->write_index = write_index;
658 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
659 
660 	return hfreelist;
661 }
662 
663 /**
664  * ce_update_tx_ring() - Advance sw index.
665  * @ce_tx_hdl : pointer to CE handle
666  * @num_htt_cmpls : htt completions received.
667  *
668  * Function:
669  * Increment the value of sw index of src ring
670  * according to number of htt completions
671  * received.
672  *
673  * Return: void
674  */
675 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
676 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
677 {
678 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
679 	struct CE_ring_state *src_ring = ce_state->src_ring;
680 	uint32_t nentries_mask = src_ring->nentries_mask;
681 	/*
682 	 * Advance the s/w index:
683 	 * This effectively simulates completing the CE ring descriptors
684 	 */
685 	src_ring->sw_index =
686 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
687 				num_htt_cmpls);
688 }
689 #else
690 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
691 {}
692 #endif
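
/*
 * Illustrative arithmetic for the index wrap-around in ce_update_tx_ring()
 * above (the DATA_CE_SW_INDEX_NO_INLINE_UPDATE variant), assuming a source
 * ring of 1024 entries (nentries_mask == 0x3FF) and that CE_RING_IDX_ADD
 * masks the sum with nentries_mask:
 *
 *	sw_index      = 1020
 *	num_htt_cmpls = 8
 *	new sw_index  = (1020 + 8) & 0x3FF = 4
 *
 * i.e. the software index wraps back to the start of the ring.
 */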
693 
694 /**
695  * ce_send_single() - send a single msdu
696  * @ce_tx_hdl : pointer to CE handle
697  * @msdu : msdu to be sent
698  * @transfer_id : transfer id
699  * @len : Downloaded length
700  *
701  * Function:
702  * 1. Send one msdu
703  * 2. Increment the write index of the src ring accordingly.
704  *
705  * Return: QDF_STATUS: CE sent status
706  */
707 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
708 			  uint32_t transfer_id, u_int32_t len)
709 {
710 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
711 	struct hif_softc *scn = ce_state->scn;
712 	struct CE_ring_state *src_ring = ce_state->src_ring;
713 	uint32_t ctrl_addr = ce_state->ctrl_addr;
714 	/*A_target_id_t targid = TARGID(scn);*/
715 
716 	uint32_t nentries_mask = src_ring->nentries_mask;
717 	uint32_t sw_index, write_index;
718 
719 	struct CE_src_desc *src_desc_base =
720 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
721 	uint32_t *src_desc;
722 
723 	struct CE_src_desc lsrc_desc = {0};
724 	enum hif_ce_event_type event_type;
725 
726 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
727 	sw_index = src_ring->sw_index;
728 	write_index = src_ring->write_index;
729 
730 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
731 					sw_index-1) < 1)) {
732 		hif_err("ce send fail %d %d %d", nentries_mask,
733 		       write_index, sw_index);
734 		return QDF_STATUS_E_RESOURCES;
735 	}
736 
737 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
738 
739 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
740 
741 	lsrc_desc.meta_data = transfer_id;
742 	lsrc_desc.nbytes = len;
743 	/*  Data packet is a byte stream, so disable byte swap */
744 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
745 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
746 
747 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
748 
749 
750 	src_ring->per_transfer_context[write_index] = msdu;
751 
752 	if (((struct CE_src_desc *)src_desc)->gather)
753 		event_type = HIF_TX_GATHER_DESC_POST;
754 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
755 		event_type = HIF_TX_DESC_SOFTWARE_POST;
756 	else
757 		event_type = HIF_TX_DESC_POST;
758 
759 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
760 				(union ce_desc *)src_desc, msdu,
761 				write_index, len);
762 
763 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
764 
765 	src_ring->write_index = write_index;
766 
767 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
768 
769 	return QDF_STATUS_SUCCESS;
770 }
771 
772 /**
773  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
774  * @copyeng: copy engine handle
775  * @per_recv_context: virtual address of the nbuf
776  * @buffer: physical address of the nbuf
777  *
778  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
779  */
780 QDF_STATUS
781 ce_recv_buf_enqueue(struct CE_handle *copyeng,
782 		    void *per_recv_context, qdf_dma_addr_t buffer)
783 {
784 	struct CE_state *CE_state = (struct CE_state *)copyeng;
785 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
786 
787 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
788 			per_recv_context, buffer);
789 }
790 qdf_export_symbol(ce_recv_buf_enqueue);
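
/*
 * Usage sketch for ce_recv_buf_enqueue() (illustrative only; the nbuf
 * allocation parameters below are hypothetical). The caller DMA-maps an
 * nbuf for receive and posts its physical address, with the nbuf itself
 * as the per-transfer context:
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
 *
 *	if (nbuf &&
 *	    qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE) ==
 *	    QDF_STATUS_SUCCESS)
 *		ce_recv_buf_enqueue(ce_hdl, nbuf,
 *				    qdf_nbuf_get_frag_paddr(nbuf, 0));
 */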
791 
792 void
793 ce_send_watermarks_set(struct CE_handle *copyeng,
794 		       unsigned int low_alert_nentries,
795 		       unsigned int high_alert_nentries)
796 {
797 	struct CE_state *CE_state = (struct CE_state *)copyeng;
798 	uint32_t ctrl_addr = CE_state->ctrl_addr;
799 	struct hif_softc *scn = CE_state->scn;
800 
801 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
802 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
803 }
804 
805 void
806 ce_recv_watermarks_set(struct CE_handle *copyeng,
807 		       unsigned int low_alert_nentries,
808 		       unsigned int high_alert_nentries)
809 {
810 	struct CE_state *CE_state = (struct CE_state *)copyeng;
811 	uint32_t ctrl_addr = CE_state->ctrl_addr;
812 	struct hif_softc *scn = CE_state->scn;
813 
814 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
815 				low_alert_nentries);
816 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
817 				high_alert_nentries);
818 }
819 
820 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
821 {
822 	struct CE_state *CE_state = (struct CE_state *)copyeng;
823 	struct CE_ring_state *src_ring = CE_state->src_ring;
824 	unsigned int nentries_mask = src_ring->nentries_mask;
825 	unsigned int sw_index;
826 	unsigned int write_index;
827 
828 	qdf_spin_lock(&CE_state->ce_index_lock);
829 	sw_index = src_ring->sw_index;
830 	write_index = src_ring->write_index;
831 	qdf_spin_unlock(&CE_state->ce_index_lock);
832 
833 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
834 }
835 
836 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
837 {
838 	struct CE_state *CE_state = (struct CE_state *)copyeng;
839 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
840 	unsigned int nentries_mask = dest_ring->nentries_mask;
841 	unsigned int sw_index;
842 	unsigned int write_index;
843 
844 	qdf_spin_lock(&CE_state->ce_index_lock);
845 	sw_index = dest_ring->sw_index;
846 	write_index = dest_ring->write_index;
847 	qdf_spin_unlock(&CE_state->ce_index_lock);
848 
849 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
850 }
851 
852 /*
853  * ce_completed_recv_next() - wrapper that takes the CE index lock and
854  * dispatches to the service-specific nolock handler.
855  */
856 QDF_STATUS
857 ce_completed_recv_next(struct CE_handle *copyeng,
858 		       void **per_CE_contextp,
859 		       void **per_transfer_contextp,
860 		       qdf_dma_addr_t *bufferp,
861 		       unsigned int *nbytesp,
862 		       unsigned int *transfer_idp, unsigned int *flagsp)
863 {
864 	struct CE_state *CE_state = (struct CE_state *)copyeng;
865 	QDF_STATUS status;
866 	struct hif_softc *scn = CE_state->scn;
867 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
868 	struct ce_ops *ce_services;
869 
870 	ce_services = hif_state->ce_services;
871 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
872 	status =
873 		ce_services->ce_completed_recv_next_nolock(CE_state,
874 				per_CE_contextp, per_transfer_contextp, bufferp,
875 					      nbytesp, transfer_idp, flagsp);
876 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
877 
878 	return status;
879 }
880 
881 QDF_STATUS
882 ce_revoke_recv_next(struct CE_handle *copyeng,
883 		    void **per_CE_contextp,
884 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
885 {
886 	struct CE_state *CE_state = (struct CE_state *)copyeng;
887 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
888 
889 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
890 			per_CE_contextp, per_transfer_contextp, bufferp);
891 }
892 
893 QDF_STATUS
894 ce_cancel_send_next(struct CE_handle *copyeng,
895 		void **per_CE_contextp,
896 		void **per_transfer_contextp,
897 		qdf_dma_addr_t *bufferp,
898 		unsigned int *nbytesp,
899 		unsigned int *transfer_idp,
900 		uint32_t *toeplitz_hash_result)
901 {
902 	struct CE_state *CE_state = (struct CE_state *)copyeng;
903 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
904 
905 	return hif_state->ce_services->ce_cancel_send_next
906 		(copyeng, per_CE_contextp, per_transfer_contextp,
907 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
908 }
909 qdf_export_symbol(ce_cancel_send_next);
910 
911 QDF_STATUS
912 ce_completed_send_next(struct CE_handle *copyeng,
913 		       void **per_CE_contextp,
914 		       void **per_transfer_contextp,
915 		       qdf_dma_addr_t *bufferp,
916 		       unsigned int *nbytesp,
917 		       unsigned int *transfer_idp,
918 		       unsigned int *sw_idx,
919 		       unsigned int *hw_idx,
920 		       unsigned int *toeplitz_hash_result)
921 {
922 	struct CE_state *CE_state = (struct CE_state *)copyeng;
923 	struct hif_softc *scn = CE_state->scn;
924 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
925 	struct ce_ops *ce_services;
926 	QDF_STATUS status;
927 
928 	ce_services = hif_state->ce_services;
929 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
930 	status =
931 		ce_services->ce_completed_send_next_nolock(CE_state,
932 					per_CE_contextp, per_transfer_contextp,
933 					bufferp, nbytesp, transfer_idp, sw_idx,
934 					      hw_idx, toeplitz_hash_result);
935 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
936 
937 	return status;
938 }
939 
940 #ifdef ATH_11AC_TXCOMPACT
941 /* CE engine descriptor reap
942  * Similar to ce_per_engine_service; the only difference is that
943  * ce_per_engine_service does both receive processing and reaping of
944  * completed descriptors, while this function only reaps Tx complete
945  * descriptors. It is called from the threshold reap poll routine
946  * hif_send_complete_check, so it must not contain any receive
947  * functionality within it.
948  */
949 
950 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
951 {
952 	void *CE_context;
953 	void *transfer_context;
954 	qdf_dma_addr_t buf;
955 	unsigned int nbytes;
956 	unsigned int id;
957 	unsigned int sw_idx, hw_idx;
958 	uint32_t toeplitz_hash_result;
959 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
960 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
961 
962 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
963 		return;
964 
965 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
966 			NULL, NULL, 0, 0);
967 
968 	/* Since this function is called from both user context and
969 	 * tasklet context, the spinlock has to lock the bottom halves.
970 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
971 	 * enabled in TX polling mode. If this is not the case, more
972 	 * bottom-half spinlock changes are needed. Due to data path
973 	 * performance concerns, after internal discussion we've decided
974 	 * to make the minimum change, i.e., only address the issue seen
975 	 * in this function. The possible negative effect of this minimum
976 	 * change is that, in the future, if some other function is also
977 	 * opened up for use from user context, those cases will need to
978 	 * be addressed by changing spin_lock to spin_lock_bh as well.
979 	 */
980 
981 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
982 
983 	if (CE_state->send_cb) {
984 		{
985 			struct ce_ops *ce_services = hif_state->ce_services;
986 			/* Pop completed send buffers and call the
987 			 * registered send callback for each
988 			 */
989 			while (ce_services->ce_completed_send_next_nolock
990 				 (CE_state, &CE_context,
991 				  &transfer_context, &buf,
992 				  &nbytes, &id, &sw_idx, &hw_idx,
993 				  &toeplitz_hash_result) ==
994 				  QDF_STATUS_SUCCESS) {
995 				if (ce_id != CE_HTT_H2T_MSG) {
996 					qdf_spin_unlock_bh(
997 						&CE_state->ce_index_lock);
998 					CE_state->send_cb(
999 						(struct CE_handle *)
1000 						CE_state, CE_context,
1001 						transfer_context, buf,
1002 						nbytes, id, sw_idx, hw_idx,
1003 						toeplitz_hash_result);
1004 					qdf_spin_lock_bh(
1005 						&CE_state->ce_index_lock);
1006 				} else {
1007 					struct HIF_CE_pipe_info *pipe_info =
1008 						(struct HIF_CE_pipe_info *)
1009 						CE_context;
1010 
1011 					qdf_spin_lock_bh(&pipe_info->
1012 						 completion_freeq_lock);
1013 					pipe_info->num_sends_allowed++;
1014 					qdf_spin_unlock_bh(&pipe_info->
1015 						   completion_freeq_lock);
1016 				}
1017 			}
1018 		}
1019 	}
1020 
1021 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1022 
1023 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
1024 			NULL, NULL, 0, 0);
1025 	Q_TARGET_ACCESS_END(scn);
1026 }
1027 
1028 #endif /*ATH_11AC_TXCOMPACT */
1029 
1030 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
1031 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
1032 {
1033 	// QDF_IS_EPPING_ENABLED is a pre-Lithium feature
1034 	// CE4 completion is enabled only on Lithium and later targets,
1035 	// so there is no need to check for EPPING
1036 	return true;
1037 }
1038 
1039 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1040 
1041 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
1042 {
1043 	if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode))
1044 		return true;
1045 	else
1046 		return false;
1047 }
1048 
1049 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1050 
1051 /*
1052  * ce_engine_service_reg:
1053  *
1054  * Called from ce_per_engine_service and goes through the regular interrupt
1055  * handling that does not involve the WLAN fast path feature.
1056  *
1057  * Returns void
1058  */
1059 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
1060 {
1061 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1062 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1063 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1064 	void *CE_context;
1065 	void *transfer_context;
1066 	qdf_dma_addr_t buf;
1067 	unsigned int nbytes;
1068 	unsigned int id;
1069 	unsigned int flags;
1070 	unsigned int more_comp_cnt = 0;
1071 	unsigned int more_snd_comp_cnt = 0;
1072 	unsigned int sw_idx, hw_idx;
1073 	uint32_t toeplitz_hash_result;
1074 	uint32_t mode = hif_get_conparam(scn);
1075 
1076 more_completions:
1077 	if (CE_state->recv_cb) {
1078 
1079 		/* Pop completed recv buffers and call
1080 		 * the registered recv callback for each
1081 		 */
1082 		while (hif_state->ce_services->ce_completed_recv_next_nolock
1083 				(CE_state, &CE_context, &transfer_context,
1084 				&buf, &nbytes, &id, &flags) ==
1085 				QDF_STATUS_SUCCESS) {
1086 			qdf_spin_unlock(&CE_state->ce_index_lock);
1087 			CE_state->recv_cb((struct CE_handle *)CE_state,
1088 					  CE_context, transfer_context, buf,
1089 					  nbytes, id, flags);
1090 
1091 			qdf_spin_lock(&CE_state->ce_index_lock);
1092 			/*
1093 			 * EV #112693 -
1094 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
1095 			 * BSoD_0x133 occurred in VHT80 UDP_DL
1096 			 * Break out of the DPC by force if the number of
1097 			 * loops in hif_pci_ce_recv_data reaches
1098 			 * MAX_NUM_OF_RECEIVES, to avoid spending too much
1099 			 * time in the DPC for each interrupt. Schedule
1100 			 * another DPC to avoid data loss if a force-break
1101 			 * was taken. Currently this applies to Windows OS
1102 			 * only; Linux/Mac OS can extend it to their
1103 			 * platforms if necessary.
1104 			 */
1105 
1106 			/* Break out of receive processing by
1107 			 * force if force_break is set
1108 			 */
1109 			if (qdf_unlikely(CE_state->force_break)) {
1110 				qdf_atomic_set(&CE_state->rx_pending, 1);
1111 				return;
1112 			}
1113 		}
1114 	}
1115 
1116 	/*
1117 	 * Attention: the while loop below may potentially turn into an
1118 	 * infinite loop during a send stress test.
1119 	 * Resolve it the same way as the receive case (refer to EV #112693).
1120 	 */
1121 
1122 	if (CE_state->send_cb) {
1123 		/* Pop completed send buffers and call
1124 		 * the registered send callback for each
1125 		 */
1126 
1127 #ifdef ATH_11AC_TXCOMPACT
1128 		while (hif_state->ce_services->ce_completed_send_next_nolock
1129 			 (CE_state, &CE_context,
1130 			 &transfer_context, &buf, &nbytes,
1131 			 &id, &sw_idx, &hw_idx,
1132 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1133 
1134 			if (check_ce_id_and_epping_enabled(CE_id, mode)) {
1135 				qdf_spin_unlock(&CE_state->ce_index_lock);
1136 				CE_state->send_cb((struct CE_handle *)CE_state,
1137 						  CE_context, transfer_context,
1138 						  buf, nbytes, id, sw_idx,
1139 						  hw_idx, toeplitz_hash_result);
1140 				qdf_spin_lock(&CE_state->ce_index_lock);
1141 			} else {
1142 				struct HIF_CE_pipe_info *pipe_info =
1143 					(struct HIF_CE_pipe_info *)CE_context;
1144 
1145 				qdf_spin_lock_bh(&pipe_info->
1146 					      completion_freeq_lock);
1147 				pipe_info->num_sends_allowed++;
1148 				qdf_spin_unlock_bh(&pipe_info->
1149 						completion_freeq_lock);
1150 			}
1151 		}
1152 #else                           /*ATH_11AC_TXCOMPACT */
1153 		while (hif_state->ce_services->ce_completed_send_next_nolock
1154 			 (CE_state, &CE_context,
1155 			  &transfer_context, &buf, &nbytes,
1156 			  &id, &sw_idx, &hw_idx,
1157 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1158 			qdf_spin_unlock(&CE_state->ce_index_lock);
1159 			CE_state->send_cb((struct CE_handle *)CE_state,
1160 				  CE_context, transfer_context, buf,
1161 				  nbytes, id, sw_idx, hw_idx,
1162 				  toeplitz_hash_result);
1163 			qdf_spin_lock(&CE_state->ce_index_lock);
1164 		}
1165 #endif /*ATH_11AC_TXCOMPACT */
1166 	}
1167 
1168 more_watermarks:
1169 	if (CE_state->misc_cbs) {
1170 		if (CE_state->watermark_cb &&
1171 				hif_state->ce_services->watermark_int(CE_state,
1172 					&flags)) {
1173 			qdf_spin_unlock(&CE_state->ce_index_lock);
1174 			/* Convert HW IS bits to software flags */
1175 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1176 					CE_state->wm_context, flags);
1177 			qdf_spin_lock(&CE_state->ce_index_lock);
1178 		}
1179 	}
1180 
1181 	/*
1182 	 * Clear the misc interrupts (watermark) that were handled above,
1183 	 * and that will be checked again below.
1184 	 * Clear and check for copy-complete interrupts again, just in case
1185 	 * more copy completions happened while the misc interrupts were being
1186 	 * handled.
1187 	 */
1188 	if (!ce_srng_based(scn) && !CE_state->msi_supported) {
1189 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1190 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1191 					   CE_WATERMARK_MASK |
1192 					   HOST_IS_COPY_COMPLETE_MASK);
1193 		} else {
1194 			qdf_atomic_set(&CE_state->rx_pending, 0);
1195 			hif_err_rl("%s: target access is not allowed",
1196 				   __func__);
1197 			return;
1198 		}
1199 	}
1200 
1201 	/*
1202 	 * Now that per-engine interrupts are cleared, verify that
1203 	 * no recv interrupts arrive while processing send interrupts,
1204 	 * and no recv or send interrupts happened while processing
1205 	 * misc interrupts. Go back and check again. Keep checking until
1206 	 * we find no more events to process.
1207 	 */
1208 	if (CE_state->recv_cb &&
1209 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1210 				CE_state)) {
1211 		if (QDF_IS_EPPING_ENABLED(mode) ||
1212 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1213 			goto more_completions;
1214 		} else {
1215 			if (!ce_srng_based(scn) &&
1216 			    !CE_state->batch_intr_supported) {
1217 				hif_err_rl(
1218 					"Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1219 					CE_state->id,
1220 					CE_state->dest_ring->nentries_mask,
1221 					CE_state->dest_ring->sw_index,
1222 					CE_DEST_RING_READ_IDX_GET(scn,
1223 							  CE_state->ctrl_addr));
1224 			}
1225 		}
1226 	}
1227 
1228 	if (CE_state->send_cb &&
1229 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1230 				CE_state)) {
1231 		if (QDF_IS_EPPING_ENABLED(mode) ||
1232 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1233 			goto more_completions;
1234 		} else {
1235 			if (!ce_srng_based(scn) &&
1236 			    !CE_state->batch_intr_supported) {
1237 				hif_err_rl(
1238 					"Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x",
1239 					CE_state->id,
1240 					CE_state->src_ring->nentries_mask,
1241 					CE_state->src_ring->sw_index,
1242 					CE_state->src_ring->hw_index,
1243 					CE_state->src_ring->write_index,
1244 					CE_SRC_RING_READ_IDX_GET(scn,
1245 							 CE_state->ctrl_addr));
1246 			}
1247 		}
1248 	}
1249 
1250 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1251 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1252 			goto more_watermarks;
1253 	}
1254 
1255 	qdf_atomic_set(&CE_state->rx_pending, 0);
1256 }
1257 
1258 #ifdef WLAN_TRACEPOINTS
1259 /**
1260  * ce_trace_tasklet_sched_latency() - Trace ce tasklet scheduling
1261  *  latency
1262  * @ce_state: CE context
1263  *
1264  * Return: None
1265  */
1266 static inline
1267 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
1268 {
1269 	qdf_trace_dp_ce_tasklet_sched_latency(ce_state->id,
1270 					      ce_state->ce_service_start_time -
1271 					      ce_state->ce_tasklet_sched_time);
1272 }
1273 #else
1274 static inline
1275 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
1276 {
1277 }
1278 #endif
1279 
1280 /*
1281  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1282  *
1283  * Invokes registered callbacks for recv_complete,
1284  * send_complete, and watermarks.
1285  *
1286  * Returns: number of messages processed
1287  */
1288 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1289 {
1290 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1291 
1292 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1293 		return CE_state->receive_count;
1294 
1295 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1296 		hif_err("[premature rc=0]");
1297 		return 0; /* no work done */
1298 	}
1299 
1300 	/* Clear force_break flag and re-initialize receive_count to 0 */
1301 	CE_state->receive_count = 0;
1302 	CE_state->force_break = 0;
1303 	CE_state->ce_service_start_time = qdf_time_sched_clock();
1304 	CE_state->ce_service_yield_time =
1305 		CE_state->ce_service_start_time +
1306 		hif_get_ce_service_max_yield_time(
1307 			(struct hif_opaque_softc *)scn);
1308 
1309 	ce_trace_tasklet_sched_latency(CE_state);
1310 
1311 	qdf_spin_lock(&CE_state->ce_index_lock);
1312 
1313 	CE_state->service(scn, CE_id);
1314 
1315 	qdf_spin_unlock(&CE_state->ce_index_lock);
1316 
1317 	if (Q_TARGET_ACCESS_END(scn) < 0)
1318 		hif_err("<--[premature rc=%d]", CE_state->receive_count);
1319 	return CE_state->receive_count;
1320 }
1321 qdf_export_symbol(ce_per_engine_service);
1322 
1323 /*
1324  * Handler for per-engine interrupts on ALL active CEs.
1325  * This is used in cases where the system is sharing a
1326  * single interrupt for all CEs
1327  */
1328 
1329 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1330 {
1331 	int CE_id;
1332 	uint32_t intr_summary;
1333 
1334 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1335 		return;
1336 
1337 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1338 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1339 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1340 
1341 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1342 				qdf_atomic_set(&CE_state->rx_pending, 0);
1343 				ce_per_engine_service(scn, CE_id);
1344 			}
1345 		}
1346 
1347 		Q_TARGET_ACCESS_END(scn);
1348 		return;
1349 	}
1350 
1351 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1352 
1353 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1354 		if (intr_summary & (1 << CE_id))
1355 			intr_summary &= ~(1 << CE_id);
1356 		else
1357 			continue;       /* no intr pending on this CE */
1358 
1359 		ce_per_engine_service(scn, CE_id);
1360 	}
1361 
1362 	Q_TARGET_ACCESS_END(scn);
1363 }
1364 
1365 /* Iterate the CE_state list and disable the compl interrupt
1366  * if it has been registered already.
1367  */
1368 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1369 {
1370 	int CE_id;
1371 
1372 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1373 		return;
1374 
1375 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1376 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1377 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1378 
1379 		/* if the interrupt is currently enabled, disable it */
1380 		if (!CE_state->disable_copy_compl_intr
1381 		    && (CE_state->send_cb || CE_state->recv_cb))
1382 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1383 
1384 		if (CE_state->watermark_cb)
1385 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1386 	}
1387 	Q_TARGET_ACCESS_END(scn);
1388 }
1389 
1390 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1391 {
1392 	int CE_id;
1393 
1394 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1395 		return;
1396 
1397 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1398 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1399 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1400 
1401 		/*
1402 		 * If the CE is supposed to have copy complete interrupts
1403 		 * enabled (i.e. there is a callback registered, and the
1404 		 * "disable" flag is not set), then re-enable the interrupt.
1405 		 */
1406 		if (!CE_state->disable_copy_compl_intr
1407 		    && (CE_state->send_cb || CE_state->recv_cb))
1408 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1409 
1410 		if (CE_state->watermark_cb)
1411 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1412 	}
1413 	Q_TARGET_ACCESS_END(scn);
1414 }
1415 
1416 /**
1417  * ce_send_cb_register(): register completion handler
1418  * @copyeng: CE_state representing the ce we are adding the behavior to
1419  * @fn_ptr: callback that the ce should use when processing tx completions
1420  * @ce_send_context: context to pass back in the callback
1421  * @disable_interrupts: whether completion interrupts should be disabled
1422  *
1423  * Caller should guarantee that no transactions are in progress before
1424  * switching the callback function.
1425  *
1426  * Registers the send context before the fn pointer so that if the cb is valid
1427  * the context should be valid.
1428  *
1429  * Beware that currently this function will enable completion interrupts.
1430  */
1431 void
1432 ce_send_cb_register(struct CE_handle *copyeng,
1433 		    ce_send_cb fn_ptr,
1434 		    void *ce_send_context, int disable_interrupts)
1435 {
1436 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1437 	struct hif_softc *scn;
1438 	struct HIF_CE_state *hif_state;
1439 
1440 	if (!CE_state) {
1441 		hif_err("Error CE state = NULL");
1442 		return;
1443 	}
1444 	scn = CE_state->scn;
1445 	hif_state = HIF_GET_CE_STATE(scn);
1446 	if (!hif_state) {
1447 		hif_err("Error HIF state = NULL");
1448 		return;
1449 	}
1450 	CE_state->send_context = ce_send_context;
1451 	CE_state->send_cb = fn_ptr;
1452 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1453 							disable_interrupts);
1454 }
1455 qdf_export_symbol(ce_send_cb_register);
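
/*
 * Registration sketch (illustrative only; the callback and contexts are
 * hypothetical). The callback parameters mirror the arguments passed by
 * the send-completion paths in this file:
 *
 *	static void my_tx_done(struct CE_handle *copyeng, void *ce_ctx,
 *			       void *transfer_ctx, qdf_dma_addr_t buf,
 *			       unsigned int nbytes, unsigned int id,
 *			       unsigned int sw_idx, unsigned int hw_idx,
 *			       uint32_t toeplitz_hash_result)
 *	{
 *		// complete or free the transfer tracked by transfer_ctx
 *	}
 *
 *	ce_send_cb_register(ce_hdl, my_tx_done, my_pipe_ctx, 0);
 */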
1456 
1457 /**
1458  * ce_recv_cb_register(): register completion handler
1459  * @copyeng: CE_state representing the ce we are adding the behavior to
1460  * @fn_ptr: callback that the ce should use when processing rx completions
1461  * @CE_recv_context: context to pass back in the callback
1462  * @disable_interrupts: whether completion interrupts should be disabled
1463  *
1464  * Registers the recv context before the fn pointer so that if the cb is valid
1465  * the context should be valid.
1466  *
1467  * Caller should guarantee that no transactions are in progress before
1468  * switching the callback function.
1469  */
1470 void
1471 ce_recv_cb_register(struct CE_handle *copyeng,
1472 		    CE_recv_cb fn_ptr,
1473 		    void *CE_recv_context, int disable_interrupts)
1474 {
1475 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1476 	struct hif_softc *scn;
1477 	struct HIF_CE_state *hif_state;
1478 
1479 	if (!CE_state) {
1480 		hif_err("ERROR CE state = NULL");
1481 		return;
1482 	}
1483 	scn = CE_state->scn;
1484 	hif_state = HIF_GET_CE_STATE(scn);
1485 	if (!hif_state) {
1486 		hif_err("Error HIF state = NULL");
1487 		return;
1488 	}
1489 	CE_state->recv_context = CE_recv_context;
1490 	CE_state->recv_cb = fn_ptr;
1491 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1492 							disable_interrupts);
1493 }
1494 qdf_export_symbol(ce_recv_cb_register);
1495 
1496 /**
1497  * ce_watermark_cb_register(): register completion handler
1498  * @copyeng: CE_state representing the ce we are adding the behavior to
1499  * @fn_ptr: callback that the ce should use when processing watermark events
1500  * @CE_wm_context: context to pass back in the callback
1501  *
1502  * Caller should guarantee that no watermark events are being processed before
1503  * switching the callback function.
1504  */
1505 void
1506 ce_watermark_cb_register(struct CE_handle *copyeng,
1507 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1508 {
1509 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1510 	struct hif_softc *scn = CE_state->scn;
1511 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1512 
1513 	CE_state->watermark_cb = fn_ptr;
1514 	CE_state->wm_context = CE_wm_context;
1515 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1516 							0);
1517 	if (fn_ptr)
1518 		CE_state->misc_cbs = 1;
1519 }
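
/*
 * Watermark usage sketch (illustrative only; the callback, context and
 * alert levels are hypothetical). A caller registers the callback and
 * then programs the low/high alert levels on the source ring; the flags
 * passed to the callback are derived from the hardware interrupt status
 * in ce_engine_service_reg():
 *
 *	static void my_wm_cb(struct CE_handle *copyeng, void *wm_ctx,
 *			     unsigned int flags)
 *	{
 *	}
 *
 *	ce_watermark_cb_register(ce_hdl, my_wm_cb, my_wm_ctx);
 *	ce_send_watermarks_set(ce_hdl, 2, nentries - 2);
 */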
1520 
1521 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
1522 void
1523 ce_register_custom_cb(struct CE_handle *copyeng, void (*custom_cb)(void *),
1524 		      void *custom_cb_context)
1525 {
1526 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1527 
1528 	CE_state->custom_cb = custom_cb;
1529 	CE_state->custom_cb_context = custom_cb_context;
1530 	qdf_atomic_init(&CE_state->custom_cb_pending);
1531 }
1532 
1533 void
1534 ce_unregister_custom_cb(struct CE_handle *copyeng)
1535 {
1536 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1537 
1538 	qdf_assert_always(!qdf_atomic_read(&CE_state->custom_cb_pending));
1539 	CE_state->custom_cb = NULL;
1540 	CE_state->custom_cb_context = NULL;
1541 }
1542 
1543 void
1544 ce_enable_custom_cb(struct CE_handle *copyeng)
1545 {
1546 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1547 	int32_t custom_cb_pending;
1548 
1549 	qdf_assert_always(CE_state->custom_cb);
1550 	qdf_assert_always(CE_state->custom_cb_context);
1551 
1552 	custom_cb_pending = qdf_atomic_inc_return(&CE_state->custom_cb_pending);
1553 	qdf_assert_always(custom_cb_pending >= 1);
1554 }
1555 
1556 void
1557 ce_disable_custom_cb(struct CE_handle *copyeng)
1558 {
1559 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1560 
1561 	qdf_assert_always(CE_state->custom_cb);
1562 	qdf_assert_always(CE_state->custom_cb_context);
1563 
1564 	qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending);
1565 }
1566 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
1567 
1568 bool ce_get_rx_pending(struct hif_softc *scn)
1569 {
1570 	int CE_id;
1571 
1572 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1573 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1574 
1575 		if (qdf_atomic_read(&CE_state->rx_pending))
1576 			return true;
1577 	}
1578 
1579 	return false;
1580 }
1581 
1582 /**
1583  * ce_check_rx_pending() - ce_check_rx_pending
1584  * @CE_state: context of the copy engine to check
1585  *
1586  * Return: true if per_engine_service
1587  *	didn't process all the rx descriptors.
1588  */
1589 bool ce_check_rx_pending(struct CE_state *CE_state)
1590 {
1591 	if (qdf_atomic_read(&CE_state->rx_pending))
1592 		return true;
1593 	else
1594 		return false;
1595 }
1596 qdf_export_symbol(ce_check_rx_pending);
1597 
1598 #ifdef IPA_OFFLOAD
1599 #ifdef QCN7605_SUPPORT
1600 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1601 {
1602 	u_int32_t ctrl_addr = CE_state->ctrl_addr;
1603 	struct hif_softc *scn = CE_state->scn;
1604 	qdf_dma_addr_t wr_index_addr;
1605 
1606 	wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
1607 	return wr_index_addr;
1608 }
1609 #else
1610 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1611 {
1612 	struct hif_softc *scn = CE_state->scn;
1613 	qdf_dma_addr_t wr_index_addr;
1614 
1615 	wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
1616 			SR_WR_INDEX_ADDRESS;
1617 	return wr_index_addr;
1618 }
1619 #endif
1620 
1621 /**
1622  * ce_ipa_get_resource() - get uc resource on copyengine
1623  * @ce: copyengine context
1624  * @ce_sr: copyengine source ring resource info
1625  * @ce_sr_ring_size: copyengine source ring size
1626  * @ce_reg_paddr: copyengine register physical address
1627  *
1628  * Copy engine should release resource to micro controller
1629  * Micro controller needs
1630  *  - Copy engine source descriptor base address
1631  *  - Copy engine source descriptor size
1632  *  - PCI BAR address to access copy engine register
1633  *
1634  * Return: None
1635  */
1636 void ce_ipa_get_resource(struct CE_handle *ce,
1637 			 qdf_shared_mem_t **ce_sr,
1638 			 uint32_t *ce_sr_ring_size,
1639 			 qdf_dma_addr_t *ce_reg_paddr)
1640 {
1641 	struct CE_state *CE_state = (struct CE_state *)ce;
1642 	uint32_t ring_loop;
1643 	struct CE_src_desc *ce_desc;
1644 	qdf_dma_addr_t phy_mem_base;
1645 	struct hif_softc *scn = CE_state->scn;
1646 
1647 	if (CE_UNUSED == CE_state->state) {
1648 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1649 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1650 		*ce_sr_ring_size = 0;
1651 		return;
1652 	}
1653 
1654 	/* Update default value for descriptor */
1655 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1656 	     ring_loop++) {
1657 		ce_desc = (struct CE_src_desc *)
1658 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1659 			   ring_loop * (sizeof(struct CE_src_desc)));
1660 		CE_IPA_RING_INIT(ce_desc);
1661 	}
1662 
1663 	/* Get BAR address */
1664 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1665 
1666 	*ce_sr = CE_state->scn->ipa_ce_ring;
1667 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1668 		sizeof(struct CE_src_desc));
1669 	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);
1670 
1671 }
1672 
1673 #endif /* IPA_OFFLOAD */
1674 
1675 #ifdef HIF_CE_DEBUG_DATA_BUF
1676 /**
1677  * hif_dump_desc_data_buf() - dump the data of a ce descriptor event
1678  * @buf: buffer to copy to
1679  * @pos: Current position till which the buf is filled
1680  * @data: Data to be copied
1681  * @data_len: Length of the data to be copied
1682  */
1683 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1684 					uint8_t *data, uint32_t data_len)
1685 {
1686 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1687 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1688 
1689 	if ((data_len > 0) && data) {
1690 		if (data_len < 16) {
1691 			hex_dump_to_buffer(data,
1692 						CE_DEBUG_DATA_PER_ROW,
1693 						16, 1, buf + pos,
1694 						(ssize_t)PAGE_SIZE - pos,
1695 						false);
1696 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1697 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1698 		} else {
1699 			uint32_t rows = (data_len / 16) + 1;
1700 			uint32_t row = 0;
1701 
1702 			for (row = 0; row < rows; row++) {
1703 				hex_dump_to_buffer(data + (row * 16),
1704 							CE_DEBUG_DATA_PER_ROW,
1705 							16, 1, buf + pos,
1706 							(ssize_t)PAGE_SIZE
1707 							- pos, false);
1708 				pos +=
1709 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1710 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1711 						"\n");
1712 			}
1713 		}
1714 	}
1715 
1716 	return pos;
1717 }
1718 #endif
1719 
1720 /*
1721  * Note: for MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) check needs
1722  * to be kept here
1723  */
1724 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1725 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1726 {
1727 	switch (type) {
1728 	case HIF_RX_DESC_POST:
1729 		return "HIF_RX_DESC_POST";
1730 	case HIF_RX_DESC_COMPLETION:
1731 		return "HIF_RX_DESC_COMPLETION";
1732 	case HIF_TX_GATHER_DESC_POST:
1733 		return "HIF_TX_GATHER_DESC_POST";
1734 	case HIF_TX_DESC_POST:
1735 		return "HIF_TX_DESC_POST";
1736 	case HIF_TX_DESC_SOFTWARE_POST:
1737 		return "HIF_TX_DESC_SOFTWARE_POST";
1738 	case HIF_TX_DESC_COMPLETION:
1739 		return "HIF_TX_DESC_COMPLETION";
1740 	case FAST_RX_WRITE_INDEX_UPDATE:
1741 		return "FAST_RX_WRITE_INDEX_UPDATE";
1742 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1743 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1744 	case FAST_TX_WRITE_INDEX_UPDATE:
1745 		return "FAST_TX_WRITE_INDEX_UPDATE";
1746 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1747 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1748 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1749 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1750 	case RESUME_WRITE_INDEX_UPDATE:
1751 		return "RESUME_WRITE_INDEX_UPDATE";
1752 	case HIF_IRQ_EVENT:
1753 		return "HIF_IRQ_EVENT";
1754 	case HIF_CE_TASKLET_ENTRY:
1755 		return "HIF_CE_TASKLET_ENTRY";
1756 	case HIF_CE_TASKLET_RESCHEDULE:
1757 		return "HIF_CE_TASKLET_RESCHEDULE";
1758 	case HIF_CE_TASKLET_EXIT:
1759 		return "HIF_CE_TASKLET_EXIT";
1760 	case HIF_CE_REAP_ENTRY:
1761 		return "HIF_CE_REAP_ENTRY";
1762 	case HIF_CE_REAP_EXIT:
1763 		return "HIF_CE_REAP_EXIT";
1764 	case NAPI_SCHEDULE:
1765 		return "NAPI_SCHEDULE";
1766 	case NAPI_POLL_ENTER:
1767 		return "NAPI_POLL_ENTER";
1768 	case NAPI_COMPLETE:
1769 		return "NAPI_COMPLETE";
1770 	case NAPI_POLL_EXIT:
1771 		return "NAPI_POLL_EXIT";
1772 	case HIF_RX_NBUF_ALLOC_FAILURE:
1773 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1774 	case HIF_RX_NBUF_MAP_FAILURE:
1775 		return "HIF_RX_NBUF_MAP_FAILURE";
1776 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1777 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1778 	default:
1779 		return "invalid";
1780 	}
1781 }
1782 
1783 /**
1784  * hif_dump_desc_event() - dump the selected CE descriptor history event
1785  * @scn: HIF context
1786  * @buf: Buffer into which the event is dumped
 *
 * Return: number of bytes written to @buf, or -EINVAL on error
1787  */
1788 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1789 {
1790 	struct hif_ce_desc_event *event;
1791 	uint64_t secs, usecs;
1792 	ssize_t len = 0;
1793 	struct ce_desc_hist *ce_hist = NULL;
1794 	struct hif_ce_desc_event *hist_ev = NULL;
1795 
1796 	if (!scn)
1797 		return -EINVAL;
1798 
1799 	ce_hist = &scn->hif_ce_desc_hist;
1800 
1801 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1802 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1803 		qdf_print("Invalid values");
1804 		return -EINVAL;
1805 	}
1806 
1807 	hist_ev =
1808 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1809 
1810 	if (!hist_ev) {
1811 		qdf_print("Low Memory");
1812 		return -EINVAL;
1813 	}
1814 
1815 	event = &hist_ev[ce_hist->hist_index];
1816 
1817 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1818 
1819 	len += snprintf(buf, PAGE_SIZE - len,
1820 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1821 			secs, usecs, ce_hist->hist_id,
1822 			ce_event_type_to_str(event->type),
1823 			event->index, event->memory);
1824 #ifdef HIF_CE_DEBUG_DATA_BUF
1825 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
1826 			event->actual_data_len);
1827 #endif
1828 
1829 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1830 
1831 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1832 				16, 1, buf + len,
1833 				(ssize_t)PAGE_SIZE - len, false);
1834 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1835 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1836 
1837 #ifdef HIF_CE_DEBUG_DATA_BUF
1838 	if (ce_hist->data_enable[ce_hist->hist_id])
1839 		len = hif_dump_desc_data_buf(buf, len, event->data,
1840 						(event->actual_data_len <
1841 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1842 						event->actual_data_len :
1843 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1844 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1845 
1846 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1847 
1848 	return len;
1849 }
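/*
 * Illustrative consumer sketch (editorial example, not part of the driver):
 * hif_dump_desc_event() fills a PAGE_SIZE buffer such as the one a sysfs
 * show handler receives. The handler below and the helper dev_to_hif_softc()
 * are hypothetical, shown only to indicate the calling convention.
 *
 *	static ssize_t ce_desc_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		struct hif_softc *scn = dev_to_hif_softc(dev);
 *
 *		return hif_dump_desc_event(scn, buf);
 *	}
 */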
1850 
1851 /*
1852  * hif_input_desc_trace_buf_index() -
1853  * API to set the CE id and CE debug history index to be dumped
1854  *
1855  * @scn: HIF context
1856  * @buf: user input: "<CE id> <history index>"
1857  * @size: length of the user input
1858  *
1859  * Return: @size on success, -EINVAL on invalid input
1860  */
1861 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1862 					const char *buf, size_t size)
1863 {
1864 	struct ce_desc_hist *ce_hist = NULL;
1865 
1866 	if (!scn)
1867 		return -EINVAL;
1868 
1869 	ce_hist = &scn->hif_ce_desc_hist;
1870 
1871 	if (!size) {
1872 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1873 		return -EINVAL;
1874 	}
1875 
1876 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1877 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1878 		qdf_nofl_err("%s: Invalid input value.", __func__);
1879 		return -EINVAL;
1880 	}
1881 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1882 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1883 		qdf_print("Invalid values");
1884 		return -EINVAL;
1885 	}
1886 
1887 	return size;
1888 }
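/*
 * Illustrative usage (assumption: this routine is wired up as a sysfs store
 * handler; the node name below is hypothetical):
 *
 *	# select CE id 5, history index 120 for the next descriptor dump
 *	echo "5 120" > /sys/.../ce_desc_trace_buf_index
 */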
1889 
1890 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1891 
1892 #ifdef HIF_CE_DEBUG_DATA_BUF
1893 /*
1894  * hif_ce_en_desc_hist() -
1895  * API to enable/disable recording of CE descriptor data history
1896  *
1897  * @scn: HIF context
1898  * @buf: user input: "<CE id> <1/0>"
1899  * @size: length of the user input
1900  *
1901  * Starts (1) or stops (0) recording the descriptor data history of a CE
1902  *
1903  * Return: @size on success, -EINVAL on invalid input
1904  */
1905 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1906 {
1907 	struct ce_desc_hist *ce_hist = NULL;
1908 	uint32_t cfg = 0;
1909 	uint32_t ce_id = 0;
1910 
1911 	if (!scn)
1912 		return -EINVAL;
1913 
1914 	ce_hist = &scn->hif_ce_desc_hist;
1915 
1916 	if (!size) {
1917 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1918 		return -EINVAL;
1919 	}
1920 
1921 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1922 		   (unsigned int *)&cfg) != 2) {
1923 		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
1924 			     __func__);
1925 		return -EINVAL;
1926 	}
1927 	if (ce_id >= CE_COUNT_MAX) {
1928 		qdf_print("Invalid value CE Id");
1929 		return -EINVAL;
1930 	}
1931 
1932 	if (cfg > 1) {
1933 		qdf_print("Invalid values: enter 0 or 1");
1934 		return -EINVAL;
1935 	}
1936 
1937 	if (!ce_hist->hist_ev[ce_id])
1938 		return -EINVAL;
1939 
1940 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1941 	if (cfg == 1) {
1942 		if (ce_hist->data_enable[ce_id] == 1) {
1943 			qdf_debug("Already Enabled");
1944 		} else {
1945 			if (alloc_mem_ce_debug_hist_data(scn, ce_id) ==
1946 			    QDF_STATUS_E_NOMEM) {
1947 				ce_hist->data_enable[ce_id] = 0;
1948 				qdf_err("%s:Memory Alloc failed", __func__);
1949 			} else {
1950 				ce_hist->data_enable[ce_id] = 1;
			}
1951 		}
1952 	} else if (cfg == 0) {
1953 		if (ce_hist->data_enable[ce_id] == 0) {
1954 			qdf_debug("Already Disabled");
1955 		} else {
1956 			ce_hist->data_enable[ce_id] = 0;
1957 			free_mem_ce_debug_hist_data(scn, ce_id);
1958 		}
1959 	}
1960 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1961 
1962 	return size;
1963 }
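/*
 * Illustrative usage (assumption: this routine is wired up as a sysfs store
 * handler; the node name below is hypothetical):
 *
 *	# start recording descriptor data for CE 2
 *	echo "2 1" > /sys/.../ce_desc_data_hist_en
 *	# stop recording and free the data history buffers for CE 2
 *	echo "2 0" > /sys/.../ce_desc_data_hist_en
 */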
1964 
1965 /*
1966  * hif_disp_ce_enable_desc_data_hist() -
1967  * API to display the data_enable flag of each CE
1968  *
1969  * @scn: HIF context
1970  * @buf: buffer into which the per-CE flags are printed,
1971  *       one "CE<n>: <0/1>" line per copy engine
1972  *
1973  * Return: total length copied
1974  */
1975 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1976 {
1977 	ssize_t len = 0;
1978 	uint32_t ce_id = 0;
1979 	struct ce_desc_hist *ce_hist = NULL;
1980 
1981 	if (!scn)
1982 		return -EINVAL;
1983 
1984 	ce_hist = &scn->hif_ce_desc_hist;
1985 
1986 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1987 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1988 				ce_id, ce_hist->data_enable[ce_id]);
1989 	}
1990 
1991 	return len;
1992 }
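/*
 * Example output produced by the loop above (values are illustrative):
 *
 *	 CE0: 0
 *	 CE1: 0
 *	 CE2: 1
 *	 CE3: 0
 *	 ...
 */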
1993 #endif /* HIF_CE_DEBUG_DATA_BUF */
1994 
1995 #ifdef OL_ATH_SMART_LOGGING
1996 #define GUARD_SPACE 10
1997 #define LOG_ID_SZ 4
1998 /*
1999  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
2000  * @src_ring: SRC ring state
2001  * @buf_cur: Current pointer in ring buffer
2002  * @buf_init: Start of the ring buffer
2003  * @buf_sz: Size of the ring buffer
2004  * @skb_sz: Max size of the SKB buffer to be copied
2005  *
2006  * Dumps all the CE SRC ring descriptors and the buffers they point to into
2007  * the given buf; skb_sz is the max SKB size to be copied
2008  *
2009  * Return: Current pointer in ring buffer
2010  */
2011 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
2012 				    uint8_t *buf_cur, uint8_t *buf_init,
2013 				    uint32_t buf_sz, uint32_t skb_sz)
2014 {
2015 	struct CE_src_desc *src_ring_base;
2016 	uint32_t len, entry;
2017 	struct CE_src_desc  *src_desc;
2018 	qdf_nbuf_t nbuf;
2019 	uint32_t available_buf;
2020 
2021 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
2022 	len = sizeof(struct CE_ring_state);
2023 	available_buf = buf_sz - (buf_cur - buf_init);
2024 	if (available_buf < (len + GUARD_SPACE)) {
2025 		buf_cur = buf_init;
2026 	}
2027 
2028 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
2029 	buf_cur += sizeof(struct CE_ring_state);
2030 
2031 	for (entry = 0; entry < src_ring->nentries; entry++) {
2032 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
2033 		nbuf = src_ring->per_transfer_context[entry];
2034 		if (nbuf) {
2035 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
2036 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2037 
2038 			len = sizeof(struct CE_src_desc) + skb_cp_len
2039 				+ LOG_ID_SZ + sizeof(skb_cp_len);
2040 			available_buf = buf_sz - (buf_cur - buf_init);
2041 			if (available_buf < (len + GUARD_SPACE)) {
2042 				buf_cur = buf_init;
2043 			}
2044 			qdf_mem_copy(buf_cur, src_desc,
2045 				     sizeof(struct CE_src_desc));
2046 			buf_cur += sizeof(struct CE_src_desc);
2047 
2048 			available_buf = buf_sz - (buf_cur - buf_init);
2049 		buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
2050 						skb_cp_len);
2051 
2052 			if (skb_cp_len) {
2053 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2054 					     skb_cp_len);
2055 				buf_cur += skb_cp_len;
2056 			}
2057 		} else {
2058 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
2059 			available_buf = buf_sz - (buf_cur - buf_init);
2060 			if (available_buf < (len + GUARD_SPACE)) {
2061 				buf_cur = buf_init;
2062 			}
2063 			qdf_mem_copy(buf_cur, src_desc,
2064 				     sizeof(struct CE_src_desc));
2065 			buf_cur += sizeof(struct CE_src_desc);
2066 			available_buf = buf_sz - (buf_cur - buf_init);
2067 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
2068 		}
2069 	}
2070 
2071 	return buf_cur;
2072 }
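/*
 * Layout of the dump produced above (informational note derived from the
 * code):
 *
 *	[struct CE_ring_state]
 *	entry with an SKB:    [struct CE_src_desc]["SKB<len>"][<len> bytes]
 *	entry without an SKB: [struct CE_src_desc]["NUL"]
 *
 * Whenever fewer than len + GUARD_SPACE bytes remain in the ring buffer,
 * buf_cur wraps back to buf_init and the oldest records are overwritten.
 * hif_log_dest_ce_dump() below emits the same layout for DEST rings.
 */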
2073 
2074 /*
2075  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
2076  * @dest_ring: DEST ring state
2077  * @buf_cur: Current pointer in ring buffer
2078  * @buf_init: Start of the ring buffer
2079  * @buf_sz: Size of the ring buffer
2080  * @skb_sz: Max size of the SKB buffer to be copied
2081  *
2082  * Dumps all the CE DEST ring descriptors and the buffers they point to into
2083  * the given buf; skb_sz is the max SKB size to be copied
2084  *
2085  * Return: Current pointer in ring buffer
2086  */
2087 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
2088 				     uint8_t *buf_cur, uint8_t *buf_init,
2089 				     uint32_t buf_sz, uint32_t skb_sz)
2090 {
2091 	struct CE_dest_desc *dest_ring_base;
2092 	uint32_t len, entry;
2093 	struct CE_dest_desc  *dest_desc;
2094 	qdf_nbuf_t nbuf;
2095 	uint32_t available_buf;
2096 
2097 	dest_ring_base =
2098 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
2099 
2100 	len = sizeof(struct CE_ring_state);
2101 	available_buf = buf_sz - (buf_cur - buf_init);
2102 	if (available_buf < (len + GUARD_SPACE)) {
2103 		buf_cur = buf_init;
2104 	}
2105 
2106 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
2107 	buf_cur += sizeof(struct CE_ring_state);
2108 
2109 	for (entry = 0; entry < dest_ring->nentries; entry++) {
2110 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
2111 
2112 		nbuf = dest_ring->per_transfer_context[entry];
2113 		if (nbuf) {
2114 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
2115 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2116 
2117 			len = sizeof(struct CE_dest_desc) + skb_cp_len
2118 				+ LOG_ID_SZ + sizeof(skb_cp_len);
2119 
2120 			available_buf = buf_sz - (buf_cur - buf_init);
2121 			if (available_buf < (len + GUARD_SPACE)) {
2122 				buf_cur = buf_init;
2123 			}
2124 
2125 			qdf_mem_copy(buf_cur, dest_desc,
2126 				     sizeof(struct CE_dest_desc));
2127 			buf_cur += sizeof(struct CE_dest_desc);
2128 			available_buf = buf_sz - (buf_cur - buf_init);
2129 		buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
2130 						skb_cp_len);
2131 			if (skb_cp_len) {
2132 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2133 					     skb_cp_len);
2134 				buf_cur += skb_cp_len;
2135 			}
2136 		} else {
2137 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
2138 			available_buf = buf_sz - (buf_cur - buf_init);
2139 			if (available_buf < (len + GUARD_SPACE)) {
2140 				buf_cur = buf_init;
2141 			}
2142 			qdf_mem_copy(buf_cur, dest_desc,
2143 				     sizeof(struct CE_dest_desc));
2144 			buf_cur += sizeof(struct CE_dest_desc);
2145 			available_buf = buf_sz - (buf_cur - buf_init);
2146 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
2147 		}
2148 	}
2149 	return buf_cur;
2150 }
2151 
2152 /**
2153  * hif_log_dump_ce() - Copy a CE SRC or DEST ring to buf
2154  * @scn: HIF context
2155  * @buf_cur: Current pointer in ring buffer
2156  * @buf_init: Start of the ring buffer
2157  * @buf_sz: Size of the ring buffer
2158  * @ce: Copy engine id to be dumped
2159  * @skb_sz: Max size of the SKB buffer to be copied
2160  *
2161  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2162  * and the buffers they point to into the given buf
 *
 * Return: Current pointer in ring buffer
2163  */
2164 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2165 			 uint8_t *buf_init, uint32_t buf_sz,
2166 			 uint32_t ce, uint32_t skb_sz)
2167 {
2168 	struct CE_state *ce_state;
2169 	struct CE_ring_state *src_ring;
2170 	struct CE_ring_state *dest_ring;
2171 
2172 	ce_state = scn->ce_id_to_state[ce];
2173 	src_ring = ce_state->src_ring;
2174 	dest_ring = ce_state->dest_ring;
2175 
2176 	if (src_ring) {
2177 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
2178 					      buf_init, buf_sz, skb_sz);
2179 	} else if (dest_ring) {
2180 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
2181 					       buf_init, buf_sz, skb_sz);
2182 	}
2183 
2184 	return buf_cur;
2185 }
2186 
2187 qdf_export_symbol(hif_log_dump_ce);
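/*
 * Illustrative caller sketch (editorial example, not part of the driver):
 * a smart-logging collector walking every attached CE. The log_buf and
 * log_buf_sz variables and the 64-byte SKB snapshot size are assumptions
 * made for the example.
 *
 *	uint8_t *cur = log_buf;
 *	uint32_t ce_id;
 *
 *	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
 *		if (!scn->ce_id_to_state[ce_id])
 *			continue;
 *		cur = hif_log_dump_ce(scn, cur, log_buf,
 *				      log_buf_sz, ce_id, 64);
 *	}
 */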
2188 #endif /* OL_ATH_SMART_LOGGING */
2189 
2190