xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hif.h"
21 #include "hif_io32.h"
22 #include "ce_api.h"
23 #include "ce_main.h"
24 #include "ce_internal.h"
25 #include "ce_reg.h"
26 #include "qdf_lock.h"
27 #include "regtable.h"
28 #include "hif_main.h"
29 #include "hif_debug.h"
30 #include "hif_napi.h"
31 #include "qdf_module.h"
32 #include <qdf_tracepoint.h>
33 
34 #ifdef IPA_OFFLOAD
35 #ifdef QCA_WIFI_3_0
36 #define CE_IPA_RING_INIT(ce_desc)                       \
37 	do {                                            \
38 		ce_desc->gather = 0;                    \
39 		ce_desc->enable_11h = 0;                \
40 		ce_desc->meta_data_low = 0;             \
41 		ce_desc->packet_result_offset = 64;     \
42 		ce_desc->toeplitz_hash_enable = 0;      \
43 		ce_desc->addr_y_search_disable = 0;     \
44 		ce_desc->addr_x_search_disable = 0;     \
45 		ce_desc->misc_int_disable = 0;          \
46 		ce_desc->target_int_disable = 0;        \
47 		ce_desc->host_int_disable = 0;          \
48 		ce_desc->dest_byte_swap = 0;            \
49 		ce_desc->byte_swap = 0;                 \
50 		ce_desc->type = 2;                      \
51 		ce_desc->tx_classify = 1;               \
52 		ce_desc->buffer_addr_hi = 0;            \
53 		ce_desc->meta_data = 0;                 \
54 		ce_desc->nbytes = 128;                  \
55 	} while (0)
56 #else
57 #define CE_IPA_RING_INIT(ce_desc)                       \
58 	do {                                            \
59 		ce_desc->byte_swap = 0;                 \
60 		ce_desc->nbytes = 60;                   \
61 		ce_desc->gather = 0;                    \
62 	} while (0)
63 #endif /* QCA_WIFI_3_0 */
64 #endif /* IPA_OFFLOAD */
65 
66 static int war1_allow_sleep;
67 /* io32 write workaround */
68 static int hif_ce_war1;
69 
70 /**
71  * hif_ce_war_disable() - disable the CE WAR globally
72  */
73 void hif_ce_war_disable(void)
74 {
75 	hif_ce_war1 = 0;
76 }
77 
78 /**
79  * hif_ce_war_enable() - enable the CE WAR globally
80  */
81 void hif_ce_war_enable(void)
82 {
83 	hif_ce_war1 = 1;
84 }
85 
86 /*
87  * Note: For MCL, HIF_CONFIG_SLUB_DEBUG_ON needs to be defined for the
88  * definitions below to be used
89  */
90 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
91 
92 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
93 #define CE_DEBUG_DATA_PER_ROW 16
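
/*
 * CE_DEBUG_PRINT_BUF_SIZE(x) sizes the text produced by
 * hex_dump_to_buffer(): each byte is printed as two hex digits followed
 * by a separating space, with no trailing space, i.e. (3 * x) - 1
 * characters. For example, one CE_DEBUG_DATA_PER_ROW (16 byte) row
 * occupies (16 * 3) - 1 = 47 characters.
 */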
94 
95 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
96 
97 int get_next_record_index(qdf_atomic_t *table_index, int array_size)
98 {
99 	int record_index = qdf_atomic_inc_return(table_index);
100 
101 	if (record_index == array_size)
102 		qdf_atomic_sub(array_size, table_index);
103 
104 	while (record_index >= array_size)
105 		record_index -= array_size;
106 
107 	return record_index;
108 }
109 
110 qdf_export_symbol(get_next_record_index);
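
/*
 * Illustrative sketch (not part of the driver) of how the circular
 * history index above is used: the atomic counter is incremented on
 * every call and the returned slot is folded back into [0, array_size).
 * Assuming the counter starts at zero, the first call returns slot 1,
 * and once the counter reaches the array size it is pulled back so the
 * slot stays in range.
 *
 *	qdf_atomic_t table_index;
 *	int slot;
 *
 *	qdf_atomic_init(&table_index);
 *	slot = get_next_record_index(&table_index, HIF_CE_HISTORY_MAX);
 */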
111 
112 #ifdef HIF_CE_DEBUG_DATA_BUF
113 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
114 {
115 	uint8_t *data = NULL;
116 
117 	if (!event->data) {
118 		hif_err_rl("No ce debug memory allocated");
119 		return;
120 	}
121 
122 	if (event->memory && len > 0)
123 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
124 
125 	event->actual_data_len = 0;
126 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
127 
128 	if (data && len > 0) {
129 		qdf_mem_copy(event->data, data,
130 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
131 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
132 		event->actual_data_len = len;
133 	}
134 }
135 
136 qdf_export_symbol(hif_ce_desc_data_record);
137 
138 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
139 {
140 	qdf_mem_zero(event,
141 		     offsetof(struct hif_ce_desc_event, data));
142 }
143 
144 qdf_export_symbol(hif_clear_ce_desc_debug_data);
145 #else
146 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
147 {
148 	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
149 }
150 
151 qdf_export_symbol(hif_clear_ce_desc_debug_data);
152 #endif /* HIF_CE_DEBUG_DATA_BUF */
153 
154 #if defined(HIF_RECORD_PADDR)
155 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
156 				 struct hif_ce_desc_event *event,
157 				 qdf_nbuf_t memory)
158 {
159 	if (memory) {
160 		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
161 		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
162 					scn->qdf_dev,
163 					event->dma_addr);
164 
165 		event->virt_to_phy =
166 			virt_to_phys(qdf_nbuf_data(memory));
167 	}
168 }
169 #endif /* HIF_RECORD_PADDR */
170 
171 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx)
172 {
173 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
174 	struct ce_desc_hist *ce_hist;
175 	struct latest_evt_history *evt;
176 	int i;
177 
178 	if (!scn)
179 		return;
180 
181 	ce_hist = &scn->hif_ce_desc_hist;
182 
183 	for (i = 0; i < HIF_CE_MAX_LATEST_HIST; i++) {
184 		if (!ce_hist->enable[i + HIF_CE_MAX_LATEST_HIST])
185 			continue;
186 
187 		evt = &ce_hist->latest_evt[i];
188 		hif_info_high("CE_id:%d cpu_id:%d irq_entry:0x%llx tasklet_entry:0x%llx tasklet_resched:0x%llx tasklet_exit:0x%llx ce_work:0x%llx hp:%x tp:%x",
189 			      (i + HIF_CE_MAX_LATEST_HIST), evt->cpu_id,
190 			      evt->irq_entry_ts, evt->bh_entry_ts,
191 			      evt->bh_resched_ts, evt->bh_exit_ts,
192 			      evt->bh_work_ts, evt->ring_hp, evt->ring_tp);
193 	}
194 }
195 
196 void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
197 			   uint8_t type,
198 			   int ce_id, uint64_t time,
199 			   uint32_t hp, uint32_t tp)
200 {
201 	struct latest_evt_history *latest_evt;
202 
203 	if (ce_id != 2 && ce_id != 3)
204 		return;
205 
206 	latest_evt = &ce_hist->latest_evt[ce_id - HIF_CE_MAX_LATEST_HIST];
207 
208 	switch (type) {
209 	case HIF_IRQ_EVENT:
210 		latest_evt->irq_entry_ts = time;
211 		latest_evt->cpu_id = qdf_get_cpu();
212 		break;
213 	case HIF_CE_TASKLET_ENTRY:
214 		latest_evt->bh_entry_ts = time;
215 		break;
216 	case HIF_CE_TASKLET_RESCHEDULE:
217 		latest_evt->bh_resched_ts = time;
218 		break;
219 	case HIF_CE_TASKLET_EXIT:
220 		latest_evt->bh_exit_ts = time;
221 		break;
222 	case HIF_TX_DESC_COMPLETION:
223 	case HIF_CE_DEST_STATUS_RING_REAP:
224 		latest_evt->bh_work_ts = time;
225 		latest_evt->ring_hp = hp;
226 		latest_evt->ring_tp = tp;
227 		break;
228 	default:
229 		break;
230 	}
231 }
232 
233 /**
234  * hif_record_ce_desc_event() - record ce descriptor events
235  * @scn: hif_softc
236  * @ce_id: which ce is the event occurring on
237  * @type: what happened
238  * @descriptor: pointer to the descriptor posted/completed
239  * @memory: virtual address of buffer related to the descriptor
240  * @index: index that the descriptor was/will be at.
 * @len: length of the data associated with the descriptor
241  */
242 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
243 				enum hif_ce_event_type type,
244 				union ce_desc *descriptor,
245 				void *memory, int index,
246 				int len)
247 {
248 	int record_index;
249 	struct hif_ce_desc_event *event;
250 
251 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
252 	struct hif_ce_desc_event *hist_ev = NULL;
253 
254 	if (ce_id < CE_COUNT_MAX)
255 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
256 	else
257 		return;
258 
259 	if (ce_id >= CE_COUNT_MAX)
260 		return;
261 
262 	if (!ce_hist->enable[ce_id])
263 		return;
264 
265 	if (!hist_ev)
266 		return;
267 
268 	record_index = get_next_record_index(
269 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
270 
271 	event = &hist_ev[record_index];
272 
273 	hif_clear_ce_desc_debug_data(event);
274 
275 	event->type = type;
276 	event->time = qdf_get_log_timestamp();
277 	event->cpu_id = qdf_get_cpu();
278 
279 	if (descriptor)
280 		qdf_mem_copy(&event->descriptor, descriptor,
281 			     sizeof(union ce_desc));
282 
283 	event->memory = memory;
284 	event->index = index;
285 
286 	if (event->type == HIF_RX_DESC_POST ||
287 	    event->type == HIF_RX_DESC_COMPLETION)
288 		hif_ce_desc_record_rx_paddr(scn, event, memory);
289 
290 	if (ce_hist->data_enable[ce_id])
291 		hif_ce_desc_data_record(event, len);
292 
293 	hif_record_latest_evt(ce_hist, type, ce_id, event->time, 0, 0);
294 }
295 qdf_export_symbol(hif_record_ce_desc_event);
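
/*
 * Illustrative sketch of how callers in this file record events: data
 * path posts pass the descriptor, the related buffer and its length,
 * while pure state markers such as reap entry/exit pass NULL/0 (see
 * ce_send_single() and ce_per_engine_servicereap() below).
 *
 *	hif_record_ce_desc_event(scn, ce_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc, msdu,
 *				 write_index, len);
 *	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
 *				 NULL, NULL, 0, 0);
 */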
296 
297 /**
298  * ce_init_ce_desc_event_log() - initialize the ce event log
 * @scn: HIF context
299  * @ce_id: copy engine id for which we are initializing the log
300  * @size: size of array to dedicate
301  *
302  * Currently the passed size is ignored in favor of a precompiled value.
303  */
304 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
305 {
306 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
307 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
308 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
309 }
310 
311 /**
312  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: HIF context
313  * @ce_id: copy engine id for which we are deinitializing the log
314  *
315  */
316 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
317 {
318 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
319 
320 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
321 }
322 
323 #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
324 void hif_record_ce_desc_event(struct hif_softc *scn,
325 		int ce_id, enum hif_ce_event_type type,
326 		union ce_desc *descriptor, void *memory,
327 		int index, int len)
328 {
329 }
330 qdf_export_symbol(hif_record_ce_desc_event);
331 
332 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
333 					int size)
334 {
335 }
336 
337 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
338 {
339 }
340 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
341 
342 #ifdef NAPI_YIELD_BUDGET_BASED
343 bool hif_ce_service_should_yield(struct hif_softc *scn,
344 				 struct CE_state *ce_state)
345 {
346 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
347 
348 	/* Clamp receive_count to MAX_NUM_OF_RECEIVES when it goes beyond
349 	 * MAX_NUM_OF_RECEIVES, to avoid a NAPI budget calculation issue.
350 	 * This can happen in fast path handling as processing happens in
351 	 * batches.
352 	 */
353 	if (yield)
354 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
355 
356 	return yield;
357 }
358 #else
359 /**
360  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
361  * @scn: hif context
362  * @ce_state: context of the copy engine being serviced
363  *
364  * Return: true if the service should yield
365  */
366 bool hif_ce_service_should_yield(struct hif_softc *scn,
367 				 struct CE_state *ce_state)
368 {
369 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
370 
371 	time_limit_reached = qdf_time_sched_clock() >
372 					ce_state->ce_service_yield_time ? 1 : 0;
373 
374 	if (!time_limit_reached)
375 		rxpkt_thresh_reached = hif_max_num_receives_reached
376 					(scn, ce_state->receive_count);
377 
378 	/* Clamp receive_count to MAX_NUM_OF_RECEIVES when it goes beyond
379 	 * MAX_NUM_OF_RECEIVES, to avoid a NAPI budget calculation issue.
380 	 * This can happen in fast path handling as processing happens in
381 	 * batches.
382 	 */
383 	if (rxpkt_thresh_reached)
384 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
385 
386 	yield =  time_limit_reached || rxpkt_thresh_reached;
387 
388 	if (yield &&
389 	    ce_state->htt_rx_data &&
390 	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
391 		hif_napi_update_yield_stats(ce_state,
392 					    time_limit_reached,
393 					    rxpkt_thresh_reached);
394 	}
395 
396 	return yield;
397 }
398 qdf_export_symbol(hif_ce_service_should_yield);
399 #endif
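
/*
 * Illustrative sketch of how the yield check above bounds a service
 * pass: the caller sets a wall-clock budget before draining the ring,
 * then polls the check after each batch. "more_work" and
 * "process_one_completion()" are placeholders; ce_per_engine_service()
 * below sets up the real budget.
 *
 *	ce_state->ce_service_start_time = qdf_time_sched_clock();
 *	ce_state->ce_service_yield_time =
 *		ce_state->ce_service_start_time +
 *		hif_get_ce_service_max_yield_time(
 *			(struct hif_opaque_softc *)scn);
 *
 *	while (more_work) {
 *		process_one_completion();
 *		if (hif_ce_service_should_yield(scn, ce_state))
 *			break;
 *	}
 */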
400 
401 /*
402  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
403  * The caller takes responsibility for any needed locking.
404  */
405 
406 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
407 				   u32 ctrl_addr, unsigned int write_index)
408 {
409 	if (hif_ce_war1) {
410 		void __iomem *indicator_addr;
411 
412 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
413 
414 		if (!war1_allow_sleep
415 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
416 			hif_write32_mb(scn, indicator_addr,
417 				       (CDC_WAR_MAGIC_STR | write_index));
418 		} else {
419 			unsigned long irq_flags;
420 
421 			local_irq_save(irq_flags);
422 			hif_write32_mb(scn, indicator_addr, 1);
423 
424 			/*
425 			 * PCIE write waits for ACK in IPQ8K, there is no
426 			 * need to read back value.
427 			 */
428 			(void)hif_read32_mb(scn, indicator_addr);
429 			/* conservative */
430 			(void)hif_read32_mb(scn, indicator_addr);
431 
432 			CE_SRC_RING_WRITE_IDX_SET(scn,
433 						  ctrl_addr, write_index);
434 
435 			hif_write32_mb(scn, indicator_addr, 0);
436 			local_irq_restore(irq_flags);
437 		}
438 	} else {
439 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
440 	}
441 }
442 
443 qdf_export_symbol(war_ce_src_ring_write_idx_set);
444 
445 QDF_STATUS
446 ce_send(struct CE_handle *copyeng,
447 		void *per_transfer_context,
448 		qdf_dma_addr_t buffer,
449 		uint32_t nbytes,
450 		uint32_t transfer_id,
451 		uint32_t flags,
452 		uint32_t user_flag)
453 {
454 	struct CE_state *CE_state = (struct CE_state *)copyeng;
455 	QDF_STATUS status;
456 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
457 
458 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
459 	status = hif_state->ce_services->ce_send_nolock(copyeng,
460 			per_transfer_context, buffer, nbytes,
461 			transfer_id, flags, user_flag);
462 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
463 
464 	return status;
465 }
466 qdf_export_symbol(ce_send);
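
/*
 * Illustrative usage sketch (not part of the driver): send one
 * already-DMA-mapped buffer through a copy engine handle. "ce_hdl",
 * "ctx", "dma_addr", "nbytes" and "transfer_id" are caller-supplied
 * placeholders.
 *
 *	QDF_STATUS status;
 *
 *	status = ce_send(ce_hdl, ctx, dma_addr, nbytes, transfer_id, 0, 0);
 *	if (status != QDF_STATUS_SUCCESS)
 *		hif_err("CE send failed");
 */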
467 
468 unsigned int ce_sendlist_sizeof(void)
469 {
470 	return sizeof(struct ce_sendlist);
471 }
472 
473 void ce_sendlist_init(struct ce_sendlist *sendlist)
474 {
475 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
476 
477 	sl->num_items = 0;
478 }
479 
480 QDF_STATUS
481 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
482 					qdf_dma_addr_t buffer,
483 					uint32_t nbytes,
484 					uint32_t flags,
485 					uint32_t user_flags)
486 {
487 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
488 	unsigned int num_items = sl->num_items;
489 	struct ce_sendlist_item *item;
490 
491 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
492 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
493 		return QDF_STATUS_E_RESOURCES;
494 	}
495 
496 	item = &sl->item[num_items];
497 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
498 	item->data = buffer;
499 	item->u.nbytes = nbytes;
500 	item->flags = flags;
501 	item->user_flags = user_flags;
502 	sl->num_items = num_items + 1;
503 	return QDF_STATUS_SUCCESS;
504 }
505 
506 QDF_STATUS
507 ce_sendlist_send(struct CE_handle *copyeng,
508 		 void *per_transfer_context,
509 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
510 {
511 	struct CE_state *CE_state = (struct CE_state *)copyeng;
512 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
513 
514 	return hif_state->ce_services->ce_sendlist_send(copyeng,
515 			per_transfer_context, sendlist, transfer_id);
516 }
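
/*
 * Illustrative usage sketch of the sendlist API above: build a small
 * send list on the stack, add the already-DMA-mapped fragments, then
 * hand the whole list to the copy engine in one call (the underlying
 * service treats all but the last item as gather fragments). Fragment
 * addresses and lengths are placeholders.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0, 0);
 *	ce_sendlist_send(copyeng, per_transfer_ctx, &sendlist, transfer_id);
 */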
517 
518 #ifndef AH_NEED_TX_DATA_SWAP
519 #define AH_NEED_TX_DATA_SWAP 0
520 #endif
521 
522 /**
523  * ce_batch_send() - sends bunch of msdus at once
524  * @ce_tx_hdl : pointer to CE handle
525  * @msdu : list of msdus to be sent
526  * @transfer_id : transfer id
527  * @len : Downloaded length
528  * @sendhead : sendhead
529  *
530  * Assumption : Called with an array of MSDU's
531  * Function:
532  * For each msdu in the array
533  * 1. Send each msdu
534  * 2. Increment write index accordingly.
535  *
536  * Return: list of msdus not sent
537  */
538 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
539 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
540 {
541 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
542 	struct hif_softc *scn = ce_state->scn;
543 	struct CE_ring_state *src_ring = ce_state->src_ring;
544 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
545 	/*  A_target_id_t targid = TARGID(scn);*/
546 
547 	uint32_t nentries_mask = src_ring->nentries_mask;
548 	uint32_t sw_index, write_index;
549 
550 	struct CE_src_desc *src_desc_base =
551 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
552 	uint32_t *src_desc;
553 
554 	struct CE_src_desc lsrc_desc = {0};
555 	int deltacount = 0;
556 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
557 
558 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
559 	sw_index = src_ring->sw_index;
560 	write_index = src_ring->write_index;
561 
562 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
563 
564 	while (msdu) {
565 		tempnext = qdf_nbuf_next(msdu);
566 
567 		if (deltacount < 2) {
568 			if (sendhead)
569 				return msdu;
570 			hif_err("Out of descriptors");
571 			src_ring->write_index = write_index;
572 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
573 					write_index);
574 
575 			sw_index = src_ring->sw_index;
576 			write_index = src_ring->write_index;
577 
578 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
579 					sw_index-1);
580 			if (!freelist) {
581 				freelist = msdu;
582 				hfreelist = msdu;
583 			} else {
584 				qdf_nbuf_set_next(freelist, msdu);
585 				freelist = msdu;
586 			}
587 			qdf_nbuf_set_next(msdu, NULL);
588 			msdu = tempnext;
589 			continue;
590 		}
591 
592 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
593 				write_index);
594 
595 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
596 
597 		lsrc_desc.meta_data = transfer_id;
598 		if (len  > msdu->len)
599 			len =  msdu->len;
600 		lsrc_desc.nbytes = len;
601 		/*  Data packet is a byte stream, so disable byte swap */
602 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
603 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
604 
605 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
606 
607 
608 		src_ring->per_transfer_context[write_index] = msdu;
609 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
610 
611 		if (sendhead)
612 			break;
613 		qdf_nbuf_set_next(msdu, NULL);
614 		msdu = tempnext;
615 
616 	}
617 
618 
619 	src_ring->write_index = write_index;
620 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
621 
622 	return hfreelist;
623 }
624 
625 /**
626  * ce_update_tx_ring() - Advance sw index.
627  * @ce_tx_hdl : pointer to CE handle
628  * @num_htt_cmpls : htt completions received.
629  *
630  * Function:
631  * Increment the value of sw index of src ring
632  * according to number of htt completions
633  * received.
634  *
635  * Return: void
636  */
637 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
638 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
639 {
640 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
641 	struct CE_ring_state *src_ring = ce_state->src_ring;
642 	uint32_t nentries_mask = src_ring->nentries_mask;
643 	/*
644 	 * Advance the s/w index:
645 	 * This effectively simulates completing the CE ring descriptors
646 	 */
647 	src_ring->sw_index =
648 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
649 				num_htt_cmpls);
650 }
651 #else
652 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
653 {}
654 #endif
655 
656 /**
657  * ce_send_single() - send a single msdu on a copy engine
658  * @ce_tx_hdl : pointer to CE handle
659  * @msdu : msdu to be sent
660  * @transfer_id : transfer id
661  * @len : Downloaded length
662  *
663  * Function:
664  * 1. Send one msdu
665  * 2. Increment write index of src ring accordingly.
666  *
667  * Return: QDF_STATUS: CE sent status
668  */
669 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
670 			  uint32_t transfer_id, u_int32_t len)
671 {
672 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
673 	struct hif_softc *scn = ce_state->scn;
674 	struct CE_ring_state *src_ring = ce_state->src_ring;
675 	uint32_t ctrl_addr = ce_state->ctrl_addr;
676 	/*A_target_id_t targid = TARGID(scn);*/
677 
678 	uint32_t nentries_mask = src_ring->nentries_mask;
679 	uint32_t sw_index, write_index;
680 
681 	struct CE_src_desc *src_desc_base =
682 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
683 	uint32_t *src_desc;
684 
685 	struct CE_src_desc lsrc_desc = {0};
686 	enum hif_ce_event_type event_type;
687 
688 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
689 	sw_index = src_ring->sw_index;
690 	write_index = src_ring->write_index;
691 
692 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
693 					sw_index-1) < 1)) {
694 		hif_err("ce send fail %d %d %d", nentries_mask,
695 		       write_index, sw_index);
696 		return QDF_STATUS_E_RESOURCES;
697 	}
698 
699 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
700 
701 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
702 
703 	lsrc_desc.meta_data = transfer_id;
704 	lsrc_desc.nbytes = len;
705 	/*  Data packet is a byte stream, so disable byte swap */
706 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
707 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
708 
709 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
710 
711 
712 	src_ring->per_transfer_context[write_index] = msdu;
713 
714 	if (((struct CE_src_desc *)src_desc)->gather)
715 		event_type = HIF_TX_GATHER_DESC_POST;
716 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
717 		event_type = HIF_TX_DESC_SOFTWARE_POST;
718 	else
719 		event_type = HIF_TX_DESC_POST;
720 
721 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
722 				(union ce_desc *)src_desc, msdu,
723 				write_index, len);
724 
725 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
726 
727 	src_ring->write_index = write_index;
728 
729 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
730 
731 	return QDF_STATUS_SUCCESS;
732 }
733 
734 /**
735  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
736  * @copyeng: copy engine handle
737  * @per_recv_context: virtual address of the nbuf
738  * @buffer: physical address of the nbuf
739  *
740  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
741  */
742 QDF_STATUS
743 ce_recv_buf_enqueue(struct CE_handle *copyeng,
744 		    void *per_recv_context, qdf_dma_addr_t buffer)
745 {
746 	struct CE_state *CE_state = (struct CE_state *)copyeng;
747 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
748 
749 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
750 			per_recv_context, buffer);
751 }
752 qdf_export_symbol(ce_recv_buf_enqueue);
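
/*
 * Illustrative usage sketch: post one receive buffer, mirroring what
 * the HIF pipe layer does. The nbuf is allocated and DMA-mapped by the
 * caller before its physical address is handed to the copy engine;
 * "buf_sz" is a placeholder.
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
 *
 *	if (nbuf &&
 *	    qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE) ==
 *	    QDF_STATUS_SUCCESS)
 *		ce_recv_buf_enqueue(copyeng, (void *)nbuf,
 *				    QDF_NBUF_CB_PADDR(nbuf));
 */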
753 
754 void
755 ce_send_watermarks_set(struct CE_handle *copyeng,
756 		       unsigned int low_alert_nentries,
757 		       unsigned int high_alert_nentries)
758 {
759 	struct CE_state *CE_state = (struct CE_state *)copyeng;
760 	uint32_t ctrl_addr = CE_state->ctrl_addr;
761 	struct hif_softc *scn = CE_state->scn;
762 
763 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
764 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
765 }
766 
767 void
768 ce_recv_watermarks_set(struct CE_handle *copyeng,
769 		       unsigned int low_alert_nentries,
770 		       unsigned int high_alert_nentries)
771 {
772 	struct CE_state *CE_state = (struct CE_state *)copyeng;
773 	uint32_t ctrl_addr = CE_state->ctrl_addr;
774 	struct hif_softc *scn = CE_state->scn;
775 
776 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
777 				low_alert_nentries);
778 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
779 				high_alert_nentries);
780 }
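
/*
 * Illustrative sketch: program low/high watermark thresholds (in ring
 * entries) on a copy engine; crossings are reported through the
 * callback registered with ce_watermark_cb_register() further below.
 * The thresholds here are placeholders.
 *
 *	ce_send_watermarks_set(copyeng, 2, nentries - 2);
 *	ce_recv_watermarks_set(copyeng, 2, nentries - 2);
 */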
781 
782 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
783 {
784 	struct CE_state *CE_state = (struct CE_state *)copyeng;
785 	struct CE_ring_state *src_ring = CE_state->src_ring;
786 	unsigned int nentries_mask = src_ring->nentries_mask;
787 	unsigned int sw_index;
788 	unsigned int write_index;
789 
790 	qdf_spin_lock(&CE_state->ce_index_lock);
791 	sw_index = src_ring->sw_index;
792 	write_index = src_ring->write_index;
793 	qdf_spin_unlock(&CE_state->ce_index_lock);
794 
795 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
796 }
797 
798 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
799 {
800 	struct CE_state *CE_state = (struct CE_state *)copyeng;
801 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
802 	unsigned int nentries_mask = dest_ring->nentries_mask;
803 	unsigned int sw_index;
804 	unsigned int write_index;
805 
806 	qdf_spin_lock(&CE_state->ce_index_lock);
807 	sw_index = dest_ring->sw_index;
808 	write_index = dest_ring->write_index;
809 	qdf_spin_unlock(&CE_state->ce_index_lock);
810 
811 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
812 }
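
/*
 * Worked example of the CE_RING_DELTA() arithmetic used above, for an
 * 8-entry ring (nentries_mask = 7): with write_index = 2 and
 * sw_index = 6, CE_RING_DELTA(7, 2, 6 - 1) = (5 - 2) & 7 = 3, i.e.
 * three slots are still available; four entries are in flight and one
 * slot is always kept empty to distinguish a full ring from an empty
 * one. This is what ce_send_entries_avail()/ce_recv_entries_avail()
 * above return.
 */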
813 
814 /*
815  * Guts of ce_completed_recv_next.
816  * The caller takes responsibility for any necessary locking.
817  */
818 QDF_STATUS
819 ce_completed_recv_next(struct CE_handle *copyeng,
820 		       void **per_CE_contextp,
821 		       void **per_transfer_contextp,
822 		       qdf_dma_addr_t *bufferp,
823 		       unsigned int *nbytesp,
824 		       unsigned int *transfer_idp, unsigned int *flagsp)
825 {
826 	struct CE_state *CE_state = (struct CE_state *)copyeng;
827 	QDF_STATUS status;
828 	struct hif_softc *scn = CE_state->scn;
829 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
830 	struct ce_ops *ce_services;
831 
832 	ce_services = hif_state->ce_services;
833 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
834 	status =
835 		ce_services->ce_completed_recv_next_nolock(CE_state,
836 				per_CE_contextp, per_transfer_contextp, bufferp,
837 					      nbytesp, transfer_idp, flagsp);
838 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
839 
840 	return status;
841 }
842 
843 QDF_STATUS
844 ce_revoke_recv_next(struct CE_handle *copyeng,
845 		    void **per_CE_contextp,
846 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
847 {
848 	struct CE_state *CE_state = (struct CE_state *)copyeng;
849 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
850 
851 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
852 			per_CE_contextp, per_transfer_contextp, bufferp);
853 }
854 
855 QDF_STATUS
856 ce_cancel_send_next(struct CE_handle *copyeng,
857 		void **per_CE_contextp,
858 		void **per_transfer_contextp,
859 		qdf_dma_addr_t *bufferp,
860 		unsigned int *nbytesp,
861 		unsigned int *transfer_idp,
862 		uint32_t *toeplitz_hash_result)
863 {
864 	struct CE_state *CE_state = (struct CE_state *)copyeng;
865 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
866 
867 	return hif_state->ce_services->ce_cancel_send_next
868 		(copyeng, per_CE_contextp, per_transfer_contextp,
869 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
870 }
871 qdf_export_symbol(ce_cancel_send_next);
872 
873 QDF_STATUS
874 ce_completed_send_next(struct CE_handle *copyeng,
875 		       void **per_CE_contextp,
876 		       void **per_transfer_contextp,
877 		       qdf_dma_addr_t *bufferp,
878 		       unsigned int *nbytesp,
879 		       unsigned int *transfer_idp,
880 		       unsigned int *sw_idx,
881 		       unsigned int *hw_idx,
882 		       unsigned int *toeplitz_hash_result)
883 {
884 	struct CE_state *CE_state = (struct CE_state *)copyeng;
885 	struct hif_softc *scn = CE_state->scn;
886 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
887 	struct ce_ops *ce_services;
888 	QDF_STATUS status;
889 
890 	ce_services = hif_state->ce_services;
891 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
892 	status =
893 		ce_services->ce_completed_send_next_nolock(CE_state,
894 					per_CE_contextp, per_transfer_contextp,
895 					bufferp, nbytesp, transfer_idp, sw_idx,
896 					      hw_idx, toeplitz_hash_result);
897 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
898 
899 	return status;
900 }
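
/*
 * Illustrative sketch of draining send completions with the call
 * above; ce_per_engine_servicereap() below uses the same pattern with
 * the _nolock variant. Variable declarations are elided, and xfer_ctx
 * comes back as the per_transfer_context that was posted at send time.
 * complete_one_send() is a placeholder for the caller's completion
 * handling.
 *
 *	while (ce_completed_send_next(copyeng, &ce_ctx, &xfer_ctx, &buf,
 *				      &nbytes, &id, &sw_idx, &hw_idx,
 *				      &toeplitz_hash) == QDF_STATUS_SUCCESS)
 *		complete_one_send(xfer_ctx, nbytes);
 */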
901 
902 #ifdef ATH_11AC_TXCOMPACT
903 /* CE engine descriptor reap
904  * Similar to ce_per_engine_service(); the only difference is that
905  * ce_per_engine_service() does both receive processing and reaping of
906  * completed descriptors, while this function only handles reaping of
907  * Tx complete descriptors. It is called from the threshold reap poll
908  * routine hif_send_complete_check(), so it must not contain any
909  * receive functionality.
910  */
911 
912 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
913 {
914 	void *CE_context;
915 	void *transfer_context;
916 	qdf_dma_addr_t buf;
917 	unsigned int nbytes;
918 	unsigned int id;
919 	unsigned int sw_idx, hw_idx;
920 	uint32_t toeplitz_hash_result;
921 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
922 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
923 
924 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
925 		return;
926 
927 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
928 			NULL, NULL, 0, 0);
929 
930 	/* Since this function is called from both user context and
931 	 * tasklet context, the spinlock has to lock the bottom halves.
932 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
933 	 * enabled in TX polling mode. If this is not the case, more
934 	 * bottom-half spin lock changes are needed. Due to data path
935 	 * performance concerns, after internal discussion we've decided
936 	 * to make the minimum change, i.e., only address the issue that
937 	 * occurred in this function. The possible negative effect of this
938 	 * minimum change is that, in the future, if some other function is
939 	 * also opened up for use from user context, those cases will need
940 	 * to be addressed by changing spin_lock to spin_lock_bh as well.
941 	 */
942 
943 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
944 
945 	if (CE_state->send_cb) {
946 		{
947 			struct ce_ops *ce_services = hif_state->ce_services;
948 			/* Pop completed send buffers and call the
949 			 * registered send callback for each
950 			 */
951 			while (ce_services->ce_completed_send_next_nolock
952 				 (CE_state, &CE_context,
953 				  &transfer_context, &buf,
954 				  &nbytes, &id, &sw_idx, &hw_idx,
955 				  &toeplitz_hash_result) ==
956 				  QDF_STATUS_SUCCESS) {
957 				if (ce_id != CE_HTT_H2T_MSG) {
958 					qdf_spin_unlock_bh(
959 						&CE_state->ce_index_lock);
960 					CE_state->send_cb(
961 						(struct CE_handle *)
962 						CE_state, CE_context,
963 						transfer_context, buf,
964 						nbytes, id, sw_idx, hw_idx,
965 						toeplitz_hash_result);
966 					qdf_spin_lock_bh(
967 						&CE_state->ce_index_lock);
968 				} else {
969 					struct HIF_CE_pipe_info *pipe_info =
970 						(struct HIF_CE_pipe_info *)
971 						CE_context;
972 
973 					qdf_spin_lock_bh(&pipe_info->
974 						 completion_freeq_lock);
975 					pipe_info->num_sends_allowed++;
976 					qdf_spin_unlock_bh(&pipe_info->
977 						   completion_freeq_lock);
978 				}
979 			}
980 		}
981 	}
982 
983 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
984 
985 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
986 			NULL, NULL, 0, 0);
987 	Q_TARGET_ACCESS_END(scn);
988 }
989 
990 #endif /*ATH_11AC_TXCOMPACT */
991 
992 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
993 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
994 {
995 	// QDF_IS_EPPING_ENABLED is a pre-Lithium feature
996 	// CE4 completion is enabled only on Lithium and later,
997 	// so there is no need to check for EPPING
998 	return true;
999 }
1000 
1001 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1002 
1003 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
1004 {
1005 	if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode))
1006 		return true;
1007 	else
1008 		return false;
1009 }
1010 
1011 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1012 
1013 /*
1014  * ce_engine_service_reg:
1015  *
1016  * Called from ce_per_engine_service and goes through the regular interrupt
1017  * handling that does not involve the WLAN fast path feature.
1018  *
1019  * Returns void
1020  */
1021 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
1022 {
1023 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1024 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1025 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1026 	void *CE_context;
1027 	void *transfer_context;
1028 	qdf_dma_addr_t buf;
1029 	unsigned int nbytes;
1030 	unsigned int id;
1031 	unsigned int flags;
1032 	unsigned int more_comp_cnt = 0;
1033 	unsigned int more_snd_comp_cnt = 0;
1034 	unsigned int sw_idx, hw_idx;
1035 	uint32_t toeplitz_hash_result;
1036 	uint32_t mode = hif_get_conparam(scn);
1037 
1038 more_completions:
1039 	if (CE_state->recv_cb) {
1040 
1041 		/* Pop completed recv buffers and call
1042 		 * the registered recv callback for each
1043 		 */
1044 		while (hif_state->ce_services->ce_completed_recv_next_nolock
1045 				(CE_state, &CE_context, &transfer_context,
1046 				&buf, &nbytes, &id, &flags) ==
1047 				QDF_STATUS_SUCCESS) {
1048 			qdf_spin_unlock(&CE_state->ce_index_lock);
1049 			CE_state->recv_cb((struct CE_handle *)CE_state,
1050 					  CE_context, transfer_context, buf,
1051 					  nbytes, id, flags);
1052 
1053 			qdf_spin_lock(&CE_state->ce_index_lock);
1054 			/*
1055 			 * EV #112693 -
1056 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
1057 			 * BSoD_0x133 occurred in VHT80 UDP_DL.
1058 			 * Break out of the DPC by force if the number of
1059 			 * loops in hif_pci_ce_recv_data reaches
1060 			 * MAX_NUM_OF_RECEIVES, to avoid spending too much
1061 			 * time in the DPC for each interrupt. Schedule
1062 			 * another DPC to avoid data loss if the force-break
1063 			 * was taken. This currently applies to Windows OS
1064 			 * only; Linux/macOS can extend it to their
1065 			 * platforms if necessary.
1066 			 */
1067 
1068 			/* Break the receive processes by
1069 			 * force if force_break set up
1070 			 */
1071 			if (qdf_unlikely(CE_state->force_break)) {
1072 				qdf_atomic_set(&CE_state->rx_pending, 1);
1073 				return;
1074 			}
1075 		}
1076 	}
1077 
1078 	/*
1079 	 * Attention: We may experience potential infinite loop for below
1080 	 * While Loop during Sending Stress test.
1081 	 * Resolve the same way as Receive Case (Refer to EV #112693)
1082 	 */
1083 
1084 	if (CE_state->send_cb) {
1085 		/* Pop completed send buffers and call
1086 		 * the registered send callback for each
1087 		 */
1088 
1089 #ifdef ATH_11AC_TXCOMPACT
1090 		while (hif_state->ce_services->ce_completed_send_next_nolock
1091 			 (CE_state, &CE_context,
1092 			 &transfer_context, &buf, &nbytes,
1093 			 &id, &sw_idx, &hw_idx,
1094 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1095 
1096 			if (check_ce_id_and_epping_enabled(CE_id, mode)) {
1097 				qdf_spin_unlock(&CE_state->ce_index_lock);
1098 				CE_state->send_cb((struct CE_handle *)CE_state,
1099 						  CE_context, transfer_context,
1100 						  buf, nbytes, id, sw_idx,
1101 						  hw_idx, toeplitz_hash_result);
1102 				qdf_spin_lock(&CE_state->ce_index_lock);
1103 			} else {
1104 				struct HIF_CE_pipe_info *pipe_info =
1105 					(struct HIF_CE_pipe_info *)CE_context;
1106 
1107 				qdf_spin_lock_bh(&pipe_info->
1108 					      completion_freeq_lock);
1109 				pipe_info->num_sends_allowed++;
1110 				qdf_spin_unlock_bh(&pipe_info->
1111 						completion_freeq_lock);
1112 			}
1113 		}
1114 #else                           /*ATH_11AC_TXCOMPACT */
1115 		while (hif_state->ce_services->ce_completed_send_next_nolock
1116 			 (CE_state, &CE_context,
1117 			  &transfer_context, &buf, &nbytes,
1118 			  &id, &sw_idx, &hw_idx,
1119 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1120 			qdf_spin_unlock(&CE_state->ce_index_lock);
1121 			CE_state->send_cb((struct CE_handle *)CE_state,
1122 				  CE_context, transfer_context, buf,
1123 				  nbytes, id, sw_idx, hw_idx,
1124 				  toeplitz_hash_result);
1125 			qdf_spin_lock(&CE_state->ce_index_lock);
1126 		}
1127 #endif /*ATH_11AC_TXCOMPACT */
1128 	}
1129 
1130 more_watermarks:
1131 	if (CE_state->misc_cbs) {
1132 		if (CE_state->watermark_cb &&
1133 				hif_state->ce_services->watermark_int(CE_state,
1134 					&flags)) {
1135 			qdf_spin_unlock(&CE_state->ce_index_lock);
1136 			/* Convert HW IS bits to software flags */
1137 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1138 					CE_state->wm_context, flags);
1139 			qdf_spin_lock(&CE_state->ce_index_lock);
1140 		}
1141 	}
1142 
1143 	/*
1144 	 * Clear the misc interrupts (watermark) that were handled above,
1145 	 * and that will be checked again below.
1146 	 * Clear and check for copy-complete interrupts again, just in case
1147 	 * more copy completions happened while the misc interrupts were being
1148 	 * handled.
1149 	 */
1150 	if (!ce_srng_based(scn)) {
1151 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1152 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1153 					   CE_WATERMARK_MASK |
1154 					   HOST_IS_COPY_COMPLETE_MASK);
1155 		} else {
1156 			qdf_atomic_set(&CE_state->rx_pending, 0);
1157 			hif_err_rl("%s: target access is not allowed",
1158 				   __func__);
1159 			return;
1160 		}
1161 	}
1162 
1163 	/*
1164 	 * Now that per-engine interrupts are cleared, verify that
1165 	 * no recv interrupts arrive while processing send interrupts,
1166 	 * and no recv or send interrupts happened while processing
1167 	 * misc interrupts. Go back and check again. Keep checking until
1168 	 * we find no more events to process.
1169 	 */
1170 	if (CE_state->recv_cb &&
1171 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1172 				CE_state)) {
1173 		if (QDF_IS_EPPING_ENABLED(mode) ||
1174 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1175 			goto more_completions;
1176 		} else {
1177 			if (!ce_srng_based(scn)) {
1178 				hif_err_rl(
1179 					"Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1180 					CE_state->id,
1181 					CE_state->dest_ring->nentries_mask,
1182 					CE_state->dest_ring->sw_index,
1183 					CE_DEST_RING_READ_IDX_GET(scn,
1184 							  CE_state->ctrl_addr));
1185 			}
1186 		}
1187 	}
1188 
1189 	if (CE_state->send_cb &&
1190 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1191 				CE_state)) {
1192 		if (QDF_IS_EPPING_ENABLED(mode) ||
1193 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1194 			goto more_completions;
1195 		} else {
1196 			if (!ce_srng_based(scn)) {
1197 				hif_err_rl(
1198 					"Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x",
1199 					CE_state->id,
1200 					CE_state->src_ring->nentries_mask,
1201 					CE_state->src_ring->sw_index,
1202 					CE_state->src_ring->hw_index,
1203 					CE_state->src_ring->write_index,
1204 					CE_SRC_RING_READ_IDX_GET(scn,
1205 							 CE_state->ctrl_addr));
1206 			}
1207 		}
1208 	}
1209 
1210 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1211 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1212 			goto more_watermarks;
1213 	}
1214 
1215 	qdf_atomic_set(&CE_state->rx_pending, 0);
1216 }
1217 
1218 #ifdef WLAN_TRACEPOINTS
1219 /**
1220  * ce_trace_tasklet_sched_latency() - Trace ce tasklet scheduling
1221  *  latency
1222  * @ce_state: CE context
1223  *
1224  * Return: None
1225  */
1226 static inline
1227 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
1228 {
1229 	qdf_trace_dp_ce_tasklet_sched_latency(ce_state->id,
1230 					      ce_state->ce_service_start_time -
1231 					      ce_state->ce_tasklet_sched_time);
1232 }
1233 #else
1234 static inline
1235 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
1236 {
1237 }
1238 #endif
1239 
1240 /*
1241  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1242  *
1243  * Invokes registered callbacks for recv_complete,
1244  * send_complete, and watermarks.
1245  *
1246  * Returns: number of messages processed
1247  */
1248 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1249 {
1250 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1251 
1252 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1253 		return CE_state->receive_count;
1254 
1255 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1256 		hif_err("[premature rc=0]");
1257 		return 0; /* no work done */
1258 	}
1259 
1260 	/* Clear force_break flag and re-initialize receive_count to 0 */
1261 	CE_state->receive_count = 0;
1262 	CE_state->force_break = 0;
1263 	CE_state->ce_service_start_time = qdf_time_sched_clock();
1264 	CE_state->ce_service_yield_time =
1265 		CE_state->ce_service_start_time +
1266 		hif_get_ce_service_max_yield_time(
1267 			(struct hif_opaque_softc *)scn);
1268 
1269 	ce_trace_tasklet_sched_latency(CE_state);
1270 
1271 	qdf_spin_lock(&CE_state->ce_index_lock);
1272 
1273 	CE_state->service(scn, CE_id);
1274 
1275 	qdf_spin_unlock(&CE_state->ce_index_lock);
1276 
1277 	if (Q_TARGET_ACCESS_END(scn) < 0)
1278 		hif_err("<--[premature rc=%d]", CE_state->receive_count);
1279 	return CE_state->receive_count;
1280 }
1281 qdf_export_symbol(ce_per_engine_service);
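
/*
 * Illustrative sketch: the per-CE tasklet or NAPI poll handler calls
 * this with the CE id whose interrupt fired and uses the return value
 * (number of completions processed) against its budget;
 * ce_per_engine_service_any() below is the shared-interrupt variant.
 *
 *	int work_done = ce_per_engine_service(scn, ce_id);
 */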
1282 
1283 /*
1284  * Handler for per-engine interrupts on ALL active CEs.
1285  * This is used in cases where the system is sharing a
1286  * single interrupt for all CEs
1287  */
1288 
1289 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1290 {
1291 	int CE_id;
1292 	uint32_t intr_summary;
1293 
1294 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1295 		return;
1296 
1297 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1298 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1299 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1300 
1301 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1302 				qdf_atomic_set(&CE_state->rx_pending, 0);
1303 				ce_per_engine_service(scn, CE_id);
1304 			}
1305 		}
1306 
1307 		Q_TARGET_ACCESS_END(scn);
1308 		return;
1309 	}
1310 
1311 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1312 
1313 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1314 		if (intr_summary & (1 << CE_id))
1315 			intr_summary &= ~(1 << CE_id);
1316 		else
1317 			continue;       /* no intr pending on this CE */
1318 
1319 		ce_per_engine_service(scn, CE_id);
1320 	}
1321 
1322 	Q_TARGET_ACCESS_END(scn);
1323 }
1324 
1325 /* Iterate the CE_state list and disable the compl interrupt
1326  * if it has been registered already.
1327  */
1328 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1329 {
1330 	int CE_id;
1331 
1332 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1333 		return;
1334 
1335 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1336 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1337 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1338 
1339 		/* if the interrupt is currently enabled, disable it */
1340 		if (!CE_state->disable_copy_compl_intr
1341 		    && (CE_state->send_cb || CE_state->recv_cb))
1342 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1343 
1344 		if (CE_state->watermark_cb)
1345 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1346 	}
1347 	Q_TARGET_ACCESS_END(scn);
1348 }
1349 
1350 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1351 {
1352 	int CE_id;
1353 
1354 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1355 		return;
1356 
1357 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1358 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1359 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1360 
1361 		/*
1362 		 * If the CE is supposed to have copy complete interrupts
1363 		 * enabled (i.e. there a callback registered, and the
1364 		 * "disable" flag is not set), then re-enable the interrupt.
1365 		 */
1366 		if (!CE_state->disable_copy_compl_intr
1367 		    && (CE_state->send_cb || CE_state->recv_cb))
1368 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1369 
1370 		if (CE_state->watermark_cb)
1371 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1372 	}
1373 	Q_TARGET_ACCESS_END(scn);
1374 }
1375 
1376 /**
1377  * ce_send_cb_register(): register completion handler
1378  * @copyeng: CE_state representing the ce we are adding the behavior to
1379  * @fn_ptr: callback that the ce should use when processing tx completions
1380  * @disable_interrupts: whether completion interrupts should be disabled
1381  *
1382  * Caller should guarantee that no transactions are in progress before
1383  * switching the callback function.
1384  *
1385  * Registers the send context before the fn pointer so that if the cb is valid
1386  * the context should be valid.
1387  *
1388  * Beware that currently this function will enable completion interrupts.
1389  */
1390 void
1391 ce_send_cb_register(struct CE_handle *copyeng,
1392 		    ce_send_cb fn_ptr,
1393 		    void *ce_send_context, int disable_interrupts)
1394 {
1395 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1396 	struct hif_softc *scn;
1397 	struct HIF_CE_state *hif_state;
1398 
1399 	if (!CE_state) {
1400 		hif_err("Error CE state = NULL");
1401 		return;
1402 	}
1403 	scn = CE_state->scn;
1404 	hif_state = HIF_GET_CE_STATE(scn);
1405 	if (!hif_state) {
1406 		hif_err("Error HIF state = NULL");
1407 		return;
1408 	}
1409 	CE_state->send_context = ce_send_context;
1410 	CE_state->send_cb = fn_ptr;
1411 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1412 							disable_interrupts);
1413 }
1414 qdf_export_symbol(ce_send_cb_register);
1415 
1416 /**
1417  * ce_recv_cb_register(): register completion handler
1418  * @copyeng: CE_state representing the ce we are adding the behavior to
1419  * @fn_ptr: callback that the ce should use when processing rx completions
1420  * @disable_interrupts: whether completion interrupts should be disabled
1421  *
1422  * Registers the send context before the fn pointer so that if the cb is valid
1423  * the context should be valid.
1424  *
1425  * Caller should guarantee that no transactions are in progress before
1426  * switching the callback function.
1427  */
1428 void
1429 ce_recv_cb_register(struct CE_handle *copyeng,
1430 		    CE_recv_cb fn_ptr,
1431 		    void *CE_recv_context, int disable_interrupts)
1432 {
1433 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1434 	struct hif_softc *scn;
1435 	struct HIF_CE_state *hif_state;
1436 
1437 	if (!CE_state) {
1438 		hif_err("ERROR CE state = NULL");
1439 		return;
1440 	}
1441 	scn = CE_state->scn;
1442 	hif_state = HIF_GET_CE_STATE(scn);
1443 	if (!hif_state) {
1444 		hif_err("Error HIF state = NULL");
1445 		return;
1446 	}
1447 	CE_state->recv_context = CE_recv_context;
1448 	CE_state->recv_cb = fn_ptr;
1449 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1450 							disable_interrupts);
1451 }
1452 qdf_export_symbol(ce_recv_cb_register);
1453 
1454 /**
1455  * ce_watermark_cb_register(): register completion handler
1456  * @copyeng: CE_state representing the ce we are adding the behavior to
1457  * @fn_ptr: callback that the ce should use when processing watermark events
1458  *
1459  * Caller should guarantee that no watermark events are being processed before
1460  * switching the callback function.
1461  */
1462 void
1463 ce_watermark_cb_register(struct CE_handle *copyeng,
1464 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1465 {
1466 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1467 	struct hif_softc *scn = CE_state->scn;
1468 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1469 
1470 	CE_state->watermark_cb = fn_ptr;
1471 	CE_state->wm_context = CE_wm_context;
1472 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1473 							0);
1474 	if (fn_ptr)
1475 		CE_state->misc_cbs = 1;
1476 }
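
/*
 * Illustrative registration sketch: the HIF pipe layer wires its
 * completion handlers to a copy engine roughly as below; the callback
 * names and contexts are placeholders.
 *
 *	ce_send_cb_register(copyeng, my_tx_done_cb, pipe_info,
 *			    disable_interrupts);
 *	ce_recv_cb_register(copyeng, my_rx_done_cb, pipe_info,
 *			    disable_interrupts);
 *	ce_watermark_cb_register(copyeng, my_wm_cb, pipe_info);
 */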
1477 
1478 bool ce_get_rx_pending(struct hif_softc *scn)
1479 {
1480 	int CE_id;
1481 
1482 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1483 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1484 
1485 		if (qdf_atomic_read(&CE_state->rx_pending))
1486 			return true;
1487 	}
1488 
1489 	return false;
1490 }
1491 
1492 /**
1493  * ce_check_rx_pending() - ce_check_rx_pending
1494  * @CE_state: context of the copy engine to check
1495  *
1496  * Return: true if the per_engine_service
1497  *	didn't process all the rx descriptors.
1498  */
1499 bool ce_check_rx_pending(struct CE_state *CE_state)
1500 {
1501 	if (qdf_atomic_read(&CE_state->rx_pending))
1502 		return true;
1503 	else
1504 		return false;
1505 }
1506 qdf_export_symbol(ce_check_rx_pending);
1507 
1508 #ifdef IPA_OFFLOAD
1509 #ifdef QCN7605_SUPPORT
1510 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1511 {
1512 	u_int32_t ctrl_addr = CE_state->ctrl_addr;
1513 	struct hif_softc *scn = CE_state->scn;
1514 	qdf_dma_addr_t wr_index_addr;
1515 
1516 	wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
1517 	return wr_index_addr;
1518 }
1519 #else
1520 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1521 {
1522 	struct hif_softc *scn = CE_state->scn;
1523 	qdf_dma_addr_t wr_index_addr;
1524 
1525 	wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
1526 			SR_WR_INDEX_ADDRESS;
1527 	return wr_index_addr;
1528 }
1529 #endif
1530 
1531 /**
1532  * ce_ipa_get_resource() - get uc resource on copyengine
1533  * @ce: copyengine context
1534  * @ce_sr: copyengine source ring resource info
1535  * @ce_sr_ring_size: copyengine source ring size
1536  * @ce_reg_paddr: copyengine register physical address
1537  *
1538  * The copy engine should release these resources to the micro controller.
1539  * The micro controller needs:
1540  *  - Copy engine source descriptor base address
1541  *  - Copy engine source descriptor size
1542  *  - PCI BAR address to access the copy engine register
1543  *
1544  * Return: None
1545  */
1546 void ce_ipa_get_resource(struct CE_handle *ce,
1547 			 qdf_shared_mem_t **ce_sr,
1548 			 uint32_t *ce_sr_ring_size,
1549 			 qdf_dma_addr_t *ce_reg_paddr)
1550 {
1551 	struct CE_state *CE_state = (struct CE_state *)ce;
1552 	uint32_t ring_loop;
1553 	struct CE_src_desc *ce_desc;
1554 	qdf_dma_addr_t phy_mem_base;
1555 	struct hif_softc *scn = CE_state->scn;
1556 
1557 	if (CE_UNUSED == CE_state->state) {
1558 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1559 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1560 		*ce_sr_ring_size = 0;
1561 		return;
1562 	}
1563 
1564 	/* Update default value for descriptor */
1565 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1566 	     ring_loop++) {
1567 		ce_desc = (struct CE_src_desc *)
1568 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1569 			   ring_loop * (sizeof(struct CE_src_desc)));
1570 		CE_IPA_RING_INIT(ce_desc);
1571 	}
1572 
1573 	/* Get BAR address */
1574 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1575 
1576 	*ce_sr = CE_state->scn->ipa_ce_ring;
1577 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1578 		sizeof(struct CE_src_desc));
1579 	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);
1580 
1581 }
1582 
1583 #endif /* IPA_OFFLOAD */
1584 
1585 #ifdef HIF_CE_DEBUG_DATA_BUF
1586 /**
1587  * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
1588  * @buf: buffer to copy to
1589  * @pos: Current position till which the buf is filled
1590  * @data: Data to be copied
1591  * @data_len: Length of the data to be copied
 *
 * Return: updated position in buf after appending the hex dump
1592  */
1593 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1594 					uint8_t *data, uint32_t data_len)
1595 {
1596 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1597 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1598 
1599 	if ((data_len > 0) && data) {
1600 		if (data_len < 16) {
1601 			hex_dump_to_buffer(data,
1602 						CE_DEBUG_DATA_PER_ROW,
1603 						16, 1, buf + pos,
1604 						(ssize_t)PAGE_SIZE - pos,
1605 						false);
1606 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1607 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1608 		} else {
1609 			uint32_t rows = (data_len / 16) + 1;
1610 			uint32_t row = 0;
1611 
1612 			for (row = 0; row < rows; row++) {
1613 				hex_dump_to_buffer(data + (row * 16),
1614 							CE_DEBUG_DATA_PER_ROW,
1615 							16, 1, buf + pos,
1616 							(ssize_t)PAGE_SIZE
1617 							- pos, false);
1618 				pos +=
1619 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1620 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1621 						"\n");
1622 			}
1623 		}
1624 	}
1625 
1626 	return pos;
1627 }
1628 #endif
1629 
1630 /*
1631  * Note: For MCL, HIF_CONFIG_SLUB_DEBUG_ON needs to be defined for the
1632  * definitions below to be used
1633  */
1634 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1635 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1636 {
1637 	switch (type) {
1638 	case HIF_RX_DESC_POST:
1639 		return "HIF_RX_DESC_POST";
1640 	case HIF_RX_DESC_COMPLETION:
1641 		return "HIF_RX_DESC_COMPLETION";
1642 	case HIF_TX_GATHER_DESC_POST:
1643 		return "HIF_TX_GATHER_DESC_POST";
1644 	case HIF_TX_DESC_POST:
1645 		return "HIF_TX_DESC_POST";
1646 	case HIF_TX_DESC_SOFTWARE_POST:
1647 		return "HIF_TX_DESC_SOFTWARE_POST";
1648 	case HIF_TX_DESC_COMPLETION:
1649 		return "HIF_TX_DESC_COMPLETION";
1650 	case FAST_RX_WRITE_INDEX_UPDATE:
1651 		return "FAST_RX_WRITE_INDEX_UPDATE";
1652 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1653 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1654 	case FAST_TX_WRITE_INDEX_UPDATE:
1655 		return "FAST_TX_WRITE_INDEX_UPDATE";
1656 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1657 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1658 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1659 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1660 	case RESUME_WRITE_INDEX_UPDATE:
1661 		return "RESUME_WRITE_INDEX_UPDATE";
1662 	case HIF_IRQ_EVENT:
1663 		return "HIF_IRQ_EVENT";
1664 	case HIF_CE_TASKLET_ENTRY:
1665 		return "HIF_CE_TASKLET_ENTRY";
1666 	case HIF_CE_TASKLET_RESCHEDULE:
1667 		return "HIF_CE_TASKLET_RESCHEDULE";
1668 	case HIF_CE_TASKLET_EXIT:
1669 		return "HIF_CE_TASKLET_EXIT";
1670 	case HIF_CE_REAP_ENTRY:
1671 		return "HIF_CE_REAP_ENTRY";
1672 	case HIF_CE_REAP_EXIT:
1673 		return "HIF_CE_REAP_EXIT";
1674 	case NAPI_SCHEDULE:
1675 		return "NAPI_SCHEDULE";
1676 	case NAPI_POLL_ENTER:
1677 		return "NAPI_POLL_ENTER";
1678 	case NAPI_COMPLETE:
1679 		return "NAPI_COMPLETE";
1680 	case NAPI_POLL_EXIT:
1681 		return "NAPI_POLL_EXIT";
1682 	case HIF_RX_NBUF_ALLOC_FAILURE:
1683 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1684 	case HIF_RX_NBUF_MAP_FAILURE:
1685 		return "HIF_RX_NBUF_MAP_FAILURE";
1686 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1687 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1688 	default:
1689 		return "invalid";
1690 	}
1691 }
1692 
1693 /**
1694  * hif_dump_desc_event() - dump the selected ce descriptor event
1695  * @scn: HIF context
1696  * @buf: buffer into which the event is copied
1697  *
 * Return: number of bytes written to buf, or a negative error code
1698  */
1699 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1700 {
1701 	struct hif_ce_desc_event *event;
1702 	uint64_t secs, usecs;
1703 	ssize_t len = 0;
1704 	struct ce_desc_hist *ce_hist = NULL;
1705 	struct hif_ce_desc_event *hist_ev = NULL;
1706 
1707 	if (!scn)
1708 		return -EINVAL;
1709 
1710 	ce_hist = &scn->hif_ce_desc_hist;
1711 
1712 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1713 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1714 		qdf_print("Invalid values");
1715 		return -EINVAL;
1716 	}
1717 
1718 	hist_ev =
1719 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1720 
1721 	if (!hist_ev) {
1722 		qdf_print("Low Memory");
1723 		return -EINVAL;
1724 	}
1725 
1726 	event = &hist_ev[ce_hist->hist_index];
1727 
1728 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1729 
1730 	len += snprintf(buf, PAGE_SIZE - len,
1731 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1732 			secs, usecs, ce_hist->hist_id,
1733 			ce_event_type_to_str(event->type),
1734 			event->index, event->memory);
1735 #ifdef HIF_CE_DEBUG_DATA_BUF
1736 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
1737 			event->actual_data_len);
1738 #endif
1739 
1740 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1741 
1742 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1743 				16, 1, buf + len,
1744 				(ssize_t)PAGE_SIZE - len, false);
1745 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1746 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1747 
1748 #ifdef HIF_CE_DEBUG_DATA_BUF
1749 	if (ce_hist->data_enable[ce_hist->hist_id])
1750 		len = hif_dump_desc_data_buf(buf, len, event->data,
1751 						(event->actual_data_len <
1752 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1753 						event->actual_data_len :
1754 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1755 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1756 
1757 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1758 
1759 	return len;
1760 }
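/*
 * Illustrative sketch (not compiled): a hypothetical sysfs "show" handler
 * wired to hif_dump_desc_event(). hif_get_softc_from_dev() and the handler
 * name are placeholders, not part of this driver; the only assumption taken
 * from this file is the hif_dump_desc_event(scn, buf) signature above.
 *
 *	static ssize_t ce_desc_event_show(struct device *dev,
 *					  struct device_attribute *attr,
 *					  char *buf)
 *	{
 *		struct hif_softc *scn = hif_get_softc_from_dev(dev);
 *
 *		if (!scn)
 *			return -EINVAL;
 *
 *		return hif_dump_desc_event(scn, buf);
 *	}
 */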
1761 
1762 /*
1763  * hif_input_desc_trace_buf_index() -
1764  * API to set the CE id and CE debug history index to be dumped
1765  *
1766  * @scn: HIF context
1767  * @buf: data received from the user, in "<ce_id> <index>" form
1768  * @size: number of bytes in @buf
1769  *
1770  * Return: @size on success, -EINVAL on invalid input
1771  */
1772 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1773 					const char *buf, size_t size)
1774 {
1775 	struct ce_desc_hist *ce_hist = NULL;
1776 
1777 	if (!scn)
1778 		return -EINVAL;
1779 
1780 	ce_hist = &scn->hif_ce_desc_hist;
1781 
1782 	if (!size) {
1783 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1784 		return -EINVAL;
1785 	}
1786 
1787 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1788 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1789 		qdf_nofl_err("%s: Invalid input value.", __func__);
1790 		return -EINVAL;
1791 	}
1792 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1793 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1794 		qdf_print("Invalid hist_id or hist_index");
1795 		return -EINVAL;
1796 	}
1797 
1798 	return size;
1799 }
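/*
 * Illustrative sketch (not compiled): a hypothetical sysfs "store" handler
 * forwarding the user string straight to hif_input_desc_trace_buf_index().
 * hif_get_softc_from_dev() and the handler name are placeholders; the only
 * assumptions taken from this file are the (scn, buf, size) signature and
 * the "<ce_id> <index>" input format parsed above.
 *
 *	static ssize_t ce_desc_index_store(struct device *dev,
 *					   struct device_attribute *attr,
 *					   const char *buf, size_t count)
 *	{
 *		struct hif_softc *scn = hif_get_softc_from_dev(dev);
 *
 *		if (!scn)
 *			return -EINVAL;
 *
 *		return hif_input_desc_trace_buf_index(scn, buf, count);
 *	}
 */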
1800 
1801 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1802 
1803 #ifdef HIF_CE_DEBUG_DATA_BUF
1804 /*
1805  * hif_ce_en_desc_hist() -
1806  * API to enable/disable recording of CE descriptor data history
1807  *
1808  * @scn: HIF context
1809  * @buf: data received from the user, in "<ce_id> <1/0>" form
1810  * @size: number of bytes in @buf
1811  *
1812  * Enables (1) or disables (0) data capture for the given CE id
1813  *
1814  * Return: @size on success, -EINVAL on invalid input
1815  */
1816 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1817 {
1818 	struct ce_desc_hist *ce_hist = NULL;
1819 	uint32_t cfg = 0;
1820 	uint32_t ce_id = 0;
1821 
1822 	if (!scn)
1823 		return -EINVAL;
1824 
1825 	ce_hist = &scn->hif_ce_desc_hist;
1826 
1827 	if (!size) {
1828 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1829 		return -EINVAL;
1830 	}
1831 
1832 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1833 		   (unsigned int *)&cfg) != 2) {
1834 		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
1835 			     __func__);
1836 		return -EINVAL;
1837 	}
1838 	if (ce_id >= CE_COUNT_MAX) {
1839 		qdf_print("Invalid CE Id value");
1840 		return -EINVAL;
1841 	}
1842 
1843 	if (cfg > 1) {
1844 		qdf_print("Invalid values: enter 0 or 1");
1845 		return -EINVAL;
1846 	}
1847 
1848 	if (!ce_hist->hist_ev[ce_id])
1849 		return -EINVAL;
1850 
1851 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1852 	if (cfg == 1) {
1853 		if (ce_hist->data_enable[ce_id] == 1) {
1854 			qdf_debug("Already Enabled");
1855 		} else {
1856 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
1857 							== QDF_STATUS_E_NOMEM){
1858 				ce_hist->data_enable[ce_id] = 0;
1859 				qdf_err("%s:Memory Alloc failed", __func__);
1860 			} else
1861 				ce_hist->data_enable[ce_id] = 1;
1862 		}
1863 	} else if (cfg == 0) {
1864 		if (ce_hist->data_enable[ce_id] == 0) {
1865 			qdf_debug("Already Disabled");
1866 		} else {
1867 			ce_hist->data_enable[ce_id] = 0;
1868 			free_mem_ce_debug_hist_data(scn, ce_id);
1869 		}
1870 	}
1871 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1872 
1873 	return size;
1874 }
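/*
 * Illustrative sketch (not compiled): toggling data capture for CE 5 by
 * calling hif_ce_en_desc_hist() directly with the same "<ce_id> <1/0>"
 * strings a user would write through sysfs. The function name below is a
 * placeholder, and scn is assumed to be a valid, fully initialized HIF
 * context whose CE 5 history (hist_ev[5]) has already been allocated.
 *
 *	static void ce5_data_hist_toggle_example(struct hif_softc *scn)
 *	{
 *		static const char enable_cmd[] = "5 1";
 *		static const char disable_cmd[] = "5 0";
 *
 *		hif_ce_en_desc_hist(scn, enable_cmd, sizeof(enable_cmd) - 1);
 *		hif_ce_en_desc_hist(scn, disable_cmd, sizeof(disable_cmd) - 1);
 *	}
 */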
1875 
1876 /*
1877  * hif_disp_ce_enable_desc_data_hist() -
1878  * API to display the per-CE data_enable state
1879  *
1880  * @scn: HIF context
1881  * @buf: buffer into which the per-CE data_enable state is formatted
1882  *
1883  * Return: total length copied into @buf
1884  */
1886 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1887 {
1888 	ssize_t len = 0;
1889 	uint32_t ce_id = 0;
1890 	struct ce_desc_hist *ce_hist = NULL;
1891 
1892 	if (!scn)
1893 		return -EINVAL;
1894 
1895 	ce_hist = &scn->hif_ce_desc_hist;
1896 
1897 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1898 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1899 				ce_id, ce_hist->data_enable[ce_id]);
1900 	}
1901 
1902 	return len;
1903 }
1904 #endif /* HIF_CE_DEBUG_DATA_BUF */
1905 
1906 #ifdef OL_ATH_SMART_LOGGING
1907 #define GUARD_SPACE 10
1908 #define LOG_ID_SZ 4
1909 /*
1910  * hif_log_src_ce_dump() - Copy the whole CE SRC ring to buf
1911  * @src_ring: SRC ring state
1912  * @buf_cur: Current pointer in ring buffer
1913  * @buf_init: Start of the ring buffer
1914  * @buf_sz: Size of the ring buffer
1915  * @skb_sz: Max size of the SKB data to be copied per entry
1916  *
1917  * Dumps all the CE SRC ring descriptors and the buffers they point to into
1918  * the given buf; skb_sz caps how much of each SKB is copied
1919  *
1920  * Return: Current pointer in ring buffer
1921  */
1922 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
1923 				    uint8_t *buf_cur, uint8_t *buf_init,
1924 				    uint32_t buf_sz, uint32_t skb_sz)
1925 {
1926 	struct CE_src_desc *src_ring_base;
1927 	uint32_t len, entry;
1928 	struct CE_src_desc  *src_desc;
1929 	qdf_nbuf_t nbuf;
1930 	uint32_t available_buf;
1931 
1932 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
1933 	len = sizeof(struct CE_ring_state);
1934 	available_buf = buf_sz - (buf_cur - buf_init);
1935 	if (available_buf < (len + GUARD_SPACE)) {
1936 		buf_cur = buf_init;
1937 	}
1938 
1939 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
1940 	buf_cur += sizeof(struct CE_ring_state);
1941 
1942 	for (entry = 0; entry < src_ring->nentries; entry++) {
1943 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
1944 		nbuf = src_ring->per_transfer_context[entry];
1945 		if (nbuf) {
1946 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1947 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1948 
1949 			len = sizeof(struct CE_src_desc) + skb_cp_len
1950 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1951 			available_buf = buf_sz - (buf_cur - buf_init);
1952 			if (available_buf < (len + GUARD_SPACE)) {
1953 				buf_cur = buf_init;
1954 			}
1955 			qdf_mem_copy(buf_cur, src_desc,
1956 				     sizeof(struct CE_src_desc));
1957 			buf_cur += sizeof(struct CE_src_desc);
1958 
1959 			available_buf = buf_sz - (buf_cur - buf_init);
1960 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
1961 						skb_cp_len);
1962 
1963 			if (skb_cp_len) {
1964 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1965 					     skb_cp_len);
1966 				buf_cur += skb_cp_len;
1967 			}
1968 		} else {
1969 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
1970 			available_buf = buf_sz - (buf_cur - buf_init);
1971 			if (available_buf < (len + GUARD_SPACE)) {
1972 				buf_cur = buf_init;
1973 			}
1974 			qdf_mem_copy(buf_cur, src_desc,
1975 				     sizeof(struct CE_src_desc));
1976 			buf_cur += sizeof(struct CE_src_desc);
1977 			available_buf = buf_sz - (buf_cur - buf_init);
1978 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1979 		}
1980 	}
1981 
1982 	return buf_cur;
1983 }
1984 
1985 /*
1986  * hif_log_dest_ce_dump() - Copy the whole CE DEST ring to buf
1987  * @dest_ring: DEST ring state
1988  * @buf_cur: Current pointer in ring buffer
1989  * @buf_init: Start of the ring buffer
1990  * @buf_sz: Size of the ring buffer
1991  * @skb_sz: Max size of the SKB data to be copied per entry
1992  *
1993  * Dumps all the CE DEST ring descriptors and the buffers they point to into
1994  * the given buf; skb_sz caps how much of each SKB is copied
1995  *
1996  * Return: Current pointer in ring buffer
1997  */
1998 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
1999 				     uint8_t *buf_cur, uint8_t *buf_init,
2000 				     uint32_t buf_sz, uint32_t skb_sz)
2001 {
2002 	struct CE_dest_desc *dest_ring_base;
2003 	uint32_t len, entry;
2004 	struct CE_dest_desc  *dest_desc;
2005 	qdf_nbuf_t nbuf;
2006 	uint32_t available_buf;
2007 
2008 	dest_ring_base =
2009 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
2010 
2011 	len = sizeof(struct CE_ring_state);
2012 	available_buf = buf_sz - (buf_cur - buf_init);
2013 	if (available_buf < (len + GUARD_SPACE)) {
2014 		buf_cur = buf_init;
2015 	}
2016 
2017 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
2018 	buf_cur += sizeof(struct CE_ring_state);
2019 
2020 	for (entry = 0; entry < dest_ring->nentries; entry++) {
2021 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
2022 
2023 		nbuf = dest_ring->per_transfer_context[entry];
2024 		if (nbuf) {
2025 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
2026 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2027 
2028 			len = sizeof(struct CE_dest_desc) + skb_cp_len
2029 				+ LOG_ID_SZ + sizeof(skb_cp_len);
2030 
2031 			available_buf = buf_sz - (buf_cur - buf_init);
2032 			if (available_buf < (len + GUARD_SPACE)) {
2033 				buf_cur = buf_init;
2034 			}
2035 
2036 			qdf_mem_copy(buf_cur, dest_desc,
2037 				     sizeof(struct CE_dest_desc));
2038 			buf_cur += sizeof(struct CE_dest_desc);
2039 			available_buf = buf_sz - (buf_cur - buf_init);
2040 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
2041 						skb_cp_len);
2042 			if (skb_cp_len) {
2043 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2044 					     skb_cp_len);
2045 				buf_cur += skb_cp_len;
2046 			}
2047 		} else {
2048 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
2049 			available_buf = buf_sz - (buf_cur - buf_init);
2050 			if (available_buf < (len + GUARD_SPACE)) {
2051 				buf_cur = buf_init;
2052 			}
2053 			qdf_mem_copy(buf_cur, dest_desc,
2054 				     sizeof(struct CE_dest_desc));
2055 			buf_cur += sizeof(struct CE_dest_desc);
2056 			available_buf = buf_sz - (buf_cur - buf_init);
2057 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
2058 		}
2059 	}
2060 	return buf_cur;
2061 }
2062 
2063 /**
2064  * hif_log_dump_ce() - Copy the given CE SRC/DEST ring to buf
2065  * Calls the respective helper to dump all the CE SRC/DEST ring descriptors
2066  * and the buffers they point to into the given buf
2067  */
2068 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2069 			 uint8_t *buf_init, uint32_t buf_sz,
2070 			 uint32_t ce, uint32_t skb_sz)
2071 {
2072 	struct CE_state *ce_state;
2073 	struct CE_ring_state *src_ring;
2074 	struct CE_ring_state *dest_ring;
2075 
2076 	ce_state = scn->ce_id_to_state[ce];
2077 	src_ring = ce_state->src_ring;
2078 	dest_ring = ce_state->dest_ring;
2079 
2080 	if (src_ring) {
2081 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
2082 					      buf_init, buf_sz, skb_sz);
2083 	} else if (dest_ring) {
2084 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
2085 					       buf_init, buf_sz, skb_sz);
2086 	}
2087 
2088 	return buf_cur;
2089 }
2090 
2091 qdf_export_symbol(hif_log_dump_ce);
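/*
 * Illustrative sketch (not compiled): a smart-logging style caller dumping
 * every CE ring into one flat buffer through the exported hif_log_dump_ce().
 * The 64-byte per-SKB copy limit and the function name are example choices
 * only. Note that hif_log_dump_ce() does not NULL-check ce_id_to_state[ce],
 * so a real caller must only pass CE ids that have been initialized.
 *
 *	static void dump_all_ce_rings_example(struct hif_softc *scn,
 *					      uint8_t *buf, uint32_t buf_sz)
 *	{
 *		uint8_t *cur = buf;
 *		uint32_t ce;
 *
 *		for (ce = 0; ce < CE_COUNT_MAX; ce++)
 *			cur = hif_log_dump_ce(scn, cur, buf, buf_sz, ce, 64);
 *	}
 */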
2092 #endif /* OL_ATH_SMART_LOGGING */
2093 
2094