xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hif.h"
21 #include "hif_io32.h"
22 #include "ce_api.h"
23 #include "ce_main.h"
24 #include "ce_internal.h"
25 #include "ce_reg.h"
26 #include "qdf_lock.h"
27 #include "regtable.h"
28 #include "hif_main.h"
29 #include "hif_debug.h"
30 #include "hif_napi.h"
31 #include "qdf_module.h"
32 #include <qdf_tracepoint.h>
33 
34 #ifdef IPA_OFFLOAD
35 #ifdef QCA_WIFI_3_0
36 #define CE_IPA_RING_INIT(ce_desc)                       \
37 	do {                                            \
38 		ce_desc->gather = 0;                    \
39 		ce_desc->enable_11h = 0;                \
40 		ce_desc->meta_data_low = 0;             \
41 		ce_desc->packet_result_offset = 64;     \
42 		ce_desc->toeplitz_hash_enable = 0;      \
43 		ce_desc->addr_y_search_disable = 0;     \
44 		ce_desc->addr_x_search_disable = 0;     \
45 		ce_desc->misc_int_disable = 0;          \
46 		ce_desc->target_int_disable = 0;        \
47 		ce_desc->host_int_disable = 0;          \
48 		ce_desc->dest_byte_swap = 0;            \
49 		ce_desc->byte_swap = 0;                 \
50 		ce_desc->type = 2;                      \
51 		ce_desc->tx_classify = 1;               \
52 		ce_desc->buffer_addr_hi = 0;            \
53 		ce_desc->meta_data = 0;                 \
54 		ce_desc->nbytes = 128;                  \
55 	} while (0)
56 #else
57 #define CE_IPA_RING_INIT(ce_desc)                       \
58 	do {                                            \
59 		ce_desc->byte_swap = 0;                 \
60 		ce_desc->nbytes = 60;                   \
61 		ce_desc->gather = 0;                    \
62 	} while (0)
63 #endif /* QCA_WIFI_3_0 */
64 #endif /* IPA_OFFLOAD */
65 
66 static int war1_allow_sleep;
67 /* io32 write workaround */
68 static int hif_ce_war1;
69 
70 /**
71  * hif_ce_war_disable() - disable ce war globally
72  */
73 void hif_ce_war_disable(void)
74 {
75 	hif_ce_war1 = 0;
76 }
77 
78 /**
79  * hif_ce_war_enable() - enable ce war globally
80  */
81 void hif_ce_war_enable(void)
82 {
83 	hif_ce_war1 = 1;
84 }
85 
86 /*
87  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
88  * for being defined here
89  */
90 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
91 
92 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
93 #define CE_DEBUG_DATA_PER_ROW 16
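
/*
 * Sizing note on the macro above: hex_dump_to_buffer() with a group size
 * of 1 emits two hex characters plus one separating space per byte and
 * omits the trailing separator, so x bytes of data need (x * 3) - 1
 * output characters; a full 16-byte row therefore needs
 * (16 * 3) - 1 = 47 characters.
 */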
94 
95 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
96 
97 int get_next_record_index(qdf_atomic_t *table_index, int array_size)
98 {
99 	int record_index = qdf_atomic_inc_return(table_index);
100 
101 	if (record_index == array_size)
102 		qdf_atomic_sub(array_size, table_index);
103 
104 	while (record_index >= array_size)
105 		record_index -= array_size;
106 
107 	return record_index;
108 }
109 
110 qdf_export_symbol(get_next_record_index);
111 
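/*
 * Illustrative example (a sketch, not part of the driver): with
 * array_size = 4 and the shared counter currently at 3, the next two
 * calls return wrapped indices 0 and 1. The call that sees the raw
 * incremented value hit array_size rewinds the shared counter, and any
 * raced-ahead value is folded back into range by the while loop:
 *
 *	int idx_a = get_next_record_index(&hist_index, 4);	// -> 0
 *	int idx_b = get_next_record_index(&hist_index, 4);	// -> 1
 *
 * hist_index above stands in for one of the qdf_atomic_t counters in
 * struct ce_desc_hist (e.g. history_index[ce_id]).
 */
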
112 #ifdef HIF_CE_DEBUG_DATA_BUF
113 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
114 {
115 	uint8_t *data = NULL;
116 
117 	if (!event->data) {
118 		hif_err_rl("No ce debug memory allocated");
119 		return;
120 	}
121 
122 	if (event->memory && len > 0)
123 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
124 
125 	event->actual_data_len = 0;
126 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
127 
128 	if (data && len > 0) {
129 		qdf_mem_copy(event->data, data,
130 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
131 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
132 		event->actual_data_len = len;
133 	}
134 }
135 
136 qdf_export_symbol(hif_ce_desc_data_record);
137 
138 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
139 {
140 	qdf_mem_zero(event,
141 		     offsetof(struct hif_ce_desc_event, data));
142 }
143 
144 qdf_export_symbol(hif_clear_ce_desc_debug_data);
145 #else
146 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
147 {
148 	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
149 }
150 
151 qdf_export_symbol(hif_clear_ce_desc_debug_data);
152 #endif /* HIF_CE_DEBUG_DATA_BUF */
153 
154 #if defined(HIF_RECORD_PADDR)
155 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
156 				 struct hif_ce_desc_event *event,
157 				 qdf_nbuf_t memory)
158 {
159 	if (memory) {
160 		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
161 		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
162 					scn->qdf_dev,
163 					event->dma_addr);
164 
165 		event->virt_to_phy =
166 			virt_to_phys(qdf_nbuf_data(memory));
167 	}
168 }
169 #endif /* HIF_RECORD_PADDR */
170 
171 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx)
172 {
173 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
174 	struct ce_desc_hist *ce_hist;
175 	struct latest_evt_history *evt;
176 	int i;
177 
178 	if (!scn)
179 		return;
180 
181 	ce_hist = &scn->hif_ce_desc_hist;
182 
183 	for (i = 0; i < HIF_CE_MAX_LATEST_HIST; i++) {
184 		if (!ce_hist->enable[i + HIF_CE_MAX_LATEST_HIST])
185 			continue;
186 
187 		evt = &ce_hist->latest_evt[i];
188 		hif_info_high("CE_id:%d cpu_id:%d irq_entry:0x%llx tasklet_entry:0x%llx tasklet_resched:0x%llx tasklet_exit:0x%llx ce_work:0x%llx hp:%x tp:%x",
189 			      (i + HIF_CE_MAX_LATEST_HIST), evt->cpu_id,
190 			      evt->irq_entry_ts, evt->bh_entry_ts,
191 			      evt->bh_resched_ts, evt->bh_exit_ts,
192 			      evt->bh_work_ts, evt->ring_hp, evt->ring_tp);
193 	}
194 }
195 
196 void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
197 			   uint8_t type,
198 			   int ce_id, uint64_t time,
199 			   uint32_t hp, uint32_t tp)
200 {
201 	struct latest_evt_history *latest_evt;
202 
203 	if (ce_id != 2 && ce_id != 3)
204 		return;
205 
206 	latest_evt = &ce_hist->latest_evt[ce_id - HIF_CE_MAX_LATEST_HIST];
207 
208 	switch (type) {
209 	case HIF_IRQ_EVENT:
210 		latest_evt->irq_entry_ts = time;
211 		latest_evt->cpu_id = qdf_get_cpu();
212 		break;
213 	case HIF_CE_TASKLET_ENTRY:
214 		latest_evt->bh_entry_ts = time;
215 		break;
216 	case HIF_CE_TASKLET_RESCHEDULE:
217 		latest_evt->bh_resched_ts = time;
218 		break;
219 	case HIF_CE_TASKLET_EXIT:
220 		latest_evt->bh_exit_ts = time;
221 		break;
222 	case HIF_TX_DESC_COMPLETION:
223 	case HIF_CE_DEST_STATUS_RING_REAP:
224 		latest_evt->bh_work_ts = time;
225 		latest_evt->ring_hp = hp;
226 		latest_evt->ring_tp = tp;
227 		break;
228 	default:
229 		break;
230 	}
231 }
232 
233 /**
234  * hif_record_ce_desc_event() - record ce descriptor events
235  * @scn: hif_softc
236  * @ce_id: which ce is the event occurring on
237  * @type: what happened
238  * @descriptor: pointer to the descriptor posted/completed
239  * @memory: virtual address of buffer related to the descriptor
240  * @index: index that the descriptor was/will be at.
241  * @len: length of the data associated with @memory
242  */
243 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
244 				enum hif_ce_event_type type,
245 				union ce_desc *descriptor,
246 				void *memory, int index,
247 				int len)
248 {
249 	int record_index;
250 	struct hif_ce_desc_event *event;
251 
252 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
253 	struct hif_ce_desc_event *hist_ev = NULL;
254 
255 	if (ce_id >= CE_COUNT_MAX)
256 		return;
257 
258 	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
259 
263 	if (!ce_hist->enable[ce_id])
264 		return;
265 
266 	if (!hist_ev)
267 		return;
268 
269 	record_index = get_next_record_index(
270 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
271 
272 	event = &hist_ev[record_index];
273 
274 	hif_clear_ce_desc_debug_data(event);
275 
276 	event->type = type;
277 	event->time = qdf_get_log_timestamp();
278 	event->cpu_id = qdf_get_cpu();
279 
280 	if (descriptor)
281 		qdf_mem_copy(&event->descriptor, descriptor,
282 			     sizeof(union ce_desc));
283 
284 	event->memory = memory;
285 	event->index = index;
286 
287 	if (event->type == HIF_RX_DESC_POST ||
288 	    event->type == HIF_RX_DESC_COMPLETION)
289 		hif_ce_desc_record_rx_paddr(scn, event, memory);
290 
291 	if (ce_hist->data_enable[ce_id])
292 		hif_ce_desc_data_record(event, len);
293 
294 	hif_record_latest_evt(ce_hist, type, ce_id, event->time, 0, 0);
295 }
296 qdf_export_symbol(hif_record_ce_desc_event);
297 
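/*
 * Usage sketch (illustrative; patterned after the call made from
 * ce_send_single() further down in this file): a TX post is typically
 * recorded just before the write index is advanced:
 *
 *	hif_record_ce_desc_event(scn, ce_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc, msdu,
 *				 write_index, len);
 */
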
298 /**
299  * ce_init_ce_desc_event_log() - initialize the ce event log
300  * @scn: HIF context
301  * @ce_id: copy engine id for which we are initializing the log
302  * @size: size of array to dedicate
303  *
304  * Currently the passed size is ignored in favor of a precompiled value.
305  */
306 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
307 {
308 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
309 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
310 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
311 }
312 
313 /**
314  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
315  * @scn: HIF context
316  * @ce_id: copy engine id for which we are deinitializing the log
317  *
318  */
319 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
320 {
321 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
322 
323 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
324 }
325 
326 #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
327 void hif_record_ce_desc_event(struct hif_softc *scn,
328 		int ce_id, enum hif_ce_event_type type,
329 		union ce_desc *descriptor, void *memory,
330 		int index, int len)
331 {
332 }
333 qdf_export_symbol(hif_record_ce_desc_event);
334 
335 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
336 					int size)
337 {
338 }
339 
340 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
341 {
342 }
343 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
344 
345 #ifdef NAPI_YIELD_BUDGET_BASED
346 bool hif_ce_service_should_yield(struct hif_softc *scn,
347 				 struct CE_state *ce_state)
348 {
349 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
350 
351 	/* Cap receive_count at MAX_NUM_OF_RECEIVES once it goes beyond that
352 	 * limit, to avoid a NAPI budget calculation issue. This can happen
353 	 * in fast path handling as processing happens in batches.
355 	 */
356 	if (yield)
357 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
358 
359 	return yield;
360 }
361 #else
362 /**
363  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
364  * @scn: hif context
365  * @ce_state: context of the copy engine being serviced
366  *
367  * Return: true if the service should yield
368  */
369 bool hif_ce_service_should_yield(struct hif_softc *scn,
370 				 struct CE_state *ce_state)
371 {
372 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
373 
374 	time_limit_reached = qdf_time_sched_clock() >
375 					ce_state->ce_service_yield_time ? 1 : 0;
376 
377 	if (!time_limit_reached)
378 		rxpkt_thresh_reached = hif_max_num_receives_reached
379 					(scn, ce_state->receive_count);
380 
381 	/* Cap receive_count at MAX_NUM_OF_RECEIVES once it goes beyond that
382 	 * limit, to avoid a NAPI budget calculation issue. This can happen
383 	 * in fast path handling as processing happens in batches.
385 	 */
386 	if (rxpkt_thresh_reached)
387 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
388 
389 	yield =  time_limit_reached || rxpkt_thresh_reached;
390 
391 	if (yield &&
392 	    ce_state->htt_rx_data &&
393 	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
394 		hif_napi_update_yield_stats(ce_state,
395 					    time_limit_reached,
396 					    rxpkt_thresh_reached);
397 	}
398 
399 	return yield;
400 }
401 qdf_export_symbol(hif_ce_service_should_yield);
402 #endif
403 
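/*
 * Illustrative use (a sketch; more_work() and process_one_batch() are
 * placeholders, not functions in this file): a CE processing loop
 * consults this helper after each batch and bails out once either the
 * time budget or the receive threshold is exhausted:
 *
 *	while (more_work(ce_state)) {
 *		process_one_batch(scn, ce_state);
 *		if (hif_ce_service_should_yield(scn, ce_state))
 *			break;
 *	}
 */
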
404 /*
405  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
406  * The caller takes responsibility for any needed locking.
407  */
408 
409 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
410 				   u32 ctrl_addr, unsigned int write_index)
411 {
412 	if (hif_ce_war1) {
413 		void __iomem *indicator_addr;
414 
415 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
416 
417 		if (!war1_allow_sleep
418 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
419 			hif_write32_mb(scn, indicator_addr,
420 				       (CDC_WAR_MAGIC_STR | write_index));
421 		} else {
422 			unsigned long irq_flags;
423 
424 			local_irq_save(irq_flags);
425 			hif_write32_mb(scn, indicator_addr, 1);
426 
427 			/*
428 			 * PCIe write waits for ACK in IPQ8K, so there is no
429 			 * need to read back the value.
430 			 */
431 			(void)hif_read32_mb(scn, indicator_addr);
432 			/* conservative */
433 			(void)hif_read32_mb(scn, indicator_addr);
434 
435 			CE_SRC_RING_WRITE_IDX_SET(scn,
436 						  ctrl_addr, write_index);
437 
438 			hif_write32_mb(scn, indicator_addr, 0);
439 			local_irq_restore(irq_flags);
440 		}
441 	} else {
442 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
443 	}
444 }
445 
446 qdf_export_symbol(war_ce_src_ring_write_idx_set);
447 
448 QDF_STATUS
449 ce_send(struct CE_handle *copyeng,
450 		void *per_transfer_context,
451 		qdf_dma_addr_t buffer,
452 		uint32_t nbytes,
453 		uint32_t transfer_id,
454 		uint32_t flags,
455 		uint32_t user_flag)
456 {
457 	struct CE_state *CE_state = (struct CE_state *)copyeng;
458 	QDF_STATUS status;
459 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
460 
461 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
462 	status = hif_state->ce_services->ce_send_nolock(copyeng,
463 			per_transfer_context, buffer, nbytes,
464 			transfer_id, flags, user_flag);
465 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
466 
467 	return status;
468 }
469 qdf_export_symbol(ce_send);
470 
471 unsigned int ce_sendlist_sizeof(void)
472 {
473 	return sizeof(struct ce_sendlist);
474 }
475 
476 void ce_sendlist_init(struct ce_sendlist *sendlist)
477 {
478 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
479 
480 	sl->num_items = 0;
481 }
482 
483 QDF_STATUS
484 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
485 					qdf_dma_addr_t buffer,
486 					uint32_t nbytes,
487 					uint32_t flags,
488 					uint32_t user_flags)
489 {
490 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
491 	unsigned int num_items = sl->num_items;
492 	struct ce_sendlist_item *item;
493 
494 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
495 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
496 		return QDF_STATUS_E_RESOURCES;
497 	}
498 
499 	item = &sl->item[num_items];
500 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
501 	item->data = buffer;
502 	item->u.nbytes = nbytes;
503 	item->flags = flags;
504 	item->user_flags = user_flags;
505 	sl->num_items = num_items + 1;
506 	return QDF_STATUS_SUCCESS;
507 }
508 
509 QDF_STATUS
510 ce_sendlist_send(struct CE_handle *copyeng,
511 		 void *per_transfer_context,
512 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
513 {
514 	struct CE_state *CE_state = (struct CE_state *)copyeng;
515 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
516 
517 	return hif_state->ce_services->ce_sendlist_send(copyeng,
518 			per_transfer_context, sendlist, transfer_id);
519 }
520 
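/*
 * Typical sendlist flow (a sketch; copyeng, ctx, buffer, nbytes and
 * transfer_id are placeholders supplied by the caller):
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	if (ce_sendlist_buf_add(&sendlist, buffer, nbytes, 0, 0) !=
 *	    QDF_STATUS_SUCCESS)
 *		return;
 *	ce_sendlist_send(copyeng, ctx, &sendlist, transfer_id);
 *
 * ce_sendlist_buf_add() fails with QDF_STATUS_E_RESOURCES once
 * CE_SENDLIST_ITEMS_MAX items have been queued on the list.
 */
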
521 #ifndef AH_NEED_TX_DATA_SWAP
522 #define AH_NEED_TX_DATA_SWAP 0
523 #endif
524 
525 /**
526  * ce_batch_send() - send a batch of msdus at once
527  * @ce_tx_hdl: pointer to CE handle
528  * @msdu: list of msdus to be sent
529  * @transfer_id: transfer id
530  * @len: downloaded length
531  * @sendhead: sendhead
532  *
533  * Assumption: called with an array of MSDUs
534  * Function:
535  * For each msdu in the array
536  * 1. Send each msdu
537  * 2. Increment write index accordingly.
538  *
539  * Return: list of msdus not sent
540  */
541 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
542 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
543 {
544 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
545 	struct hif_softc *scn = ce_state->scn;
546 	struct CE_ring_state *src_ring = ce_state->src_ring;
547 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
548 	/*  A_target_id_t targid = TARGID(scn);*/
549 
550 	uint32_t nentries_mask = src_ring->nentries_mask;
551 	uint32_t sw_index, write_index;
552 
553 	struct CE_src_desc *src_desc_base =
554 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
555 	uint32_t *src_desc;
556 
557 	struct CE_src_desc lsrc_desc = {0};
558 	int deltacount = 0;
559 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
560 
561 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
562 	sw_index = src_ring->sw_index;
563 	write_index = src_ring->write_index;
564 
565 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
566 
567 	while (msdu) {
568 		tempnext = qdf_nbuf_next(msdu);
569 
570 		if (deltacount < 2) {
571 			if (sendhead)
572 				return msdu;
573 			hif_err("Out of descriptors");
574 			src_ring->write_index = write_index;
575 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
576 					write_index);
577 
578 			sw_index = src_ring->sw_index;
579 			write_index = src_ring->write_index;
580 
581 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
582 					sw_index-1);
583 			if (!freelist) {
584 				freelist = msdu;
585 				hfreelist = msdu;
586 			} else {
587 				qdf_nbuf_set_next(freelist, msdu);
588 				freelist = msdu;
589 			}
590 			qdf_nbuf_set_next(msdu, NULL);
591 			msdu = tempnext;
592 			continue;
593 		}
594 
595 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
596 				write_index);
597 
598 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
599 
600 		lsrc_desc.meta_data = transfer_id;
601 		if (len  > msdu->len)
602 			len =  msdu->len;
603 		lsrc_desc.nbytes = len;
604 		/*  Data packet is a byte stream, so disable byte swap */
605 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
606 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
607 
608 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
609 
610 
611 		src_ring->per_transfer_context[write_index] = msdu;
612 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
613 
614 		if (sendhead)
615 			break;
616 		qdf_nbuf_set_next(msdu, NULL);
617 		msdu = tempnext;
618 
619 	}
620 
621 
622 	src_ring->write_index = write_index;
623 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
624 
625 	return hfreelist;
626 }
627 
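/*
 * Ring-occupancy arithmetic used above (illustrative numbers; assumes
 * the usual power-of-two ring where CE_RING_DELTA(mask, from, to)
 * evaluates to ((to - from) & mask)): with nentries_mask = 7 (an
 * 8-entry ring), write_index = 6 and sw_index = 2,
 *
 *	CE_RING_DELTA(7, 6, 2 - 1) = (1 - 6) & 7 = 3
 *
 * so three more source descriptors may be posted. Passing
 * "sw_index - 1" keeps one slot permanently unused so that a full ring
 * can be told apart from an empty one.
 */
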
628 /**
629  * ce_update_tx_ring() - Advance sw index.
630  * @ce_tx_hdl : pointer to CE handle
631  * @num_htt_cmpls : htt completions received.
632  *
633  * Function:
634  * Increment the value of sw index of src ring
635  * according to number of htt completions
636  * received.
637  *
638  * Return: void
639  */
640 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
641 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
642 {
643 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
644 	struct CE_ring_state *src_ring = ce_state->src_ring;
645 	uint32_t nentries_mask = src_ring->nentries_mask;
646 	/*
647 	 * Advance the s/w index:
648 	 * This effectively simulates completing the CE ring descriptors
649 	 */
650 	src_ring->sw_index =
651 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
652 				num_htt_cmpls);
653 }
654 #else
655 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
656 {}
657 #endif
658 
659 /**
660  * ce_send_single() - send a single msdu
661  * @ce_tx_hdl: pointer to CE handle
662  * @msdu: msdu to be sent
663  * @transfer_id: transfer id
664  * @len: downloaded length
665  *
666  * Function:
667  * 1. Send one msdu
668  * 2. Increment write index of src ring accordingly.
669  *
670  * Return: QDF_STATUS: CE sent status
671  */
672 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
673 			  uint32_t transfer_id, u_int32_t len)
674 {
675 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
676 	struct hif_softc *scn = ce_state->scn;
677 	struct CE_ring_state *src_ring = ce_state->src_ring;
678 	uint32_t ctrl_addr = ce_state->ctrl_addr;
679 	/*A_target_id_t targid = TARGID(scn);*/
680 
681 	uint32_t nentries_mask = src_ring->nentries_mask;
682 	uint32_t sw_index, write_index;
683 
684 	struct CE_src_desc *src_desc_base =
685 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
686 	uint32_t *src_desc;
687 
688 	struct CE_src_desc lsrc_desc = {0};
689 	enum hif_ce_event_type event_type;
690 
691 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
692 	sw_index = src_ring->sw_index;
693 	write_index = src_ring->write_index;
694 
695 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
696 					sw_index-1) < 1)) {
697 		hif_err("ce send fail %d %d %d", nentries_mask,
698 		       write_index, sw_index);
699 		return QDF_STATUS_E_RESOURCES;
700 	}
701 
702 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
703 
704 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
705 
706 	lsrc_desc.meta_data = transfer_id;
707 	lsrc_desc.nbytes = len;
708 	/*  Data packet is a byte stream, so disable byte swap */
709 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
710 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
711 
712 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
713 
714 
715 	src_ring->per_transfer_context[write_index] = msdu;
716 
717 	if (((struct CE_src_desc *)src_desc)->gather)
718 		event_type = HIF_TX_GATHER_DESC_POST;
719 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
720 		event_type = HIF_TX_DESC_SOFTWARE_POST;
721 	else
722 		event_type = HIF_TX_DESC_POST;
723 
724 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
725 				(union ce_desc *)src_desc, msdu,
726 				write_index, len);
727 
728 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
729 
730 	src_ring->write_index = write_index;
731 
732 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
733 
734 	return QDF_STATUS_SUCCESS;
735 }
736 
737 /**
738  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
739  * @copyeng: copy engine handle
740  * @per_recv_context: virtual address of the nbuf
741  * @buffer: physical address of the nbuf
742  *
743  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
744  */
745 QDF_STATUS
746 ce_recv_buf_enqueue(struct CE_handle *copyeng,
747 		    void *per_recv_context, qdf_dma_addr_t buffer)
748 {
749 	struct CE_state *CE_state = (struct CE_state *)copyeng;
750 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
751 
752 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
753 			per_recv_context, buffer);
754 }
755 qdf_export_symbol(ce_recv_buf_enqueue);
756 
757 void
758 ce_send_watermarks_set(struct CE_handle *copyeng,
759 		       unsigned int low_alert_nentries,
760 		       unsigned int high_alert_nentries)
761 {
762 	struct CE_state *CE_state = (struct CE_state *)copyeng;
763 	uint32_t ctrl_addr = CE_state->ctrl_addr;
764 	struct hif_softc *scn = CE_state->scn;
765 
766 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
767 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
768 }
769 
770 void
771 ce_recv_watermarks_set(struct CE_handle *copyeng,
772 		       unsigned int low_alert_nentries,
773 		       unsigned int high_alert_nentries)
774 {
775 	struct CE_state *CE_state = (struct CE_state *)copyeng;
776 	uint32_t ctrl_addr = CE_state->ctrl_addr;
777 	struct hif_softc *scn = CE_state->scn;
778 
779 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
780 				low_alert_nentries);
781 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
782 				high_alert_nentries);
783 }
784 
785 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
786 {
787 	struct CE_state *CE_state = (struct CE_state *)copyeng;
788 	struct CE_ring_state *src_ring = CE_state->src_ring;
789 	unsigned int nentries_mask = src_ring->nentries_mask;
790 	unsigned int sw_index;
791 	unsigned int write_index;
792 
793 	qdf_spin_lock(&CE_state->ce_index_lock);
794 	sw_index = src_ring->sw_index;
795 	write_index = src_ring->write_index;
796 	qdf_spin_unlock(&CE_state->ce_index_lock);
797 
798 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
799 }
800 
801 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
802 {
803 	struct CE_state *CE_state = (struct CE_state *)copyeng;
804 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
805 	unsigned int nentries_mask = dest_ring->nentries_mask;
806 	unsigned int sw_index;
807 	unsigned int write_index;
808 
809 	qdf_spin_lock(&CE_state->ce_index_lock);
810 	sw_index = dest_ring->sw_index;
811 	write_index = dest_ring->write_index;
812 	qdf_spin_unlock(&CE_state->ce_index_lock);
813 
814 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
815 }
816 
817 /*
818  * Guts of ce_completed_recv_next.
819  * The caller takes responsibility for any necessary locking.
820  */
821 QDF_STATUS
822 ce_completed_recv_next(struct CE_handle *copyeng,
823 		       void **per_CE_contextp,
824 		       void **per_transfer_contextp,
825 		       qdf_dma_addr_t *bufferp,
826 		       unsigned int *nbytesp,
827 		       unsigned int *transfer_idp, unsigned int *flagsp)
828 {
829 	struct CE_state *CE_state = (struct CE_state *)copyeng;
830 	QDF_STATUS status;
831 	struct hif_softc *scn = CE_state->scn;
832 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
833 	struct ce_ops *ce_services;
834 
835 	ce_services = hif_state->ce_services;
836 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
837 	status =
838 		ce_services->ce_completed_recv_next_nolock(CE_state,
839 				per_CE_contextp, per_transfer_contextp, bufferp,
840 					      nbytesp, transfer_idp, flagsp);
841 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
842 
843 	return status;
844 }
845 
846 QDF_STATUS
847 ce_revoke_recv_next(struct CE_handle *copyeng,
848 		    void **per_CE_contextp,
849 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
850 {
851 	struct CE_state *CE_state = (struct CE_state *)copyeng;
852 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
853 
854 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
855 			per_CE_contextp, per_transfer_contextp, bufferp);
856 }
857 
858 QDF_STATUS
859 ce_cancel_send_next(struct CE_handle *copyeng,
860 		void **per_CE_contextp,
861 		void **per_transfer_contextp,
862 		qdf_dma_addr_t *bufferp,
863 		unsigned int *nbytesp,
864 		unsigned int *transfer_idp,
865 		uint32_t *toeplitz_hash_result)
866 {
867 	struct CE_state *CE_state = (struct CE_state *)copyeng;
868 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
869 
870 	return hif_state->ce_services->ce_cancel_send_next
871 		(copyeng, per_CE_contextp, per_transfer_contextp,
872 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
873 }
874 qdf_export_symbol(ce_cancel_send_next);
875 
876 QDF_STATUS
877 ce_completed_send_next(struct CE_handle *copyeng,
878 		       void **per_CE_contextp,
879 		       void **per_transfer_contextp,
880 		       qdf_dma_addr_t *bufferp,
881 		       unsigned int *nbytesp,
882 		       unsigned int *transfer_idp,
883 		       unsigned int *sw_idx,
884 		       unsigned int *hw_idx,
885 		       unsigned int *toeplitz_hash_result)
886 {
887 	struct CE_state *CE_state = (struct CE_state *)copyeng;
888 	struct hif_softc *scn = CE_state->scn;
889 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
890 	struct ce_ops *ce_services;
891 	QDF_STATUS status;
892 
893 	ce_services = hif_state->ce_services;
894 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
895 	status =
896 		ce_services->ce_completed_send_next_nolock(CE_state,
897 					per_CE_contextp, per_transfer_contextp,
898 					bufferp, nbytesp, transfer_idp, sw_idx,
899 					      hw_idx, toeplitz_hash_result);
900 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
901 
902 	return status;
903 }
904 
905 #ifdef ATH_11AC_TXCOMPACT
906 /* CE engine descriptor reap
907  * Similar to ce_per_engine_service; the only difference is that
908  * ce_per_engine_service does receive and reaping of completed descriptors,
909  * while this function only handles reaping of Tx complete descriptors.
910  * The function is called from the threshold reap poll routine
911  * hif_send_complete_check, so it should not contain receive functionality
912  * within it.
913  */
914 
915 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
916 {
917 	void *CE_context;
918 	void *transfer_context;
919 	qdf_dma_addr_t buf;
920 	unsigned int nbytes;
921 	unsigned int id;
922 	unsigned int sw_idx, hw_idx;
923 	uint32_t toeplitz_hash_result;
924 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
925 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
926 
927 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
928 		return;
929 
930 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
931 			NULL, NULL, 0, 0);
932 
933 	/* Since this function is called from both user context and
934 	 * tasklet context, the spinlock has to lock the bottom halves.
935 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
936 	 * enabled in TX polling mode. If this is not the case, more
937 	 * bottom-half spinlock changes are needed. Due to data path
938 	 * performance concerns, after internal discussion we decided
939 	 * to make the minimum change, i.e., only address the issue seen
940 	 * in this function. The possible negative effect of this minimum
941 	 * change is that, in the future, if some other function is also
942 	 * opened up for use from user context, those cases will need to be
943 	 * addressed by changing spin_lock to spin_lock_bh as well.
944 	 */
945 
946 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
947 
948 	if (CE_state->send_cb) {
949 		{
950 			struct ce_ops *ce_services = hif_state->ce_services;
951 			/* Pop completed send buffers and call the
952 			 * registered send callback for each
953 			 */
954 			while (ce_services->ce_completed_send_next_nolock
955 				 (CE_state, &CE_context,
956 				  &transfer_context, &buf,
957 				  &nbytes, &id, &sw_idx, &hw_idx,
958 				  &toeplitz_hash_result) ==
959 				  QDF_STATUS_SUCCESS) {
960 				if (ce_id != CE_HTT_H2T_MSG) {
961 					qdf_spin_unlock_bh(
962 						&CE_state->ce_index_lock);
963 					CE_state->send_cb(
964 						(struct CE_handle *)
965 						CE_state, CE_context,
966 						transfer_context, buf,
967 						nbytes, id, sw_idx, hw_idx,
968 						toeplitz_hash_result);
969 					qdf_spin_lock_bh(
970 						&CE_state->ce_index_lock);
971 				} else {
972 					struct HIF_CE_pipe_info *pipe_info =
973 						(struct HIF_CE_pipe_info *)
974 						CE_context;
975 
976 					qdf_spin_lock_bh(&pipe_info->
977 						 completion_freeq_lock);
978 					pipe_info->num_sends_allowed++;
979 					qdf_spin_unlock_bh(&pipe_info->
980 						   completion_freeq_lock);
981 				}
982 			}
983 		}
984 	}
985 
986 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
987 
988 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
989 			NULL, NULL, 0, 0);
990 	Q_TARGET_ACCESS_END(scn);
991 }
992 
993 #endif /*ATH_11AC_TXCOMPACT */
994 
995 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
996 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
997 	/* QDF_IS_EPPING_ENABLED is a pre-Lithium feature and CE4 completion
998 	 * is enabled only on Lithium and later targets, so there is no need
999 	 * to check for EPPING here. */
1000 	// so no need to check for EPPING
1001 	return true;
1002 }
1003 
1004 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1005 
1006 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
1007 {
1008 	if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode))
1009 		return true;
1010 	else
1011 		return false;
1012 }
1013 
1014 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1015 
1016 /*
1017  * ce_engine_service_reg:
1018  *
1019  * Called from ce_per_engine_service and goes through the regular interrupt
1020  * handling that does not involve the WLAN fast path feature.
1021  *
1022  * Returns void
1023  */
1024 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
1025 {
1026 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1027 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1028 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1029 	void *CE_context;
1030 	void *transfer_context;
1031 	qdf_dma_addr_t buf;
1032 	unsigned int nbytes;
1033 	unsigned int id;
1034 	unsigned int flags;
1035 	unsigned int more_comp_cnt = 0;
1036 	unsigned int more_snd_comp_cnt = 0;
1037 	unsigned int sw_idx, hw_idx;
1038 	uint32_t toeplitz_hash_result;
1039 	uint32_t mode = hif_get_conparam(scn);
1040 
1041 more_completions:
1042 	if (CE_state->recv_cb) {
1043 
1044 		/* Pop completed recv buffers and call
1045 		 * the registered recv callback for each
1046 		 */
1047 		while (hif_state->ce_services->ce_completed_recv_next_nolock
1048 				(CE_state, &CE_context, &transfer_context,
1049 				&buf, &nbytes, &id, &flags) ==
1050 				QDF_STATUS_SUCCESS) {
1051 			qdf_spin_unlock(&CE_state->ce_index_lock);
1052 			CE_state->recv_cb((struct CE_handle *)CE_state,
1053 					  CE_context, transfer_context, buf,
1054 					  nbytes, id, flags);
1055 
1056 			qdf_spin_lock(&CE_state->ce_index_lock);
1057 			/*
1058 			 * EV #112693 -
1059 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
1060 			 * BSoD_0x133 occurred in VHT80 UDP_DL
1061 			 * Break out DPC by force if number of loops in
1062 			 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
1063 			 * to avoid spending too long time in
1064 			 * DPC for each interrupt handling. Schedule another
1065 			 * DPC to avoid data loss if we had taken
1066 			 * force-break action before apply to Windows OS
1067 			 * only currently, Linux/MAC os can expand to their
1068 			 * platform if necessary
1069 			 */
1070 
1071 			/* Break the receive processes by
1072 			 * force if force_break set up
1073 			 */
1074 			if (qdf_unlikely(CE_state->force_break)) {
1075 				qdf_atomic_set(&CE_state->rx_pending, 1);
1076 				return;
1077 			}
1078 		}
1079 	}
1080 
1081 	/*
1082 	 * Attention: we may experience a potential infinite loop in the
1083 	 * while loop below during a sending stress test.
1084 	 * Resolve it the same way as the receive case (refer to EV #112693).
1085 	 */
1086 
1087 	if (CE_state->send_cb) {
1088 		/* Pop completed send buffers and call
1089 		 * the registered send callback for each
1090 		 */
1091 
1092 #ifdef ATH_11AC_TXCOMPACT
1093 		while (hif_state->ce_services->ce_completed_send_next_nolock
1094 			 (CE_state, &CE_context,
1095 			 &transfer_context, &buf, &nbytes,
1096 			 &id, &sw_idx, &hw_idx,
1097 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1098 
1099 			if (check_ce_id_and_epping_enabled(CE_id, mode)) {
1100 				qdf_spin_unlock(&CE_state->ce_index_lock);
1101 				CE_state->send_cb((struct CE_handle *)CE_state,
1102 						  CE_context, transfer_context,
1103 						  buf, nbytes, id, sw_idx,
1104 						  hw_idx, toeplitz_hash_result);
1105 				qdf_spin_lock(&CE_state->ce_index_lock);
1106 			} else {
1107 				struct HIF_CE_pipe_info *pipe_info =
1108 					(struct HIF_CE_pipe_info *)CE_context;
1109 
1110 				qdf_spin_lock_bh(&pipe_info->
1111 					      completion_freeq_lock);
1112 				pipe_info->num_sends_allowed++;
1113 				qdf_spin_unlock_bh(&pipe_info->
1114 						completion_freeq_lock);
1115 			}
1116 		}
1117 #else                           /*ATH_11AC_TXCOMPACT */
1118 		while (hif_state->ce_services->ce_completed_send_next_nolock
1119 			 (CE_state, &CE_context,
1120 			  &transfer_context, &buf, &nbytes,
1121 			  &id, &sw_idx, &hw_idx,
1122 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1123 			qdf_spin_unlock(&CE_state->ce_index_lock);
1124 			CE_state->send_cb((struct CE_handle *)CE_state,
1125 				  CE_context, transfer_context, buf,
1126 				  nbytes, id, sw_idx, hw_idx,
1127 				  toeplitz_hash_result);
1128 			qdf_spin_lock(&CE_state->ce_index_lock);
1129 		}
1130 #endif /*ATH_11AC_TXCOMPACT */
1131 	}
1132 
1133 more_watermarks:
1134 	if (CE_state->misc_cbs) {
1135 		if (CE_state->watermark_cb &&
1136 				hif_state->ce_services->watermark_int(CE_state,
1137 					&flags)) {
1138 			qdf_spin_unlock(&CE_state->ce_index_lock);
1139 			/* Convert HW IS bits to software flags */
1140 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1141 					CE_state->wm_context, flags);
1142 			qdf_spin_lock(&CE_state->ce_index_lock);
1143 		}
1144 	}
1145 
1146 	/*
1147 	 * Clear the misc interrupts (watermark) that were handled above,
1148 	 * and that will be checked again below.
1149 	 * Clear and check for copy-complete interrupts again, just in case
1150 	 * more copy completions happened while the misc interrupts were being
1151 	 * handled.
1152 	 */
1153 	if (!ce_srng_based(scn)) {
1154 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1155 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1156 					   CE_WATERMARK_MASK |
1157 					   HOST_IS_COPY_COMPLETE_MASK);
1158 		} else {
1159 			qdf_atomic_set(&CE_state->rx_pending, 0);
1160 			hif_err_rl("%s: target access is not allowed",
1161 				   __func__);
1162 			return;
1163 		}
1164 	}
1165 
1166 	/*
1167 	 * Now that per-engine interrupts are cleared, verify that
1168 	 * no recv interrupts arrive while processing send interrupts,
1169 	 * and no recv or send interrupts happened while processing
1170 	 * misc interrupts. Go back and check again. Keep checking until
1171 	 * we find no more events to process.
1172 	 */
1173 	if (CE_state->recv_cb &&
1174 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1175 				CE_state)) {
1176 		if (QDF_IS_EPPING_ENABLED(mode) ||
1177 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1178 			goto more_completions;
1179 		} else {
1180 			if (!ce_srng_based(scn)) {
1181 				hif_err_rl(
1182 					"Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1183 					CE_state->id,
1184 					CE_state->dest_ring->nentries_mask,
1185 					CE_state->dest_ring->sw_index,
1186 					CE_DEST_RING_READ_IDX_GET(scn,
1187 							  CE_state->ctrl_addr));
1188 			}
1189 		}
1190 	}
1191 
1192 	if (CE_state->send_cb &&
1193 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1194 				CE_state)) {
1195 		if (QDF_IS_EPPING_ENABLED(mode) ||
1196 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1197 			goto more_completions;
1198 		} else {
1199 			if (!ce_srng_based(scn)) {
1200 				hif_err_rl(
1201 					"Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x",
1202 					CE_state->id,
1203 					CE_state->src_ring->nentries_mask,
1204 					CE_state->src_ring->sw_index,
1205 					CE_state->src_ring->hw_index,
1206 					CE_state->src_ring->write_index,
1207 					CE_SRC_RING_READ_IDX_GET(scn,
1208 							 CE_state->ctrl_addr));
1209 			}
1210 		}
1211 	}
1212 
1213 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1214 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1215 			goto more_watermarks;
1216 	}
1217 
1218 	qdf_atomic_set(&CE_state->rx_pending, 0);
1219 }
1220 
1221 #ifdef WLAN_TRACEPOINTS
1222 /**
1223  * ce_trace_tasklet_sched_latency() - Trace ce tasklet scheduling
1224  *  latency
1225  * @ce_state: CE context
1226  *
1227  * Return: None
1228  */
1229 static inline
1230 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
1231 {
1232 	qdf_trace_dp_ce_tasklet_sched_latency(ce_state->id,
1233 					      ce_state->ce_service_start_time -
1234 					      ce_state->ce_tasklet_sched_time);
1235 }
1236 #else
1237 static inline
1238 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
1239 {
1240 }
1241 #endif
1242 
1243 /*
1244  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1245  *
1246  * Invokes registered callbacks for recv_complete,
1247  * send_complete, and watermarks.
1248  *
1249  * Returns: number of messages processed
1250  */
1251 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1252 {
1253 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1254 
1255 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1256 		return CE_state->receive_count;
1257 
1258 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1259 		hif_err("[premature rc=0]");
1260 		return 0; /* no work done */
1261 	}
1262 
1263 	/* Clear force_break flag and re-initialize receive_count to 0 */
1264 	CE_state->receive_count = 0;
1265 	CE_state->force_break = 0;
1266 	CE_state->ce_service_start_time = qdf_time_sched_clock();
1267 	CE_state->ce_service_yield_time =
1268 		CE_state->ce_service_start_time +
1269 		hif_get_ce_service_max_yield_time(
1270 			(struct hif_opaque_softc *)scn);
1271 
1272 	ce_trace_tasklet_sched_latency(CE_state);
1273 
1274 	qdf_spin_lock(&CE_state->ce_index_lock);
1275 
1276 	CE_state->service(scn, CE_id);
1277 
1278 	qdf_spin_unlock(&CE_state->ce_index_lock);
1279 
1280 	if (Q_TARGET_ACCESS_END(scn) < 0)
1281 		hif_err("<--[premature rc=%d]", CE_state->receive_count);
1282 	return CE_state->receive_count;
1283 }
1284 qdf_export_symbol(ce_per_engine_service);
1285 
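/*
 * Illustrative caller (a sketch; the real bottom-half handling lives in
 * ce_tasklet.c and carries more bookkeeping): a per-CE tasklet or NAPI
 * poll handler simply forwards its CE id here and lets the registered
 * CE_state->service routine do the work:
 *
 *	num_processed = ce_per_engine_service(scn, ce_id);
 */
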
1286 /*
1287  * Handler for per-engine interrupts on ALL active CEs.
1288  * This is used in cases where the system is sharing a
1289  * single interrupt for all CEs
1290  */
1291 
1292 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1293 {
1294 	int CE_id;
1295 	uint32_t intr_summary;
1296 
1297 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1298 		return;
1299 
1300 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1301 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1302 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1303 
1304 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1305 				qdf_atomic_set(&CE_state->rx_pending, 0);
1306 				ce_per_engine_service(scn, CE_id);
1307 			}
1308 		}
1309 
1310 		Q_TARGET_ACCESS_END(scn);
1311 		return;
1312 	}
1313 
1314 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1315 
1316 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1317 		if (intr_summary & (1 << CE_id))
1318 			intr_summary &= ~(1 << CE_id);
1319 		else
1320 			continue;       /* no intr pending on this CE */
1321 
1322 		ce_per_engine_service(scn, CE_id);
1323 	}
1324 
1325 	Q_TARGET_ACCESS_END(scn);
1326 }
1327 
1328 /* Iterate the CE_state list and disable the compl interrupt
1329  * if it has been registered already.
1330  */
1331 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1332 {
1333 	int CE_id;
1334 
1335 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1336 		return;
1337 
1338 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1339 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1340 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1341 
1342 		/* if the interrupt is currently enabled, disable it */
1343 		if (!CE_state->disable_copy_compl_intr
1344 		    && (CE_state->send_cb || CE_state->recv_cb))
1345 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1346 
1347 		if (CE_state->watermark_cb)
1348 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1349 	}
1350 	Q_TARGET_ACCESS_END(scn);
1351 }
1352 
1353 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1354 {
1355 	int CE_id;
1356 
1357 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1358 		return;
1359 
1360 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1361 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1362 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1363 
1364 		/*
1365 		 * If the CE is supposed to have copy complete interrupts
1366 		 * enabled (i.e. there a callback registered, and the
1367 		 * "disable" flag is not set), then re-enable the interrupt.
1368 		 */
1369 		if (!CE_state->disable_copy_compl_intr
1370 		    && (CE_state->send_cb || CE_state->recv_cb))
1371 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1372 
1373 		if (CE_state->watermark_cb)
1374 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1375 	}
1376 	Q_TARGET_ACCESS_END(scn);
1377 }
1378 
1379 /**
1380  * ce_send_cb_register(): register completion handler
1381  * @copyeng: CE_state representing the ce we are adding the behavior to
1382  * @fn_ptr: callback that the ce should use when processing tx completions
1383  * @ce_send_context: context to pass back in the callback
1384  * @disable_interrupts: if the interrupts should be enabled or not.
1385  *
1386  * Caller should guarantee that no transactions are in progress before
1387  * switching the callback function.
1388  *
1389  * Registers the send context before the fn pointer so that if the cb is valid
1390  * the context should be valid.
1391  *
1392  * Beware that currently this function will enable completion interrupts.
1393  */
1394 void
1395 ce_send_cb_register(struct CE_handle *copyeng,
1396 		    ce_send_cb fn_ptr,
1397 		    void *ce_send_context, int disable_interrupts)
1398 {
1399 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1400 	struct hif_softc *scn;
1401 	struct HIF_CE_state *hif_state;
1402 
1403 	if (!CE_state) {
1404 		hif_err("Error CE state = NULL");
1405 		return;
1406 	}
1407 	scn = CE_state->scn;
1408 	hif_state = HIF_GET_CE_STATE(scn);
1409 	if (!hif_state) {
1410 		hif_err("Error HIF state = NULL");
1411 		return;
1412 	}
1413 	CE_state->send_context = ce_send_context;
1414 	CE_state->send_cb = fn_ptr;
1415 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1416 							disable_interrupts);
1417 }
1418 qdf_export_symbol(ce_send_cb_register);
1419 
1420 /**
1421  * ce_recv_cb_register(): register completion handler
1422  * @copyeng: CE_state representing the ce we are adding the behavior to
1423  * @fn_ptr: callback that the ce should use when processing rx completions
1424  * @CE_recv_context: context to pass back in the callback
1425  * @disable_interrupts: if the interrupts should be enabled or not.
1426  *
1427  * Registers the send context before the fn pointer so that if the cb is valid
1428  * the context should be valid.
1429  *
1430  * Caller should guarantee that no transactions are in progress before
1431  * switching the callback function.
1432  */
1433 void
1434 ce_recv_cb_register(struct CE_handle *copyeng,
1435 		    CE_recv_cb fn_ptr,
1436 		    void *CE_recv_context, int disable_interrupts)
1437 {
1438 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1439 	struct hif_softc *scn;
1440 	struct HIF_CE_state *hif_state;
1441 
1442 	if (!CE_state) {
1443 		hif_err("ERROR CE state = NULL");
1444 		return;
1445 	}
1446 	scn = CE_state->scn;
1447 	hif_state = HIF_GET_CE_STATE(scn);
1448 	if (!hif_state) {
1449 		hif_err("Error HIF state = NULL");
1450 		return;
1451 	}
1452 	CE_state->recv_context = CE_recv_context;
1453 	CE_state->recv_cb = fn_ptr;
1454 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1455 							disable_interrupts);
1456 }
1457 qdf_export_symbol(ce_recv_cb_register);
1458 
1459 /**
1460  * ce_watermark_cb_register(): register completion handler
1461  * @copyeng: CE_state representing the ce we are adding the behavior to
1462  * @fn_ptr: callback that the ce should use when processing watermark events
1463  * @CE_wm_context: context to pass back in the callback
1464  *
1465  * Caller should guarantee that no watermark events are being processed before
1466  * switching the callback function.
1467  */
1468 void
1469 ce_watermark_cb_register(struct CE_handle *copyeng,
1470 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1471 {
1472 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1473 	struct hif_softc *scn = CE_state->scn;
1474 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1475 
1476 	CE_state->watermark_cb = fn_ptr;
1477 	CE_state->wm_context = CE_wm_context;
1478 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1479 							0);
1480 	if (fn_ptr)
1481 		CE_state->misc_cbs = 1;
1482 }
1483 
1484 bool ce_get_rx_pending(struct hif_softc *scn)
1485 {
1486 	int CE_id;
1487 
1488 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1489 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1490 
1491 		if (qdf_atomic_read(&CE_state->rx_pending))
1492 			return true;
1493 	}
1494 
1495 	return false;
1496 }
1497 
1498 /**
1499  * ce_check_rx_pending() - ce_check_rx_pending
1500  * @CE_state: context of the copy engine to check
1501  *
1502  * Return: true if the per_engine_service
1503  *	didn't process all the rx descriptors.
1504  */
1505 bool ce_check_rx_pending(struct CE_state *CE_state)
1506 {
1507 	if (qdf_atomic_read(&CE_state->rx_pending))
1508 		return true;
1509 	else
1510 		return false;
1511 }
1512 qdf_export_symbol(ce_check_rx_pending);
1513 
1514 #ifdef IPA_OFFLOAD
1515 #ifdef QCN7605_SUPPORT
1516 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1517 {
1518 	u_int32_t ctrl_addr = CE_state->ctrl_addr;
1519 	struct hif_softc *scn = CE_state->scn;
1520 	qdf_dma_addr_t wr_index_addr;
1521 
1522 	wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
1523 	return wr_index_addr;
1524 }
1525 #else
1526 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1527 {
1528 	struct hif_softc *scn = CE_state->scn;
1529 	qdf_dma_addr_t wr_index_addr;
1530 
1531 	wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
1532 			SR_WR_INDEX_ADDRESS;
1533 	return wr_index_addr;
1534 }
1535 #endif
1536 
1537 /**
1538  * ce_ipa_get_resource() - get uc resource on copyengine
1539  * @ce: copyengine context
1540  * @ce_sr: copyengine source ring resource info
1541  * @ce_sr_ring_size: copyengine source ring size
1542  * @ce_reg_paddr: copyengine register physical address
1543  *
1544  * Copy engine should release resources to the micro controller.
1545  * The micro controller needs:
1546  *  - Copy engine source descriptor base address
1547  *  - Copy engine source descriptor size
1548  *  - PCI BAR address to access copy engine register
1549  *
1550  * Return: None
1551  */
1552 void ce_ipa_get_resource(struct CE_handle *ce,
1553 			 qdf_shared_mem_t **ce_sr,
1554 			 uint32_t *ce_sr_ring_size,
1555 			 qdf_dma_addr_t *ce_reg_paddr)
1556 {
1557 	struct CE_state *CE_state = (struct CE_state *)ce;
1558 	uint32_t ring_loop;
1559 	struct CE_src_desc *ce_desc;
1560 	qdf_dma_addr_t phy_mem_base;
1561 	struct hif_softc *scn = CE_state->scn;
1562 
1563 	if (CE_UNUSED == CE_state->state) {
1564 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1565 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1566 		*ce_sr_ring_size = 0;
1567 		return;
1568 	}
1569 
1570 	/* Update default value for descriptor */
1571 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1572 	     ring_loop++) {
1573 		ce_desc = (struct CE_src_desc *)
1574 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1575 			   ring_loop * (sizeof(struct CE_src_desc)));
1576 		CE_IPA_RING_INIT(ce_desc);
1577 	}
1578 
1579 	/* Get BAR address */
1580 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1581 
1582 	*ce_sr = CE_state->scn->ipa_ce_ring;
1583 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1584 		sizeof(struct CE_src_desc));
1585 	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);
1586 
1587 }
1588 
1589 #endif /* IPA_OFFLOAD */
1590 
1591 #ifdef HIF_CE_DEBUG_DATA_BUF
1592 /**
1593  * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
1594  * @buf: buffer to copy to
1595  * @pos: Current position till which the buf is filled
1596  * @data: Data to be copied
1597  * @data_len: Length of the data to be copied
1598  */
1599 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1600 					uint8_t *data, uint32_t data_len)
1601 {
1602 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1603 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1604 
1605 	if ((data_len > 0) && data) {
1606 		if (data_len < 16) {
1607 			hex_dump_to_buffer(data,
1608 						CE_DEBUG_DATA_PER_ROW,
1609 						16, 1, buf + pos,
1610 						(ssize_t)PAGE_SIZE - pos,
1611 						false);
1612 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1613 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1614 		} else {
1615 			uint32_t rows = (data_len / 16) + 1;
1616 			uint32_t row = 0;
1617 
1618 			for (row = 0; row < rows; row++) {
1619 				hex_dump_to_buffer(data + (row * 16),
1620 							CE_DEBUG_DATA_PER_ROW,
1621 							16, 1, buf + pos,
1622 							(ssize_t)PAGE_SIZE
1623 							- pos, false);
1624 				pos +=
1625 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1626 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1627 						"\n");
1628 			}
1629 		}
1630 	}
1631 
1632 	return pos;
1633 }
1634 #endif
1635 
1636 /*
1637  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1638  * for defined here
1639  */
1640 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1641 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1642 {
1643 	switch (type) {
1644 	case HIF_RX_DESC_POST:
1645 		return "HIF_RX_DESC_POST";
1646 	case HIF_RX_DESC_COMPLETION:
1647 		return "HIF_RX_DESC_COMPLETION";
1648 	case HIF_TX_GATHER_DESC_POST:
1649 		return "HIF_TX_GATHER_DESC_POST";
1650 	case HIF_TX_DESC_POST:
1651 		return "HIF_TX_DESC_POST";
1652 	case HIF_TX_DESC_SOFTWARE_POST:
1653 		return "HIF_TX_DESC_SOFTWARE_POST";
1654 	case HIF_TX_DESC_COMPLETION:
1655 		return "HIF_TX_DESC_COMPLETION";
1656 	case FAST_RX_WRITE_INDEX_UPDATE:
1657 		return "FAST_RX_WRITE_INDEX_UPDATE";
1658 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1659 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1660 	case FAST_TX_WRITE_INDEX_UPDATE:
1661 		return "FAST_TX_WRITE_INDEX_UPDATE";
1662 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1663 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1664 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1665 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1666 	case RESUME_WRITE_INDEX_UPDATE:
1667 		return "RESUME_WRITE_INDEX_UPDATE";
1668 	case HIF_IRQ_EVENT:
1669 		return "HIF_IRQ_EVENT";
1670 	case HIF_CE_TASKLET_ENTRY:
1671 		return "HIF_CE_TASKLET_ENTRY";
1672 	case HIF_CE_TASKLET_RESCHEDULE:
1673 		return "HIF_CE_TASKLET_RESCHEDULE";
1674 	case HIF_CE_TASKLET_EXIT:
1675 		return "HIF_CE_TASKLET_EXIT";
1676 	case HIF_CE_REAP_ENTRY:
1677 		return "HIF_CE_REAP_ENTRY";
1678 	case HIF_CE_REAP_EXIT:
1679 		return "HIF_CE_REAP_EXIT";
1680 	case NAPI_SCHEDULE:
1681 		return "NAPI_SCHEDULE";
1682 	case NAPI_POLL_ENTER:
1683 		return "NAPI_POLL_ENTER";
1684 	case NAPI_COMPLETE:
1685 		return "NAPI_COMPLETE";
1686 	case NAPI_POLL_EXIT:
1687 		return "NAPI_POLL_EXIT";
1688 	case HIF_RX_NBUF_ALLOC_FAILURE:
1689 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1690 	case HIF_RX_NBUF_MAP_FAILURE:
1691 		return "HIF_RX_NBUF_MAP_FAILURE";
1692 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1693 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1694 	default:
1695 		return "invalid";
1696 	}
1697 }
1698 
1699 /**
1700  * hif_dump_desc_event() - dump the selected ce descriptor event
1701  * @scn: HIF context
1702  * @buf: buffer into which the event is dumped
1703  */
1704 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1705 {
1706 	struct hif_ce_desc_event *event;
1707 	uint64_t secs, usecs;
1708 	ssize_t len = 0;
1709 	struct ce_desc_hist *ce_hist = NULL;
1710 	struct hif_ce_desc_event *hist_ev = NULL;
1711 
1712 	if (!scn)
1713 		return -EINVAL;
1714 
1715 	ce_hist = &scn->hif_ce_desc_hist;
1716 
1717 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1718 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1719 		qdf_print("Invalid values");
1720 		return -EINVAL;
1721 	}
1722 
1723 	hist_ev =
1724 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1725 
1726 	if (!hist_ev) {
1727 		qdf_print("Low Memory");
1728 		return -EINVAL;
1729 	}
1730 
1731 	event = &hist_ev[ce_hist->hist_index];
1732 
1733 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1734 
1735 	len += snprintf(buf, PAGE_SIZE - len,
1736 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1737 			secs, usecs, ce_hist->hist_id,
1738 			ce_event_type_to_str(event->type),
1739 			event->index, event->memory);
1740 #ifdef HIF_CE_DEBUG_DATA_BUF
1741 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
1742 			event->actual_data_len);
1743 #endif
1744 
1745 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1746 
1747 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1748 				16, 1, buf + len,
1749 				(ssize_t)PAGE_SIZE - len, false);
1750 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1751 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1752 
1753 #ifdef HIF_CE_DEBUG_DATA_BUF
1754 	if (ce_hist->data_enable[ce_hist->hist_id])
1755 		len = hif_dump_desc_data_buf(buf, len, event->data,
1756 						(event->actual_data_len <
1757 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1758 						event->actual_data_len :
1759 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1760 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1761 
1762 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1763 
1764 	return len;
1765 }
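
/*
 * Minimal sketch of the read side (illustration only; the wrapper below is
 * hypothetical and not part of this file). A platform "show" hook is
 * expected to pass a PAGE_SIZE buffer and forward the return value, which
 * is the number of bytes written or a negative errno.
 */
#if 0
static ssize_t ce_desc_event_show_example(struct hif_softc *scn, char *page)
{
	/* page must be at least PAGE_SIZE bytes */
	ssize_t len = hif_dump_desc_event(scn, page);

	if (len < 0)
		qdf_err("CE desc event dump failed: %zd", len);

	return len;
}
#endif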
1766 
/*
 * hif_input_desc_trace_buf_index() -
 * API to select the CE id and CE debug storage buffer index
 *
 * @scn: HIF context
 * @buf: data received from the user ("<CE id> <history index>")
 * @size: size of @buf
 *
 * Return: @size on success, -EINVAL on failure
 */
1777 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1778 					const char *buf, size_t size)
1779 {
1780 	struct ce_desc_hist *ce_hist = NULL;
1781 
1782 	if (!scn)
1783 		return -EINVAL;
1784 
1785 	ce_hist = &scn->hif_ce_desc_hist;
1786 
1787 	if (!size) {
1788 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1789 		return -EINVAL;
1790 	}
1791 
1792 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1793 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1794 		qdf_nofl_err("%s: Invalid input value.", __func__);
1795 		return -EINVAL;
1796 	}
1797 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1798 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1799 		qdf_print("Invalid values");
1800 		return -EINVAL;
1801 	}
1802 
1803 	return size;
1804 }
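
/*
 * Illustrative usage, assuming the platform glue exposes this handler and
 * hif_dump_desc_event() through a sysfs/debugfs node pair (the node names
 * below are placeholders):
 *
 *   # select CE 2, history record 10
 *   echo "2 10" > .../ce_desc_index
 *   # dump the selected record (served by hif_dump_desc_event())
 *   cat .../ce_desc_dump
 */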
1805 
1806 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1807 
1808 #ifdef HIF_CE_DEBUG_DATA_BUF
/*
 * hif_ce_en_desc_hist() -
 * API to enable/disable recording of the CE descriptor data history
 *
 * @scn: HIF context
 * @buf: data received from the user ("<CE id> <1/0>")
 * @size: size of @buf
 *
 * Starts or stops recording of the CE descriptor data history for the
 * given copy engine
 *
 * Return: @size on success, -EINVAL on failure
 */
1821 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1822 {
1823 	struct ce_desc_hist *ce_hist = NULL;
1824 	uint32_t cfg = 0;
1825 	uint32_t ce_id = 0;
1826 
1827 	if (!scn)
1828 		return -EINVAL;
1829 
1830 	ce_hist = &scn->hif_ce_desc_hist;
1831 
1832 	if (!size) {
1833 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1834 		return -EINVAL;
1835 	}
1836 
1837 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1838 		   (unsigned int *)&cfg) != 2) {
1839 		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
1840 			     __func__);
1841 		return -EINVAL;
1842 	}
1843 	if (ce_id >= CE_COUNT_MAX) {
1844 		qdf_print("Invalid value CE Id");
1845 		return -EINVAL;
1846 	}
1847 
	if (cfg > 1) {
1849 		qdf_print("Invalid values: enter 0 or 1");
1850 		return -EINVAL;
1851 	}
1852 
1853 	if (!ce_hist->hist_ev[ce_id])
1854 		return -EINVAL;
1855 
1856 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1857 	if (cfg == 1) {
1858 		if (ce_hist->data_enable[ce_id] == 1) {
1859 			qdf_debug("Already Enabled");
1860 		} else {
			if (alloc_mem_ce_debug_hist_data(scn, ce_id) ==
			    QDF_STATUS_E_NOMEM) {
				ce_hist->data_enable[ce_id] = 0;
				qdf_err("%s:Memory Alloc failed", __func__);
			} else {
				ce_hist->data_enable[ce_id] = 1;
			}
1867 		}
1868 	} else if (cfg == 0) {
1869 		if (ce_hist->data_enable[ce_id] == 0) {
1870 			qdf_debug("Already Disabled");
1871 		} else {
1872 			ce_hist->data_enable[ce_id] = 0;
			free_mem_ce_debug_hist_data(scn, ce_id);
1874 		}
1875 	}
1876 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1877 
1878 	return size;
1879 }
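
/*
 * Illustrative usage, assuming a store hook wired to this API (the node
 * name below is a placeholder):
 *
 *   # start recording descriptor data for CE 1
 *   echo "1 1" > .../ce_desc_data_enable
 *   # stop recording and free the per-CE data buffers
 *   echo "1 0" > .../ce_desc_data_enable
 */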
1880 
/*
 * hif_disp_ce_enable_desc_data_hist() -
 * API to display the per-CE value of data_enable
 *
 * @scn: HIF context
 * @buf: buffer into which the data is copied
 *
 * Return: total length copied
 */
1891 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1892 {
1893 	ssize_t len = 0;
1894 	uint32_t ce_id = 0;
1895 	struct ce_desc_hist *ce_hist = NULL;
1896 
1897 	if (!scn)
1898 		return -EINVAL;
1899 
1900 	ce_hist = &scn->hif_ce_desc_hist;
1901 
1902 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1903 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1904 				ce_id, ce_hist->data_enable[ce_id]);
1905 	}
1906 
1907 	return len;
1908 }
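
/*
 * Reading the matching show node (served by this API through the platform
 * glue) prints one " CE<n>: <0|1>" line per copy engine, where 1 means
 * descriptor data recording is currently enabled for that CE.
 */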
1909 #endif /* HIF_CE_DEBUG_DATA_BUF */
1910 
1911 #ifdef OL_ATH_SMART_LOGGING
1912 #define GUARD_SPACE 10
1913 #define LOG_ID_SZ 4
1914 /*
1915  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
1916  * @src_ring: SRC ring state
1917  * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
1919  * @buf_sz: Size of the ring buffer
1920  * @skb_sz: Max size of the SKB buffer to be copied
1921  *
1922  * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
1923  * the given buf, skb_sz is the max buffer size to be copied
1924  *
1925  * Return: Current pointer in ring buffer
1926  */
1927 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
1928 				    uint8_t *buf_cur, uint8_t *buf_init,
1929 				    uint32_t buf_sz, uint32_t skb_sz)
1930 {
1931 	struct CE_src_desc *src_ring_base;
1932 	uint32_t len, entry;
1933 	struct CE_src_desc  *src_desc;
1934 	qdf_nbuf_t nbuf;
1935 	uint32_t available_buf;
1936 
1937 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
1938 	len = sizeof(struct CE_ring_state);
1939 	available_buf = buf_sz - (buf_cur - buf_init);
1940 	if (available_buf < (len + GUARD_SPACE)) {
1941 		buf_cur = buf_init;
1942 	}
1943 
1944 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
1945 	buf_cur += sizeof(struct CE_ring_state);
1946 
1947 	for (entry = 0; entry < src_ring->nentries; entry++) {
1948 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
1949 		nbuf = src_ring->per_transfer_context[entry];
1950 		if (nbuf) {
1951 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1952 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1953 
1954 			len = sizeof(struct CE_src_desc) + skb_cp_len
1955 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1956 			available_buf = buf_sz - (buf_cur - buf_init);
1957 			if (available_buf < (len + GUARD_SPACE)) {
1958 				buf_cur = buf_init;
1959 			}
1960 			qdf_mem_copy(buf_cur, src_desc,
1961 				     sizeof(struct CE_src_desc));
1962 			buf_cur += sizeof(struct CE_src_desc);
1963 
1964 			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
					    skb_cp_len);
1967 
1968 			if (skb_cp_len) {
1969 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1970 					     skb_cp_len);
1971 				buf_cur += skb_cp_len;
1972 			}
1973 		} else {
1974 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
1975 			available_buf = buf_sz - (buf_cur - buf_init);
1976 			if (available_buf < (len + GUARD_SPACE)) {
1977 				buf_cur = buf_init;
1978 			}
1979 			qdf_mem_copy(buf_cur, src_desc,
1980 				     sizeof(struct CE_src_desc));
1981 			buf_cur += sizeof(struct CE_src_desc);
1982 			available_buf = buf_sz - (buf_cur - buf_init);
1983 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1984 		}
1985 	}
1986 
1987 	return buf_cur;
1988 }
1989 
1990 /*
1991  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
 * @dest_ring: DEST ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE DEST ring descriptors and buffers pointed by them in to
 * the given buf, skb_sz is the max buffer size to be copied
2000  *
2001  * Return: Current pointer in ring buffer
2002  */
2003 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
2004 				     uint8_t *buf_cur, uint8_t *buf_init,
2005 				     uint32_t buf_sz, uint32_t skb_sz)
2006 {
2007 	struct CE_dest_desc *dest_ring_base;
2008 	uint32_t len, entry;
2009 	struct CE_dest_desc  *dest_desc;
2010 	qdf_nbuf_t nbuf;
2011 	uint32_t available_buf;
2012 
2013 	dest_ring_base =
2014 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
2015 
2016 	len = sizeof(struct CE_ring_state);
2017 	available_buf = buf_sz - (buf_cur - buf_init);
2018 	if (available_buf < (len + GUARD_SPACE)) {
2019 		buf_cur = buf_init;
2020 	}
2021 
2022 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
2023 	buf_cur += sizeof(struct CE_ring_state);
2024 
2025 	for (entry = 0; entry < dest_ring->nentries; entry++) {
2026 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
2027 
2028 		nbuf = dest_ring->per_transfer_context[entry];
2029 		if (nbuf) {
2030 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
2031 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2032 
2033 			len = sizeof(struct CE_dest_desc) + skb_cp_len
2034 				+ LOG_ID_SZ + sizeof(skb_cp_len);
2035 
2036 			available_buf = buf_sz - (buf_cur - buf_init);
2037 			if (available_buf < (len + GUARD_SPACE)) {
2038 				buf_cur = buf_init;
2039 			}
2040 
2041 			qdf_mem_copy(buf_cur, dest_desc,
2042 				     sizeof(struct CE_dest_desc));
2043 			buf_cur += sizeof(struct CE_dest_desc);
2044 			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
					    skb_cp_len);
2047 			if (skb_cp_len) {
2048 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2049 					     skb_cp_len);
2050 				buf_cur += skb_cp_len;
2051 			}
2052 		} else {
2053 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
2054 			available_buf = buf_sz - (buf_cur - buf_init);
2055 			if (available_buf < (len + GUARD_SPACE)) {
2056 				buf_cur = buf_init;
2057 			}
2058 			qdf_mem_copy(buf_cur, dest_desc,
2059 				     sizeof(struct CE_dest_desc));
2060 			buf_cur += sizeof(struct CE_dest_desc);
2061 			available_buf = buf_sz - (buf_cur - buf_init);
2062 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
2063 		}
2064 	}
2065 	return buf_cur;
2066 }
2067 
/**
 * hif_log_dump_ce() - Copy the SRC/DEST ring of a CE to buf
 * @scn: HIF context
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
2080 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2081 			 uint8_t *buf_init, uint32_t buf_sz,
2082 			 uint32_t ce, uint32_t skb_sz)
2083 {
2084 	struct CE_state *ce_state;
2085 	struct CE_ring_state *src_ring;
2086 	struct CE_ring_state *dest_ring;
2087 
2088 	ce_state = scn->ce_id_to_state[ce];
2089 	src_ring = ce_state->src_ring;
2090 	dest_ring = ce_state->dest_ring;
2091 
2092 	if (src_ring) {
2093 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
2094 					      buf_init, buf_sz, skb_sz);
2095 	} else if (dest_ring) {
2096 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
2097 					       buf_init, buf_sz, skb_sz);
2098 	}
2099 
2100 	return buf_cur;
2101 }
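
/*
 * Minimal sketch of a smart-logging consumer (illustration only; the
 * function below is hypothetical and not part of this file): walk every
 * copy engine and append its SRC or DEST ring dump to a caller-provided
 * circular buffer. hif_log_dump_ce() wraps back to buf_init when fewer
 * than GUARD_SPACE bytes would remain.
 */
#if 0
static uint8_t *smart_log_dump_all_ce(struct hif_softc *scn,
				      uint8_t *buf_init, uint32_t buf_sz,
				      uint32_t skb_sz)
{
	uint8_t *buf_cur = buf_init;
	uint32_t ce;

	for (ce = 0; ce < CE_COUNT_MAX; ce++) {
		/* skip copy engines that were never configured */
		if (!scn->ce_id_to_state[ce])
			continue;

		buf_cur = hif_log_dump_ce(scn, buf_cur, buf_init,
					  buf_sz, ce, skb_sz);
	}

	return buf_cur;
}
#endif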
2102 
2103 qdf_export_symbol(hif_log_dump_ce);
2104 #endif /* OL_ATH_SMART_LOGGING */
2105 
2106