1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 static int war1_allow_sleep;
65 /* io32 write workaround */
66 static int hif_ce_war1;
67 
68 /**
69  * hif_ce_war_disable() - disable ce war globally
70  */
71 void hif_ce_war_disable(void)
72 {
73 	hif_ce_war1 = 0;
74 }
75 
76 /**
77  * hif_ce_war_enable() - enable ce war globally
78  */
79 void hif_ce_war_enable(void)
80 {
81 	hif_ce_war1 = 1;
82 }
83 
84 /*
85  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
86  * checked here as well
87  */
88 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
89 
90 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
91 #define CE_DEBUG_DATA_PER_ROW 16
92 
93 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
94 
95 /**
96  * get_next_record_index() - get the next record index
97  * @table_index: atomic index variable to increment
98  * @array_size: array size of the circular buffer
99  *
100  * Increment the atomic index and reserve the value.
101  * Takes care of buffer wrap.
102  * Guaranteed to be thread safe as long as fewer than array_size contexts
103  * try to access the array.  If there are more than array_size contexts
104  * trying to access the array, full locking of the recording process would
105  * be needed to have sane logging.
106  */
107 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
108 {
109 	int record_index = qdf_atomic_inc_return(table_index);
110 
111 	if (record_index == array_size)
112 		qdf_atomic_sub(array_size, table_index);
113 
114 	while (record_index >= array_size)
115 		record_index -= array_size;
116 	return record_index;
117 }
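/*
 * Illustrative example (not part of the driver): with array_size = 4 and
 * the shared index currently at 3, four racing callers would observe
 * qdf_atomic_inc_return() values 4, 5, 6 and 7.  The caller that sees
 * exactly array_size (4) rebases the shared index by array_size, and each
 * caller reduces its own value modulo array_size, so the reserved slots
 * come out as 0, 1, 2 and 3 with no slot handed to two callers, as long
 * as fewer than array_size contexts race here.
 */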
118 
119 #ifdef HIF_CE_DEBUG_DATA_BUF
120 /**
121  * hif_ce_desc_data_record() - Record data pointed by the CE descriptor
122  * @event: structure detailing a ce event
123  * @len: length of the data
124  * Return:
125  */
126 static void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
127 {
128 	uint8_t *data = NULL;
129 
130 	if (!event->data)
131 		return;
132 
133 	if (event->memory && len > 0)
134 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
135 
136 	event->actual_data_len = 0;
137 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
138 
139 	if (data && len > 0) {
140 		qdf_mem_copy(event->data, data,
141 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
142 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
143 		event->actual_data_len = len;
144 	}
145 }
146 #endif
147 
148 /**
149  * hif_record_ce_desc_event() - record ce descriptor events
150  * @scn: hif_softc
151  * @ce_id: which ce is the event occurring on
152  * @type: what happened
153  * @descriptor: pointer to the descriptor posted/completed
154  * @memory: virtual address of buffer related to the descriptor
155  * @index: index that the descriptor was/will be at.
 * @len: length of the data
156  */
157 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
158 				enum hif_ce_event_type type,
159 				union ce_desc *descriptor,
160 				void *memory, int index,
161 				int len)
162 {
163 	int record_index;
164 	struct hif_ce_desc_event *event;
165 
166 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
167 	struct hif_ce_desc_event *hist_ev = NULL;
168 
169 	if (ce_id >= CE_COUNT_MAX)
170 		return;
171 
172 	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
173 
177 	if (!ce_hist->enable[ce_id])
178 		return;
179 
180 	if (!hist_ev)
181 		return;
182 
183 	record_index = get_next_record_index(
184 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
185 
186 	event = &hist_ev[record_index];
187 
188 	event->type = type;
189 	event->time = qdf_get_log_timestamp();
190 
191 	if (descriptor != NULL) {
192 		qdf_mem_copy(&event->descriptor, descriptor, sizeof(union ce_desc));
193 	} else {
194 		qdf_mem_zero(&event->descriptor, sizeof(union ce_desc));
195 	}
196 
197 	event->memory = memory;
198 	event->index = index;
199 
200 #ifdef HIF_CE_DEBUG_DATA_BUF
201 	if (ce_hist->data_enable[ce_id])
202 		hif_ce_desc_data_record(event, len);
203 #endif
204 }
205 qdf_export_symbol(hif_record_ce_desc_event);
206 
207 /**
208  * ce_init_ce_desc_event_log() - initialize the ce event log
 * @scn: hif_softc
209  * @ce_id: copy engine id for which we are initializing the log
210  * @size: size of array to dedicate
211  *
212  * Currently the passed size is ignored in favor of a precompiled value.
213  */
214 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
215 {
216 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
217 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
218 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
219 }
220 
221 /**
222  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: hif_softc
223  * @ce_id: copy engine id for which we are deinitializing the log
224  *
225  */
226 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
227 {
228 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
229 
230 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
231 }
232 
233 #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
234 void hif_record_ce_desc_event(struct hif_softc *scn,
235 		int ce_id, enum hif_ce_event_type type,
236 		union ce_desc *descriptor, void *memory,
237 		int index, int len)
238 {
239 }
240 qdf_export_symbol(hif_record_ce_desc_event);
241 
242 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
243 					int size)
244 {
245 }
246 
247 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
248 {
249 }
250 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
251 
252 #ifdef NAPI_YIELD_BUDGET_BASED
253 bool hif_ce_service_should_yield(struct hif_softc *scn,
254 				 struct CE_state *ce_state)
255 {
256 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
257 
258 	/* Set receive_count to MAX_NUM_OF_RECEIVES when this count goes
259 	 * beyond MAX_NUM_OF_RECEIVES, to keep the NAPI budget calculation
260 	 * sane. This can happen in fast path handling as processing happens
261 	 * in batches.
262 	 */
263 	if (yield)
264 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
265 
266 	return yield;
267 }
268 #else
269 /**
270  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
271  * @scn: hif context
272  * @ce_state: context of the copy engine being serviced
273  *
274  * Return: true if the service should yield
275  */
276 bool hif_ce_service_should_yield(struct hif_softc *scn,
277 				 struct CE_state *ce_state)
278 {
279 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
280 
281 	time_limit_reached =
282 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
283 
284 	if (!time_limit_reached)
285 		rxpkt_thresh_reached = hif_max_num_receives_reached
286 					(scn, ce_state->receive_count);
287 
288 	/* Set receive_count to MAX_NUM_OF_RECEIVES when this count goes
289 	 * beyond MAX_NUM_OF_RECEIVES, to keep the NAPI budget calculation
290 	 * sane. This can happen in fast path handling as processing happens
291 	 * in batches.
292 	 */
293 	if (rxpkt_thresh_reached)
294 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
295 
296 	yield =  time_limit_reached || rxpkt_thresh_reached;
297 
298 	if (yield && ce_state->htt_rx_data)
299 		hif_napi_update_yield_stats(ce_state,
300 					    time_limit_reached,
301 					    rxpkt_thresh_reached);
302 	return yield;
303 }
304 qdf_export_symbol(hif_ce_service_should_yield);
305 #endif
306 
307 /*
308  * war_ce_src_ring_write_idx_set() - source ring write-index update helper
309  * used by the send paths; applies the io32 write workaround (hif_ce_war1).
310  */
311 
312 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
313 				   u32 ctrl_addr, unsigned int write_index)
314 {
315 	if (hif_ce_war1) {
316 		void __iomem *indicator_addr;
317 
318 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
319 
320 		if (!war1_allow_sleep
321 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
322 			hif_write32_mb(scn, indicator_addr,
323 				       (CDC_WAR_MAGIC_STR | write_index));
324 		} else {
325 			unsigned long irq_flags;
326 
327 			local_irq_save(irq_flags);
328 			hif_write32_mb(scn, indicator_addr, 1);
329 
330 			/*
331 			 * PCIE write waits for ACK in IPQ8K, there is no
332 			 * need to read back value.
333 			 */
334 			(void)hif_read32_mb(scn, indicator_addr);
335 			/* conservative */
336 			(void)hif_read32_mb(scn, indicator_addr);
337 
338 			CE_SRC_RING_WRITE_IDX_SET(scn,
339 						  ctrl_addr, write_index);
340 
341 			hif_write32_mb(scn, indicator_addr, 0);
342 			local_irq_restore(irq_flags);
343 		}
344 	} else {
345 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
346 	}
347 }
348 
349 qdf_export_symbol(war_ce_src_ring_write_idx_set);
350 
351 int
352 ce_send(struct CE_handle *copyeng,
353 		void *per_transfer_context,
354 		qdf_dma_addr_t buffer,
355 		uint32_t nbytes,
356 		uint32_t transfer_id,
357 		uint32_t flags,
358 		uint32_t user_flag)
359 {
360 	struct CE_state *CE_state = (struct CE_state *)copyeng;
361 	int status;
362 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
363 
364 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
365 	status = hif_state->ce_services->ce_send_nolock(copyeng,
366 			per_transfer_context, buffer, nbytes,
367 			transfer_id, flags, user_flag);
368 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
369 
370 	return status;
371 }
372 qdf_export_symbol(ce_send);
373 
374 unsigned int ce_sendlist_sizeof(void)
375 {
376 	return sizeof(struct ce_sendlist);
377 }
378 
379 void ce_sendlist_init(struct ce_sendlist *sendlist)
380 {
381 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
382 
383 	sl->num_items = 0;
384 }
385 
386 int
387 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
388 					qdf_dma_addr_t buffer,
389 					uint32_t nbytes,
390 					uint32_t flags,
391 					uint32_t user_flags)
392 {
393 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
394 	unsigned int num_items = sl->num_items;
395 	struct ce_sendlist_item *item;
396 
397 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
398 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
399 		return QDF_STATUS_E_RESOURCES;
400 	}
401 
402 	item = &sl->item[num_items];
403 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
404 	item->data = buffer;
405 	item->u.nbytes = nbytes;
406 	item->flags = flags;
407 	item->user_flags = user_flags;
408 	sl->num_items = num_items + 1;
409 	return QDF_STATUS_SUCCESS;
410 }
411 
412 int
413 ce_sendlist_send(struct CE_handle *copyeng,
414 		 void *per_transfer_context,
415 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
416 {
417 	struct CE_state *CE_state = (struct CE_state *)copyeng;
418 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
419 
420 	return hif_state->ce_services->ce_sendlist_send(copyeng,
421 			per_transfer_context, sendlist, transfer_id);
422 }
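/*
 * Illustrative usage sketch (not driver code): a hypothetical caller that
 * batches two already DMA-mapped fragments into a single sendlist and
 * posts them on a copy engine.  The fragment addresses/lengths and the
 * per_transfer_ctx below are made-up names; flags are left 0 here (a
 * gather flag would normally be set on every fragment except the last),
 * and error handling is elided.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0, 0);
 *	ce_sendlist_send(copyeng, per_transfer_ctx, &sendlist, transfer_id);
 *
 * ce_sendlist_buf_add() fails with QDF_STATUS_E_RESOURCES once
 * CE_SENDLIST_ITEMS_MAX items have been added.
 */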
423 
424 #ifndef AH_NEED_TX_DATA_SWAP
425 #define AH_NEED_TX_DATA_SWAP 0
426 #endif
427 
428 /**
429  * ce_batch_send() - send a batch of msdus at once
430  * @ce_tx_hdl : pointer to CE handle
431  * @msdu : list of msdus to be sent
432  * @transfer_id : transfer id
433  * @len : Downloaded length
434  * @sendhead : sendhead
435  *
436  * Assumption: Called with an array of MSDUs
437  * Function:
438  * For each msdu in the array
439  * 1. Send each msdu
440  * 2. Increment write index accordingly.
441  *
442  * Return: list of msdus not sent
443  */
444 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
445 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
446 {
447 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
448 	struct hif_softc *scn = ce_state->scn;
449 	struct CE_ring_state *src_ring = ce_state->src_ring;
450 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
451 	/*  A_target_id_t targid = TARGID(scn);*/
452 
453 	uint32_t nentries_mask = src_ring->nentries_mask;
454 	uint32_t sw_index, write_index;
455 
456 	struct CE_src_desc *src_desc_base =
457 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
458 	uint32_t *src_desc;
459 
460 	struct CE_src_desc lsrc_desc = {0};
461 	int deltacount = 0;
462 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
463 
464 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
465 	sw_index = src_ring->sw_index;
466 	write_index = src_ring->write_index;
467 
468 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
469 
470 	while (msdu) {
471 		tempnext = qdf_nbuf_next(msdu);
472 
473 		if (deltacount < 2) {
474 			if (sendhead)
475 				return msdu;
476 			HIF_ERROR("%s: Out of descriptors", __func__);
477 			src_ring->write_index = write_index;
478 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
479 					write_index);
480 
481 			sw_index = src_ring->sw_index;
482 			write_index = src_ring->write_index;
483 
484 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
485 					sw_index-1);
486 			if (freelist == NULL) {
487 				freelist = msdu;
488 				hfreelist = msdu;
489 			} else {
490 				qdf_nbuf_set_next(freelist, msdu);
491 				freelist = msdu;
492 			}
493 			qdf_nbuf_set_next(msdu, NULL);
494 			msdu = tempnext;
495 			continue;
496 		}
497 
498 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
499 				write_index);
500 
501 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
502 
503 		lsrc_desc.meta_data = transfer_id;
504 		if (len  > msdu->len)
505 			len =  msdu->len;
506 		lsrc_desc.nbytes = len;
507 		/*  Data packet is a byte stream, so disable byte swap */
508 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
509 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
510 
511 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
512 
513 
514 		src_ring->per_transfer_context[write_index] = msdu;
515 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
516 
517 		if (sendhead)
518 			break;
519 		qdf_nbuf_set_next(msdu, NULL);
520 		msdu = tempnext;
521 
522 	}
523 
524 
525 	src_ring->write_index = write_index;
526 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
527 
528 	return hfreelist;
529 }
530 
531 /**
532  * ce_update_tx_ring() - Advance sw index.
533  * @ce_tx_hdl : pointer to CE handle
534  * @num_htt_cmpls : htt completions received.
535  *
536  * Function:
537  * Increment the value of sw index of src ring
538  * according to number of htt completions
539  * received.
540  *
541  * Return: void
542  */
543 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
544 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
545 {
546 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
547 	struct CE_ring_state *src_ring = ce_state->src_ring;
548 	uint32_t nentries_mask = src_ring->nentries_mask;
549 	/*
550 	 * Advance the s/w index:
551 	 * This effectively simulates completing the CE ring descriptors
552 	 */
553 	src_ring->sw_index =
554 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
555 				num_htt_cmpls);
556 }
557 #else
558 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
559 {}
560 #endif
561 
562 /**
563  * ce_send_single() - send one msdu
564  * @ce_tx_hdl : pointer to CE handle
565  * @msdu : msdu to be sent
566  * @transfer_id : transfer id
567  * @len : Downloaded length
568  *
569  * Function:
570  * 1. Send one msdu
571  * 2. Increment write index of src ring accordingly.
572  *
573  * Return: int: CE sent status
574  */
575 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
576 		uint32_t transfer_id, u_int32_t len)
577 {
578 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
579 	struct hif_softc *scn = ce_state->scn;
580 	struct CE_ring_state *src_ring = ce_state->src_ring;
581 	uint32_t ctrl_addr = ce_state->ctrl_addr;
582 	/*A_target_id_t targid = TARGID(scn);*/
583 
584 	uint32_t nentries_mask = src_ring->nentries_mask;
585 	uint32_t sw_index, write_index;
586 
587 	struct CE_src_desc *src_desc_base =
588 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
589 	uint32_t *src_desc;
590 
591 	struct CE_src_desc lsrc_desc = {0};
592 	enum hif_ce_event_type event_type;
593 
594 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
595 	sw_index = src_ring->sw_index;
596 	write_index = src_ring->write_index;
597 
598 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
599 					sw_index-1) < 1)) {
600 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
601 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
602 			  write_index, sw_index);
603 		return 1;
604 	}
605 
606 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
607 
608 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
609 
610 	lsrc_desc.meta_data = transfer_id;
611 	lsrc_desc.nbytes = len;
612 	/*  Data packet is a byte stream, so disable byte swap */
613 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
614 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
615 
616 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
617 
618 
619 	src_ring->per_transfer_context[write_index] = msdu;
620 
621 	if (((struct CE_src_desc *)src_desc)->gather)
622 		event_type = HIF_TX_GATHER_DESC_POST;
623 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
624 		event_type = HIF_TX_DESC_SOFTWARE_POST;
625 	else
626 		event_type = HIF_TX_DESC_POST;
627 
628 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
629 				(union ce_desc *)src_desc, msdu,
630 				write_index, len);
631 
632 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
633 
634 	src_ring->write_index = write_index;
635 
636 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
637 
638 	return QDF_STATUS_SUCCESS;
639 }
640 
641 /**
642  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
643  * @copyeng: copy engine handle
644  * @per_recv_context: virtual address of the nbuf
645  * @buffer: physical address of the nbuf
646  *
647  * Return: 0 if the buffer is enqueued
648  */
649 int
650 ce_recv_buf_enqueue(struct CE_handle *copyeng,
651 		    void *per_recv_context, qdf_dma_addr_t buffer)
652 {
653 	struct CE_state *CE_state = (struct CE_state *)copyeng;
654 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
655 
656 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
657 			per_recv_context, buffer);
658 }
659 qdf_export_symbol(ce_recv_buf_enqueue);
660 
661 void
662 ce_send_watermarks_set(struct CE_handle *copyeng,
663 		       unsigned int low_alert_nentries,
664 		       unsigned int high_alert_nentries)
665 {
666 	struct CE_state *CE_state = (struct CE_state *)copyeng;
667 	uint32_t ctrl_addr = CE_state->ctrl_addr;
668 	struct hif_softc *scn = CE_state->scn;
669 
670 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
671 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
672 }
673 
674 void
675 ce_recv_watermarks_set(struct CE_handle *copyeng,
676 		       unsigned int low_alert_nentries,
677 		       unsigned int high_alert_nentries)
678 {
679 	struct CE_state *CE_state = (struct CE_state *)copyeng;
680 	uint32_t ctrl_addr = CE_state->ctrl_addr;
681 	struct hif_softc *scn = CE_state->scn;
682 
683 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
684 				low_alert_nentries);
685 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
686 				high_alert_nentries);
687 }
688 
689 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
690 {
691 	struct CE_state *CE_state = (struct CE_state *)copyeng;
692 	struct CE_ring_state *src_ring = CE_state->src_ring;
693 	unsigned int nentries_mask = src_ring->nentries_mask;
694 	unsigned int sw_index;
695 	unsigned int write_index;
696 
697 	qdf_spin_lock(&CE_state->ce_index_lock);
698 	sw_index = src_ring->sw_index;
699 	write_index = src_ring->write_index;
700 	qdf_spin_unlock(&CE_state->ce_index_lock);
701 
702 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
703 }
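/*
 * Illustrative sketch (not driver code): a hypothetical producer can use
 * ce_send_entries_avail() for coarse flow control before posting, e.g.
 *
 *	if (ce_send_entries_avail(copyeng) < 1)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	status = ce_send(copyeng, per_transfer_ctx, frag_paddr, nbytes,
 *			 transfer_id, 0, 0);
 *
 * The value is only a snapshot taken under the CE index lock; the send
 * path is still expected to re-check ring space when the descriptor is
 * actually posted.
 */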
704 
705 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
706 {
707 	struct CE_state *CE_state = (struct CE_state *)copyeng;
708 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
709 	unsigned int nentries_mask = dest_ring->nentries_mask;
710 	unsigned int sw_index;
711 	unsigned int write_index;
712 
713 	qdf_spin_lock(&CE_state->ce_index_lock);
714 	sw_index = dest_ring->sw_index;
715 	write_index = dest_ring->write_index;
716 	qdf_spin_unlock(&CE_state->ce_index_lock);
717 
718 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
719 }
720 
721 /*
722  * Locked wrapper for ce_send_entries_done_nolock;
723  * takes the CE index lock internally.
724  */
725 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
726 {
727 	struct CE_state *CE_state = (struct CE_state *)copyeng;
728 	unsigned int nentries;
729 	struct hif_softc *scn = CE_state->scn;
730 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
731 
732 	qdf_spin_lock(&CE_state->ce_index_lock);
733 	nentries = hif_state->ce_services->ce_send_entries_done_nolock(
734 						CE_state->scn, CE_state);
735 	qdf_spin_unlock(&CE_state->ce_index_lock);
736 
737 	return nentries;
738 }
739 
740 /*
741  * Locked wrapper for ce_recv_entries_done_nolock;
742  * takes the CE index lock internally.
743  */
744 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
745 {
746 	struct CE_state *CE_state = (struct CE_state *)copyeng;
747 	unsigned int nentries;
748 	struct hif_softc *scn = CE_state->scn;
749 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
750 
751 	qdf_spin_lock(&CE_state->ce_index_lock);
752 	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
753 						CE_state->scn, CE_state);
754 	qdf_spin_unlock(&CE_state->ce_index_lock);
755 
756 	return nentries;
757 }
758 
759 /*
760  * Locked wrapper for ce_completed_recv_next_nolock;
761  * takes the CE index lock internally.
762  */
763 int
764 ce_completed_recv_next(struct CE_handle *copyeng,
765 		       void **per_CE_contextp,
766 		       void **per_transfer_contextp,
767 		       qdf_dma_addr_t *bufferp,
768 		       unsigned int *nbytesp,
769 		       unsigned int *transfer_idp, unsigned int *flagsp)
770 {
771 	struct CE_state *CE_state = (struct CE_state *)copyeng;
772 	int status;
773 	struct hif_softc *scn = CE_state->scn;
774 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
775 	struct ce_ops *ce_services;
776 
777 	ce_services = hif_state->ce_services;
778 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
779 	status =
780 		ce_services->ce_completed_recv_next_nolock(CE_state,
781 				per_CE_contextp, per_transfer_contextp, bufferp,
782 					      nbytesp, transfer_idp, flagsp);
783 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
784 
785 	return status;
786 }
787 
788 QDF_STATUS
789 ce_revoke_recv_next(struct CE_handle *copyeng,
790 		    void **per_CE_contextp,
791 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
792 {
793 	struct CE_state *CE_state = (struct CE_state *)copyeng;
794 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
795 
796 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
797 			per_CE_contextp, per_transfer_contextp, bufferp);
798 }
799 
800 QDF_STATUS
801 ce_cancel_send_next(struct CE_handle *copyeng,
802 		void **per_CE_contextp,
803 		void **per_transfer_contextp,
804 		qdf_dma_addr_t *bufferp,
805 		unsigned int *nbytesp,
806 		unsigned int *transfer_idp,
807 		uint32_t *toeplitz_hash_result)
808 {
809 	struct CE_state *CE_state = (struct CE_state *)copyeng;
810 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
811 
812 	return hif_state->ce_services->ce_cancel_send_next
813 		(copyeng, per_CE_contextp, per_transfer_contextp,
814 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
815 }
816 qdf_export_symbol(ce_cancel_send_next);
817 
818 int
819 ce_completed_send_next(struct CE_handle *copyeng,
820 		       void **per_CE_contextp,
821 		       void **per_transfer_contextp,
822 		       qdf_dma_addr_t *bufferp,
823 		       unsigned int *nbytesp,
824 		       unsigned int *transfer_idp,
825 		       unsigned int *sw_idx,
826 		       unsigned int *hw_idx,
827 		       unsigned int *toeplitz_hash_result)
828 {
829 	struct CE_state *CE_state = (struct CE_state *)copyeng;
830 	struct hif_softc *scn = CE_state->scn;
831 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
832 	struct ce_ops *ce_services;
833 	int status;
834 
835 	ce_services = hif_state->ce_services;
836 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
837 	status =
838 		ce_services->ce_completed_send_next_nolock(CE_state,
839 					per_CE_contextp, per_transfer_contextp,
840 					bufferp, nbytesp, transfer_idp, sw_idx,
841 					      hw_idx, toeplitz_hash_result);
842 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
843 
844 	return status;
845 }
846 
847 #ifdef ATH_11AC_TXCOMPACT
848 /* CE engine descriptor reap
849  * Similar to ce_per_engine_service; the only difference is that
850  * ce_per_engine_service does both receive and reaping of completed
851  * descriptors, while this function only handles reaping of Tx complete
852  * descriptors. It is called from the threshold reap poll routine
853  * hif_send_complete_check, so it must not contain any receive
854  * functionality.
855  */
856 
857 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
858 {
859 	void *CE_context;
860 	void *transfer_context;
861 	qdf_dma_addr_t buf;
862 	unsigned int nbytes;
863 	unsigned int id;
864 	unsigned int sw_idx, hw_idx;
865 	uint32_t toeplitz_hash_result;
866 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
867 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
868 
869 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
870 		return;
871 
872 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
873 			NULL, NULL, 0, 0);
874 
875 	/* Since this function is called from both user context and
876 	 * tasklet context, the spinlock has to lock out bottom halves.
877 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
878 	 * enabled in TX polling mode. If this is not the case, more
879 	 * bottom-half spinlock changes are needed. Due to data path
880 	 * performance concerns, after internal discussion we decided
881 	 * to make the minimum change, i.e., only address the issue that
882 	 * occurs in this function. The possible negative effect of this
883 	 * minimum change is that, in the future, if some other function
884 	 * is also opened up to user context, those cases will need
885 	 * spin_lock changed to spin_lock_bh as well.
886 	 */
887 
888 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
889 
890 	if (CE_state->send_cb) {
891 		{
892 			struct ce_ops *ce_services = hif_state->ce_services;
893 			/* Pop completed send buffers and call the
894 			 * registered send callback for each
895 			 */
896 			while (ce_services->ce_completed_send_next_nolock
897 				 (CE_state, &CE_context,
898 				  &transfer_context, &buf,
899 				  &nbytes, &id, &sw_idx, &hw_idx,
900 				  &toeplitz_hash_result) ==
901 				  QDF_STATUS_SUCCESS) {
902 				if (ce_id != CE_HTT_H2T_MSG) {
903 					qdf_spin_unlock_bh(
904 						&CE_state->ce_index_lock);
905 					CE_state->send_cb(
906 						(struct CE_handle *)
907 						CE_state, CE_context,
908 						transfer_context, buf,
909 						nbytes, id, sw_idx, hw_idx,
910 						toeplitz_hash_result);
911 					qdf_spin_lock_bh(
912 						&CE_state->ce_index_lock);
913 				} else {
914 					struct HIF_CE_pipe_info *pipe_info =
915 						(struct HIF_CE_pipe_info *)
916 						CE_context;
917 
918 					qdf_spin_lock_bh(&pipe_info->
919 						 completion_freeq_lock);
920 					pipe_info->num_sends_allowed++;
921 					qdf_spin_unlock_bh(&pipe_info->
922 						   completion_freeq_lock);
923 				}
924 			}
925 		}
926 	}
927 
928 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
929 
930 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
931 			NULL, NULL, 0, 0);
932 	Q_TARGET_ACCESS_END(scn);
933 }
934 
935 #endif /*ATH_11AC_TXCOMPACT */
936 
937 /*
938  * ce_engine_service_reg:
939  *
940  * Called from ce_per_engine_service and goes through the regular interrupt
941  * handling that does not involve the WLAN fast path feature.
942  *
943  * Returns void
944  */
945 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
946 {
947 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
948 	uint32_t ctrl_addr = CE_state->ctrl_addr;
949 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
950 	void *CE_context;
951 	void *transfer_context;
952 	qdf_dma_addr_t buf;
953 	unsigned int nbytes;
954 	unsigned int id;
955 	unsigned int flags;
956 	unsigned int more_comp_cnt = 0;
957 	unsigned int more_snd_comp_cnt = 0;
958 	unsigned int sw_idx, hw_idx;
959 	uint32_t toeplitz_hash_result;
960 	uint32_t mode = hif_get_conparam(scn);
961 
962 more_completions:
963 	if (CE_state->recv_cb) {
964 
965 		/* Pop completed recv buffers and call
966 		 * the registered recv callback for each
967 		 */
968 		while (hif_state->ce_services->ce_completed_recv_next_nolock
969 				(CE_state, &CE_context, &transfer_context,
970 				&buf, &nbytes, &id, &flags) ==
971 				QDF_STATUS_SUCCESS) {
972 			qdf_spin_unlock(&CE_state->ce_index_lock);
973 			CE_state->recv_cb((struct CE_handle *)CE_state,
974 					  CE_context, transfer_context, buf,
975 					  nbytes, id, flags);
976 
977 			qdf_spin_lock(&CE_state->ce_index_lock);
978 			/*
979 			 * EV #112693 -
980 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
981 			 * BSoD_0x133 occurred in VHT80 UDP_DL
982 			 * Break out of the DPC by force if the number of
983 			 * loops in hif_pci_ce_recv_data reaches
984 			 * MAX_NUM_OF_RECEIVES, to avoid spending too much
985 			 * time in the DPC for each interrupt. Schedule
986 			 * another DPC to avoid data loss if we had taken a
987 			 * force-break action before. Currently this applies
988 			 * to Windows OS only; Linux/MAC OS can extend it to
989 			 * their platforms if necessary.
990 			 */
991 
992 			/* Break the receive processes by
993 			 * force if force_break set up
994 			 */
995 			if (qdf_unlikely(CE_state->force_break)) {
996 				qdf_atomic_set(&CE_state->rx_pending, 1);
997 				return;
998 			}
999 		}
1000 	}
1001 
1002 	/*
1003 	 * Attention: We may experience potential infinite loop for below
1004 	 * While Loop during Sending Stress test.
1005 	 * Resolve the same way as Receive Case (Refer to EV #112693)
1006 	 */
1007 
1008 	if (CE_state->send_cb) {
1009 		/* Pop completed send buffers and call
1010 		 * the registered send callback for each
1011 		 */
1012 
1013 #ifdef ATH_11AC_TXCOMPACT
1014 		while (hif_state->ce_services->ce_completed_send_next_nolock
1015 			 (CE_state, &CE_context,
1016 			 &transfer_context, &buf, &nbytes,
1017 			 &id, &sw_idx, &hw_idx,
1018 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1019 
1020 			if (CE_id != CE_HTT_H2T_MSG ||
1021 			    QDF_IS_EPPING_ENABLED(mode)) {
1022 				qdf_spin_unlock(&CE_state->ce_index_lock);
1023 				CE_state->send_cb((struct CE_handle *)CE_state,
1024 						  CE_context, transfer_context,
1025 						  buf, nbytes, id, sw_idx,
1026 						  hw_idx, toeplitz_hash_result);
1027 				qdf_spin_lock(&CE_state->ce_index_lock);
1028 			} else {
1029 				struct HIF_CE_pipe_info *pipe_info =
1030 					(struct HIF_CE_pipe_info *)CE_context;
1031 
1032 				qdf_spin_lock_bh(&pipe_info->
1033 					      completion_freeq_lock);
1034 				pipe_info->num_sends_allowed++;
1035 				qdf_spin_unlock_bh(&pipe_info->
1036 						completion_freeq_lock);
1037 			}
1038 		}
1039 #else                           /*ATH_11AC_TXCOMPACT */
1040 		while (hif_state->ce_services->ce_completed_send_next_nolock
1041 			 (CE_state, &CE_context,
1042 			  &transfer_context, &buf, &nbytes,
1043 			  &id, &sw_idx, &hw_idx,
1044 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1045 			qdf_spin_unlock(&CE_state->ce_index_lock);
1046 			CE_state->send_cb((struct CE_handle *)CE_state,
1047 				  CE_context, transfer_context, buf,
1048 				  nbytes, id, sw_idx, hw_idx,
1049 				  toeplitz_hash_result);
1050 			qdf_spin_lock(&CE_state->ce_index_lock);
1051 		}
1052 #endif /*ATH_11AC_TXCOMPACT */
1053 	}
1054 
1055 more_watermarks:
1056 	if (CE_state->misc_cbs) {
1057 		if (CE_state->watermark_cb &&
1058 				hif_state->ce_services->watermark_int(CE_state,
1059 					&flags)) {
1060 			qdf_spin_unlock(&CE_state->ce_index_lock);
1061 			/* Convert HW IS bits to software flags */
1062 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1063 					CE_state->wm_context, flags);
1064 			qdf_spin_lock(&CE_state->ce_index_lock);
1065 		}
1066 	}
1067 
1068 	/*
1069 	 * Clear the misc interrupts (watermark) that were handled above,
1070 	 * and that will be checked again below.
1071 	 * Clear and check for copy-complete interrupts again, just in case
1072 	 * more copy completions happened while the misc interrupts were being
1073 	 * handled.
1074 	 */
1075 	if (!ce_srng_based(scn)) {
1076 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1077 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1078 					   CE_WATERMARK_MASK |
1079 					   HOST_IS_COPY_COMPLETE_MASK);
1080 		} else {
1081 			qdf_atomic_set(&CE_state->rx_pending, 0);
1082 			hif_err_rl("%s: target access is not allowed",
1083 				   __func__);
1084 			return;
1085 		}
1086 	}
1087 
1088 	/*
1089 	 * Now that per-engine interrupts are cleared, verify that
1090 	 * no recv interrupts arrive while processing send interrupts,
1091 	 * and no recv or send interrupts happened while processing
1092 	 * misc interrupts. Go back and check again. Keep checking until
1093 	 * we find no more events to process.
1094 	 */
1095 	if (CE_state->recv_cb &&
1096 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1097 				CE_state)) {
1098 		if (QDF_IS_EPPING_ENABLED(mode) ||
1099 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1100 			goto more_completions;
1101 		} else {
1102 			if (!ce_srng_based(scn)) {
1103 				HIF_ERROR(
1104 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1105 					__func__,
1106 					CE_state->dest_ring->nentries_mask,
1107 					CE_state->dest_ring->sw_index,
1108 					CE_DEST_RING_READ_IDX_GET(scn,
1109 							  CE_state->ctrl_addr));
1110 			}
1111 		}
1112 	}
1113 
1114 	if (CE_state->send_cb &&
1115 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1116 				CE_state)) {
1117 		if (QDF_IS_EPPING_ENABLED(mode) ||
1118 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1119 			goto more_completions;
1120 		} else {
1121 			if (!ce_srng_based(scn)) {
1122 				HIF_ERROR(
1123 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1124 					__func__,
1125 					CE_state->src_ring->nentries_mask,
1126 					CE_state->src_ring->sw_index,
1127 					CE_SRC_RING_READ_IDX_GET(scn,
1128 							 CE_state->ctrl_addr));
1129 			}
1130 		}
1131 	}
1132 
1133 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1134 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1135 			goto more_watermarks;
1136 	}
1137 
1138 	qdf_atomic_set(&CE_state->rx_pending, 0);
1139 }
1140 
1141 /*
1142  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1143  *
1144  * Invokes registered callbacks for recv_complete,
1145  * send_complete, and watermarks.
1146  *
1147  * Returns: number of messages processed
1148  */
1149 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1150 {
1151 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1152 
1153 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1154 		return CE_state->receive_count;
1155 
1156 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1157 		HIF_ERROR("[premature rc=0]");
1158 		return 0; /* no work done */
1159 	}
1160 
1161 	/* Clear force_break flag and re-initialize receive_count to 0 */
1162 	CE_state->receive_count = 0;
1163 	CE_state->force_break = 0;
1164 	CE_state->ce_service_start_time = sched_clock();
1165 	CE_state->ce_service_yield_time =
1166 		CE_state->ce_service_start_time +
1167 		hif_get_ce_service_max_yield_time(
1168 			(struct hif_opaque_softc *)scn);
1169 
1170 	qdf_spin_lock(&CE_state->ce_index_lock);
1171 
1172 	CE_state->service(scn, CE_id);
1173 
1174 	qdf_spin_unlock(&CE_state->ce_index_lock);
1175 
1176 	if (Q_TARGET_ACCESS_END(scn) < 0)
1177 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
1178 	return CE_state->receive_count;
1179 }
1180 qdf_export_symbol(ce_per_engine_service);
1181 
1182 /*
1183  * Handler for per-engine interrupts on ALL active CEs.
1184  * This is used in cases where the system is sharing a
1185  * single interrupt for all CEs.
1186  */
1187 
1188 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1189 {
1190 	int CE_id;
1191 	uint32_t intr_summary;
1192 
1193 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1194 		return;
1195 
1196 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1197 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1198 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1199 
1200 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1201 				qdf_atomic_set(&CE_state->rx_pending, 0);
1202 				ce_per_engine_service(scn, CE_id);
1203 			}
1204 		}
1205 
1206 		Q_TARGET_ACCESS_END(scn);
1207 		return;
1208 	}
1209 
1210 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1211 
1212 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1213 		if (intr_summary & (1 << CE_id))
1214 			intr_summary &= ~(1 << CE_id);
1215 		else
1216 			continue;       /* no intr pending on this CE */
1217 
1218 		ce_per_engine_service(scn, CE_id);
1219 	}
1220 
1221 	Q_TARGET_ACCESS_END(scn);
1222 }
1223 
1224 /* Iterate the CE_state list and disable the compl interrupt
1225  * if it has been registered already.
1226  */
1227 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1228 {
1229 	int CE_id;
1230 
1231 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1232 		return;
1233 
1234 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1235 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1236 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1237 
1238 		/* if the interrupt is currently enabled, disable it */
1239 		if (!CE_state->disable_copy_compl_intr
1240 		    && (CE_state->send_cb || CE_state->recv_cb))
1241 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1242 
1243 		if (CE_state->watermark_cb)
1244 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1245 	}
1246 	Q_TARGET_ACCESS_END(scn);
1247 }
1248 
1249 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1250 {
1251 	int CE_id;
1252 
1253 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1254 		return;
1255 
1256 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1257 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1258 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1259 
1260 		/*
1261 		 * If the CE is supposed to have copy complete interrupts
1262 		 * enabled (i.e. there a callback registered, and the
1263 		 * "disable" flag is not set), then re-enable the interrupt.
1264 		 */
1265 		if (!CE_state->disable_copy_compl_intr
1266 		    && (CE_state->send_cb || CE_state->recv_cb))
1267 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1268 
1269 		if (CE_state->watermark_cb)
1270 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1271 	}
1272 	Q_TARGET_ACCESS_END(scn);
1273 }
1274 
1275 /**
1276  * ce_send_cb_register(): register completion handler
1277  * @copyeng: CE_state representing the ce we are adding the behavior to
1278  * @fn_ptr: callback that the ce should use when processing tx completions
1279  * @disable_interrupts: if the interrupts should be disabled or not.
1280  *
1281  * Caller should guarantee that no transactions are in progress before
1282  * switching the callback function.
1283  *
1284  * Registers the send context before the fn pointer so that if the cb is valid
1285  * the context should be valid.
1286  *
1287  * Beware that currently this function will enable completion interrupts.
1288  */
1289 void
1290 ce_send_cb_register(struct CE_handle *copyeng,
1291 		    ce_send_cb fn_ptr,
1292 		    void *ce_send_context, int disable_interrupts)
1293 {
1294 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1295 	struct hif_softc *scn;
1296 	struct HIF_CE_state *hif_state;
1297 
1298 	if (CE_state == NULL) {
1299 		HIF_ERROR("%s: Error CE state = NULL", __func__);
1300 		return;
1301 	}
1302 	scn = CE_state->scn;
1303 	hif_state = HIF_GET_CE_STATE(scn);
1304 	if (hif_state == NULL) {
1305 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1306 		return;
1307 	}
1308 	CE_state->send_context = ce_send_context;
1309 	CE_state->send_cb = fn_ptr;
1310 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1311 							disable_interrupts);
1312 }
1313 qdf_export_symbol(ce_send_cb_register);
1314 
1315 /**
1316  * ce_recv_cb_register(): register completion handler
1317  * @copyeng: CE_state representing the ce we are adding the behavior to
1318  * @fn_ptr: callback that the ce should use when processing rx completions
1319  * @disable_interrupts: if the interrupts should be disabled or not.
1320  *
1321  * Registers the recv context before the fn pointer so that if the cb is valid
1322  * the context should be valid.
1323  *
1324  * Caller should guarantee that no transactions are in progress before
1325  * switching the callback function.
1326  */
1327 void
1328 ce_recv_cb_register(struct CE_handle *copyeng,
1329 		    CE_recv_cb fn_ptr,
1330 		    void *CE_recv_context, int disable_interrupts)
1331 {
1332 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1333 	struct hif_softc *scn;
1334 	struct HIF_CE_state *hif_state;
1335 
1336 	if (CE_state == NULL) {
1337 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
1338 		return;
1339 	}
1340 	scn = CE_state->scn;
1341 	hif_state = HIF_GET_CE_STATE(scn);
1342 	if (hif_state == NULL) {
1343 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1344 		return;
1345 	}
1346 	CE_state->recv_context = CE_recv_context;
1347 	CE_state->recv_cb = fn_ptr;
1348 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1349 							disable_interrupts);
1350 }
1351 qdf_export_symbol(ce_recv_cb_register);
1352 
1353 /**
1354  * ce_watermark_cb_register(): register completion handler
1355  * @copyeng: CE_state representing the ce we are adding the behavior to
1356  * @fn_ptr: callback that the ce should use when processing watermark events
1357  *
1358  * Caller should guarantee that no watermark events are being processed before
1359  * switching the callback function.
1360  */
1361 void
1362 ce_watermark_cb_register(struct CE_handle *copyeng,
1363 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1364 {
1365 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1366 	struct hif_softc *scn = CE_state->scn;
1367 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1368 
1369 	CE_state->watermark_cb = fn_ptr;
1370 	CE_state->wm_context = CE_wm_context;
1371 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1372 							0);
1373 	if (fn_ptr)
1374 		CE_state->misc_cbs = 1;
1375 }
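/*
 * Illustrative sketch (not driver code): a hypothetical attach path wiring
 * completion handlers onto a pipe.  The callback names and the pipe_info
 * context are made up for the example; registration should happen before
 * any transfers are posted on the CE.
 *
 *	ce_send_cb_register(pipe_info->ce_hdl, my_tx_done_cb, pipe_info, 0);
 *	ce_recv_cb_register(pipe_info->ce_hdl, my_rx_done_cb, pipe_info, 0);
 *	ce_watermark_cb_register(pipe_info->ce_hdl, my_wm_cb, pipe_info);
 */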
1376 
1377 bool ce_get_rx_pending(struct hif_softc *scn)
1378 {
1379 	int CE_id;
1380 
1381 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1382 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1383 
1384 		if (qdf_atomic_read(&CE_state->rx_pending))
1385 			return true;
1386 	}
1387 
1388 	return false;
1389 }
1390 
1391 /**
1392  * ce_check_rx_pending() - ce_check_rx_pending
1393  * @CE_state: context of the copy engine to check
1394  *
1395  * Return: true if the per_engine_service
1396  *	didn't process all the rx descriptors.
1397  */
1398 bool ce_check_rx_pending(struct CE_state *CE_state)
1399 {
1400 	if (qdf_atomic_read(&CE_state->rx_pending))
1401 		return true;
1402 	else
1403 		return false;
1404 }
1405 qdf_export_symbol(ce_check_rx_pending);
1406 
1407 #ifdef IPA_OFFLOAD
1408 /**
1409  * ce_ipa_get_resource() - get uc resource on copyengine
1410  * @ce: copyengine context
1411  * @ce_sr: copyengine source ring resource info
1412  * @ce_sr_ring_size: copyengine source ring size
1413  * @ce_reg_paddr: copyengine register physical address
1414  *
1415  * Copy engine should release resources to the micro controller.
1416  * The micro controller needs
1417  *  - Copy engine source descriptor base address
1418  *  - Copy engine source descriptor size
1419  *  - PCI BAR address to access copy engine register
1420  *
1421  * Return: None
1422  */
1423 void ce_ipa_get_resource(struct CE_handle *ce,
1424 			 qdf_shared_mem_t **ce_sr,
1425 			 uint32_t *ce_sr_ring_size,
1426 			 qdf_dma_addr_t *ce_reg_paddr)
1427 {
1428 	struct CE_state *CE_state = (struct CE_state *)ce;
1429 	uint32_t ring_loop;
1430 	struct CE_src_desc *ce_desc;
1431 	qdf_dma_addr_t phy_mem_base;
1432 	struct hif_softc *scn = CE_state->scn;
1433 
1434 	if (CE_UNUSED == CE_state->state) {
1435 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1436 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1437 		*ce_sr_ring_size = 0;
1438 		return;
1439 	}
1440 
1441 	/* Update default value for descriptor */
1442 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1443 	     ring_loop++) {
1444 		ce_desc = (struct CE_src_desc *)
1445 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1446 			   ring_loop * (sizeof(struct CE_src_desc)));
1447 		CE_IPA_RING_INIT(ce_desc);
1448 	}
1449 
1450 	/* Get BAR address */
1451 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1452 
1453 	*ce_sr = CE_state->scn->ipa_ce_ring;
1454 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1455 		sizeof(struct CE_src_desc));
1456 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1457 			SR_WR_INDEX_ADDRESS;
1458 }
1459 #endif /* IPA_OFFLOAD */
1460 
1461 #ifdef HIF_CE_DEBUG_DATA_BUF
1462 /**
1463  * hif_dump_desc_data_buf() - dump the data of a recorded ce descriptor event
1464  * @buf: buffer to copy to
1465  * @pos: Current position till which the buf is filled
1466  * @data: Data to be copied
1467  * @data_len: Length of the data to be copied
 *
 * Return: updated position in the buffer
1468  */
1469 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1470 					uint8_t *data, uint32_t data_len)
1471 {
1472 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1473 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1474 
1475 	if ((data_len > 0) && data) {
1476 		if (data_len < 16) {
1477 			hex_dump_to_buffer(data,
1478 						CE_DEBUG_DATA_PER_ROW,
1479 						16, 1, buf + pos,
1480 						(ssize_t)PAGE_SIZE - pos,
1481 						false);
1482 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1483 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1484 		} else {
1485 			uint32_t rows = (data_len / 16) + 1;
1486 			uint32_t row = 0;
1487 
1488 			for (row = 0; row < rows; row++) {
1489 				hex_dump_to_buffer(data + (row * 16),
1490 							CE_DEBUG_DATA_PER_ROW,
1491 							16, 1, buf + pos,
1492 							(ssize_t)PAGE_SIZE
1493 							- pos, false);
1494 				pos +=
1495 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1496 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1497 						"\n");
1498 			}
1499 		}
1500 	}
1501 
1502 	return pos;
1503 }
1504 #endif
1505 
1506 /*
1507  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
1508  * checked here as well
1509  */
1510 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1511 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1512 {
1513 	switch (type) {
1514 	case HIF_RX_DESC_POST:
1515 		return "HIF_RX_DESC_POST";
1516 	case HIF_RX_DESC_COMPLETION:
1517 		return "HIF_RX_DESC_COMPLETION";
1518 	case HIF_TX_GATHER_DESC_POST:
1519 		return "HIF_TX_GATHER_DESC_POST";
1520 	case HIF_TX_DESC_POST:
1521 		return "HIF_TX_DESC_POST";
1522 	case HIF_TX_DESC_SOFTWARE_POST:
1523 		return "HIF_TX_DESC_SOFTWARE_POST";
1524 	case HIF_TX_DESC_COMPLETION:
1525 		return "HIF_TX_DESC_COMPLETION";
1526 	case FAST_RX_WRITE_INDEX_UPDATE:
1527 		return "FAST_RX_WRITE_INDEX_UPDATE";
1528 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1529 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1530 	case FAST_TX_WRITE_INDEX_UPDATE:
1531 		return "FAST_TX_WRITE_INDEX_UPDATE";
1532 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1533 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1534 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1535 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1536 	case RESUME_WRITE_INDEX_UPDATE:
1537 		return "RESUME_WRITE_INDEX_UPDATE";
1538 	case HIF_IRQ_EVENT:
1539 		return "HIF_IRQ_EVENT";
1540 	case HIF_CE_TASKLET_ENTRY:
1541 		return "HIF_CE_TASKLET_ENTRY";
1542 	case HIF_CE_TASKLET_RESCHEDULE:
1543 		return "HIF_CE_TASKLET_RESCHEDULE";
1544 	case HIF_CE_TASKLET_EXIT:
1545 		return "HIF_CE_TASKLET_EXIT";
1546 	case HIF_CE_REAP_ENTRY:
1547 		return "HIF_CE_REAP_ENTRY";
1548 	case HIF_CE_REAP_EXIT:
1549 		return "HIF_CE_REAP_EXIT";
1550 	case NAPI_SCHEDULE:
1551 		return "NAPI_SCHEDULE";
1552 	case NAPI_POLL_ENTER:
1553 		return "NAPI_POLL_ENTER";
1554 	case NAPI_COMPLETE:
1555 		return "NAPI_COMPLETE";
1556 	case NAPI_POLL_EXIT:
1557 		return "NAPI_POLL_EXIT";
1558 	case HIF_RX_NBUF_ALLOC_FAILURE:
1559 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1560 	case HIF_RX_NBUF_MAP_FAILURE:
1561 		return "HIF_RX_NBUF_MAP_FAILURE";
1562 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1563 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1564 	default:
1565 		return "invalid";
1566 	}
1567 }
1568 
1569 /**
1570  * hif_dump_desc_event() - dump the selected recorded ce descriptor event
1571  * @scn: hif_softc
1572  * @buf: Buffer to which the event is to be copied
 *
 * Return: number of bytes written to buf, or a negative error code
1574  */
1575 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1576 {
1577 	struct hif_ce_desc_event *event;
1578 	uint64_t secs, usecs;
1579 	ssize_t len = 0;
1580 	struct ce_desc_hist *ce_hist = NULL;
1581 	struct hif_ce_desc_event *hist_ev = NULL;
1582 
1583 	if (!scn)
1584 		return -EINVAL;
1585 
1586 	ce_hist = &scn->hif_ce_desc_hist;
1587 
1588 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1589 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1590 		qdf_print("Invalid values");
1591 		return -EINVAL;
1592 	}
1593 
1594 	hist_ev =
1595 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1596 
1597 	if (!hist_ev) {
1598 		qdf_print("Low Memory");
1599 		return -EINVAL;
1600 	}
1601 
1602 	event = &hist_ev[ce_hist->hist_index];
1603 
1604 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1605 
1606 	len += snprintf(buf, PAGE_SIZE - len,
1607 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1608 			secs, usecs, ce_hist->hist_id,
1609 			ce_event_type_to_str(event->type),
1610 			event->index, event->memory);
1611 #ifdef HIF_CE_DEBUG_DATA_BUF
1612 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%d",
1613 			event->actual_data_len);
1614 #endif
1615 
1616 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1617 
1618 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1619 				16, 1, buf + len,
1620 				(ssize_t)PAGE_SIZE - len, false);
1621 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1622 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1623 
1624 #ifdef HIF_CE_DEBUG_DATA_BUF
1625 	if (ce_hist->data_enable[ce_hist->hist_id])
1626 		len = hif_dump_desc_data_buf(buf, len, event->data,
1627 						(event->actual_data_len <
1628 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1629 						event->actual_data_len :
1630 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1631 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1632 
1633 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1634 
1635 	return len;
1636 }
1637 
1638 /*
1639  * hif_input_desc_trace_buf_index() -
1640  * API to set the CE id and CE debug storage buffer index
1641  *
1642  * @scn: hif_softc
1643  * @buf: data got from the user
1644  * @size: size of the user input
1645  *
1646  * Return: total length consumed
1647  */
1648 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1649 					const char *buf, size_t size)
1650 {
1651 	struct ce_desc_hist *ce_hist = NULL;
1652 
1653 	if (!scn)
1654 		return -EINVAL;
1655 
1656 	ce_hist = &scn->hif_ce_desc_hist;
1657 
1658 	if (!size) {
1659 		pr_err("%s: Invalid input buffer.\n", __func__);
1660 		return -EINVAL;
1661 	}
1662 
1663 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1664 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1665 		pr_err("%s: Invalid input value.\n", __func__);
1666 		return -EINVAL;
1667 	}
1668 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1669 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1670 		qdf_print("Invalid values");
1671 		return -EINVAL;
1672 	}
1673 
1674 	return size;
1675 }
1676 
1677 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1678 
1679 #ifdef HIF_CE_DEBUG_DATA_BUF
1680 /*
1681  * hif_ce_en_desc_hist() -
1682  * API to enable recording the CE desc history
1683  *
1684  * @scn: hif_softc
1685  * @buf: data got from the user
1686  * @size: size of the user input
1687  *
1688  * Starts or stops recording the ce desc history
1689  *
1690  * Return: total length consumed
1691  */
1692 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1693 {
1694 	struct ce_desc_hist *ce_hist = NULL;
1695 	uint32_t cfg = 0;
1696 	uint32_t ce_id = 0;
1697 
1698 	if (!scn)
1699 		return -EINVAL;
1700 
1701 	ce_hist = &scn->hif_ce_desc_hist;
1702 
1703 	if (!size) {
1704 		pr_err("%s: Invalid input buffer.\n", __func__);
1705 		return -EINVAL;
1706 	}
1707 
1708 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1709 		   (unsigned int *)&cfg) != 2) {
1710 		pr_err("%s: Invalid input: Enter CE Id<sp><1/0>.\n", __func__);
1711 		return -EINVAL;
1712 	}
1713 	if (ce_id >= CE_COUNT_MAX) {
1714 		qdf_print("Invalid value CE Id");
1715 		return -EINVAL;
1716 	}
1717 
1718 	if (cfg > 1) {
1719 		qdf_print("Invalid values: enter 0 or 1");
1720 		return -EINVAL;
1721 	}
1722 
1723 	if (!ce_hist->hist_ev[ce_id])
1724 		return -EINVAL;
1725 
1726 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1727 	if (cfg == 1) {
1728 		if (ce_hist->data_enable[ce_id] == 1) {
1729 			qdf_print("\nAlready Enabled");
1730 		} else {
1731 			if (alloc_mem_ce_debug_hist_data(scn, ce_id) ==
1732 			    QDF_STATUS_E_NOMEM) {
1733 				ce_hist->data_enable[ce_id] = 0;
1734 				qdf_print("%s: Memory Alloc failed", __func__);
1735 			} else
1736 				ce_hist->data_enable[ce_id] = 1;
1737 		}
1738 	} else if (cfg == 0) {
1739 		if (ce_hist->data_enable[ce_id] == 0) {
1740 			qdf_print("\nAlready Disabled");
1741 		} else {
1742 			ce_hist->data_enable[ce_id] = 0;
1743 			free_mem_ce_debug_hist_data(scn, ce_id);
1744 		}
1745 	}
1746 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1747 
1748 	return size;
1749 }
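
/*
 * Illustrative sketch only (not part of this driver): toggling descriptor
 * data capture for one copy engine through hif_ce_en_desc_hist(). The CE
 * id (5) is an arbitrary example value; the input format "<CE id> <1/0>"
 * comes from the parser above.
 */
#if 0
static void example_toggle_ce_data_hist(struct hif_softc *scn)
{
	/* "5 1" -> allocate data buffers and start recording for CE 5 */
	hif_ce_en_desc_hist(scn, "5 1", strlen("5 1"));

	/* ... run the traffic of interest ... */

	/* "5 0" -> stop recording and free the per-CE data buffers */
	hif_ce_en_desc_hist(scn, "5 0", strlen("5 0"));
}
#endif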
1750 
1751 /*
1752  * hif_disp_ce_enable_desc_data_hist() -
1753  * API to display the value of data_enable for each CE
1754  *
1755  * @scn: HIF context
1756  * @buf: output buffer to copy the data into
1757  *
1758  * Prints one line per CE with its data_enable value
1759  * Return: total length copied
1760  */
1761 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1762 {
1763 	ssize_t len = 0;
1764 	uint32_t ce_id = 0;
1765 	struct ce_desc_hist *ce_hist = NULL;
1766 
1767 	if (!scn)
1768 		return -EINVAL;
1769 
1770 	ce_hist = &scn->hif_ce_desc_hist;
1771 
1772 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1773 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1774 				ce_id, ce_hist->data_enable[ce_id]);
1775 	}
1776 
1777 	return len;
1778 }
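
/*
 * Illustrative sketch only (not part of this driver): a sysfs show handler
 * that reports the per-CE data_enable flags through
 * hif_disp_ce_enable_desc_data_hist(). The device-to-hif_softc lookup
 * helper is a placeholder for whatever context lookup the integration uses.
 */
#if 0
static ssize_t ce_data_hist_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	/* Hypothetical helper: map the device back to its HIF context */
	struct hif_softc *scn = example_dev_to_hif_softc(dev);

	/* Emits one " CE<n>: <0/1>" line per copy engine */
	return hif_disp_ce_enable_desc_data_hist(scn, buf);
}
#endif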
1779 #endif /* HIF_CE_DEBUG_DATA_BUF */
1780 
1781 #ifdef OL_ATH_SMART_LOGGING
1782 #define GUARD_SPACE 10
1783 #define LOG_ID_SZ 4
1784 /*
1785  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
1786  * @src_ring: SRC ring state
1787  * @buf_cur: Current pointer in ring buffer
1788  * @buf_init: Start of the ring buffer
1789  * @buf_sz: Size of the ring buffer
1790  * @skb_sz: Max size of the SKB buffer to be copied
1791  *
1792  * Dumps all the CE SRC ring descriptors and the buffers they point to into
1793  * the given buf; skb_sz is the maximum number of SKB bytes to be copied
1794  *
1795  * Return: Current pointer in ring buffer
1796  */
1797 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
1798 				    uint8_t *buf_cur, uint8_t *buf_init,
1799 				    uint32_t buf_sz, uint32_t skb_sz)
1800 {
1801 	struct CE_src_desc *src_ring_base;
1802 	uint32_t len, entry;
1803 	struct CE_src_desc  *src_desc;
1804 	qdf_nbuf_t nbuf;
1805 	uint32_t available_buf;
1806 
1807 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
1808 	len = sizeof(struct CE_ring_state);
1809 	available_buf = buf_sz - (buf_cur - buf_init);
1810 	if (available_buf < (len + GUARD_SPACE)) {
1811 		buf_cur = buf_init;
1812 	}
1813 
1814 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
1815 	buf_cur += sizeof(struct CE_ring_state);
1816 
1817 	for (entry = 0; entry < src_ring->nentries; entry++) {
1818 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
1819 		nbuf = src_ring->per_transfer_context[entry];
1820 		if (nbuf) {
1821 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1822 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1823 
1824 			len = sizeof(struct CE_src_desc) + skb_cp_len
1825 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1826 			available_buf = buf_sz - (buf_cur - buf_init);
1827 			if (available_buf < (len + GUARD_SPACE)) {
1828 				buf_cur = buf_init;
1829 			}
1830 			qdf_mem_copy(buf_cur, src_desc,
1831 				     sizeof(struct CE_src_desc));
1832 			buf_cur += sizeof(struct CE_src_desc);
1833 
1834 			available_buf = buf_sz - (buf_cur - buf_init);
1835 			buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
1836 						skb_cp_len);
1837 
1838 			if (skb_cp_len) {
1839 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1840 					     skb_cp_len);
1841 				buf_cur += skb_cp_len;
1842 			}
1843 		} else {
1844 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
1845 			available_buf = buf_sz - (buf_cur - buf_init);
1846 			if (available_buf < (len + GUARD_SPACE)) {
1847 				buf_cur = buf_init;
1848 			}
1849 			qdf_mem_copy(buf_cur, src_desc,
1850 				     sizeof(struct CE_src_desc));
1851 			buf_cur += sizeof(struct CE_src_desc);
1852 			available_buf = buf_sz - (buf_cur - buf_init);
1853 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1854 		}
1855 	}
1856 
1857 	return buf_cur;
1858 }
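
/*
 * Buffer layout produced by hif_log_src_ce_dump() (and, below, by
 * hif_log_dest_ce_dump()): a CE_ring_state snapshot followed by one record
 * per ring entry. Entries backed by an nbuf are written as
 *     [CE descriptor][ "SKB<len>" tag ][ up to skb_sz payload bytes ]
 * and empty entries as
 *     [CE descriptor][ "NUL" ]
 * Whenever fewer than len + GUARD_SPACE bytes remain, the write position
 * wraps back to buf_init, so newer records overwrite the oldest data.
 */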
1859 
1860 /*
1861  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
1862  * @dest_ring: DEST ring state
1863  * @buf_cur: Current pointer in ring buffer
1864  * @buf_init: Start of the ring buffer
1865  * @buf_sz: Size of the ring buffer
1866  * @skb_sz: Max size of the SKB buffer to be copied
1867  *
1868  * Dumps all the CE DEST ring descriptors and the buffers they point to into
1869  * the given buf; skb_sz is the maximum number of SKB bytes to be copied
1870  *
1871  * Return: Current pointer in ring buffer
1872  */
1873 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
1874 				     uint8_t *buf_cur, uint8_t *buf_init,
1875 				     uint32_t buf_sz, uint32_t skb_sz)
1876 {
1877 	struct CE_dest_desc *dest_ring_base;
1878 	uint32_t len, entry;
1879 	struct CE_dest_desc  *dest_desc;
1880 	qdf_nbuf_t nbuf;
1881 	uint32_t available_buf;
1882 
1883 	dest_ring_base =
1884 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1885 
1886 	len = sizeof(struct CE_ring_state);
1887 	available_buf = buf_sz - (buf_cur - buf_init);
1888 	if (available_buf < (len + GUARD_SPACE)) {
1889 		buf_cur = buf_init;
1890 	}
1891 
1892 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
1893 	buf_cur += sizeof(struct CE_ring_state);
1894 
1895 	for (entry = 0; entry < dest_ring->nentries; entry++) {
1896 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
1897 
1898 		nbuf = dest_ring->per_transfer_context[entry];
1899 		if (nbuf) {
1900 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1901 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1902 
1903 			len = sizeof(struct CE_dest_desc) + skb_cp_len
1904 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1905 
1906 			available_buf = buf_sz - (buf_cur - buf_init);
1907 			if (available_buf < (len + GUARD_SPACE)) {
1908 				buf_cur = buf_init;
1909 			}
1910 
1911 			qdf_mem_copy(buf_cur, dest_desc,
1912 				     sizeof(struct CE_dest_desc));
1913 			buf_cur += sizeof(struct CE_dest_desc);
1914 			available_buf = buf_sz - (buf_cur - buf_init);
1915 			buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
1916 						skb_cp_len);
1917 			if (skb_cp_len) {
1918 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1919 					     skb_cp_len);
1920 				buf_cur += skb_cp_len;
1921 			}
1922 		} else {
1923 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
1924 			available_buf = buf_sz - (buf_cur - buf_init);
1925 			if (available_buf < (len + GUARD_SPACE)) {
1926 				buf_cur = buf_init;
1927 			}
1928 			qdf_mem_copy(buf_cur, dest_desc,
1929 				     sizeof(struct CE_dest_desc));
1930 			buf_cur += sizeof(struct CE_dest_desc);
1931 			available_buf = buf_sz - (buf_cur - buf_init);
1932 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1933 		}
1934 	}
1935 	return buf_cur;
1936 }
1937 
1938 /**
1939  * hif_log_dump_ce() - Copy the requested CE SRC or DEST ring to buf
1940  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1941  * and the buffers they point to into the given buf
1942  */
1943 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1944 			 uint8_t *buf_init, uint32_t buf_sz,
1945 			 uint32_t ce, uint32_t skb_sz)
1946 {
1947 	struct CE_state *ce_state;
1948 	struct CE_ring_state *src_ring;
1949 	struct CE_ring_state *dest_ring;
1950 
1951 	ce_state = scn->ce_id_to_state[ce];
1952 	src_ring = ce_state->src_ring;
1953 	dest_ring = ce_state->dest_ring;
1954 
1955 	if (src_ring) {
1956 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
1957 					      buf_init, buf_sz, skb_sz);
1958 	} else if (dest_ring) {
1959 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
1960 					       buf_init, buf_sz, skb_sz);
1961 	}
1962 
1963 	return buf_cur;
1964 }
1965 
1966 qdf_export_symbol(hif_log_dump_ce);
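
/*
 * Illustrative sketch only (not part of this driver): collecting a
 * smart-logging snapshot of every copy engine with hif_log_dump_ce(). The
 * 64-byte SKB payload cap is an arbitrary example value, and the caller is
 * assumed to own log_buf of log_buf_sz bytes.
 */
#if 0
static uint8_t *example_dump_all_ce(struct hif_softc *scn,
				    uint8_t *log_buf, uint32_t log_buf_sz)
{
	uint8_t *cur = log_buf;
	uint32_t ce_id;

	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		/* Skip copy engines that were never attached */
		if (!scn->ce_id_to_state[ce_id])
			continue;

		/* Append this CE's SRC or DEST ring dump to the log buffer */
		cur = hif_log_dump_ce(scn, cur, log_buf, log_buf_sz,
				      ce_id, 64);
	}

	return cur;
}
#endif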
1967 #endif /* OL_ATH_SMART_LOGGING */
1968 
1969