xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 1397a33f48ea6455be40871470b286e535820eb8)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 static int war1_allow_sleep;
65 /* io32 write workaround */
66 static int hif_ce_war1;
67 
68 /**
69  * hif_ce_war_disable() - disable ce war globally
70  */
71 void hif_ce_war_disable(void)
72 {
73 	hif_ce_war1 = 0;
74 }
75 
76 /**
77  * hif_ce_war_enable() - enable ce war globally
78  */
79 void hif_ce_war_enable(void)
80 {
81 	hif_ce_war1 = 1;
82 }
83 
84 /*
85  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
86  * checked here
87  */
88 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
89 
90 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
91 #define CE_DEBUG_DATA_PER_ROW 16
92 
93 qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
94 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
95 
96 /**
97  * get_next_record_index() - get the next record index
98  * @table_index: atomic index variable to increment
99  * @array_size: array size of the circular buffer
100  *
101  * Increment the atomic index and reserve the value.
102  * Takes care of buffer wrap.
103  * Guaranteed to be thread safe as long as fewer than array_size contexts
104  * try to access the array.  If there are more than array_size contexts
105  * trying to access the array, full locking of the recording process would
106  * be needed to have sane logging.
107  */
108 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
109 {
110 	int record_index = qdf_atomic_inc_return(table_index);
111 
112 	if (record_index == array_size)
113 		qdf_atomic_sub(array_size, table_index);
114 
115 	while (record_index >= array_size)
116 		record_index -= array_size;
117 	return record_index;
118 }
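
/*
 * Editorial sketch (illustrative values only, not part of the driver): with
 * array_size = 4 and the atomic index currently at 3, the next caller of
 *
 *	int rec = get_next_record_index(&idx, 4);
 *
 * sees qdf_atomic_inc_return() yield 4, which equals array_size, so 4 is
 * subtracted from the atomic counter and the local copy is reduced below
 * array_size, giving rec == 0 and a cleanly wrapped index for later callers.
 */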
119 
120 #if HIF_CE_DEBUG_DATA_BUF
121 /**
122  * hif_ce_desc_data_record() - Record data pointed to by the CE descriptor
123  * @event: structure detailing a ce event
124  * @len: length of the data
125  * Return: None
126  */
127 static void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
128 {
129 	uint8_t *data = NULL;
130 
131 	if (!event->data)
132 		return;
133 
134 	if (event->memory && len > 0)
135 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
136 
137 	event->actual_data_len = 0;
138 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
139 
140 	if (data && len > 0) {
141 		qdf_mem_copy(event->data, data,
142 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
143 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
144 		event->actual_data_len = len;
145 	}
146 }
147 #endif
148 
149 /**
150  * hif_record_ce_desc_event() - record ce descriptor events
151  * @scn: hif_softc
152  * @ce_id: which ce is the event occurring on
153  * @type: what happened
154  * @descriptor: pointer to the descriptor posted/completed
155  * @memory: virtual address of buffer related to the descriptor
156  * @index: index that the descriptor was/will be at.
 * @len: length of the data pointed to by the descriptor
157  */
158 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
159 				enum hif_ce_event_type type,
160 				union ce_desc *descriptor,
161 				void *memory, int index,
162 				int len)
163 {
164 	int record_index;
165 	struct hif_ce_desc_event *event;
166 
167 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
168 	struct hif_ce_desc_event *hist_ev = NULL;
169 
170 	if (ce_id < CE_COUNT_MAX)
171 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
172 	else
173 		return;
177 
178 	if (!ce_hist->enable[ce_id])
179 		return;
180 
181 	if (!hist_ev)
182 		return;
183 
184 	record_index = get_next_record_index(
185 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
186 
187 	event = &hist_ev[record_index];
188 
189 	event->type = type;
190 	event->time = qdf_get_log_timestamp();
191 
192 	if (descriptor != NULL) {
193 		qdf_mem_copy(&event->descriptor, descriptor, sizeof(union ce_desc));
194 	} else {
195 		qdf_mem_zero(&event->descriptor, sizeof(union ce_desc));
196 	}
197 
198 	event->memory = memory;
199 	event->index = index;
200 
201 #if HIF_CE_DEBUG_DATA_BUF
202 	if (ce_hist->data_enable[ce_id])
203 		hif_ce_desc_data_record(event, len);
204 #endif
205 }
206 qdf_export_symbol(hif_record_ce_desc_event);
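
/*
 * Recording sketch (illustrative): a caller posting a TX descriptor would
 * typically log the event as, e.g.,
 *
 *	hif_record_ce_desc_event(scn, ce_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc, msdu,
 *				 write_index, len);
 *
 * which is essentially the pattern used by ce_send_single() further below.
 */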
207 
208 /**
209  * ce_init_ce_desc_event_log() - initialize the ce event log
210  * @ce_id: copy engine id for which we are initializing the log
211  * @size: size of array to dedicate
212  *
213  * Currently the passed size is ignored in favor of a precompiled value.
214  */
215 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
216 {
217 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
218 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
219 	qdf_mutex_create(&ce_dbg_datamem_lock[ce_id]);
220 }
221 
222 /**
223  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
224  * @ce_id: copy engine id for which we are deinitializing the log
225  *
226  */
227 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
228 {
229 	qdf_mutex_destroy(&ce_dbg_datamem_lock[ce_id]);
230 }
231 
232 #else /* Note: For MCL, (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
233 void hif_record_ce_desc_event(struct hif_softc *scn,
234 		int ce_id, enum hif_ce_event_type type,
235 		union ce_desc *descriptor, void *memory,
236 		int index, int len)
237 {
238 }
239 qdf_export_symbol(hif_record_ce_desc_event);
240 
241 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
242 					int size)
243 {
244 }
245 
246 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
247 {
248 }
249 #endif /* Note: for MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
250 
251 #ifdef NAPI_YIELD_BUDGET_BASED
252 bool hif_ce_service_should_yield(struct hif_softc *scn,
253 				 struct CE_state *ce_state)
254 {
255 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
256 
257 	/* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
258 	 * beyond MAX_NUM_OF_RECEIVES, to avoid a NAPI budget calculation
259 	 * issue. This can happen in fast path handling as processing happens
260 	 * in batches.
261 	 */
262 	if (yield)
263 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
264 
265 	return yield;
266 }
267 #else
268 /**
269  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
270  * @scn: hif context
271  * @ce_state: context of the copy engine being serviced
272  *
273  * Return: true if the service should yield
274  */
275 bool hif_ce_service_should_yield(struct hif_softc *scn,
276 				 struct CE_state *ce_state)
277 {
278 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
279 
280 	time_limit_reached =
281 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
282 
283 	if (!time_limit_reached)
284 		rxpkt_thresh_reached = hif_max_num_receives_reached
285 					(scn, ce_state->receive_count);
286 
287 	/* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
288 	 * beyond MAX_NUM_OF_RECEIVES, to avoid a NAPI budget calculation
289 	 * issue. This can happen in fast path handling as processing happens
290 	 * in batches.
291 	 */
292 	if (rxpkt_thresh_reached)
293 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
294 
295 	yield =  time_limit_reached || rxpkt_thresh_reached;
296 
297 	if (yield && ce_state->htt_rx_data)
298 		hif_napi_update_yield_stats(ce_state,
299 					    time_limit_reached,
300 					    rxpkt_thresh_reached);
301 	return yield;
302 }
303 qdf_export_symbol(hif_ce_service_should_yield);
304 #endif
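
/*
 * Minimal usage sketch (an assumption about the calling pattern, not code
 * taken from this file): a CE processing loop would typically consult
 * hif_ce_service_should_yield() after each batch so that one engine cannot
 * monopolize the CPU.  more_work() and process_one_batch() below are
 * hypothetical helpers used only for illustration.
 *
 *	while (more_work(ce_state)) {
 *		process_one_batch(scn, ce_state);
 *		if (hif_ce_service_should_yield(scn, ce_state))
 *			break;
 *	}
 */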
305 
306 /*
307  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
308  * The caller takes responsibility for any needed locking.
309  */
310 
311 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
312 				   u32 ctrl_addr, unsigned int write_index)
313 {
314 	if (hif_ce_war1) {
315 		void __iomem *indicator_addr;
316 
317 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
318 
319 		if (!war1_allow_sleep
320 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
321 			hif_write32_mb(scn, indicator_addr,
322 				       (CDC_WAR_MAGIC_STR | write_index));
323 		} else {
324 			unsigned long irq_flags;
325 
326 			local_irq_save(irq_flags);
327 			hif_write32_mb(scn, indicator_addr, 1);
328 
329 			/*
330 			 * PCIE write waits for ACK in IPQ8K, there is no
331 			 * need to read back value.
332 			 */
333 			(void)hif_read32_mb(scn, indicator_addr);
334 			/* conservative */
335 			(void)hif_read32_mb(scn, indicator_addr);
336 
337 			CE_SRC_RING_WRITE_IDX_SET(scn,
338 						  ctrl_addr, write_index);
339 
340 			hif_write32_mb(scn, indicator_addr, 0);
341 			local_irq_restore(irq_flags);
342 		}
343 	} else {
344 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
345 	}
346 }
347 
348 qdf_export_symbol(war_ce_src_ring_write_idx_set);
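
/*
 * The workaround path above is only taken when hif_ce_war1 is set; a
 * platform init path that needs it would call hif_ce_war_enable() once
 * before the first ring update.  Sketch only; io32_write_war_needed is a
 * hypothetical, platform-specific condition:
 *
 *	if (io32_write_war_needed)
 *		hif_ce_war_enable();
 */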
349 
350 int
351 ce_send(struct CE_handle *copyeng,
352 		void *per_transfer_context,
353 		qdf_dma_addr_t buffer,
354 		uint32_t nbytes,
355 		uint32_t transfer_id,
356 		uint32_t flags,
357 		uint32_t user_flag)
358 {
359 	struct CE_state *CE_state = (struct CE_state *)copyeng;
360 	int status;
361 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
362 
363 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
364 	status = hif_state->ce_services->ce_send_nolock(copyeng,
365 			per_transfer_context, buffer, nbytes,
366 			transfer_id, flags, user_flag);
367 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
368 
369 	return status;
370 }
371 qdf_export_symbol(ce_send);
372 
373 unsigned int ce_sendlist_sizeof(void)
374 {
375 	return sizeof(struct ce_sendlist);
376 }
377 
378 void ce_sendlist_init(struct ce_sendlist *sendlist)
379 {
380 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
381 
382 	sl->num_items = 0;
383 }
384 
385 int
386 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
387 					qdf_dma_addr_t buffer,
388 					uint32_t nbytes,
389 					uint32_t flags,
390 					uint32_t user_flags)
391 {
392 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
393 	unsigned int num_items = sl->num_items;
394 	struct ce_sendlist_item *item;
395 
396 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
397 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
398 		return QDF_STATUS_E_RESOURCES;
399 	}
400 
401 	item = &sl->item[num_items];
402 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
403 	item->data = buffer;
404 	item->u.nbytes = nbytes;
405 	item->flags = flags;
406 	item->user_flags = user_flags;
407 	sl->num_items = num_items + 1;
408 	return QDF_STATUS_SUCCESS;
409 }
410 
411 int
412 ce_sendlist_send(struct CE_handle *copyeng,
413 		 void *per_transfer_context,
414 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
415 {
416 	struct CE_state *CE_state = (struct CE_state *)copyeng;
417 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
418 
419 	return hif_state->ce_services->ce_sendlist_send(copyeng,
420 			per_transfer_context, sendlist, transfer_id);
421 }
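
/*
 * Usage sketch for the sendlist path (illustrative only, error handling
 * trimmed): frag_paddr and frag_len are assumed to describe an already
 * DMA-mapped buffer, and ctx is the caller's per-transfer context.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag_paddr, frag_len, 0, 0);
 *	ce_sendlist_send(copyeng, ctx, &sendlist, transfer_id);
 */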
422 
423 #ifndef AH_NEED_TX_DATA_SWAP
424 #define AH_NEED_TX_DATA_SWAP 0
425 #endif
426 
427 /**
428  * ce_batch_send() - sends bunch of msdus at once
429  * @ce_tx_hdl : pointer to CE handle
430  * @msdu : list of msdus to be sent
431  * @transfer_id : transfer id
432  * @len : Downloaded length
433  * @sendhead : sendhead
434  *
435  * Assumption: Called with an array of MSDUs
436  * Function:
437  * For each msdu in the array
438  * 1. Send each msdu
439  * 2. Increment write index accordingly.
440  *
441  * Return: list of msdus not sent
442  */
443 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
444 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
445 {
446 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
447 	struct hif_softc *scn = ce_state->scn;
448 	struct CE_ring_state *src_ring = ce_state->src_ring;
449 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
450 	/*  A_target_id_t targid = TARGID(scn);*/
451 
452 	uint32_t nentries_mask = src_ring->nentries_mask;
453 	uint32_t sw_index, write_index;
454 
455 	struct CE_src_desc *src_desc_base =
456 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
457 	uint32_t *src_desc;
458 
459 	struct CE_src_desc lsrc_desc = {0};
460 	int deltacount = 0;
461 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
462 
463 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
464 	sw_index = src_ring->sw_index;
465 	write_index = src_ring->write_index;
466 
467 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
468 
469 	while (msdu) {
470 		tempnext = qdf_nbuf_next(msdu);
471 
472 		if (deltacount < 2) {
473 			if (sendhead)
474 				return msdu;
475 			HIF_ERROR("%s: Out of descriptors", __func__);
476 			src_ring->write_index = write_index;
477 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
478 					write_index);
479 
480 			sw_index = src_ring->sw_index;
481 			write_index = src_ring->write_index;
482 
483 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
484 					sw_index-1);
485 			if (freelist == NULL) {
486 				freelist = msdu;
487 				hfreelist = msdu;
488 			} else {
489 				qdf_nbuf_set_next(freelist, msdu);
490 				freelist = msdu;
491 			}
492 			qdf_nbuf_set_next(msdu, NULL);
493 			msdu = tempnext;
494 			continue;
495 		}
496 
497 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
498 				write_index);
499 
500 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
501 
502 		lsrc_desc.meta_data = transfer_id;
503 		if (len  > msdu->len)
504 			len =  msdu->len;
505 		lsrc_desc.nbytes = len;
506 		/*  Data packet is a byte stream, so disable byte swap */
507 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
508 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
509 
510 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
511 
512 
513 		src_ring->per_transfer_context[write_index] = msdu;
514 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
515 
516 		if (sendhead)
517 			break;
518 		qdf_nbuf_set_next(msdu, NULL);
519 		msdu = tempnext;
520 
521 	}
522 
523 
524 	src_ring->write_index = write_index;
525 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
526 
527 	return hfreelist;
528 }
529 
530 /**
531  * ce_update_tx_ring() - Advance sw index.
532  * @ce_tx_hdl : pointer to CE handle
533  * @num_htt_cmpls : htt completions received.
534  *
535  * Function:
536  * Increment the value of sw index of src ring
537  * according to number of htt completions
538  * received.
539  *
540  * Return: void
541  */
542 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
543 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
544 {
545 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
546 	struct CE_ring_state *src_ring = ce_state->src_ring;
547 	uint32_t nentries_mask = src_ring->nentries_mask;
548 	/*
549 	 * Advance the s/w index:
550 	 * This effectively simulates completing the CE ring descriptors
551 	 */
552 	src_ring->sw_index =
553 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
554 				num_htt_cmpls);
555 }
556 #else
557 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
558 {}
559 #endif
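
/*
 * Worked example of the masked index arithmetic used above (illustrative
 * values, and assuming CE_RING_IDX_ADD() masks the sum with nentries_mask):
 * a 512-entry ring has nentries_mask = 0x1FF, so advancing sw_index 508 by
 * 8 HTT completions gives (508 + 8) & 0x1FF = 4, i.e. the software index
 * wraps cleanly at the ring boundary.
 */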
560 
561 /**
562  * ce_send_single() - send a single msdu
563  * @ce_tx_hdl : pointer to CE handle
564  * @msdu : msdu to be sent
565  * @transfer_id : transfer id
566  * @len : Downloaded length
567  *
568  * Function:
569  * 1. Send one msdu
570  * 2. Increment write index of src ring accordingly.
571  *
572  * Return: int: CE sent status
573  */
574 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
575 		uint32_t transfer_id, u_int32_t len)
576 {
577 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
578 	struct hif_softc *scn = ce_state->scn;
579 	struct CE_ring_state *src_ring = ce_state->src_ring;
580 	uint32_t ctrl_addr = ce_state->ctrl_addr;
581 	/*A_target_id_t targid = TARGID(scn);*/
582 
583 	uint32_t nentries_mask = src_ring->nentries_mask;
584 	uint32_t sw_index, write_index;
585 
586 	struct CE_src_desc *src_desc_base =
587 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
588 	uint32_t *src_desc;
589 
590 	struct CE_src_desc lsrc_desc = {0};
591 	enum hif_ce_event_type event_type;
592 
593 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
594 	sw_index = src_ring->sw_index;
595 	write_index = src_ring->write_index;
596 
597 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
598 					sw_index-1) < 1)) {
599 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
600 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
601 			  write_index, sw_index);
602 		return 1;
603 	}
604 
605 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
606 
607 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
608 
609 	lsrc_desc.meta_data = transfer_id;
610 	lsrc_desc.nbytes = len;
611 	/*  Data packet is a byte stream, so disable byte swap */
612 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
613 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
614 
615 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
616 
617 
618 	src_ring->per_transfer_context[write_index] = msdu;
619 
620 	if (((struct CE_src_desc *)src_desc)->gather)
621 		event_type = HIF_TX_GATHER_DESC_POST;
622 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
623 		event_type = HIF_TX_DESC_SOFTWARE_POST;
624 	else
625 		event_type = HIF_TX_DESC_POST;
626 
627 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
628 				(union ce_desc *)src_desc, msdu,
629 				write_index, len);
630 
631 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
632 
633 	src_ring->write_index = write_index;
634 
635 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
636 
637 	return QDF_STATUS_SUCCESS;
638 }
639 
640 /**
641  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
642  * @copyeng: copy engine handle
643  * @per_recv_context: virtual address of the nbuf
644  * @buffer: physical address of the nbuf
645  *
646  * Return: 0 if the buffer is enqueued
647  */
648 int
649 ce_recv_buf_enqueue(struct CE_handle *copyeng,
650 		    void *per_recv_context, qdf_dma_addr_t buffer)
651 {
652 	struct CE_state *CE_state = (struct CE_state *)copyeng;
653 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
654 
655 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
656 			per_recv_context, buffer);
657 }
658 qdf_export_symbol(ce_recv_buf_enqueue);
659 
660 void
661 ce_send_watermarks_set(struct CE_handle *copyeng,
662 		       unsigned int low_alert_nentries,
663 		       unsigned int high_alert_nentries)
664 {
665 	struct CE_state *CE_state = (struct CE_state *)copyeng;
666 	uint32_t ctrl_addr = CE_state->ctrl_addr;
667 	struct hif_softc *scn = CE_state->scn;
668 
669 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
670 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
671 }
672 
673 void
674 ce_recv_watermarks_set(struct CE_handle *copyeng,
675 		       unsigned int low_alert_nentries,
676 		       unsigned int high_alert_nentries)
677 {
678 	struct CE_state *CE_state = (struct CE_state *)copyeng;
679 	uint32_t ctrl_addr = CE_state->ctrl_addr;
680 	struct hif_softc *scn = CE_state->scn;
681 
682 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
683 				low_alert_nentries);
684 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
685 				high_alert_nentries);
686 }
687 
688 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
689 {
690 	struct CE_state *CE_state = (struct CE_state *)copyeng;
691 	struct CE_ring_state *src_ring = CE_state->src_ring;
692 	unsigned int nentries_mask = src_ring->nentries_mask;
693 	unsigned int sw_index;
694 	unsigned int write_index;
695 
696 	qdf_spin_lock(&CE_state->ce_index_lock);
697 	sw_index = src_ring->sw_index;
698 	write_index = src_ring->write_index;
699 	qdf_spin_unlock(&CE_state->ce_index_lock);
700 
701 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
702 }
703 
704 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
705 {
706 	struct CE_state *CE_state = (struct CE_state *)copyeng;
707 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
708 	unsigned int nentries_mask = dest_ring->nentries_mask;
709 	unsigned int sw_index;
710 	unsigned int write_index;
711 
712 	qdf_spin_lock(&CE_state->ce_index_lock);
713 	sw_index = dest_ring->sw_index;
714 	write_index = dest_ring->write_index;
715 	qdf_spin_unlock(&CE_state->ce_index_lock);
716 
717 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
718 }
719 
720 /*
721  * Guts of ce_send_entries_done.
722  * The caller takes responsibility for any necessary locking.
723  */
724 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
725 {
726 	struct CE_state *CE_state = (struct CE_state *)copyeng;
727 	unsigned int nentries;
728 	struct hif_softc *scn = CE_state->scn;
729 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
730 
731 	qdf_spin_lock(&CE_state->ce_index_lock);
732 	nentries = hif_state->ce_services->ce_send_entries_done_nolock(
733 						CE_state->scn, CE_state);
734 	qdf_spin_unlock(&CE_state->ce_index_lock);
735 
736 	return nentries;
737 }
738 
739 /*
740  * Guts of ce_recv_entries_done.
741  * The caller takes responsibility for any necessary locking.
742  */
743 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
744 {
745 	struct CE_state *CE_state = (struct CE_state *)copyeng;
746 	unsigned int nentries;
747 	struct hif_softc *scn = CE_state->scn;
748 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
749 
750 	qdf_spin_lock(&CE_state->ce_index_lock);
751 	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
752 						CE_state->scn, CE_state);
753 	qdf_spin_unlock(&CE_state->ce_index_lock);
754 
755 	return nentries;
756 }
757 
758 /*
759  * Guts of ce_completed_recv_next.
760  * The caller takes responsibility for any necessary locking.
761  */
762 int
763 ce_completed_recv_next(struct CE_handle *copyeng,
764 		       void **per_CE_contextp,
765 		       void **per_transfer_contextp,
766 		       qdf_dma_addr_t *bufferp,
767 		       unsigned int *nbytesp,
768 		       unsigned int *transfer_idp, unsigned int *flagsp)
769 {
770 	struct CE_state *CE_state = (struct CE_state *)copyeng;
771 	int status;
772 	struct hif_softc *scn = CE_state->scn;
773 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
774 	struct ce_ops *ce_services;
775 
776 	ce_services = hif_state->ce_services;
777 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
778 	status =
779 		ce_services->ce_completed_recv_next_nolock(CE_state,
780 				per_CE_contextp, per_transfer_contextp, bufferp,
781 					      nbytesp, transfer_idp, flagsp);
782 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
783 
784 	return status;
785 }
786 
787 QDF_STATUS
788 ce_revoke_recv_next(struct CE_handle *copyeng,
789 		    void **per_CE_contextp,
790 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
791 {
792 	struct CE_state *CE_state = (struct CE_state *)copyeng;
793 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
794 
795 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
796 			per_CE_contextp, per_transfer_contextp, bufferp);
797 }
798 
799 QDF_STATUS
800 ce_cancel_send_next(struct CE_handle *copyeng,
801 		void **per_CE_contextp,
802 		void **per_transfer_contextp,
803 		qdf_dma_addr_t *bufferp,
804 		unsigned int *nbytesp,
805 		unsigned int *transfer_idp,
806 		uint32_t *toeplitz_hash_result)
807 {
808 	struct CE_state *CE_state = (struct CE_state *)copyeng;
809 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
810 
811 	return hif_state->ce_services->ce_cancel_send_next
812 		(copyeng, per_CE_contextp, per_transfer_contextp,
813 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
814 }
815 qdf_export_symbol(ce_cancel_send_next);
816 
817 int
818 ce_completed_send_next(struct CE_handle *copyeng,
819 		       void **per_CE_contextp,
820 		       void **per_transfer_contextp,
821 		       qdf_dma_addr_t *bufferp,
822 		       unsigned int *nbytesp,
823 		       unsigned int *transfer_idp,
824 		       unsigned int *sw_idx,
825 		       unsigned int *hw_idx,
826 		       unsigned int *toeplitz_hash_result)
827 {
828 	struct CE_state *CE_state = (struct CE_state *)copyeng;
829 	struct hif_softc *scn = CE_state->scn;
830 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
831 	struct ce_ops *ce_services;
832 	int status;
833 
834 	ce_services = hif_state->ce_services;
835 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
836 	status =
837 		ce_services->ce_completed_send_next_nolock(CE_state,
838 					per_CE_contextp, per_transfer_contextp,
839 					bufferp, nbytesp, transfer_idp, sw_idx,
840 					      hw_idx, toeplitz_hash_result);
841 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
842 
843 	return status;
844 }
845 
846 #ifdef ATH_11AC_TXCOMPACT
847 /* CE engine descriptor reap
848  * Similar to ce_per_engine_service; the only difference is that
849  * ce_per_engine_service does both receive processing and reaping of
850  * completed descriptors, while this function only handles reaping of Tx
851  * complete descriptors. The function is called from the threshold reap
852  * poll routine hif_send_complete_check, so it should not contain receive
853  * functionality.
854  */
855 
856 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
857 {
858 	void *CE_context;
859 	void *transfer_context;
860 	qdf_dma_addr_t buf;
861 	unsigned int nbytes;
862 	unsigned int id;
863 	unsigned int sw_idx, hw_idx;
864 	uint32_t toeplitz_hash_result;
865 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
866 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
867 
868 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
869 		return;
870 
871 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
872 			NULL, NULL, 0, 0);
873 
874 	/* Since this function is called from both user context and
875 	 * tasklet context, the spinlock has to disable bottom halves.
876 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
877 	 * enabled in TX polling mode. If this is not the case, more
878 	 * bottom-half spin lock changes are needed. Due to data path
879 	 * performance concerns, after internal discussion we decided to
880 	 * make the minimum change, i.e. only address the issue seen in
881 	 * this function. The possible downside of this minimum change is
882 	 * that, if some other function is later opened up for use from
883 	 * user context as well, those cases will also need spin_lock
884 	 * changed to spin_lock_bh.
885 	 */
886 
887 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
888 
889 	if (CE_state->send_cb) {
890 		{
891 			struct ce_ops *ce_services = hif_state->ce_services;
892 			/* Pop completed send buffers and call the
893 			 * registered send callback for each
894 			 */
895 			while (ce_services->ce_completed_send_next_nolock
896 				 (CE_state, &CE_context,
897 				  &transfer_context, &buf,
898 				  &nbytes, &id, &sw_idx, &hw_idx,
899 				  &toeplitz_hash_result) ==
900 				  QDF_STATUS_SUCCESS) {
901 				if (ce_id != CE_HTT_H2T_MSG) {
902 					qdf_spin_unlock_bh(
903 						&CE_state->ce_index_lock);
904 					CE_state->send_cb(
905 						(struct CE_handle *)
906 						CE_state, CE_context,
907 						transfer_context, buf,
908 						nbytes, id, sw_idx, hw_idx,
909 						toeplitz_hash_result);
910 					qdf_spin_lock_bh(
911 						&CE_state->ce_index_lock);
912 				} else {
913 					struct HIF_CE_pipe_info *pipe_info =
914 						(struct HIF_CE_pipe_info *)
915 						CE_context;
916 
917 					qdf_spin_lock_bh(&pipe_info->
918 						 completion_freeq_lock);
919 					pipe_info->num_sends_allowed++;
920 					qdf_spin_unlock_bh(&pipe_info->
921 						   completion_freeq_lock);
922 				}
923 			}
924 		}
925 	}
926 
927 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
928 
929 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
930 			NULL, NULL, 0, 0);
931 	Q_TARGET_ACCESS_END(scn);
932 }
933 
934 #endif /*ATH_11AC_TXCOMPACT */
935 
936 /*
937  * ce_engine_service_reg:
938  *
939  * Called from ce_per_engine_service and goes through the regular interrupt
940  * handling that does not involve the WLAN fast path feature.
941  *
942  * Returns void
943  */
944 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
945 {
946 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
947 	uint32_t ctrl_addr = CE_state->ctrl_addr;
948 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
949 	void *CE_context;
950 	void *transfer_context;
951 	qdf_dma_addr_t buf;
952 	unsigned int nbytes;
953 	unsigned int id;
954 	unsigned int flags;
955 	unsigned int more_comp_cnt = 0;
956 	unsigned int more_snd_comp_cnt = 0;
957 	unsigned int sw_idx, hw_idx;
958 	uint32_t toeplitz_hash_result;
959 	uint32_t mode = hif_get_conparam(scn);
960 
961 more_completions:
962 	if (CE_state->recv_cb) {
963 
964 		/* Pop completed recv buffers and call
965 		 * the registered recv callback for each
966 		 */
967 		while (hif_state->ce_services->ce_completed_recv_next_nolock
968 				(CE_state, &CE_context, &transfer_context,
969 				&buf, &nbytes, &id, &flags) ==
970 				QDF_STATUS_SUCCESS) {
971 			qdf_spin_unlock(&CE_state->ce_index_lock);
972 			CE_state->recv_cb((struct CE_handle *)CE_state,
973 					  CE_context, transfer_context, buf,
974 					  nbytes, id, flags);
975 
976 			qdf_spin_lock(&CE_state->ce_index_lock);
977 			/*
978 			 * EV #112693 -
979 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
980 			 * BSoD_0x133 occurred in VHT80 UDP_DL
981 			 * Break out of the DPC by force if the number of
982 			 * loops in hif_pci_ce_recv_data reaches
983 			 * MAX_NUM_OF_RECEIVES, to avoid spending too long
984 			 * in the DPC for each interrupt. Schedule another
985 			 * DPC to avoid data loss if a force-break action
986 			 * was taken. This currently applies to Windows
987 			 * only; Linux/Mac OS can extend it to their
988 			 * platforms if necessary.
989 			 */
990 
991 			/* Break out of receive processing by
992 			 * force if force_break is set
993 			 */
994 			if (qdf_unlikely(CE_state->force_break)) {
995 				qdf_atomic_set(&CE_state->rx_pending, 1);
996 				return;
997 			}
998 		}
999 	}
1000 
1001 	/*
1002 	 * Attention: We may experience potential infinite loop for below
1003 	 * While Loop during Sending Stress test.
1004 	 * Resolve the same way as Receive Case (Refer to EV #112693)
1005 	 */
1006 
1007 	if (CE_state->send_cb) {
1008 		/* Pop completed send buffers and call
1009 		 * the registered send callback for each
1010 		 */
1011 
1012 #ifdef ATH_11AC_TXCOMPACT
1013 		while (hif_state->ce_services->ce_completed_send_next_nolock
1014 			 (CE_state, &CE_context,
1015 			 &transfer_context, &buf, &nbytes,
1016 			 &id, &sw_idx, &hw_idx,
1017 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1018 
1019 			if (CE_id != CE_HTT_H2T_MSG ||
1020 			    QDF_IS_EPPING_ENABLED(mode)) {
1021 				qdf_spin_unlock(&CE_state->ce_index_lock);
1022 				CE_state->send_cb((struct CE_handle *)CE_state,
1023 						  CE_context, transfer_context,
1024 						  buf, nbytes, id, sw_idx,
1025 						  hw_idx, toeplitz_hash_result);
1026 				qdf_spin_lock(&CE_state->ce_index_lock);
1027 			} else {
1028 				struct HIF_CE_pipe_info *pipe_info =
1029 					(struct HIF_CE_pipe_info *)CE_context;
1030 
1031 				qdf_spin_lock_bh(&pipe_info->
1032 					      completion_freeq_lock);
1033 				pipe_info->num_sends_allowed++;
1034 				qdf_spin_unlock_bh(&pipe_info->
1035 						completion_freeq_lock);
1036 			}
1037 		}
1038 #else                           /*ATH_11AC_TXCOMPACT */
1039 		while (hif_state->ce_services->ce_completed_send_next_nolock
1040 			 (CE_state, &CE_context,
1041 			  &transfer_context, &buf, &nbytes,
1042 			  &id, &sw_idx, &hw_idx,
1043 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1044 			qdf_spin_unlock(&CE_state->ce_index_lock);
1045 			CE_state->send_cb((struct CE_handle *)CE_state,
1046 				  CE_context, transfer_context, buf,
1047 				  nbytes, id, sw_idx, hw_idx,
1048 				  toeplitz_hash_result);
1049 			qdf_spin_lock(&CE_state->ce_index_lock);
1050 		}
1051 #endif /*ATH_11AC_TXCOMPACT */
1052 	}
1053 
1054 more_watermarks:
1055 	if (CE_state->misc_cbs) {
1056 		if (CE_state->watermark_cb &&
1057 				hif_state->ce_services->watermark_int(CE_state,
1058 					&flags)) {
1059 			qdf_spin_unlock(&CE_state->ce_index_lock);
1060 			/* Convert HW IS bits to software flags */
1061 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1062 					CE_state->wm_context, flags);
1063 			qdf_spin_lock(&CE_state->ce_index_lock);
1064 		}
1065 	}
1066 
1067 	/*
1068 	 * Clear the misc interrupts (watermark) that were handled above,
1069 	 * and that will be checked again below.
1070 	 * Clear and check for copy-complete interrupts again, just in case
1071 	 * more copy completions happened while the misc interrupts were being
1072 	 * handled.
1073 	 */
1074 	if (!ce_srng_based(scn)) {
1075 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1076 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1077 					   CE_WATERMARK_MASK |
1078 					   HOST_IS_COPY_COMPLETE_MASK);
1079 		} else {
1080 			qdf_atomic_set(&CE_state->rx_pending, 0);
1081 			hif_err_rl("%s: target access is not allowed",
1082 				   __func__);
1083 			return;
1084 		}
1085 	}
1086 
1087 	/*
1088 	 * Now that per-engine interrupts are cleared, verify that
1089 	 * no recv interrupts arrive while processing send interrupts,
1090 	 * and no recv or send interrupts happened while processing
1091 	 * misc interrupts. Go back and check again. Keep checking until
1092 	 * we find no more events to process.
1093 	 */
1094 	if (CE_state->recv_cb &&
1095 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1096 				CE_state)) {
1097 		if (QDF_IS_EPPING_ENABLED(mode) ||
1098 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1099 			goto more_completions;
1100 		} else {
1101 			if (!ce_srng_based(scn)) {
1102 				HIF_ERROR(
1103 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1104 					__func__,
1105 					CE_state->dest_ring->nentries_mask,
1106 					CE_state->dest_ring->sw_index,
1107 					CE_DEST_RING_READ_IDX_GET(scn,
1108 							  CE_state->ctrl_addr));
1109 			}
1110 		}
1111 	}
1112 
1113 	if (CE_state->send_cb &&
1114 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1115 				CE_state)) {
1116 		if (QDF_IS_EPPING_ENABLED(mode) ||
1117 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1118 			goto more_completions;
1119 		} else {
1120 			if (!ce_srng_based(scn)) {
1121 				HIF_ERROR(
1122 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1123 					__func__,
1124 					CE_state->src_ring->nentries_mask,
1125 					CE_state->src_ring->sw_index,
1126 					CE_SRC_RING_READ_IDX_GET(scn,
1127 							 CE_state->ctrl_addr));
1128 			}
1129 		}
1130 	}
1131 
1132 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1133 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1134 			goto more_watermarks;
1135 	}
1136 
1137 	qdf_atomic_set(&CE_state->rx_pending, 0);
1138 }
1139 
1140 /*
1141  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1142  *
1143  * Invokes registered callbacks for recv_complete,
1144  * send_complete, and watermarks.
1145  *
1146  * Returns: number of messages processed
1147  */
1148 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1149 {
1150 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1151 
1152 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1153 		return CE_state->receive_count;
1154 
1155 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1156 		HIF_ERROR("[premature rc=0]");
1157 		return 0; /* no work done */
1158 	}
1159 
1160 	/* Clear force_break flag and re-initialize receive_count to 0 */
1161 	CE_state->receive_count = 0;
1162 	CE_state->force_break = 0;
1163 	CE_state->ce_service_start_time = sched_clock();
1164 	CE_state->ce_service_yield_time =
1165 		CE_state->ce_service_start_time +
1166 		hif_get_ce_service_max_yield_time(
1167 			(struct hif_opaque_softc *)scn);
1168 
1169 	qdf_spin_lock(&CE_state->ce_index_lock);
1170 
1171 	CE_state->service(scn, CE_id);
1172 
1173 	qdf_spin_unlock(&CE_state->ce_index_lock);
1174 
1175 	if (Q_TARGET_ACCESS_END(scn) < 0)
1176 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
1177 	return CE_state->receive_count;
1178 }
1179 qdf_export_symbol(ce_per_engine_service);
1180 
1181 /*
1182  * Handler for per-engine interrupts on ALL active CEs.
1183  * This is used in cases where the system is sharing a
1184  * single interrupt for all CEs
1185  */
1186 
1187 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1188 {
1189 	int CE_id;
1190 	uint32_t intr_summary;
1191 
1192 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1193 		return;
1194 
1195 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1196 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1197 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1198 
1199 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1200 				qdf_atomic_set(&CE_state->rx_pending, 0);
1201 				ce_per_engine_service(scn, CE_id);
1202 			}
1203 		}
1204 
1205 		Q_TARGET_ACCESS_END(scn);
1206 		return;
1207 	}
1208 
1209 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1210 
1211 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1212 		if (intr_summary & (1 << CE_id))
1213 			intr_summary &= ~(1 << CE_id);
1214 		else
1215 			continue;       /* no intr pending on this CE */
1216 
1217 		ce_per_engine_service(scn, CE_id);
1218 	}
1219 
1220 	Q_TARGET_ACCESS_END(scn);
1221 }
1222 
1223 /* Iterate the CE_state list and disable the compl interrupt
1224  * if it has been registered already.
1225  */
1226 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1227 {
1228 	int CE_id;
1229 
1230 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1231 		return;
1232 
1233 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1234 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1235 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1236 
1237 		/* if the interrupt is currently enabled, disable it */
1238 		if (!CE_state->disable_copy_compl_intr
1239 		    && (CE_state->send_cb || CE_state->recv_cb))
1240 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1241 
1242 		if (CE_state->watermark_cb)
1243 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1244 	}
1245 	Q_TARGET_ACCESS_END(scn);
1246 }
1247 
1248 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1249 {
1250 	int CE_id;
1251 
1252 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1253 		return;
1254 
1255 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1256 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1257 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1258 
1259 		/*
1260 		 * If the CE is supposed to have copy complete interrupts
1261 		 * enabled (i.e. there a callback registered, and the
1262 		 * "disable" flag is not set), then re-enable the interrupt.
1263 		 */
1264 		if (!CE_state->disable_copy_compl_intr
1265 		    && (CE_state->send_cb || CE_state->recv_cb))
1266 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1267 
1268 		if (CE_state->watermark_cb)
1269 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1270 	}
1271 	Q_TARGET_ACCESS_END(scn);
1272 }
1273 
1274 /**
1275  * ce_send_cb_register(): register completion handler
1276  * @copyeng: CE_state representing the ce we are adding the behavior to
1277  * @fn_ptr: callback that the ce should use when processing tx completions
1278  * @disable_interrupts: if the interrupts should be enabled or not.
1279  *
1280  * Caller should guarantee that no transactions are in progress before
1281  * switching the callback function.
1282  *
1283  * Registers the send context before the fn pointer so that if the cb is valid
1284  * the context should be valid.
1285  *
1286  * Beware that currently this function will enable completion interrupts.
1287  */
1288 void
1289 ce_send_cb_register(struct CE_handle *copyeng,
1290 		    ce_send_cb fn_ptr,
1291 		    void *ce_send_context, int disable_interrupts)
1292 {
1293 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1294 	struct hif_softc *scn;
1295 	struct HIF_CE_state *hif_state;
1296 
1297 	if (CE_state == NULL) {
1298 		HIF_ERROR("%s: Error CE state = NULL", __func__);
1299 		return;
1300 	}
1301 	scn = CE_state->scn;
1302 	hif_state = HIF_GET_CE_STATE(scn);
1303 	if (hif_state == NULL) {
1304 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1305 		return;
1306 	}
1307 	CE_state->send_context = ce_send_context;
1308 	CE_state->send_cb = fn_ptr;
1309 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1310 							disable_interrupts);
1311 }
1312 qdf_export_symbol(ce_send_cb_register);
1313 
1314 /**
1315  * ce_recv_cb_register(): register completion handler
1316  * @copyeng: CE_state representing the ce we are adding the behavior to
1317  * @fn_ptr: callback that the ce should use when processing rx completions
1318  * @disable_interrupts: if the interrupts should be enabled or not.
1319  *
1320  * Registers the send context before the fn pointer so that if the cb is valid
1321  * the context should be valid.
1322  *
1323  * Caller should guarantee that no transactions are in progress before
1324  * switching the callback function.
1325  */
1326 void
1327 ce_recv_cb_register(struct CE_handle *copyeng,
1328 		    CE_recv_cb fn_ptr,
1329 		    void *CE_recv_context, int disable_interrupts)
1330 {
1331 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1332 	struct hif_softc *scn;
1333 	struct HIF_CE_state *hif_state;
1334 
1335 	if (CE_state == NULL) {
1336 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
1337 		return;
1338 	}
1339 	scn = CE_state->scn;
1340 	hif_state = HIF_GET_CE_STATE(scn);
1341 	if (hif_state == NULL) {
1342 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1343 		return;
1344 	}
1345 	CE_state->recv_context = CE_recv_context;
1346 	CE_state->recv_cb = fn_ptr;
1347 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1348 							disable_interrupts);
1349 }
1350 qdf_export_symbol(ce_recv_cb_register);
1351 
1352 /**
1353  * ce_watermark_cb_register(): register completion handler
1354  * @copyeng: CE_state representing the ce we are adding the behavior to
1355  * @fn_ptr: callback that the ce should use when processing watermark events
1356  *
1357  * Caller should guarantee that no watermark events are being processed before
1358  * switching the callback function.
1359  */
1360 void
1361 ce_watermark_cb_register(struct CE_handle *copyeng,
1362 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1363 {
1364 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1365 	struct hif_softc *scn = CE_state->scn;
1366 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1367 
1368 	CE_state->watermark_cb = fn_ptr;
1369 	CE_state->wm_context = CE_wm_context;
1370 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1371 							0);
1372 	if (fn_ptr)
1373 		CE_state->misc_cbs = 1;
1374 }
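
/*
 * Registration sketch (illustrative; my_wm_cb and my_wm_ctx are
 * hypothetical caller-side names): a client that wants ring-level alerts
 * registers the callback first and then programs the thresholds.
 *
 *	ce_watermark_cb_register(copyeng, my_wm_cb, my_wm_ctx);
 *	ce_send_watermarks_set(copyeng, low_entries, high_entries);
 *	ce_recv_watermarks_set(copyeng, low_entries, high_entries);
 */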
1375 
1376 bool ce_get_rx_pending(struct hif_softc *scn)
1377 {
1378 	int CE_id;
1379 
1380 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1381 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1382 
1383 		if (qdf_atomic_read(&CE_state->rx_pending))
1384 			return true;
1385 	}
1386 
1387 	return false;
1388 }
1389 
1390 /**
1391  * ce_check_rx_pending() - ce_check_rx_pending
1392  * @CE_state: context of the copy engine to check
1393  *
1394  * Return: true if the per_engine_service
1395  *	didn't process all the rx descriptors.
1396  */
1397 bool ce_check_rx_pending(struct CE_state *CE_state)
1398 {
1399 	if (qdf_atomic_read(&CE_state->rx_pending))
1400 		return true;
1401 	else
1402 		return false;
1403 }
1404 qdf_export_symbol(ce_check_rx_pending);
1405 
1406 #ifdef IPA_OFFLOAD
1407 /**
1408  * ce_ipa_get_resource() - get uc resource on copyengine
1409  * @ce: copyengine context
1410  * @ce_sr: copyengine source ring resource info
1411  * @ce_sr_ring_size: copyengine source ring size
1412  * @ce_reg_paddr: copyengine register physical address
1413  *
1414  * Copy engine should release resources to the micro controller.
1415  * The micro controller needs
1416  *  - Copy engine source descriptor base address
1417  *  - Copy engine source descriptor size
1418  *  - PCI BAR address to access copy engine register
1419  *
1420  * Return: None
1421  */
1422 void ce_ipa_get_resource(struct CE_handle *ce,
1423 			 qdf_shared_mem_t **ce_sr,
1424 			 uint32_t *ce_sr_ring_size,
1425 			 qdf_dma_addr_t *ce_reg_paddr)
1426 {
1427 	struct CE_state *CE_state = (struct CE_state *)ce;
1428 	uint32_t ring_loop;
1429 	struct CE_src_desc *ce_desc;
1430 	qdf_dma_addr_t phy_mem_base;
1431 	struct hif_softc *scn = CE_state->scn;
1432 
1433 	if (CE_UNUSED == CE_state->state) {
1434 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1435 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1436 		*ce_sr_ring_size = 0;
1437 		return;
1438 	}
1439 
1440 	/* Update default value for descriptor */
1441 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1442 	     ring_loop++) {
1443 		ce_desc = (struct CE_src_desc *)
1444 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1445 			   ring_loop * (sizeof(struct CE_src_desc)));
1446 		CE_IPA_RING_INIT(ce_desc);
1447 	}
1448 
1449 	/* Get BAR address */
1450 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1451 
1452 	*ce_sr = CE_state->scn->ipa_ce_ring;
1453 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1454 		sizeof(struct CE_src_desc));
1455 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1456 			SR_WR_INDEX_ADDRESS;
1457 }
1458 #endif /* IPA_OFFLOAD */
1459 
1460 #if HIF_CE_DEBUG_DATA_BUF
1461 /**
1462  * hif_dump_desc_data_buf() - dump the data recorded for a ce descriptor event
1463  * @buf: buffer to copy to
1464  * @pos: Current position till which the buf is filled
1465  * @data: Data to be copied
1466  * @data_len: Length of the data to be copied
1467  */
1468 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1469 					uint8_t *data, uint32_t data_len)
1470 {
1471 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1472 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1473 
1474 	if ((data_len > 0) && data) {
1475 		if (data_len < 16) {
1476 			hex_dump_to_buffer(data,
1477 						CE_DEBUG_DATA_PER_ROW,
1478 						16, 1, buf + pos,
1479 						(ssize_t)PAGE_SIZE - pos,
1480 						false);
1481 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1482 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1483 		} else {
1484 			uint32_t rows = (data_len / 16) + 1;
1485 			uint32_t row = 0;
1486 
1487 			for (row = 0; row < rows; row++) {
1488 				hex_dump_to_buffer(data + (row * 16),
1489 							CE_DEBUG_DATA_PER_ROW,
1490 							16, 1, buf + pos,
1491 							(ssize_t)PAGE_SIZE
1492 							- pos, false);
1493 				pos +=
1494 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1495 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1496 						"\n");
1497 			}
1498 		}
1499 	}
1500 
1501 	return pos;
1502 }
1503 #endif
1504 
1505 /*
1506  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
1507  * checked here
1508  */
1509 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1510 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1511 {
1512 	switch (type) {
1513 	case HIF_RX_DESC_POST:
1514 		return "HIF_RX_DESC_POST";
1515 	case HIF_RX_DESC_COMPLETION:
1516 		return "HIF_RX_DESC_COMPLETION";
1517 	case HIF_TX_GATHER_DESC_POST:
1518 		return "HIF_TX_GATHER_DESC_POST";
1519 	case HIF_TX_DESC_POST:
1520 		return "HIF_TX_DESC_POST";
1521 	case HIF_TX_DESC_SOFTWARE_POST:
1522 		return "HIF_TX_DESC_SOFTWARE_POST";
1523 	case HIF_TX_DESC_COMPLETION:
1524 		return "HIF_TX_DESC_COMPLETION";
1525 	case FAST_RX_WRITE_INDEX_UPDATE:
1526 		return "FAST_RX_WRITE_INDEX_UPDATE";
1527 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1528 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1529 	case FAST_TX_WRITE_INDEX_UPDATE:
1530 		return "FAST_TX_WRITE_INDEX_UPDATE";
1531 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1532 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1533 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1534 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1535 	case RESUME_WRITE_INDEX_UPDATE:
1536 		return "RESUME_WRITE_INDEX_UPDATE";
1537 	case HIF_IRQ_EVENT:
1538 		return "HIF_IRQ_EVENT";
1539 	case HIF_CE_TASKLET_ENTRY:
1540 		return "HIF_CE_TASKLET_ENTRY";
1541 	case HIF_CE_TASKLET_RESCHEDULE:
1542 		return "HIF_CE_TASKLET_RESCHEDULE";
1543 	case HIF_CE_TASKLET_EXIT:
1544 		return "HIF_CE_TASKLET_EXIT";
1545 	case HIF_CE_REAP_ENTRY:
1546 		return "HIF_CE_REAP_ENTRY";
1547 	case HIF_CE_REAP_EXIT:
1548 		return "HIF_CE_REAP_EXIT";
1549 	case NAPI_SCHEDULE:
1550 		return "NAPI_SCHEDULE";
1551 	case NAPI_POLL_ENTER:
1552 		return "NAPI_POLL_ENTER";
1553 	case NAPI_COMPLETE:
1554 		return "NAPI_COMPLETE";
1555 	case NAPI_POLL_EXIT:
1556 		return "NAPI_POLL_EXIT";
1557 	case HIF_RX_NBUF_ALLOC_FAILURE:
1558 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1559 	case HIF_RX_NBUF_MAP_FAILURE:
1560 		return "HIF_RX_NBUF_MAP_FAILURE";
1561 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1562 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1563 	default:
1564 		return "invalid";
1565 	}
1566 }
1567 
1568 /**
1569  * hif_dump_desc_event() - dump the currently selected ce descriptor event
1570  * @scn: hif context
1571  * @buf: buffer to copy the event description into
1573  */
1574 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1575 {
1576 	struct hif_ce_desc_event *event;
1577 	uint64_t secs, usecs;
1578 	ssize_t len = 0;
1579 	struct ce_desc_hist *ce_hist = NULL;
1580 	struct hif_ce_desc_event *hist_ev = NULL;
1581 
1582 	if (!scn)
1583 		return -EINVAL;
1584 
1585 	ce_hist = &scn->hif_ce_desc_hist;
1586 
1587 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1588 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1589 		qdf_print("Invalid values");
1590 		return -EINVAL;
1591 	}
1592 
1593 	hist_ev =
1594 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1595 
1596 	if (!hist_ev) {
1597 		qdf_print("Low Memory");
1598 		return -EINVAL;
1599 	}
1600 
1601 	event = &hist_ev[ce_hist->hist_index];
1602 
1603 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1604 
1605 	len += snprintf(buf, PAGE_SIZE - len,
1606 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1607 			secs, usecs, ce_hist->hist_id,
1608 			ce_event_type_to_str(event->type),
1609 			event->index, event->memory);
1610 #if HIF_CE_DEBUG_DATA_BUF
1611 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%d",
1612 			event->actual_data_len);
1613 #endif
1614 
1615 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1616 
1617 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1618 				16, 1, buf + len,
1619 				(ssize_t)PAGE_SIZE - len, false);
1620 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1621 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1622 
1623 #if HIF_CE_DEBUG_DATA_BUF
1624 	if (ce_hist->data_enable[ce_hist->hist_id])
1625 		len = hif_dump_desc_data_buf(buf, len, event->data,
1626 						(event->actual_data_len <
1627 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1628 						event->actual_data_len :
1629 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1630 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1631 
1632 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1633 
1634 	return len;
1635 }
1636 
1637 /*
1638  * hif_input_desc_trace_buf_index() -
1639  * API to set the CE id and CE debug history buffer index
1640  *
1641  * @scn: hif context
1642  * @buf: data received from the user
1643  * @size: size of the user input
1644  *
1645  * Return: total length consumed
1646  */
1647 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1648 					const char *buf, size_t size)
1649 {
1650 	struct ce_desc_hist *ce_hist = NULL;
1651 
1652 	if (!scn)
1653 		return -EINVAL;
1654 
1655 	ce_hist = &scn->hif_ce_desc_hist;
1656 
1657 	if (!size) {
1658 		pr_err("%s: Invalid input buffer.\n", __func__);
1659 		return -EINVAL;
1660 	}
1661 
1662 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1663 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1664 		pr_err("%s: Invalid input value.\n", __func__);
1665 		return -EINVAL;
1666 	}
1667 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1668 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1669 		qdf_print("Invalid values");
1670 		return -EINVAL;
1671 	}
1672 
1673 	return size;
1674 }
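
/*
 * Input format example (illustrative): writing the string "5 37" through
 * this interface selects CE id 5 and history index 37, subject to the
 * CE_COUNT_MAX and HIF_CE_HISTORY_MAX bounds checked above; the selected
 * event can then be read back via hif_dump_desc_event().
 */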
1675 
1676 #endif  /*For MCL,  HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
1677 
1678 #if HIF_CE_DEBUG_DATA_BUF
1679 /*
1680  * hif_ce_en_desc_hist() -
1681  * API to enable recording the CE desc history
1682  *
1683  * @scn: hif context
1684  * @buf: data received from the user
1685  * @size: size of the user input
1686  *
1687  * Starts or stops recording of the ce descriptor data history
1688  *
1689  * Return: total length consumed
1690  */
1691 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1692 {
1693 	struct ce_desc_hist *ce_hist = NULL;
1694 	uint32_t cfg = 0;
1695 	uint32_t ce_id = 0;
1696 
1697 	if (!scn)
1698 		return -EINVAL;
1699 
1700 	ce_hist = &scn->hif_ce_desc_hist;
1701 
1702 	if (!size) {
1703 		pr_err("%s: Invalid input buffer.\n", __func__);
1704 		return -EINVAL;
1705 	}
1706 
1707 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1708 		   (unsigned int *)&cfg) != 2) {
1709 		pr_err("%s: Invalid input: Enter CE Id<sp><1/0>.\n", __func__);
1710 		return -EINVAL;
1711 	}
1712 	if (ce_id >= CE_COUNT_MAX) {
1713 		qdf_print("Invalid value CE Id");
1714 		return -EINVAL;
1715 	}
1716 
1717 	if ((cfg > 1 || cfg < 0)) {
1718 		qdf_print("Invalid values: enter 0 or 1");
1719 		return -EINVAL;
1720 	}
1721 
1722 	if (!ce_hist->hist_ev[ce_id])
1723 		return -EINVAL;
1724 
1725 	qdf_mutex_acquire(&ce_dbg_datamem_lock[ce_id]);
1726 	if (cfg == 1) {
1727 		if (ce_hist->data_enable[ce_id] == 1) {
1728 			qdf_print("\nAlready Enabled");
1729 		} else {
1730 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
1731 							== QDF_STATUS_E_NOMEM) {
1732 				ce_hist->data_enable[ce_id] = 0;
1733 				qdf_print("%s: Memory Alloc failed", __func__);
1734 			} else
1735 				ce_hist->data_enable[ce_id] = 1;
1736 		}
1737 	} else if (cfg == 0) {
1738 		if (ce_hist->data_enable[ce_id] == 0) {
1739 			qdf_print("\nAlready Disabled");
1740 		} else {
1741 			ce_hist->data_enable[ce_id] = 0;
			free_mem_ce_debug_hist_data(scn, ce_id);
1743 		}
1744 	}
1745 	qdf_mutex_release(&ce_dbg_datamem_lock[ce_id]);
1746 
1747 	return size;
1748 }
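
/*
 * Usage sketch (illustrative only): enabling and later disabling data
 * history for CE 3, assuming the caller already holds the hif_softc:
 *
 *	hif_ce_en_desc_hist(scn, "3 1", 3);	allocate + start recording
 *	hif_ce_en_desc_hist(scn, "3 0", 3);	stop recording + free
 *
 * Each call returns the consumed size on success or -EINVAL on bad input.
 */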
1749 
/**
 * hif_disp_ce_enable_desc_data_hist() - display the per-CE data_enable flags
 * @scn: HIF context
 * @buf: output buffer to copy the data into
 *
 * Return: total length copied
 */
1760 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1761 {
1762 	ssize_t len = 0;
1763 	uint32_t ce_id = 0;
1764 	struct ce_desc_hist *ce_hist = NULL;
1765 
1766 	if (!scn)
1767 		return -EINVAL;
1768 
1769 	ce_hist = &scn->hif_ce_desc_hist;
1770 
1771 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1772 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1773 				ce_id, ce_hist->data_enable[ce_id]);
1774 	}
1775 
1776 	return len;
1777 }
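
/*
 * Example output (illustrative): with data history enabled only on CE 2,
 * a read of the corresponding node would return:
 *
 *	 CE0: 0
 *	 CE1: 0
 *	 CE2: 1
 *	 ...
 */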
1778 #endif /* HIF_CE_DEBUG_DATA_BUF */
1779 
1780 #ifdef OL_ATH_SMART_LOGGING
1781 #define GUARD_SPACE 10
1782 #define LOG_ID_SZ 4
/**
 * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
 * @src_ring: SRC ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE SRC ring descriptors and the buffers pointed to by them
 * into the given buf; skb_sz is the max buffer size to be copied per SKB
 *
 * Return: Current pointer in ring buffer
 */
1796 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
1797 				    uint8_t *buf_cur, uint8_t *buf_init,
1798 				    uint32_t buf_sz, uint32_t skb_sz)
1799 {
1800 	struct CE_src_desc *src_ring_base;
1801 	uint32_t len, entry;
1802 	struct CE_src_desc  *src_desc;
1803 	qdf_nbuf_t nbuf;
1804 	uint32_t available_buf;
1805 
1806 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
1807 	len = sizeof(struct CE_ring_state);
1808 	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE))
		buf_cur = buf_init;
1812 
1813 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
1814 	buf_cur += sizeof(struct CE_ring_state);
1815 
1816 	for (entry = 0; entry < src_ring->nentries; entry++) {
1817 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
1818 		nbuf = src_ring->per_transfer_context[entry];
1819 		if (nbuf) {
1820 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1821 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1822 
1823 			len = sizeof(struct CE_src_desc) + skb_cp_len
1824 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1825 			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;
1829 			qdf_mem_copy(buf_cur, src_desc,
1830 				     sizeof(struct CE_src_desc));
1831 			buf_cur += sizeof(struct CE_src_desc);
1832 
1833 			available_buf = buf_sz - (buf_cur - buf_init);
1834 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
1835 						skb_cp_len);
1836 
1837 			if (skb_cp_len) {
1838 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1839 					     skb_cp_len);
1840 				buf_cur += skb_cp_len;
1841 			}
1842 		} else {
1843 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
1844 			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;
1848 			qdf_mem_copy(buf_cur, src_desc,
1849 				     sizeof(struct CE_src_desc));
1850 			buf_cur += sizeof(struct CE_src_desc);
1851 			available_buf = buf_sz - (buf_cur - buf_init);
1852 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1853 		}
1854 	}
1855 
1856 	return buf_cur;
1857 }
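
/*
 * Layout of the dumped SRC ring (sketch, derived from the loop above): the
 * output is a flat byte stream that wraps back to buf_init whenever fewer
 * than len + GUARD_SPACE bytes remain:
 *
 *	[struct CE_ring_state]
 *	[struct CE_src_desc]["SKB<n>"][n bytes of SKB data]	entry with nbuf
 *	[struct CE_src_desc]["NUL"]				entry without nbuf
 *	...
 *
 * where n = min(qdf_nbuf_len(nbuf), skb_sz).
 */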
1858 
/**
 * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
 * @dest_ring: DEST ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE DEST ring descriptors and the buffers pointed to by them
 * into the given buf; skb_sz is the max buffer size to be copied per SKB
 *
 * Return: Current pointer in ring buffer
 */
1872 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
1873 				     uint8_t *buf_cur, uint8_t *buf_init,
1874 				     uint32_t buf_sz, uint32_t skb_sz)
1875 {
1876 	struct CE_dest_desc *dest_ring_base;
1877 	uint32_t len, entry;
1878 	struct CE_dest_desc  *dest_desc;
1879 	qdf_nbuf_t nbuf;
1880 	uint32_t available_buf;
1881 
1882 	dest_ring_base =
1883 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1884 
1885 	len = sizeof(struct CE_ring_state);
1886 	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE))
		buf_cur = buf_init;
1890 
1891 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
1892 	buf_cur += sizeof(struct CE_ring_state);
1893 
1894 	for (entry = 0; entry < dest_ring->nentries; entry++) {
1895 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
1896 
1897 		nbuf = dest_ring->per_transfer_context[entry];
1898 		if (nbuf) {
1899 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1900 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1901 
1902 			len = sizeof(struct CE_dest_desc) + skb_cp_len
1903 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1904 
1905 			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;
1909 
1910 			qdf_mem_copy(buf_cur, dest_desc,
1911 				     sizeof(struct CE_dest_desc));
1912 			buf_cur += sizeof(struct CE_dest_desc);
1913 			available_buf = buf_sz - (buf_cur - buf_init);
1914 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
1915 						skb_cp_len);
1916 			if (skb_cp_len) {
1917 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1918 					     skb_cp_len);
1919 				buf_cur += skb_cp_len;
1920 			}
1921 		} else {
1922 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
1923 			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;
1927 			qdf_mem_copy(buf_cur, dest_desc,
1928 				     sizeof(struct CE_dest_desc));
1929 			buf_cur += sizeof(struct CE_dest_desc);
1930 			available_buf = buf_sz - (buf_cur - buf_init);
1931 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1932 		}
1933 	}
1934 	return buf_cur;
1935 }
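
/*
 * The DEST ring dump mirrors the SRC ring layout sketched above, with
 * struct CE_dest_desc written in place of struct CE_src_desc.
 */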
1936 
/**
 * hif_log_dump_ce() - Copy the CE SRC or DEST ring to buf
 * @scn: HIF context
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and the buffers pointed to by them into the given buf
 *
 * Return: Current pointer in ring buffer
 */
1942 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1943 			 uint8_t *buf_init, uint32_t buf_sz,
1944 			 uint32_t ce, uint32_t skb_sz)
1945 {
1946 	struct CE_state *ce_state;
1947 	struct CE_ring_state *src_ring;
1948 	struct CE_ring_state *dest_ring;
1949 
1950 	ce_state = scn->ce_id_to_state[ce];
1951 	src_ring = ce_state->src_ring;
1952 	dest_ring = ce_state->dest_ring;
1953 
1954 	if (src_ring) {
1955 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
1956 					      buf_init, buf_sz, skb_sz);
1957 	} else if (dest_ring) {
1958 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
1959 					       buf_init, buf_sz, skb_sz);
1960 	}
1961 
1962 	return buf_cur;
1963 }
1964 
1965 qdf_export_symbol(hif_log_dump_ce);
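
/*
 * Usage sketch (illustrative only): a smart-logging collector could walk
 * the copy engines and snapshot each one into a single caller-owned
 * circular buffer; buf, buf_sz and skb_sz are hypothetical here, and CEs
 * without state must be skipped since hif_log_dump_ce() does not check:
 *
 *	uint8_t *cur = buf;
 *	uint32_t ce;
 *
 *	for (ce = 0; ce < CE_COUNT_MAX; ce++) {
 *		if (!scn->ce_id_to_state[ce])
 *			continue;
 *		cur = hif_log_dump_ce(scn, cur, buf, buf_sz, ce, skb_sz);
 *	}
 */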
1966 #endif /* OL_ATH_SMART_LOGGING */
1967 
1968