xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 static int war1_allow_sleep;
65 /* io32 write workaround */
66 static int hif_ce_war1;
67 
68 /**
69  * hif_ce_war_disable() - disable ce war globally
70  */
71 void hif_ce_war_disable(void)
72 {
73 	hif_ce_war1 = 0;
74 }
75 
76 /**
77  * hif_ce_war_enable() - enable ce war globally
78  */
79 void hif_ce_war_enable(void)
80 {
81 	hif_ce_war1 = 1;
82 }
83 
84 /*
85  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
86  * checked for being defined here
87  */
88 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
89 
90 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
91 #define CE_DEBUG_DATA_PER_ROW 16
92 
93 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
94 
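/**
 * get_next_record_index() - return the next free slot of a circular
 *	history table and advance the shared index
 * @table_index: atomic index shared by all writers of the table
 * @array_size: number of entries in the table
 *
 * The atomic increment below can briefly push the shared index past
 * @array_size when several CPUs race here; the subtraction and the
 * while loop fold both the shared index and the local copy back into
 * the [0, @array_size) range.
 */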
95 int get_next_record_index(qdf_atomic_t *table_index, int array_size)
96 {
97 	int record_index = qdf_atomic_inc_return(table_index);
98 
99 	if (record_index == array_size)
100 		qdf_atomic_sub(array_size, table_index);
101 
102 	while (record_index >= array_size)
103 		record_index -= array_size;
104 
105 	return record_index;
106 }
107 
108 qdf_export_symbol(get_next_record_index);
109 
110 #ifdef HIF_CE_DEBUG_DATA_BUF
111 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
112 {
113 	uint8_t *data = NULL;
114 
115 	if (!event->data) {
116 		hif_err_rl("No ce debug memory allocated");
117 		return;
118 	}
119 
120 	if (event->memory && len > 0)
121 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
122 
123 	event->actual_data_len = 0;
124 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
125 
126 	if (data && len > 0) {
127 		qdf_mem_copy(event->data, data,
128 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
129 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
130 		event->actual_data_len = len;
131 	}
132 }
133 
134 qdf_export_symbol(hif_ce_desc_data_record);
135 
136 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
137 {
138 	qdf_mem_zero(event,
139 		     offsetof(struct hif_ce_desc_event, data));
140 }
141 
142 qdf_export_symbol(hif_clear_ce_desc_debug_data);
143 #else
144 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
145 {
146 	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
147 }
148 
149 qdf_export_symbol(hif_clear_ce_desc_debug_data);
150 #endif /* HIF_CE_DEBUG_DATA_BUF */
151 
152 #if defined(HIF_RECORD_PADDR)
153 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
154 				 struct hif_ce_desc_event *event,
155 				 qdf_nbuf_t memory)
156 {
157 	if (memory) {
158 		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
159 		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
160 					scn->qdf_dev,
161 					event->dma_addr);
162 
163 		event->virt_to_phy =
164 			virt_to_phys(qdf_nbuf_data(memory));
165 	}
166 }
167 #endif /* HIF_RECORD_PADDR */
168 
169 /**
170  * hif_record_ce_desc_event() - record ce descriptor events
171  * @scn: hif_softc
172  * @ce_id: which ce is the event occurring on
173  * @type: what happened
174  * @descriptor: pointer to the descriptor posted/completed
175  * @memory: virtual address of buffer related to the descriptor
176  * @index: index that the descriptor was/will be at.
 * @len: length of the data associated with the descriptor
177  */
178 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
179 				enum hif_ce_event_type type,
180 				union ce_desc *descriptor,
181 				void *memory, int index,
182 				int len)
183 {
184 	int record_index;
185 	struct hif_ce_desc_event *event;
186 
187 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
188 	struct hif_ce_desc_event *hist_ev = NULL;
189 
190 	if (ce_id >= CE_COUNT_MAX)
191 		return;
192 
193 	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
197 
198 	if (!ce_hist->enable[ce_id])
199 		return;
200 
201 	if (!hist_ev)
202 		return;
203 
204 	record_index = get_next_record_index(
205 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
206 
207 	event = &hist_ev[record_index];
208 
209 	hif_clear_ce_desc_debug_data(event);
210 
211 	event->type = type;
212 	event->time = qdf_get_log_timestamp();
213 	event->cpu_id = qdf_get_cpu();
214 
215 	if (descriptor)
216 		qdf_mem_copy(&event->descriptor, descriptor,
217 			     sizeof(union ce_desc));
218 
219 	event->memory = memory;
220 	event->index = index;
221 
222 	if (event->type == HIF_RX_DESC_POST ||
223 	    event->type == HIF_RX_DESC_COMPLETION)
224 		hif_ce_desc_record_rx_paddr(scn, event, memory);
225 
226 	if (ce_hist->data_enable[ce_id])
227 		hif_ce_desc_data_record(event, len);
228 }
229 qdf_export_symbol(hif_record_ce_desc_event);
230 
231 /**
232  * ce_init_ce_desc_event_log() - initialize the ce event log
233  * @ce_id: copy engine id for which we are initializing the log
234  * @size: size of array to dedicate
235  *
236  * Currently the passed size is ignored in favor of a precompiled value.
237  */
238 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
239 {
240 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
241 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
242 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
243 }
244 
245 /**
246  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
247  * @ce_id: copy engine id for which we are deinitializing the log
248  *
249  */
250 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
251 {
252 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
253 
254 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
255 }
256 
257 #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
258 void hif_record_ce_desc_event(struct hif_softc *scn,
259 		int ce_id, enum hif_ce_event_type type,
260 		union ce_desc *descriptor, void *memory,
261 		int index, int len)
262 {
263 }
264 qdf_export_symbol(hif_record_ce_desc_event);
265 
266 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
267 					int size)
268 {
269 }
270 
271 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
272 {
273 }
274 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
275 
276 #ifdef NAPI_YIELD_BUDGET_BASED
277 bool hif_ce_service_should_yield(struct hif_softc *scn,
278 				 struct CE_state *ce_state)
279 {
280 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
281 
282 	/* Clamp receive_count to MAX_NUM_OF_RECEIVES when it goes beyond
283 	 * MAX_NUM_OF_RECEIVES, to avoid a NAPI budget calculation issue.
284 	 * This can happen in fast path handling as processing happens in
285 	 * batches.
286 	 */
287 	if (yield)
288 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
289 
290 	return yield;
291 }
292 #else
293 /**
294  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
295  * @scn: hif context
296  * @ce_state: context of the copy engine being serviced
297  *
298  * Return: true if the service should yield
299  */
300 bool hif_ce_service_should_yield(struct hif_softc *scn,
301 				 struct CE_state *ce_state)
302 {
303 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
304 
305 	time_limit_reached = qdf_time_sched_clock() >
306 					ce_state->ce_service_yield_time ? 1 : 0;
307 
308 	if (!time_limit_reached)
309 		rxpkt_thresh_reached = hif_max_num_receives_reached
310 					(scn, ce_state->receive_count);
311 
312 	/* Clamp receive_count to MAX_NUM_OF_RECEIVES when it goes beyond
313 	 * MAX_NUM_OF_RECEIVES, to avoid a NAPI budget calculation issue.
314 	 * This can happen in fast path handling as processing happens in
315 	 * batches.
316 	 */
317 	if (rxpkt_thresh_reached)
318 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
319 
320 	yield =  time_limit_reached || rxpkt_thresh_reached;
321 
322 	if (yield &&
323 	    ce_state->htt_rx_data &&
324 	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
325 		hif_napi_update_yield_stats(ce_state,
326 					    time_limit_reached,
327 					    rxpkt_thresh_reached);
328 	}
329 
330 	return yield;
331 }
332 qdf_export_symbol(hif_ce_service_should_yield);
333 #endif
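
/*
 * Illustrative sketch (not part of the driver): a CE fast-path service
 * loop is expected to poll hif_ce_service_should_yield() between
 * batches and bail out once the yield time or receive threshold is
 * reached; more_work() and process_one_batch() are hypothetical
 * helpers standing in for the real per-service processing:
 *
 *	while (more_work(ce_state)) {
 *		process_one_batch(ce_state);
 *		if (hif_ce_service_should_yield(scn, ce_state))
 *			break;
 *	}
 */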
334 
335 /*
336  * Write the source ring write index, applying the io32 write
337  * workaround (WAR) when it is enabled.
338  */
339 
340 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
341 				   u32 ctrl_addr, unsigned int write_index)
342 {
343 	if (hif_ce_war1) {
344 		void __iomem *indicator_addr;
345 
346 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
347 
348 		if (!war1_allow_sleep
349 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
350 			hif_write32_mb(scn, indicator_addr,
351 				       (CDC_WAR_MAGIC_STR | write_index));
352 		} else {
353 			unsigned long irq_flags;
354 
355 			local_irq_save(irq_flags);
356 			hif_write32_mb(scn, indicator_addr, 1);
357 
358 			/*
359 			 * PCIE write waits for ACK in IPQ8K, there is no
360 			 * need to read back value.
361 			 */
362 			(void)hif_read32_mb(scn, indicator_addr);
363 			/* conservative */
364 			(void)hif_read32_mb(scn, indicator_addr);
365 
366 			CE_SRC_RING_WRITE_IDX_SET(scn,
367 						  ctrl_addr, write_index);
368 
369 			hif_write32_mb(scn, indicator_addr, 0);
370 			local_irq_restore(irq_flags);
371 		}
372 	} else {
373 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
374 	}
375 }
376 
377 qdf_export_symbol(war_ce_src_ring_write_idx_set);
378 
379 QDF_STATUS
380 ce_send(struct CE_handle *copyeng,
381 		void *per_transfer_context,
382 		qdf_dma_addr_t buffer,
383 		uint32_t nbytes,
384 		uint32_t transfer_id,
385 		uint32_t flags,
386 		uint32_t user_flag)
387 {
388 	struct CE_state *CE_state = (struct CE_state *)copyeng;
389 	QDF_STATUS status;
390 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
391 
392 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
393 	status = hif_state->ce_services->ce_send_nolock(copyeng,
394 			per_transfer_context, buffer, nbytes,
395 			transfer_id, flags, user_flag);
396 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
397 
398 	return status;
399 }
400 qdf_export_symbol(ce_send);
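
/*
 * Illustrative sketch (not part of the driver): a caller that already
 * has a DMA-mapped nbuf might hand it to the copy engine roughly as
 * follows; `copyeng`, `msdu` and `xfer_id` are hypothetical locals.
 *
 *	qdf_dma_addr_t paddr = qdf_nbuf_get_frag_paddr(msdu, 0);
 *
 *	if (ce_send(copyeng, msdu, paddr, qdf_nbuf_len(msdu),
 *		    xfer_id, 0, 0) != QDF_STATUS_SUCCESS)
 *		; // handle the error, e.g. requeue or free msdu
 */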
401 
402 unsigned int ce_sendlist_sizeof(void)
403 {
404 	return sizeof(struct ce_sendlist);
405 }
406 
407 void ce_sendlist_init(struct ce_sendlist *sendlist)
408 {
409 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
410 
411 	sl->num_items = 0;
412 }
413 
414 QDF_STATUS
415 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
416 					qdf_dma_addr_t buffer,
417 					uint32_t nbytes,
418 					uint32_t flags,
419 					uint32_t user_flags)
420 {
421 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
422 	unsigned int num_items = sl->num_items;
423 	struct ce_sendlist_item *item;
424 
425 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
426 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
427 		return QDF_STATUS_E_RESOURCES;
428 	}
429 
430 	item = &sl->item[num_items];
431 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
432 	item->data = buffer;
433 	item->u.nbytes = nbytes;
434 	item->flags = flags;
435 	item->user_flags = user_flags;
436 	sl->num_items = num_items + 1;
437 	return QDF_STATUS_SUCCESS;
438 }
439 
440 QDF_STATUS
441 ce_sendlist_send(struct CE_handle *copyeng,
442 		 void *per_transfer_context,
443 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
444 {
445 	struct CE_state *CE_state = (struct CE_state *)copyeng;
446 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
447 
448 	return hif_state->ce_services->ce_sendlist_send(copyeng,
449 			per_transfer_context, sendlist, transfer_id);
450 }
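
/*
 * Illustrative sketch (not part of the driver): the sendlist API is
 * meant to be used as init -> buf_add -> send; `copyeng`, `msdu`,
 * `frag_paddr`, `frag_len` and `xfer_id` below are hypothetical locals.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag_paddr, frag_len, 0, 0);
 *	ce_sendlist_send(copyeng, msdu, &sendlist, xfer_id);
 */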
451 
452 #ifndef AH_NEED_TX_DATA_SWAP
453 #define AH_NEED_TX_DATA_SWAP 0
454 #endif
455 
456 /**
457  * ce_batch_send() - sends a bunch of msdus at once
458  * @ce_tx_hdl : pointer to CE handle
459  * @msdu : list of msdus to be sent
460  * @transfer_id : transfer id
461  * @len : Downloaded length
462  * @sendhead : sendhead
463  *
464  * Assumption : Called with an array of MSDU's
465  * Function:
466  * For each msdu in the array
467  * 1. Send each msdu
468  * 2. Increment write index accordingly.
469  *
470  * Return: list of msdus not sent
471  */
472 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
473 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
474 {
475 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
476 	struct hif_softc *scn = ce_state->scn;
477 	struct CE_ring_state *src_ring = ce_state->src_ring;
478 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
479 	/*  A_target_id_t targid = TARGID(scn);*/
480 
481 	uint32_t nentries_mask = src_ring->nentries_mask;
482 	uint32_t sw_index, write_index;
483 
484 	struct CE_src_desc *src_desc_base =
485 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
486 	uint32_t *src_desc;
487 
488 	struct CE_src_desc lsrc_desc = {0};
489 	int deltacount = 0;
490 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
491 
492 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
493 	sw_index = src_ring->sw_index;
494 	write_index = src_ring->write_index;
495 
496 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
497 
498 	while (msdu) {
499 		tempnext = qdf_nbuf_next(msdu);
500 
501 		if (deltacount < 2) {
502 			if (sendhead)
503 				return msdu;
504 			hif_err("Out of descriptors");
505 			src_ring->write_index = write_index;
506 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
507 					write_index);
508 
509 			sw_index = src_ring->sw_index;
510 			write_index = src_ring->write_index;
511 
512 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
513 					sw_index-1);
514 			if (!freelist) {
515 				freelist = msdu;
516 				hfreelist = msdu;
517 			} else {
518 				qdf_nbuf_set_next(freelist, msdu);
519 				freelist = msdu;
520 			}
521 			qdf_nbuf_set_next(msdu, NULL);
522 			msdu = tempnext;
523 			continue;
524 		}
525 
526 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
527 				write_index);
528 
529 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
530 
531 		lsrc_desc.meta_data = transfer_id;
532 		if (len  > msdu->len)
533 			len =  msdu->len;
534 		lsrc_desc.nbytes = len;
535 		/*  Data packet is a byte stream, so disable byte swap */
536 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
537 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
538 
539 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
540 
541 
542 		src_ring->per_transfer_context[write_index] = msdu;
543 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
544 
545 		if (sendhead)
546 			break;
547 		qdf_nbuf_set_next(msdu, NULL);
548 		msdu = tempnext;
549 
550 	}
551 
552 
553 	src_ring->write_index = write_index;
554 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
555 
556 	return hfreelist;
557 }
558 
559 /**
560  * ce_update_tx_ring() - Advance sw index.
561  * @ce_tx_hdl : pointer to CE handle
562  * @num_htt_cmpls : htt completions received.
563  *
564  * Function:
565  * Increment the value of sw index of src ring
566  * according to number of htt completions
567  * received.
568  *
569  * Return: void
570  */
571 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
572 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
573 {
574 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
575 	struct CE_ring_state *src_ring = ce_state->src_ring;
576 	uint32_t nentries_mask = src_ring->nentries_mask;
577 	/*
578 	 * Advance the s/w index:
579 	 * This effectively simulates completing the CE ring descriptors
580 	 */
581 	src_ring->sw_index =
582 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
583 				num_htt_cmpls);
584 }
585 #else
586 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
587 {}
588 #endif
589 
590 /**
591  * ce_send_single() - sends one msdu
592  * @ce_tx_hdl : pointer to CE handle
593  * @msdu : msdu to be sent
594  * @transfer_id : transfer id
595  * @len : Downloaded length
596  *
597  * Function:
598  * 1. Send one msdu
599  * 2. Increment write index of src ring accordingly.
600  *
601  * Return: QDF_STATUS: CE sent status
602  */
603 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
604 			  uint32_t transfer_id, u_int32_t len)
605 {
606 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
607 	struct hif_softc *scn = ce_state->scn;
608 	struct CE_ring_state *src_ring = ce_state->src_ring;
609 	uint32_t ctrl_addr = ce_state->ctrl_addr;
610 	/*A_target_id_t targid = TARGID(scn);*/
611 
612 	uint32_t nentries_mask = src_ring->nentries_mask;
613 	uint32_t sw_index, write_index;
614 
615 	struct CE_src_desc *src_desc_base =
616 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
617 	uint32_t *src_desc;
618 
619 	struct CE_src_desc lsrc_desc = {0};
620 	enum hif_ce_event_type event_type;
621 
622 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
623 	sw_index = src_ring->sw_index;
624 	write_index = src_ring->write_index;
625 
626 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
627 					sw_index-1) < 1)) {
628 		hif_err("ce send fail %d %d %d", nentries_mask,
629 		       write_index, sw_index);
630 		return QDF_STATUS_E_RESOURCES;
631 	}
632 
633 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
634 
635 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
636 
637 	lsrc_desc.meta_data = transfer_id;
638 	lsrc_desc.nbytes = len;
639 	/*  Data packet is a byte stream, so disable byte swap */
640 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
641 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
642 
643 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
644 
645 
646 	src_ring->per_transfer_context[write_index] = msdu;
647 
648 	if (((struct CE_src_desc *)src_desc)->gather)
649 		event_type = HIF_TX_GATHER_DESC_POST;
650 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
651 		event_type = HIF_TX_DESC_SOFTWARE_POST;
652 	else
653 		event_type = HIF_TX_DESC_POST;
654 
655 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
656 				(union ce_desc *)src_desc, msdu,
657 				write_index, len);
658 
659 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
660 
661 	src_ring->write_index = write_index;
662 
663 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
664 
665 	return QDF_STATUS_SUCCESS;
666 }
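
/*
 * Worked example for the occupancy check above (a sketch; it assumes
 * CE_RING_DELTA(mask, from, to) evaluates to ((to - from) & mask)):
 * with 512 ring entries nentries_mask is 511; if sw_index is 3 and
 * write_index is 5, CE_RING_DELTA(511, 5, 3 - 1) = (2 - 5) & 511 = 509,
 * i.e. 509 descriptors are still free, so the send proceeds.
 */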
667 
668 /**
669  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
670  * @copyeng: copy engine handle
671  * @per_recv_context: virtual address of the nbuf
672  * @buffer: physical address of the nbuf
673  *
674  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
675  */
676 QDF_STATUS
677 ce_recv_buf_enqueue(struct CE_handle *copyeng,
678 		    void *per_recv_context, qdf_dma_addr_t buffer)
679 {
680 	struct CE_state *CE_state = (struct CE_state *)copyeng;
681 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
682 
683 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
684 			per_recv_context, buffer);
685 }
686 qdf_export_symbol(ce_recv_buf_enqueue);
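
/*
 * Illustrative sketch (not part of the driver): posting an RX buffer
 * typically passes the nbuf itself as the per-transfer context together
 * with its DMA address; `copyeng` and `nbuf` are hypothetical locals and
 * the nbuf is assumed to be DMA mapped already.
 *
 *	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(nbuf);
 *
 *	if (ce_recv_buf_enqueue(copyeng, (void *)nbuf, paddr) !=
 *	    QDF_STATUS_SUCCESS)
 *		; // ring full - keep the nbuf for a later refill attempt
 */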
687 
688 void
689 ce_send_watermarks_set(struct CE_handle *copyeng,
690 		       unsigned int low_alert_nentries,
691 		       unsigned int high_alert_nentries)
692 {
693 	struct CE_state *CE_state = (struct CE_state *)copyeng;
694 	uint32_t ctrl_addr = CE_state->ctrl_addr;
695 	struct hif_softc *scn = CE_state->scn;
696 
697 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
698 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
699 }
700 
701 void
702 ce_recv_watermarks_set(struct CE_handle *copyeng,
703 		       unsigned int low_alert_nentries,
704 		       unsigned int high_alert_nentries)
705 {
706 	struct CE_state *CE_state = (struct CE_state *)copyeng;
707 	uint32_t ctrl_addr = CE_state->ctrl_addr;
708 	struct hif_softc *scn = CE_state->scn;
709 
710 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
711 				low_alert_nentries);
712 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
713 				high_alert_nentries);
714 }
715 
716 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
717 {
718 	struct CE_state *CE_state = (struct CE_state *)copyeng;
719 	struct CE_ring_state *src_ring = CE_state->src_ring;
720 	unsigned int nentries_mask = src_ring->nentries_mask;
721 	unsigned int sw_index;
722 	unsigned int write_index;
723 
724 	qdf_spin_lock(&CE_state->ce_index_lock);
725 	sw_index = src_ring->sw_index;
726 	write_index = src_ring->write_index;
727 	qdf_spin_unlock(&CE_state->ce_index_lock);
728 
729 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
730 }
731 
732 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
733 {
734 	struct CE_state *CE_state = (struct CE_state *)copyeng;
735 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
736 	unsigned int nentries_mask = dest_ring->nentries_mask;
737 	unsigned int sw_index;
738 	unsigned int write_index;
739 
740 	qdf_spin_lock(&CE_state->ce_index_lock);
741 	sw_index = dest_ring->sw_index;
742 	write_index = dest_ring->write_index;
743 	qdf_spin_unlock(&CE_state->ce_index_lock);
744 
745 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
746 }
747 
748 /*
749  * Locked wrapper around the per-service ce_completed_recv_next_nolock
750  * implementation; takes the CE index lock around the call.
751  */
752 QDF_STATUS
753 ce_completed_recv_next(struct CE_handle *copyeng,
754 		       void **per_CE_contextp,
755 		       void **per_transfer_contextp,
756 		       qdf_dma_addr_t *bufferp,
757 		       unsigned int *nbytesp,
758 		       unsigned int *transfer_idp, unsigned int *flagsp)
759 {
760 	struct CE_state *CE_state = (struct CE_state *)copyeng;
761 	QDF_STATUS status;
762 	struct hif_softc *scn = CE_state->scn;
763 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
764 	struct ce_ops *ce_services;
765 
766 	ce_services = hif_state->ce_services;
767 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
768 	status =
769 		ce_services->ce_completed_recv_next_nolock(CE_state,
770 				per_CE_contextp, per_transfer_contextp, bufferp,
771 					      nbytesp, transfer_idp, flagsp);
772 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
773 
774 	return status;
775 }
776 
777 QDF_STATUS
778 ce_revoke_recv_next(struct CE_handle *copyeng,
779 		    void **per_CE_contextp,
780 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
781 {
782 	struct CE_state *CE_state = (struct CE_state *)copyeng;
783 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
784 
785 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
786 			per_CE_contextp, per_transfer_contextp, bufferp);
787 }
788 
789 QDF_STATUS
790 ce_cancel_send_next(struct CE_handle *copyeng,
791 		void **per_CE_contextp,
792 		void **per_transfer_contextp,
793 		qdf_dma_addr_t *bufferp,
794 		unsigned int *nbytesp,
795 		unsigned int *transfer_idp,
796 		uint32_t *toeplitz_hash_result)
797 {
798 	struct CE_state *CE_state = (struct CE_state *)copyeng;
799 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
800 
801 	return hif_state->ce_services->ce_cancel_send_next
802 		(copyeng, per_CE_contextp, per_transfer_contextp,
803 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
804 }
805 qdf_export_symbol(ce_cancel_send_next);
806 
807 QDF_STATUS
808 ce_completed_send_next(struct CE_handle *copyeng,
809 		       void **per_CE_contextp,
810 		       void **per_transfer_contextp,
811 		       qdf_dma_addr_t *bufferp,
812 		       unsigned int *nbytesp,
813 		       unsigned int *transfer_idp,
814 		       unsigned int *sw_idx,
815 		       unsigned int *hw_idx,
816 		       unsigned int *toeplitz_hash_result)
817 {
818 	struct CE_state *CE_state = (struct CE_state *)copyeng;
819 	struct hif_softc *scn = CE_state->scn;
820 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
821 	struct ce_ops *ce_services;
822 	QDF_STATUS status;
823 
824 	ce_services = hif_state->ce_services;
825 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
826 	status =
827 		ce_services->ce_completed_send_next_nolock(CE_state,
828 					per_CE_contextp, per_transfer_contextp,
829 					bufferp, nbytesp, transfer_idp, sw_idx,
830 					      hw_idx, toeplitz_hash_result);
831 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
832 
833 	return status;
834 }
835 
836 #ifdef ATH_11AC_TXCOMPACT
837 /* CE engine descriptor reap
838  * Similar to ce_per_engine_service; the only difference is that
839  * ce_per_engine_service does receive and reaping of completed descriptors,
840  * while this function only handles reaping of Tx complete descriptors.
841  * The function is called from the threshold reap poll routine
842  * hif_send_complete_check, so it should not contain receive functionality
843  * within it.
844  */
845 
846 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
847 {
848 	void *CE_context;
849 	void *transfer_context;
850 	qdf_dma_addr_t buf;
851 	unsigned int nbytes;
852 	unsigned int id;
853 	unsigned int sw_idx, hw_idx;
854 	uint32_t toeplitz_hash_result;
855 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
856 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
857 
858 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
859 		return;
860 
861 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
862 			NULL, NULL, 0, 0);
863 
864 	/* Since this function is called from both user context and
865 	 * tasklet context the spinlock has to lock the bottom halves.
866 	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
867 	 * enabled in TX polling mode. If this is not the case, more
868 	 * bottom half spin lock changes are needed. Due to data path
869 	 * performance concerns, after internal discussion we've decided
870 	 * to make the minimum change, i.e., only address the issue that
871 	 * occurred in this function. The possible negative effect of this
872 	 * minimum change is that, in the future, if some other function is
873 	 * also opened up for use from user context, those cases need to be
874 	 * addressed by changing spin_lock to spin_lock_bh as well.
875 	 */
876 
877 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
878 
879 	if (CE_state->send_cb) {
880 		{
881 			struct ce_ops *ce_services = hif_state->ce_services;
882 			/* Pop completed send buffers and call the
883 			 * registered send callback for each
884 			 */
885 			while (ce_services->ce_completed_send_next_nolock
886 				 (CE_state, &CE_context,
887 				  &transfer_context, &buf,
888 				  &nbytes, &id, &sw_idx, &hw_idx,
889 				  &toeplitz_hash_result) ==
890 				  QDF_STATUS_SUCCESS) {
891 				if (ce_id != CE_HTT_H2T_MSG) {
892 					qdf_spin_unlock_bh(
893 						&CE_state->ce_index_lock);
894 					CE_state->send_cb(
895 						(struct CE_handle *)
896 						CE_state, CE_context,
897 						transfer_context, buf,
898 						nbytes, id, sw_idx, hw_idx,
899 						toeplitz_hash_result);
900 					qdf_spin_lock_bh(
901 						&CE_state->ce_index_lock);
902 				} else {
903 					struct HIF_CE_pipe_info *pipe_info =
904 						(struct HIF_CE_pipe_info *)
905 						CE_context;
906 
907 					qdf_spin_lock_bh(&pipe_info->
908 						 completion_freeq_lock);
909 					pipe_info->num_sends_allowed++;
910 					qdf_spin_unlock_bh(&pipe_info->
911 						   completion_freeq_lock);
912 				}
913 			}
914 		}
915 	}
916 
917 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
918 
919 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
920 			NULL, NULL, 0, 0);
921 	Q_TARGET_ACCESS_END(scn);
922 }
923 
924 #endif /*ATH_11AC_TXCOMPACT */
925 
926 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
927 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
928 {
929 	// QDF_IS_EPPING_ENABLED is a pre-Lithium feature
930 	// CE4 completion is enabled only on Lithium and later targets
931 	// so there is no need to check for EPPING
932 	return true;
933 }
934 
935 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
936 
937 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
938 {
939 	if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode))
940 		return true;
941 	else
942 		return false;
943 }
944 
945 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
946 
947 /*
948  * ce_engine_service_reg:
949  *
950  * Called from ce_per_engine_service and goes through the regular interrupt
951  * handling that does not involve the WLAN fast path feature.
952  *
953  * Returns void
954  */
955 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
956 {
957 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
958 	uint32_t ctrl_addr = CE_state->ctrl_addr;
959 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
960 	void *CE_context;
961 	void *transfer_context;
962 	qdf_dma_addr_t buf;
963 	unsigned int nbytes;
964 	unsigned int id;
965 	unsigned int flags;
966 	unsigned int more_comp_cnt = 0;
967 	unsigned int more_snd_comp_cnt = 0;
968 	unsigned int sw_idx, hw_idx;
969 	uint32_t toeplitz_hash_result;
970 	uint32_t mode = hif_get_conparam(scn);
971 
972 more_completions:
973 	if (CE_state->recv_cb) {
974 
975 		/* Pop completed recv buffers and call
976 		 * the registered recv callback for each
977 		 */
978 		while (hif_state->ce_services->ce_completed_recv_next_nolock
979 				(CE_state, &CE_context, &transfer_context,
980 				&buf, &nbytes, &id, &flags) ==
981 				QDF_STATUS_SUCCESS) {
982 			qdf_spin_unlock(&CE_state->ce_index_lock);
983 			CE_state->recv_cb((struct CE_handle *)CE_state,
984 					  CE_context, transfer_context, buf,
985 					  nbytes, id, flags);
986 
987 			qdf_spin_lock(&CE_state->ce_index_lock);
988 			/*
989 			 * EV #112693 -
990 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
991 			 * BSoD_0x133 occurred in VHT80 UDP_DL
992 			 * Break out of the DPC by force if the number of
993 			 * loops in hif_pci_ce_recv_data reaches
994 			 * MAX_NUM_OF_RECEIVES, to avoid spending too much
995 			 * time in the DPC for each interrupt. Schedule
996 			 * another DPC to avoid data loss if a force-break
997 			 * was taken. This currently applies to Windows OS
998 			 * only; Linux/Mac OS can extend it to their
999 			 * platforms if necessary.
1000 			 */
1001 
1002 			/* Break the receive processes by
1003 			 * force if force_break set up
1004 			 */
1005 			if (qdf_unlikely(CE_state->force_break)) {
1006 				qdf_atomic_set(&CE_state->rx_pending, 1);
1007 				return;
1008 			}
1009 		}
1010 	}
1011 
1012 	/*
1013 	 * Attention: the while loop below may spin indefinitely during a
1014 	 * send stress test.
1015 	 * Resolve it the same way as the receive case (refer to EV #112693).
1016 	 */
1017 
1018 	if (CE_state->send_cb) {
1019 		/* Pop completed send buffers and call
1020 		 * the registered send callback for each
1021 		 */
1022 
1023 #ifdef ATH_11AC_TXCOMPACT
1024 		while (hif_state->ce_services->ce_completed_send_next_nolock
1025 			 (CE_state, &CE_context,
1026 			 &transfer_context, &buf, &nbytes,
1027 			 &id, &sw_idx, &hw_idx,
1028 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1029 
1030 			if (check_ce_id_and_epping_enabled(CE_id, mode)) {
1031 				qdf_spin_unlock(&CE_state->ce_index_lock);
1032 				CE_state->send_cb((struct CE_handle *)CE_state,
1033 						  CE_context, transfer_context,
1034 						  buf, nbytes, id, sw_idx,
1035 						  hw_idx, toeplitz_hash_result);
1036 				qdf_spin_lock(&CE_state->ce_index_lock);
1037 			} else {
1038 				struct HIF_CE_pipe_info *pipe_info =
1039 					(struct HIF_CE_pipe_info *)CE_context;
1040 
1041 				qdf_spin_lock_bh(&pipe_info->
1042 					      completion_freeq_lock);
1043 				pipe_info->num_sends_allowed++;
1044 				qdf_spin_unlock_bh(&pipe_info->
1045 						completion_freeq_lock);
1046 			}
1047 		}
1048 #else                           /*ATH_11AC_TXCOMPACT */
1049 		while (hif_state->ce_services->ce_completed_send_next_nolock
1050 			 (CE_state, &CE_context,
1051 			  &transfer_context, &buf, &nbytes,
1052 			  &id, &sw_idx, &hw_idx,
1053 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1054 			qdf_spin_unlock(&CE_state->ce_index_lock);
1055 			CE_state->send_cb((struct CE_handle *)CE_state,
1056 				  CE_context, transfer_context, buf,
1057 				  nbytes, id, sw_idx, hw_idx,
1058 				  toeplitz_hash_result);
1059 			qdf_spin_lock(&CE_state->ce_index_lock);
1060 		}
1061 #endif /*ATH_11AC_TXCOMPACT */
1062 	}
1063 
1064 more_watermarks:
1065 	if (CE_state->misc_cbs) {
1066 		if (CE_state->watermark_cb &&
1067 				hif_state->ce_services->watermark_int(CE_state,
1068 					&flags)) {
1069 			qdf_spin_unlock(&CE_state->ce_index_lock);
1070 			/* Convert HW IS bits to software flags */
1071 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1072 					CE_state->wm_context, flags);
1073 			qdf_spin_lock(&CE_state->ce_index_lock);
1074 		}
1075 	}
1076 
1077 	/*
1078 	 * Clear the misc interrupts (watermark) that were handled above,
1079 	 * and that will be checked again below.
1080 	 * Clear and check for copy-complete interrupts again, just in case
1081 	 * more copy completions happened while the misc interrupts were being
1082 	 * handled.
1083 	 */
1084 	if (!ce_srng_based(scn)) {
1085 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1086 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1087 					   CE_WATERMARK_MASK |
1088 					   HOST_IS_COPY_COMPLETE_MASK);
1089 		} else {
1090 			qdf_atomic_set(&CE_state->rx_pending, 0);
1091 			hif_err_rl("%s: target access is not allowed",
1092 				   __func__);
1093 			return;
1094 		}
1095 	}
1096 
1097 	/*
1098 	 * Now that per-engine interrupts are cleared, verify that
1099 	 * no recv interrupts arrive while processing send interrupts,
1100 	 * and no recv or send interrupts happened while processing
1101 	 * misc interrupts. Go back and check again. Keep checking until
1102 	 * we find no more events to process.
1103 	 */
1104 	if (CE_state->recv_cb &&
1105 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1106 				CE_state)) {
1107 		if (QDF_IS_EPPING_ENABLED(mode) ||
1108 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1109 			goto more_completions;
1110 		} else {
1111 			if (!ce_srng_based(scn)) {
1112 				hif_err_rl(
1113 					"Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1114 					CE_state->id,
1115 					CE_state->dest_ring->nentries_mask,
1116 					CE_state->dest_ring->sw_index,
1117 					CE_DEST_RING_READ_IDX_GET(scn,
1118 							  CE_state->ctrl_addr));
1119 			}
1120 		}
1121 	}
1122 
1123 	if (CE_state->send_cb &&
1124 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1125 				CE_state)) {
1126 		if (QDF_IS_EPPING_ENABLED(mode) ||
1127 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1128 			goto more_completions;
1129 		} else {
1130 			if (!ce_srng_based(scn)) {
1131 				hif_err_rl(
1132 					"Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x",
1133 					CE_state->id,
1134 					CE_state->src_ring->nentries_mask,
1135 					CE_state->src_ring->sw_index,
1136 					CE_state->src_ring->hw_index,
1137 					CE_state->src_ring->write_index,
1138 					CE_SRC_RING_READ_IDX_GET(scn,
1139 							 CE_state->ctrl_addr));
1140 			}
1141 		}
1142 	}
1143 
1144 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1145 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1146 			goto more_watermarks;
1147 	}
1148 
1149 	qdf_atomic_set(&CE_state->rx_pending, 0);
1150 }
1151 
1152 /*
1153  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1154  *
1155  * Invokes registered callbacks for recv_complete,
1156  * send_complete, and watermarks.
1157  *
1158  * Returns: number of messages processed
1159  */
1160 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1161 {
1162 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1163 
1164 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1165 		return CE_state->receive_count;
1166 
1167 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1168 		hif_err("[premature rc=0]");
1169 		return 0; /* no work done */
1170 	}
1171 
1172 	/* Clear force_break flag and re-initialize receive_count to 0 */
1173 	CE_state->receive_count = 0;
1174 	CE_state->force_break = 0;
1175 	CE_state->ce_service_start_time = qdf_time_sched_clock();
1176 	CE_state->ce_service_yield_time =
1177 		CE_state->ce_service_start_time +
1178 		hif_get_ce_service_max_yield_time(
1179 			(struct hif_opaque_softc *)scn);
1180 
1181 	qdf_spin_lock(&CE_state->ce_index_lock);
1182 
1183 	CE_state->service(scn, CE_id);
1184 
1185 	qdf_spin_unlock(&CE_state->ce_index_lock);
1186 
1187 	if (Q_TARGET_ACCESS_END(scn) < 0)
1188 		hif_err("<--[premature rc=%d]", CE_state->receive_count);
1189 	return CE_state->receive_count;
1190 }
1191 qdf_export_symbol(ce_per_engine_service);
1192 
1193 /*
1194  * Handler for per-engine interrupts on ALL active CEs.
1195  * This is used in cases where the system is sharing a
1196  * single interrupt for all CEs
1197  */
1198 
1199 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1200 {
1201 	int CE_id;
1202 	uint32_t intr_summary;
1203 
1204 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1205 		return;
1206 
1207 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1208 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1209 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1210 
1211 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1212 				qdf_atomic_set(&CE_state->rx_pending, 0);
1213 				ce_per_engine_service(scn, CE_id);
1214 			}
1215 		}
1216 
1217 		Q_TARGET_ACCESS_END(scn);
1218 		return;
1219 	}
1220 
1221 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1222 
1223 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1224 		if (intr_summary & (1 << CE_id))
1225 			intr_summary &= ~(1 << CE_id);
1226 		else
1227 			continue;       /* no intr pending on this CE */
1228 
1229 		ce_per_engine_service(scn, CE_id);
1230 	}
1231 
1232 	Q_TARGET_ACCESS_END(scn);
1233 }
1234 
1235 /* Iterate the CE_state list and disable the compl interrupt
1236  * if it has been registered already.
1237  */
1238 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1239 {
1240 	int CE_id;
1241 
1242 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1243 		return;
1244 
1245 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1246 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1247 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1248 
1249 		/* if the interrupt is currently enabled, disable it */
1250 		if (!CE_state->disable_copy_compl_intr
1251 		    && (CE_state->send_cb || CE_state->recv_cb))
1252 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1253 
1254 		if (CE_state->watermark_cb)
1255 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1256 	}
1257 	Q_TARGET_ACCESS_END(scn);
1258 }
1259 
1260 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1261 {
1262 	int CE_id;
1263 
1264 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1265 		return;
1266 
1267 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1268 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1269 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1270 
1271 		/*
1272 		 * If the CE is supposed to have copy complete interrupts
1273 		 * enabled (i.e. there a callback registered, and the
1274 		 * "disable" flag is not set), then re-enable the interrupt.
1275 		 */
1276 		if (!CE_state->disable_copy_compl_intr
1277 		    && (CE_state->send_cb || CE_state->recv_cb))
1278 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1279 
1280 		if (CE_state->watermark_cb)
1281 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1282 	}
1283 	Q_TARGET_ACCESS_END(scn);
1284 }
1285 
1286 /**
1287  * ce_send_cb_register(): register completion handler
1288  * @copyeng: CE_state representing the ce we are adding the behavior to
1289  * @fn_ptr: callback that the ce should use when processing tx completions
1290  * @disable_interrupts: whether completion interrupts should be disabled
1291  *
1292  * Caller should guarantee that no transactions are in progress before
1293  * switching the callback function.
1294  *
1295  * Registers the send context before the fn pointer so that if the cb is valid
1296  * the context should be valid.
1297  *
1298  * Beware that currently this function will enable completion interrupts.
1299  */
1300 void
1301 ce_send_cb_register(struct CE_handle *copyeng,
1302 		    ce_send_cb fn_ptr,
1303 		    void *ce_send_context, int disable_interrupts)
1304 {
1305 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1306 	struct hif_softc *scn;
1307 	struct HIF_CE_state *hif_state;
1308 
1309 	if (!CE_state) {
1310 		hif_err("Error CE state = NULL");
1311 		return;
1312 	}
1313 	scn = CE_state->scn;
1314 	hif_state = HIF_GET_CE_STATE(scn);
1315 	if (!hif_state) {
1316 		hif_err("Error HIF state = NULL");
1317 		return;
1318 	}
1319 	CE_state->send_context = ce_send_context;
1320 	CE_state->send_cb = fn_ptr;
1321 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1322 							disable_interrupts);
1323 }
1324 qdf_export_symbol(ce_send_cb_register);
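
/*
 * Illustrative sketch (not part of the driver): a client registers its
 * completion handler once at attach time; `my_tx_done`, `my_ctx` and
 * `copyeng` are hypothetical, and the callback arguments mirror the
 * send_cb invocation in ce_engine_service_reg() above.
 *
 *	static void my_tx_done(struct CE_handle *ce_hdl, void *ce_ctx,
 *			       void *xfer_ctx, qdf_dma_addr_t paddr,
 *			       unsigned int nbytes, unsigned int xfer_id,
 *			       unsigned int sw_idx, unsigned int hw_idx,
 *			       uint32_t toeplitz_hash)
 *	{
 *		// recycle or free the completed buffer here
 *	}
 *
 *	ce_send_cb_register(copyeng, my_tx_done, my_ctx, 0);
 */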
1325 
1326 /**
1327  * ce_recv_cb_register(): register completion handler
1328  * @copyeng: CE_state representing the ce we are adding the behavior to
1329  * @fn_ptr: callback that the ce should use when processing rx completions
1330  * @disable_interrupts: whether completion interrupts should be disabled
1331  *
1332  * Registers the send context before the fn pointer so that if the cb is valid
1333  * the context should be valid.
1334  *
1335  * Caller should guarantee that no transactions are in progress before
1336  * switching the callback function.
1337  */
1338 void
1339 ce_recv_cb_register(struct CE_handle *copyeng,
1340 		    CE_recv_cb fn_ptr,
1341 		    void *CE_recv_context, int disable_interrupts)
1342 {
1343 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1344 	struct hif_softc *scn;
1345 	struct HIF_CE_state *hif_state;
1346 
1347 	if (!CE_state) {
1348 		hif_err("ERROR CE state = NULL");
1349 		return;
1350 	}
1351 	scn = CE_state->scn;
1352 	hif_state = HIF_GET_CE_STATE(scn);
1353 	if (!hif_state) {
1354 		hif_err("Error HIF state = NULL");
1355 		return;
1356 	}
1357 	CE_state->recv_context = CE_recv_context;
1358 	CE_state->recv_cb = fn_ptr;
1359 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1360 							disable_interrupts);
1361 }
1362 qdf_export_symbol(ce_recv_cb_register);
1363 
1364 /**
1365  * ce_watermark_cb_register(): register completion handler
1366  * @copyeng: CE_state representing the ce we are adding the behavior to
1367  * @fn_ptr: callback that the ce should use when processing watermark events
1368  *
1369  * Caller should guarantee that no watermark events are being processed before
1370  * switching the callback function.
1371  */
1372 void
1373 ce_watermark_cb_register(struct CE_handle *copyeng,
1374 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1375 {
1376 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1377 	struct hif_softc *scn = CE_state->scn;
1378 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1379 
1380 	CE_state->watermark_cb = fn_ptr;
1381 	CE_state->wm_context = CE_wm_context;
1382 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1383 							0);
1384 	if (fn_ptr)
1385 		CE_state->misc_cbs = 1;
1386 }
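
/*
 * Illustrative sketch (not part of the driver): watermark reporting
 * needs both a callback and thresholds, so a client would typically
 * pair this with ce_send_watermarks_set()/ce_recv_watermarks_set();
 * `copyeng`, `my_wm_cb`, `my_wm_ctx` and the entry counts below are
 * hypothetical.
 *
 *	ce_watermark_cb_register(copyeng, my_wm_cb, my_wm_ctx);
 *	ce_send_watermarks_set(copyeng, 2, 30);
 *	ce_recv_watermarks_set(copyeng, 2, 30);
 */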
1387 
1388 bool ce_get_rx_pending(struct hif_softc *scn)
1389 {
1390 	int CE_id;
1391 
1392 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1393 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1394 
1395 		if (qdf_atomic_read(&CE_state->rx_pending))
1396 			return true;
1397 	}
1398 
1399 	return false;
1400 }
1401 
1402 /**
1403  * ce_check_rx_pending() - ce_check_rx_pending
1404  * @CE_state: context of the copy engine to check
1405  *
1406  * Return: true if there per_engine_service
1407  *	didn't process all the rx descriptors.
1408  */
1409 bool ce_check_rx_pending(struct CE_state *CE_state)
1410 {
1411 	if (qdf_atomic_read(&CE_state->rx_pending))
1412 		return true;
1413 	else
1414 		return false;
1415 }
1416 qdf_export_symbol(ce_check_rx_pending);
1417 
1418 #ifdef IPA_OFFLOAD
1419 #ifdef QCN7605_SUPPORT
1420 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1421 {
1422 	u_int32_t ctrl_addr = CE_state->ctrl_addr;
1423 	struct hif_softc *scn = CE_state->scn;
1424 	qdf_dma_addr_t wr_index_addr;
1425 
1426 	wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
1427 	return wr_index_addr;
1428 }
1429 #else
1430 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1431 {
1432 	struct hif_softc *scn = CE_state->scn;
1433 	qdf_dma_addr_t wr_index_addr;
1434 
1435 	wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
1436 			SR_WR_INDEX_ADDRESS;
1437 	return wr_index_addr;
1438 }
1439 #endif
1440 
1441 /**
1442  * ce_ipa_get_resource() - get uc resource on copyengine
1443  * @ce: copyengine context
1444  * @ce_sr: copyengine source ring resource info
1445  * @ce_sr_ring_size: copyengine source ring size
1446  * @ce_reg_paddr: copyengine register physical address
1447  *
1448  * The copy engine should release these resources to the micro controller.
1449  * The micro controller needs
1450  *  - Copy engine source descriptor base address
1451  *  - Copy engine source descriptor size
1452  *  - PCI BAR address to access the copy engine register
1453  *
1454  * Return: None
1455  */
1456 void ce_ipa_get_resource(struct CE_handle *ce,
1457 			 qdf_shared_mem_t **ce_sr,
1458 			 uint32_t *ce_sr_ring_size,
1459 			 qdf_dma_addr_t *ce_reg_paddr)
1460 {
1461 	struct CE_state *CE_state = (struct CE_state *)ce;
1462 	uint32_t ring_loop;
1463 	struct CE_src_desc *ce_desc;
1464 	qdf_dma_addr_t phy_mem_base;
1465 	struct hif_softc *scn = CE_state->scn;
1466 
1467 	if (CE_UNUSED == CE_state->state) {
1468 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1469 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1470 		*ce_sr_ring_size = 0;
1471 		return;
1472 	}
1473 
1474 	/* Update default value for descriptor */
1475 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1476 	     ring_loop++) {
1477 		ce_desc = (struct CE_src_desc *)
1478 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1479 			   ring_loop * (sizeof(struct CE_src_desc)));
1480 		CE_IPA_RING_INIT(ce_desc);
1481 	}
1482 
1483 	/* Get BAR address */
1484 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1485 
1486 	*ce_sr = CE_state->scn->ipa_ce_ring;
1487 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1488 		sizeof(struct CE_src_desc));
1489 	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);
1490 
1491 }
1492 
1493 #endif /* IPA_OFFLOAD */
1494 
1495 #ifdef HIF_CE_DEBUG_DATA_BUF
1496 /**
1497  * hif_dump_desc_data_buf() - dump the data of a ce descriptor event
1498  * @buf: buffer to copy to
1499  * @pos: Current position till which the buf is filled
1500  * @data: Data to be copied
1501  * @data_len: Length of the data to be copied
1502  */
1503 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1504 					uint8_t *data, uint32_t data_len)
1505 {
1506 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1507 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1508 
1509 	if ((data_len > 0) && data) {
1510 		if (data_len < 16) {
1511 			hex_dump_to_buffer(data,
1512 						CE_DEBUG_DATA_PER_ROW,
1513 						16, 1, buf + pos,
1514 						(ssize_t)PAGE_SIZE - pos,
1515 						false);
1516 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1517 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1518 		} else {
1519 			uint32_t rows = (data_len / 16) + 1;
1520 			uint32_t row = 0;
1521 
1522 			for (row = 0; row < rows; row++) {
1523 				hex_dump_to_buffer(data + (row * 16),
1524 							CE_DEBUG_DATA_PER_ROW,
1525 							16, 1, buf + pos,
1526 							(ssize_t)PAGE_SIZE
1527 							- pos, false);
1528 				pos +=
1529 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1530 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1531 						"\n");
1532 			}
1533 		}
1534 	}
1535 
1536 	return pos;
1537 }
1538 #endif
1539 
1540 /*
1541  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
1542  * checked for being defined here
1543  */
1544 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1545 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1546 {
1547 	switch (type) {
1548 	case HIF_RX_DESC_POST:
1549 		return "HIF_RX_DESC_POST";
1550 	case HIF_RX_DESC_COMPLETION:
1551 		return "HIF_RX_DESC_COMPLETION";
1552 	case HIF_TX_GATHER_DESC_POST:
1553 		return "HIF_TX_GATHER_DESC_POST";
1554 	case HIF_TX_DESC_POST:
1555 		return "HIF_TX_DESC_POST";
1556 	case HIF_TX_DESC_SOFTWARE_POST:
1557 		return "HIF_TX_DESC_SOFTWARE_POST";
1558 	case HIF_TX_DESC_COMPLETION:
1559 		return "HIF_TX_DESC_COMPLETION";
1560 	case FAST_RX_WRITE_INDEX_UPDATE:
1561 		return "FAST_RX_WRITE_INDEX_UPDATE";
1562 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1563 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1564 	case FAST_TX_WRITE_INDEX_UPDATE:
1565 		return "FAST_TX_WRITE_INDEX_UPDATE";
1566 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1567 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1568 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1569 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1570 	case RESUME_WRITE_INDEX_UPDATE:
1571 		return "RESUME_WRITE_INDEX_UPDATE";
1572 	case HIF_IRQ_EVENT:
1573 		return "HIF_IRQ_EVENT";
1574 	case HIF_CE_TASKLET_ENTRY:
1575 		return "HIF_CE_TASKLET_ENTRY";
1576 	case HIF_CE_TASKLET_RESCHEDULE:
1577 		return "HIF_CE_TASKLET_RESCHEDULE";
1578 	case HIF_CE_TASKLET_EXIT:
1579 		return "HIF_CE_TASKLET_EXIT";
1580 	case HIF_CE_REAP_ENTRY:
1581 		return "HIF_CE_REAP_ENTRY";
1582 	case HIF_CE_REAP_EXIT:
1583 		return "HIF_CE_REAP_EXIT";
1584 	case NAPI_SCHEDULE:
1585 		return "NAPI_SCHEDULE";
1586 	case NAPI_POLL_ENTER:
1587 		return "NAPI_POLL_ENTER";
1588 	case NAPI_COMPLETE:
1589 		return "NAPI_COMPLETE";
1590 	case NAPI_POLL_EXIT:
1591 		return "NAPI_POLL_EXIT";
1592 	case HIF_RX_NBUF_ALLOC_FAILURE:
1593 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1594 	case HIF_RX_NBUF_MAP_FAILURE:
1595 		return "HIF_RX_NBUF_MAP_FAILURE";
1596 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1597 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1598 	default:
1599 		return "invalid";
1600 	}
1601 }
1602 
1603 /**
1604  * hif_dump_desc_event() - dump the currently selected ce descriptor event
1605  * @scn: hif context
1606  * @buf: buffer into which the event is dumped
1607  * Return: number of bytes written to @buf, or a negative value on error
1608  */
1609 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1610 {
1611 	struct hif_ce_desc_event *event;
1612 	uint64_t secs, usecs;
1613 	ssize_t len = 0;
1614 	struct ce_desc_hist *ce_hist = NULL;
1615 	struct hif_ce_desc_event *hist_ev = NULL;
1616 
1617 	if (!scn)
1618 		return -EINVAL;
1619 
1620 	ce_hist = &scn->hif_ce_desc_hist;
1621 
1622 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1623 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1624 		qdf_print("Invalid values");
1625 		return -EINVAL;
1626 	}
1627 
1628 	hist_ev =
1629 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1630 
1631 	if (!hist_ev) {
1632 		qdf_print("Low Memory");
1633 		return -EINVAL;
1634 	}
1635 
1636 	event = &hist_ev[ce_hist->hist_index];
1637 
1638 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1639 
1640 	len += snprintf(buf, PAGE_SIZE - len,
1641 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1642 			secs, usecs, ce_hist->hist_id,
1643 			ce_event_type_to_str(event->type),
1644 			event->index, event->memory);
1645 #ifdef HIF_CE_DEBUG_DATA_BUF
1646 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
1647 			event->actual_data_len);
1648 #endif
1649 
1650 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1651 
1652 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1653 				16, 1, buf + len,
1654 				(ssize_t)PAGE_SIZE - len, false);
1655 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1656 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1657 
1658 #ifdef HIF_CE_DEBUG_DATA_BUF
1659 	if (ce_hist->data_enable[ce_hist->hist_id])
1660 		len = hif_dump_desc_data_buf(buf, len, event->data,
1661 						(event->actual_data_len <
1662 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1663 						event->actual_data_len :
1664 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1665 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1666 
1667 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1668 
1669 	return len;
1670 }
1671 
1672 /*
1673  * hif_input_desc_trace_buf_index() -
1674  * API to get the CE id and CE debug storage buffer index from user input
1675  *
1676  * @scn: hif context
1677  * @buf: data received from the user
1678  * @size: size of @buf
1679  *
1680  * Return: @size on success, negative value on failure
1681  */
1682 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1683 					const char *buf, size_t size)
1684 {
1685 	struct ce_desc_hist *ce_hist = NULL;
1686 
1687 	if (!scn)
1688 		return -EINVAL;
1689 
1690 	ce_hist = &scn->hif_ce_desc_hist;
1691 
1692 	if (!size) {
1693 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1694 		return -EINVAL;
1695 	}
1696 
1697 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1698 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1699 		qdf_nofl_err("%s: Invalid input value.", __func__);
1700 		return -EINVAL;
1701 	}
1702 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1703 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1704 		qdf_print("Invalid values");
1705 		return -EINVAL;
1706 	}
1707 
1708 	return size;
1709 }
1710 
1711 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1712 
1713 #ifdef HIF_CE_DEBUG_DATA_BUF
1714 /*
1715  * hif_ce_en_desc_hist() -
1716  * API to enable recording the CE desc history
1717  *
1718  * @scn: hif context
1719  * @buf: user input selecting the CE id and the enable/disable value
1720  * @size: size of @buf
1721  *
1722  * Starts recording the ce desc history
1723  *
1724  * Return: @size on success, negative value on failure
1725  */
1726 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1727 {
1728 	struct ce_desc_hist *ce_hist = NULL;
1729 	uint32_t cfg = 0;
1730 	uint32_t ce_id = 0;
1731 
1732 	if (!scn)
1733 		return -EINVAL;
1734 
1735 	ce_hist = &scn->hif_ce_desc_hist;
1736 
1737 	if (!size) {
1738 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1739 		return -EINVAL;
1740 	}
1741 
1742 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1743 		   (unsigned int *)&cfg) != 2) {
1744 		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
1745 			     __func__);
1746 		return -EINVAL;
1747 	}
1748 	if (ce_id >= CE_COUNT_MAX) {
1749 		qdf_print("Invalid value CE Id");
1750 		return -EINVAL;
1751 	}
1752 
1753 	if (cfg > 1) {
1754 		qdf_print("Invalid values: enter 0 or 1");
1755 		return -EINVAL;
1756 	}
1757 
1758 	if (!ce_hist->hist_ev[ce_id])
1759 		return -EINVAL;
1760 
1761 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1762 	if (cfg == 1) {
1763 		if (ce_hist->data_enable[ce_id] == 1) {
1764 			qdf_debug("Already Enabled");
1765 		} else {
1766 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
1767 							== QDF_STATUS_E_NOMEM){
1768 				ce_hist->data_enable[ce_id] = 0;
1769 				qdf_err("%s:Memory Alloc failed", __func__);
1770 			} else
1771 				ce_hist->data_enable[ce_id] = 1;
1772 		}
1773 	} else if (cfg == 0) {
1774 		if (ce_hist->data_enable[ce_id] == 0) {
1775 			qdf_debug("Already Disabled");
1776 		} else {
1777 			ce_hist->data_enable[ce_id] = 0;
1778 			free_mem_ce_debug_hist_data(scn, ce_id);
1779 		}
1780 	}
1781 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1782 
1783 	return size;
1784 }
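
/*
 * Usage sketch (illustrative): the input is parsed as "<CE id> <1|0>", so
 * "5 1" enables data capture for CE 5 (allocating buffers through
 * alloc_mem_ce_debug_hist_data()) and "5 0" disables it again, freeing them
 * through free_mem_ce_debug_hist_data():
 *
 *   ret = hif_ce_en_desc_hist(scn, "5 1", strlen("5 1"));
 *   if (ret < 0)
 *           // invalid CE id/flag, or no descriptor history for that CE
 */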
1785 
1786 /*
1787  * hif_disp_ce_enable_desc_data_hist() -
1788  * API to display the data_enable flag for each copy engine
1789  *
1790  * @scn: HIF context
1791  * @buf: buffer to copy the data into
1792  *
1793  * Emits one " CE<id>: <0/1>" line per copy engine.
1794  * Return: total length copied
1795  */
1796 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1797 {
1798 	ssize_t len = 0;
1799 	uint32_t ce_id = 0;
1800 	struct ce_desc_hist *ce_hist = NULL;
1801 
1802 	if (!scn)
1803 		return -EINVAL;
1804 
1805 	ce_hist = &scn->hif_ce_desc_hist;
1806 
1807 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1808 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1809 				ce_id, ce_hist->data_enable[ce_id]);
1810 	}
1811 
1812 	return len;
1813 }
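
/*
 * Example output (illustrative): one line per copy engine, reflecting the
 * data_enable[] flags toggled by hif_ce_en_desc_hist() above, e.g.:
 *
 *    CE0: 0
 *    CE1: 0
 *    CE2: 1
 *    ...
 */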
1814 #endif /* HIF_CE_DEBUG_DATA_BUF */
1815 
1816 #ifdef OL_ATH_SMART_LOGGING
1817 #define GUARD_SPACE 10
1818 #define LOG_ID_SZ 4
1819 /*
1820  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
1821  * @src_ring: SRC ring state
1822  * @buf_cur: Current pointer in ring buffer
1823  * @buf_init: Start of the ring buffer
1824  * @buf_sz: Size of the ring buffer
1825  * @skb_sz: Max size of the SKB buffer to be copied
1826  *
1827  * Dumps all the CE SRC ring descriptors and the buffers they point to into
1828  * the given buf; skb_sz is the max SKB payload size to be copied
1829  *
1830  * Return: Current pointer in ring buffer
1831  */
1832 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
1833 				    uint8_t *buf_cur, uint8_t *buf_init,
1834 				    uint32_t buf_sz, uint32_t skb_sz)
1835 {
1836 	struct CE_src_desc *src_ring_base;
1837 	uint32_t len, entry;
1838 	struct CE_src_desc  *src_desc;
1839 	qdf_nbuf_t nbuf;
1840 	uint32_t available_buf;
1841 
1842 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
1843 	len = sizeof(struct CE_ring_state);
1844 	available_buf = buf_sz - (buf_cur - buf_init);
1845 	if (available_buf < (len + GUARD_SPACE)) {
1846 		buf_cur = buf_init;
1847 	}
1848 
1849 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
1850 	buf_cur += sizeof(struct CE_ring_state);
1851 
1852 	for (entry = 0; entry < src_ring->nentries; entry++) {
1853 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
1854 		nbuf = src_ring->per_transfer_context[entry];
1855 		if (nbuf) {
1856 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1857 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1858 
1859 			len = sizeof(struct CE_src_desc) + skb_cp_len
1860 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1861 			available_buf = buf_sz - (buf_cur - buf_init);
1862 			if (available_buf < (len + GUARD_SPACE)) {
1863 				buf_cur = buf_init;
1864 			}
1865 			qdf_mem_copy(buf_cur, src_desc,
1866 				     sizeof(struct CE_src_desc));
1867 			buf_cur += sizeof(struct CE_src_desc);
1868 
1869 			available_buf = buf_sz - (buf_cur - buf_init);
1870 			buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
1871 						skb_cp_len);
1872 
1873 			if (skb_cp_len) {
1874 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1875 					     skb_cp_len);
1876 				buf_cur += skb_cp_len;
1877 			}
1878 		} else {
1879 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
1880 			available_buf = buf_sz - (buf_cur - buf_init);
1881 			if (available_buf < (len + GUARD_SPACE)) {
1882 				buf_cur = buf_init;
1883 			}
1884 			qdf_mem_copy(buf_cur, src_desc,
1885 				     sizeof(struct CE_src_desc));
1886 			buf_cur += sizeof(struct CE_src_desc);
1887 			available_buf = buf_sz - (buf_cur - buf_init);
1888 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1889 		}
1890 	}
1891 
1892 	return buf_cur;
1893 }
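
/*
 * Layout sketch of the dump produced above (sizes are illustrative): the
 * destination is treated as a circular log, so whenever fewer than
 * (len + GUARD_SPACE) bytes remain, buf_cur wraps back to buf_init and
 * older records are overwritten.
 *
 *   [struct CE_ring_state]
 *   [struct CE_src_desc]["SKB<copy-len>"][up to skb_sz bytes of payload]
 *   [struct CE_src_desc]["NUL"]           <- entry with no nbuf attached
 *   ...
 */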
1894 
1895 /*
1896  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
1897  * @dest_ring: SRC ring state
1898  * @buf_cur: Current pointer in ring buffer
1899  * @buf_init:Start of the ring buffer
1900  * @buf_sz: Size of the ring buffer
1901  * @skb_sz: Max size of the SKB buffer to be copied
1902  *
1903  * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
1904  * the given buf, skb_sz is the max buffer size to be copied
1905  *
1906  * Return: Current pointer in ring buffer
1907  */
1908 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
1909 				     uint8_t *buf_cur, uint8_t *buf_init,
1910 				     uint32_t buf_sz, uint32_t skb_sz)
1911 {
1912 	struct CE_dest_desc *dest_ring_base;
1913 	uint32_t len, entry;
1914 	struct CE_dest_desc  *dest_desc;
1915 	qdf_nbuf_t nbuf;
1916 	uint32_t available_buf;
1917 
1918 	dest_ring_base =
1919 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1920 
1921 	len = sizeof(struct CE_ring_state);
1922 	available_buf = buf_sz - (buf_cur - buf_init);
1923 	if (available_buf < (len + GUARD_SPACE)) {
1924 		buf_cur = buf_init;
1925 	}
1926 
1927 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
1928 	buf_cur += sizeof(struct CE_ring_state);
1929 
1930 	for (entry = 0; entry < dest_ring->nentries; entry++) {
1931 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
1932 
1933 		nbuf = dest_ring->per_transfer_context[entry];
1934 		if (nbuf) {
1935 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1936 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1937 
1938 			len = sizeof(struct CE_dest_desc) + skb_cp_len
1939 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1940 
1941 			available_buf = buf_sz - (buf_cur - buf_init);
1942 			if (available_buf < (len + GUARD_SPACE)) {
1943 				buf_cur = buf_init;
1944 			}
1945 
1946 			qdf_mem_copy(buf_cur, dest_desc,
1947 				     sizeof(struct CE_dest_desc));
1948 			buf_cur += sizeof(struct CE_dest_desc);
1949 			available_buf = buf_sz - (buf_cur - buf_init);
1950 			buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
1951 						skb_cp_len);
1952 			if (skb_cp_len) {
1953 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1954 					     skb_cp_len);
1955 				buf_cur += skb_cp_len;
1956 			}
1957 		} else {
1958 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
1959 			available_buf = buf_sz - (buf_cur - buf_init);
1960 			if (available_buf < (len + GUARD_SPACE)) {
1961 				buf_cur = buf_init;
1962 			}
1963 			qdf_mem_copy(buf_cur, dest_desc,
1964 				     sizeof(struct CE_dest_desc));
1965 			buf_cur += sizeof(struct CE_dest_desc);
1966 			available_buf = buf_sz - (buf_cur - buf_init);
1967 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1968 		}
1969 	}
1970 	return buf_cur;
1971 }
1972 
1973 /**
1974  * hif_log_dump_ce() - Copy the CE SRC or DEST ring to buf
1975  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1976  * and the buffers they point to into the given buf
1977  */
1978 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1979 			 uint8_t *buf_init, uint32_t buf_sz,
1980 			 uint32_t ce, uint32_t skb_sz)
1981 {
1982 	struct CE_state *ce_state;
1983 	struct CE_ring_state *src_ring;
1984 	struct CE_ring_state *dest_ring;
1985 
1986 	ce_state = scn->ce_id_to_state[ce];
1987 	src_ring = ce_state->src_ring;
1988 	dest_ring = ce_state->dest_ring;
1989 
1990 	if (src_ring) {
1991 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
1992 					      buf_init, buf_sz, skb_sz);
1993 	} else if (dest_ring) {
1994 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
1995 					       buf_init, buf_sz, skb_sz);
1996 	}
1997 
1998 	return buf_cur;
1999 }
2000 
2001 qdf_export_symbol(hif_log_dump_ce);
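
/*
 * Caller sketch (hypothetical smart-logging consumer; buffer ownership and
 * the skb_sz value of 64 below are assumptions, not defined in this file).
 * Note that ce_id must identify an initialized copy engine, since
 * ce_id_to_state[] is dereferenced without a NULL check:
 *
 *   uint8_t *cur = buf_init;
 *
 *   cur = hif_log_dump_ce(scn, cur, buf_init, buf_sz, ce_id, 64);
 *
 * The returned cursor is passed back in on the next call, so successive CE
 * dumps share one wrapping ring buffer.
 */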
2002 #endif /* OL_ATH_SMART_LOGGING */
2003 
2004