xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision d57e7836dc389f88871517cfeedfdd0f572e4b31)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 static int war1_allow_sleep;
65 /* io32 write workaround */
66 static int hif_ce_war1;
67 
68 /**
69  * hif_ce_war_disable() - disable ce war globally
70  */
71 void hif_ce_war_disable(void)
72 {
73 	hif_ce_war1 = 0;
74 }
75 
76 /**
77  * hif_ce_war_enable() - enable ce war globally
78  */
79 void hif_ce_war_enable(void)
80 {
81 	hif_ce_war1 = 1;
82 }
83 
84 /*
85  * Note: For MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) check is needed
86  * for the definitions below as well
87  */
88 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
89 
90 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
91 #define CE_DEBUG_DATA_PER_ROW 16
92 
93 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
94 
95 int get_next_record_index(qdf_atomic_t *table_index, int array_size)
96 {
97 	int record_index = qdf_atomic_inc_return(table_index);
98 
99 	if (record_index == array_size)
100 		qdf_atomic_sub(array_size, table_index);
101 
102 	while (record_index >= array_size)
103 		record_index -= array_size;
104 
105 	return record_index;
106 }
107 
108 qdf_export_symbol(get_next_record_index);
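
/*
 * Illustrative sketch (not part of the driver): how the circular index
 * returned by get_next_record_index() is typically consumed. The names
 * my_index and my_table are hypothetical.
 *
 *	static qdf_atomic_t my_index;
 *	static struct hif_ce_desc_event my_table[HIF_CE_HISTORY_MAX];
 *
 *	int idx = get_next_record_index(&my_index, HIF_CE_HISTORY_MAX);
 *
 *	my_table[idx].time = qdf_get_log_timestamp();
 *
 * The atomic increment hands out 0, 1, ..., HIF_CE_HISTORY_MAX - 1 and then
 * wraps back to 0, so concurrent callers each claim a distinct slot.
 */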
109 
110 #ifdef HIF_CE_DEBUG_DATA_BUF
111 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
112 {
113 	uint8_t *data = NULL;
114 
115 	if (!event->data) {
116 		hif_err_rl("No ce debug memory allocated");
117 		return;
118 	}
119 
120 	if (event->memory && len > 0)
121 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
122 
123 	event->actual_data_len = 0;
124 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
125 
126 	if (data && len > 0) {
127 		qdf_mem_copy(event->data, data,
128 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
129 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
130 		event->actual_data_len = len;
131 	}
132 }
133 
134 qdf_export_symbol(hif_ce_desc_data_record);
135 
136 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
137 {
138 	qdf_mem_zero(event,
139 		     offsetof(struct hif_ce_desc_event, data));
140 }
141 
142 qdf_export_symbol(hif_clear_ce_desc_debug_data);
143 #else
144 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
145 {
146 	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
147 }
148 
149 qdf_export_symbol(hif_clear_ce_desc_debug_data);
150 #endif /* HIF_CE_DEBUG_DATA_BUF */
151 
152 #if defined(HIF_RECORD_PADDR)
153 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
154 				 struct hif_ce_desc_event *event,
155 				 qdf_nbuf_t memory)
156 {
157 	if (memory) {
158 		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
159 		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
160 					scn->qdf_dev,
161 					event->dma_addr);
162 
163 		event->virt_to_phy =
164 			virt_to_phys(qdf_nbuf_data(memory));
165 	}
166 }
167 #endif /* HIF_RECORD_PADDR */
168 
169 /**
170  * hif_record_ce_desc_event() - record ce descriptor events
171  * @scn: hif_softc
172  * @ce_id: which ce is the event occurring on
173  * @type: what happened
174  * @descriptor: pointer to the descriptor posted/completed
175  * @memory: virtual address of buffer related to the descriptor
176  * @index: index that the descriptor was/will be at.
 * @len: length of the data associated with the descriptor
177  */
178 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
179 				enum hif_ce_event_type type,
180 				union ce_desc *descriptor,
181 				void *memory, int index,
182 				int len)
183 {
184 	int record_index;
185 	struct hif_ce_desc_event *event;
186 
187 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
188 	struct hif_ce_desc_event *hist_ev = NULL;
189 
190 	if (ce_id < CE_COUNT_MAX)
191 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
192 	else
193 		return;
194 
198 	if (!ce_hist->enable[ce_id])
199 		return;
200 
201 	if (!hist_ev)
202 		return;
203 
204 	record_index = get_next_record_index(
205 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
206 
207 	event = &hist_ev[record_index];
208 
209 	hif_clear_ce_desc_debug_data(event);
210 
211 	event->type = type;
212 	event->time = qdf_get_log_timestamp();
213 	event->cpu_id = qdf_get_cpu();
214 
215 	if (descriptor)
216 		qdf_mem_copy(&event->descriptor, descriptor,
217 			     sizeof(union ce_desc));
218 
219 	event->memory = memory;
220 	event->index = index;
221 
222 	if (event->type == HIF_RX_DESC_POST ||
223 	    event->type == HIF_RX_DESC_COMPLETION)
224 		hif_ce_desc_record_rx_paddr(scn, event, memory);
225 
226 	if (ce_hist->data_enable[ce_id])
227 		hif_ce_desc_data_record(event, len);
228 }
229 qdf_export_symbol(hif_record_ce_desc_event);
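
/*
 * Illustrative sketch (not part of the driver): recording a TX descriptor
 * post event, mirroring the call made from ce_send_single() later in this
 * file. The variables scn, ce_state, src_desc, msdu, write_index and len
 * are assumed to already be in scope at the call site.
 *
 *	hif_record_ce_desc_event(scn, ce_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc, msdu,
 *				 write_index, len);
 */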
230 
231 /**
232  * ce_init_ce_desc_event_log() - initialize the ce event log
233  * @ce_id: copy engine id for which we are initializing the log
234  * @size: size of array to dedicate
235  *
236  * Currently the passed size is ignored in favor of a precompiled value.
237  */
238 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
239 {
240 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
241 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
242 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
243 }
244 
245 /**
246  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: hif context
247  * @ce_id: copy engine id for which we are deinitializing the log
248  *
249  */
250 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
251 {
252 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
253 
254 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
255 }
256 
257 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
258 void hif_record_ce_desc_event(struct hif_softc *scn,
259 		int ce_id, enum hif_ce_event_type type,
260 		union ce_desc *descriptor, void *memory,
261 		int index, int len)
262 {
263 }
264 qdf_export_symbol(hif_record_ce_desc_event);
265 
266 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
267 					int size)
268 {
269 }
270 
271 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
272 {
273 }
274 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
275 
276 #ifdef NAPI_YIELD_BUDGET_BASED
277 bool hif_ce_service_should_yield(struct hif_softc *scn,
278 				 struct CE_state *ce_state)
279 {
280 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
281 
282 	/* Set receive_count to MAX_NUM_OF_RECEIVES when this count goes
283 	 * beyond MAX_NUM_OF_RECEIVES, to avoid a NAPI bucket calculation
284 	 * issue. This can happen in fast path handling as processing
285 	 * happens in batches.
286 	 */
287 	if (yield)
288 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
289 
290 	return yield;
291 }
292 #else
293 /**
294  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
295  * @scn: hif context
296  * @ce_state: context of the copy engine being serviced
297  *
298  * Return: true if the service should yield
299  */
300 bool hif_ce_service_should_yield(struct hif_softc *scn,
301 				 struct CE_state *ce_state)
302 {
303 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
304 
305 	time_limit_reached =
306 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
307 
308 	if (!time_limit_reached)
309 		rxpkt_thresh_reached = hif_max_num_receives_reached
310 					(scn, ce_state->receive_count);
311 
312 	/* Set receive_count to MAX_NUM_OF_RECEIVES when this count goes
313 	 * beyond MAX_NUM_OF_RECEIVES, to avoid a NAPI bucket calculation
314 	 * issue. This can happen in fast path handling as processing
315 	 * happens in batches.
316 	 */
317 	if (rxpkt_thresh_reached)
318 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
319 
320 	yield =  time_limit_reached || rxpkt_thresh_reached;
321 
322 	if (yield &&
323 	    ce_state->htt_rx_data &&
324 	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
325 		hif_napi_update_yield_stats(ce_state,
326 					    time_limit_reached,
327 					    rxpkt_thresh_reached);
328 	}
329 
330 	return yield;
331 }
332 qdf_export_symbol(hif_ce_service_should_yield);
333 #endif
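
/*
 * Illustrative sketch (not part of the driver): how a CE service loop might
 * consult hif_ce_service_should_yield() to bound its run time. The helper
 * process_one_completion() is hypothetical.
 *
 *	do {
 *		process_one_completion(ce_state);
 *		ce_state->receive_count++;
 *	} while (!hif_ce_service_should_yield(scn, ce_state));
 */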
334 
335 /*
336  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
337  * The caller takes responsibility for any needed locking.
338  */
339 
340 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
341 				   u32 ctrl_addr, unsigned int write_index)
342 {
343 	if (hif_ce_war1) {
344 		void __iomem *indicator_addr;
345 
346 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
347 
348 		if (!war1_allow_sleep
349 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
350 			hif_write32_mb(scn, indicator_addr,
351 				       (CDC_WAR_MAGIC_STR | write_index));
352 		} else {
353 			unsigned long irq_flags;
354 
355 			local_irq_save(irq_flags);
356 			hif_write32_mb(scn, indicator_addr, 1);
357 
358 			/*
359 			 * PCIE write waits for ACK in IPQ8K, there is no
360 			 * need to read back value.
361 			 */
362 			(void)hif_read32_mb(scn, indicator_addr);
363 			/* conservative */
364 			(void)hif_read32_mb(scn, indicator_addr);
365 
366 			CE_SRC_RING_WRITE_IDX_SET(scn,
367 						  ctrl_addr, write_index);
368 
369 			hif_write32_mb(scn, indicator_addr, 0);
370 			local_irq_restore(irq_flags);
371 		}
372 	} else {
373 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
374 	}
375 }
376 
377 qdf_export_symbol(war_ce_src_ring_write_idx_set);
378 
379 QDF_STATUS
380 ce_send(struct CE_handle *copyeng,
381 		void *per_transfer_context,
382 		qdf_dma_addr_t buffer,
383 		uint32_t nbytes,
384 		uint32_t transfer_id,
385 		uint32_t flags,
386 		uint32_t user_flag)
387 {
388 	struct CE_state *CE_state = (struct CE_state *)copyeng;
389 	QDF_STATUS status;
390 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
391 
392 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
393 	status = hif_state->ce_services->ce_send_nolock(copyeng,
394 			per_transfer_context, buffer, nbytes,
395 			transfer_id, flags, user_flag);
396 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
397 
398 	return status;
399 }
400 qdf_export_symbol(ce_send);
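
/*
 * Illustrative sketch (not part of the driver): posting one already
 * DMA-mapped buffer through ce_send(). The variables ce_hdl, my_nbuf and
 * my_transfer_id are hypothetical.
 *
 *	QDF_STATUS status;
 *
 *	status = ce_send(ce_hdl, my_nbuf,
 *			 qdf_nbuf_get_frag_paddr(my_nbuf, 0),
 *			 qdf_nbuf_len(my_nbuf), my_transfer_id, 0, 0);
 *	if (status != QDF_STATUS_SUCCESS)
 *		handle_ring_full(my_nbuf);	(hypothetical error path)
 */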
401 
402 unsigned int ce_sendlist_sizeof(void)
403 {
404 	return sizeof(struct ce_sendlist);
405 }
406 
407 void ce_sendlist_init(struct ce_sendlist *sendlist)
408 {
409 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
410 
411 	sl->num_items = 0;
412 }
413 
414 QDF_STATUS
415 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
416 					qdf_dma_addr_t buffer,
417 					uint32_t nbytes,
418 					uint32_t flags,
419 					uint32_t user_flags)
420 {
421 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
422 	unsigned int num_items = sl->num_items;
423 	struct ce_sendlist_item *item;
424 
425 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
426 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
427 		return QDF_STATUS_E_RESOURCES;
428 	}
429 
430 	item = &sl->item[num_items];
431 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
432 	item->data = buffer;
433 	item->u.nbytes = nbytes;
434 	item->flags = flags;
435 	item->user_flags = user_flags;
436 	sl->num_items = num_items + 1;
437 	return QDF_STATUS_SUCCESS;
438 }
439 
440 QDF_STATUS
441 ce_sendlist_send(struct CE_handle *copyeng,
442 		 void *per_transfer_context,
443 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
444 {
445 	struct CE_state *CE_state = (struct CE_state *)copyeng;
446 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
447 
448 	return hif_state->ce_services->ce_sendlist_send(copyeng,
449 			per_transfer_context, sendlist, transfer_id);
450 }
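
/*
 * Illustrative sketch (not part of the driver): building and submitting a
 * sendlist with the helpers above. The variables ce_hdl, my_nbuf and
 * my_transfer_id are hypothetical.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	if (ce_sendlist_buf_add(&sendlist,
 *				qdf_nbuf_get_frag_paddr(my_nbuf, 0),
 *				qdf_nbuf_len(my_nbuf), 0, 0) ==
 *	    QDF_STATUS_SUCCESS)
 *		ce_sendlist_send(ce_hdl, my_nbuf, &sendlist, my_transfer_id);
 */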
451 
452 #ifndef AH_NEED_TX_DATA_SWAP
453 #define AH_NEED_TX_DATA_SWAP 0
454 #endif
455 
456 /**
457  * ce_batch_send() - sends bunch of msdus at once
458  * @ce_tx_hdl : pointer to CE handle
459  * @msdu : list of msdus to be sent
460  * @transfer_id : transfer id
461  * @len : Downloaded length
462  * @sendhead : sendhead
463  *
464  * Assumption: Called with an array of MSDUs
465  * Function:
466  * For each msdu in the array
467  * 1. Send each msdu
468  * 2. Increment write index accordingly.
469  *
470  * Return: list of msdus not sent
471  */
472 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
473 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
474 {
475 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
476 	struct hif_softc *scn = ce_state->scn;
477 	struct CE_ring_state *src_ring = ce_state->src_ring;
478 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
479 	/*  A_target_id_t targid = TARGID(scn);*/
480 
481 	uint32_t nentries_mask = src_ring->nentries_mask;
482 	uint32_t sw_index, write_index;
483 
484 	struct CE_src_desc *src_desc_base =
485 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
486 	uint32_t *src_desc;
487 
488 	struct CE_src_desc lsrc_desc = {0};
489 	int deltacount = 0;
490 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
491 
492 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
493 	sw_index = src_ring->sw_index;
494 	write_index = src_ring->write_index;
495 
496 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
497 
498 	while (msdu) {
499 		tempnext = qdf_nbuf_next(msdu);
500 
501 		if (deltacount < 2) {
502 			if (sendhead)
503 				return msdu;
504 			HIF_ERROR("%s: Out of descriptors", __func__);
505 			src_ring->write_index = write_index;
506 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
507 					write_index);
508 
509 			sw_index = src_ring->sw_index;
510 			write_index = src_ring->write_index;
511 
512 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
513 					sw_index-1);
514 			if (!freelist) {
515 				freelist = msdu;
516 				hfreelist = msdu;
517 			} else {
518 				qdf_nbuf_set_next(freelist, msdu);
519 				freelist = msdu;
520 			}
521 			qdf_nbuf_set_next(msdu, NULL);
522 			msdu = tempnext;
523 			continue;
524 		}
525 
526 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
527 				write_index);
528 
529 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
530 
531 		lsrc_desc.meta_data = transfer_id;
532 		if (len  > msdu->len)
533 			len =  msdu->len;
534 		lsrc_desc.nbytes = len;
535 		/*  Data packet is a byte stream, so disable byte swap */
536 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
537 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
538 
539 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
540 
541 
542 		src_ring->per_transfer_context[write_index] = msdu;
543 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
544 
545 		if (sendhead)
546 			break;
547 		qdf_nbuf_set_next(msdu, NULL);
548 		msdu = tempnext;
549 
550 	}
551 
552 
553 	src_ring->write_index = write_index;
554 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
555 
556 	return hfreelist;
557 }
558 
559 /**
560  * ce_update_tx_ring() - Advance sw index.
561  * @ce_tx_hdl : pointer to CE handle
562  * @num_htt_cmpls : htt completions received.
563  *
564  * Function:
565  * Increment the value of sw index of src ring
566  * according to number of htt completions
567  * received.
568  *
569  * Return: void
570  */
571 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
572 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
573 {
574 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
575 	struct CE_ring_state *src_ring = ce_state->src_ring;
576 	uint32_t nentries_mask = src_ring->nentries_mask;
577 	/*
578 	 * Advance the s/w index:
579 	 * This effectively simulates completing the CE ring descriptors
580 	 */
581 	src_ring->sw_index =
582 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
583 				num_htt_cmpls);
584 }
585 #else
586 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
587 {}
588 #endif
589 
590 /**
591  * ce_send_single() - send a single msdu
592  * @ce_tx_hdl : pointer to CE handle
593  * @msdu : msdu to be sent
594  * @transfer_id : transfer id
595  * @len : Downloaded length
596  *
597  * Function:
598  * 1. Send one msdu
599  * 2. Increment write index of src ring accordingly.
600  *
601  * Return: QDF_STATUS: CE sent status
602  */
603 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
604 			  uint32_t transfer_id, u_int32_t len)
605 {
606 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
607 	struct hif_softc *scn = ce_state->scn;
608 	struct CE_ring_state *src_ring = ce_state->src_ring;
609 	uint32_t ctrl_addr = ce_state->ctrl_addr;
610 	/*A_target_id_t targid = TARGID(scn);*/
611 
612 	uint32_t nentries_mask = src_ring->nentries_mask;
613 	uint32_t sw_index, write_index;
614 
615 	struct CE_src_desc *src_desc_base =
616 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
617 	uint32_t *src_desc;
618 
619 	struct CE_src_desc lsrc_desc = {0};
620 	enum hif_ce_event_type event_type;
621 
622 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
623 	sw_index = src_ring->sw_index;
624 	write_index = src_ring->write_index;
625 
626 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
627 					sw_index-1) < 1)) {
628 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
629 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
630 			  write_index, sw_index);
631 		return QDF_STATUS_E_RESOURCES;
632 	}
633 
634 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
635 
636 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
637 
638 	lsrc_desc.meta_data = transfer_id;
639 	lsrc_desc.nbytes = len;
640 	/*  Data packet is a byte stream, so disable byte swap */
641 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
642 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
643 
644 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
645 
646 
647 	src_ring->per_transfer_context[write_index] = msdu;
648 
649 	if (((struct CE_src_desc *)src_desc)->gather)
650 		event_type = HIF_TX_GATHER_DESC_POST;
651 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
652 		event_type = HIF_TX_DESC_SOFTWARE_POST;
653 	else
654 		event_type = HIF_TX_DESC_POST;
655 
656 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
657 				(union ce_desc *)src_desc, msdu,
658 				write_index, len);
659 
660 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
661 
662 	src_ring->write_index = write_index;
663 
664 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
665 
666 	return QDF_STATUS_SUCCESS;
667 }
668 
669 /**
670  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
671  * @copyeng: copy engine handle
672  * @per_recv_context: virtual address of the nbuf
673  * @buffer: physical address of the nbuf
674  *
675  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
676  */
677 QDF_STATUS
678 ce_recv_buf_enqueue(struct CE_handle *copyeng,
679 		    void *per_recv_context, qdf_dma_addr_t buffer)
680 {
681 	struct CE_state *CE_state = (struct CE_state *)copyeng;
682 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
683 
684 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
685 			per_recv_context, buffer);
686 }
687 qdf_export_symbol(ce_recv_buf_enqueue);
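
/*
 * Illustrative sketch (not part of the driver): replenishing an RX ring by
 * mapping an nbuf and handing its DMA address to ce_recv_buf_enqueue().
 * The variables ce_hdl and buf_sz are hypothetical.
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
 *
 *	if (nbuf &&
 *	    qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE) ==
 *	    QDF_STATUS_SUCCESS)
 *		ce_recv_buf_enqueue(ce_hdl, nbuf,
 *				    QDF_NBUF_CB_PADDR(nbuf));
 */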
688 
689 void
690 ce_send_watermarks_set(struct CE_handle *copyeng,
691 		       unsigned int low_alert_nentries,
692 		       unsigned int high_alert_nentries)
693 {
694 	struct CE_state *CE_state = (struct CE_state *)copyeng;
695 	uint32_t ctrl_addr = CE_state->ctrl_addr;
696 	struct hif_softc *scn = CE_state->scn;
697 
698 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
699 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
700 }
701 
702 void
703 ce_recv_watermarks_set(struct CE_handle *copyeng,
704 		       unsigned int low_alert_nentries,
705 		       unsigned int high_alert_nentries)
706 {
707 	struct CE_state *CE_state = (struct CE_state *)copyeng;
708 	uint32_t ctrl_addr = CE_state->ctrl_addr;
709 	struct hif_softc *scn = CE_state->scn;
710 
711 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
712 				low_alert_nentries);
713 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
714 				high_alert_nentries);
715 }
716 
717 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
718 {
719 	struct CE_state *CE_state = (struct CE_state *)copyeng;
720 	struct CE_ring_state *src_ring = CE_state->src_ring;
721 	unsigned int nentries_mask = src_ring->nentries_mask;
722 	unsigned int sw_index;
723 	unsigned int write_index;
724 
725 	qdf_spin_lock(&CE_state->ce_index_lock);
726 	sw_index = src_ring->sw_index;
727 	write_index = src_ring->write_index;
728 	qdf_spin_unlock(&CE_state->ce_index_lock);
729 
730 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
731 }
732 
733 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
734 {
735 	struct CE_state *CE_state = (struct CE_state *)copyeng;
736 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
737 	unsigned int nentries_mask = dest_ring->nentries_mask;
738 	unsigned int sw_index;
739 	unsigned int write_index;
740 
741 	qdf_spin_lock(&CE_state->ce_index_lock);
742 	sw_index = dest_ring->sw_index;
743 	write_index = dest_ring->write_index;
744 	qdf_spin_unlock(&CE_state->ce_index_lock);
745 
746 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
747 }
748 
749 /*
750  * Locked wrapper around ce_completed_recv_next_nolock().
751  * The CE index lock is taken internally, so no caller locking is needed.
752  */
753 QDF_STATUS
754 ce_completed_recv_next(struct CE_handle *copyeng,
755 		       void **per_CE_contextp,
756 		       void **per_transfer_contextp,
757 		       qdf_dma_addr_t *bufferp,
758 		       unsigned int *nbytesp,
759 		       unsigned int *transfer_idp, unsigned int *flagsp)
760 {
761 	struct CE_state *CE_state = (struct CE_state *)copyeng;
762 	QDF_STATUS status;
763 	struct hif_softc *scn = CE_state->scn;
764 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
765 	struct ce_ops *ce_services;
766 
767 	ce_services = hif_state->ce_services;
768 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
769 	status =
770 		ce_services->ce_completed_recv_next_nolock(CE_state,
771 				per_CE_contextp, per_transfer_contextp, bufferp,
772 					      nbytesp, transfer_idp, flagsp);
773 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
774 
775 	return status;
776 }
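
/*
 * Illustrative sketch (not part of the driver): draining completed receive
 * descriptors with ce_completed_recv_next(). The per-buffer handler
 * process_rx_buffer() is hypothetical.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ce_completed_recv_next(copyeng, &ce_ctx, &xfer_ctx, &paddr,
 *				      &nbytes, &id, &flags) ==
 *	       QDF_STATUS_SUCCESS)
 *		process_rx_buffer(xfer_ctx, nbytes);
 */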
777 
778 QDF_STATUS
779 ce_revoke_recv_next(struct CE_handle *copyeng,
780 		    void **per_CE_contextp,
781 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
782 {
783 	struct CE_state *CE_state = (struct CE_state *)copyeng;
784 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
785 
786 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
787 			per_CE_contextp, per_transfer_contextp, bufferp);
788 }
789 
790 QDF_STATUS
791 ce_cancel_send_next(struct CE_handle *copyeng,
792 		void **per_CE_contextp,
793 		void **per_transfer_contextp,
794 		qdf_dma_addr_t *bufferp,
795 		unsigned int *nbytesp,
796 		unsigned int *transfer_idp,
797 		uint32_t *toeplitz_hash_result)
798 {
799 	struct CE_state *CE_state = (struct CE_state *)copyeng;
800 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
801 
802 	return hif_state->ce_services->ce_cancel_send_next
803 		(copyeng, per_CE_contextp, per_transfer_contextp,
804 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
805 }
806 qdf_export_symbol(ce_cancel_send_next);
807 
808 QDF_STATUS
809 ce_completed_send_next(struct CE_handle *copyeng,
810 		       void **per_CE_contextp,
811 		       void **per_transfer_contextp,
812 		       qdf_dma_addr_t *bufferp,
813 		       unsigned int *nbytesp,
814 		       unsigned int *transfer_idp,
815 		       unsigned int *sw_idx,
816 		       unsigned int *hw_idx,
817 		       unsigned int *toeplitz_hash_result)
818 {
819 	struct CE_state *CE_state = (struct CE_state *)copyeng;
820 	struct hif_softc *scn = CE_state->scn;
821 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
822 	struct ce_ops *ce_services;
823 	QDF_STATUS status;
824 
825 	ce_services = hif_state->ce_services;
826 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
827 	status =
828 		ce_services->ce_completed_send_next_nolock(CE_state,
829 					per_CE_contextp, per_transfer_contextp,
830 					bufferp, nbytesp, transfer_idp, sw_idx,
831 					      hw_idx, toeplitz_hash_result);
832 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
833 
834 	return status;
835 }
836 
837 #ifdef ATH_11AC_TXCOMPACT
838 /* CE engine descriptor reap
839  * Similar to ce_per_engine_service(); the only difference is that
840  * ce_per_engine_service() does both receive processing and reaping of
841  * completed descriptors, while this function only reaps Tx completion
842  * descriptors. It is called from the threshold reap poll routine
843  * hif_send_complete_check(), so it must not contain any receive
844  * functionality.
845  */
846 
847 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
848 {
849 	void *CE_context;
850 	void *transfer_context;
851 	qdf_dma_addr_t buf;
852 	unsigned int nbytes;
853 	unsigned int id;
854 	unsigned int sw_idx, hw_idx;
855 	uint32_t toeplitz_hash_result;
856 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
857 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
858 
859 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
860 		return;
861 
862 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
863 			NULL, NULL, 0, 0);
864 
865 	/* Since this function is called from both user context and
866 	 * tasklet context, the spinlock has to disable bottom halves.
867 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
868 	 * enabled in TX polling mode. If this is not the case, more
869 	 * bottom-half spinlock changes are needed. Due to data path
870 	 * performance concerns, after internal discussion we decided
871 	 * to make the minimum change, i.e., only address the issue seen
872 	 * in this function. The possible negative effect of this minimum
873 	 * change is that, if in the future some other function is also
874 	 * opened up for use from user context, those cases will need to
875 	 * change spin_lock to spin_lock_bh as well.
876 	 */
877 
878 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
879 
880 	if (CE_state->send_cb) {
881 		{
882 			struct ce_ops *ce_services = hif_state->ce_services;
883 			/* Pop completed send buffers and call the
884 			 * registered send callback for each
885 			 */
886 			while (ce_services->ce_completed_send_next_nolock
887 				 (CE_state, &CE_context,
888 				  &transfer_context, &buf,
889 				  &nbytes, &id, &sw_idx, &hw_idx,
890 				  &toeplitz_hash_result) ==
891 				  QDF_STATUS_SUCCESS) {
892 				if (ce_id != CE_HTT_H2T_MSG) {
893 					qdf_spin_unlock_bh(
894 						&CE_state->ce_index_lock);
895 					CE_state->send_cb(
896 						(struct CE_handle *)
897 						CE_state, CE_context,
898 						transfer_context, buf,
899 						nbytes, id, sw_idx, hw_idx,
900 						toeplitz_hash_result);
901 					qdf_spin_lock_bh(
902 						&CE_state->ce_index_lock);
903 				} else {
904 					struct HIF_CE_pipe_info *pipe_info =
905 						(struct HIF_CE_pipe_info *)
906 						CE_context;
907 
908 					qdf_spin_lock_bh(&pipe_info->
909 						 completion_freeq_lock);
910 					pipe_info->num_sends_allowed++;
911 					qdf_spin_unlock_bh(&pipe_info->
912 						   completion_freeq_lock);
913 				}
914 			}
915 		}
916 	}
917 
918 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
919 
920 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
921 			NULL, NULL, 0, 0);
922 	Q_TARGET_ACCESS_END(scn);
923 }
924 
925 #endif /*ATH_11AC_TXCOMPACT */
926 
927 /*
928  * ce_engine_service_reg:
929  *
930  * Called from ce_per_engine_service and goes through the regular interrupt
931  * handling that does not involve the WLAN fast path feature.
932  *
933  * Returns void
934  */
935 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
936 {
937 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
938 	uint32_t ctrl_addr = CE_state->ctrl_addr;
939 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
940 	void *CE_context;
941 	void *transfer_context;
942 	qdf_dma_addr_t buf;
943 	unsigned int nbytes;
944 	unsigned int id;
945 	unsigned int flags;
946 	unsigned int more_comp_cnt = 0;
947 	unsigned int more_snd_comp_cnt = 0;
948 	unsigned int sw_idx, hw_idx;
949 	uint32_t toeplitz_hash_result;
950 	uint32_t mode = hif_get_conparam(scn);
951 
952 more_completions:
953 	if (CE_state->recv_cb) {
954 
955 		/* Pop completed recv buffers and call
956 		 * the registered recv callback for each
957 		 */
958 		while (hif_state->ce_services->ce_completed_recv_next_nolock
959 				(CE_state, &CE_context, &transfer_context,
960 				&buf, &nbytes, &id, &flags) ==
961 				QDF_STATUS_SUCCESS) {
962 			qdf_spin_unlock(&CE_state->ce_index_lock);
963 			CE_state->recv_cb((struct CE_handle *)CE_state,
964 					  CE_context, transfer_context, buf,
965 					  nbytes, id, flags);
966 
967 			qdf_spin_lock(&CE_state->ce_index_lock);
968 			/*
969 			 * EV #112693 -
970 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
971 			 * BSoD_0x133 occurred in VHT80 UDP_DL
972 			 * Break out of the DPC by force if the number of
973 			 * loops in hif_pci_ce_recv_data reaches
974 			 * MAX_NUM_OF_RECEIVES, to avoid spending too much
975 			 * time in the DPC for each interrupt. Schedule
976 			 * another DPC to avoid data loss if we had taken a
977 			 * force-break action. This currently applies to
978 			 * Windows OS only; Linux/Mac OS can extend it to
979 			 * their platforms if necessary.
980 			 */
981 
982 			/* Break the receive processes by
983 			 * force if force_break set up
984 			 */
985 			if (qdf_unlikely(CE_state->force_break)) {
986 				qdf_atomic_set(&CE_state->rx_pending, 1);
987 				return;
988 			}
989 		}
990 	}
991 
992 	/*
993 	 * Attention: we may experience a potential infinite loop in the
994 	 * while loop below during a send stress test.
995 	 * Resolve it the same way as the receive case (refer to EV #112693).
996 	 */
997 
998 	if (CE_state->send_cb) {
999 		/* Pop completed send buffers and call
1000 		 * the registered send callback for each
1001 		 */
1002 
1003 #ifdef ATH_11AC_TXCOMPACT
1004 		while (hif_state->ce_services->ce_completed_send_next_nolock
1005 			 (CE_state, &CE_context,
1006 			 &transfer_context, &buf, &nbytes,
1007 			 &id, &sw_idx, &hw_idx,
1008 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1009 
1010 			if (CE_id != CE_HTT_H2T_MSG ||
1011 			    QDF_IS_EPPING_ENABLED(mode)) {
1012 				qdf_spin_unlock(&CE_state->ce_index_lock);
1013 				CE_state->send_cb((struct CE_handle *)CE_state,
1014 						  CE_context, transfer_context,
1015 						  buf, nbytes, id, sw_idx,
1016 						  hw_idx, toeplitz_hash_result);
1017 				qdf_spin_lock(&CE_state->ce_index_lock);
1018 			} else {
1019 				struct HIF_CE_pipe_info *pipe_info =
1020 					(struct HIF_CE_pipe_info *)CE_context;
1021 
1022 				qdf_spin_lock_bh(&pipe_info->
1023 					      completion_freeq_lock);
1024 				pipe_info->num_sends_allowed++;
1025 				qdf_spin_unlock_bh(&pipe_info->
1026 						completion_freeq_lock);
1027 			}
1028 		}
1029 #else                           /*ATH_11AC_TXCOMPACT */
1030 		while (hif_state->ce_services->ce_completed_send_next_nolock
1031 			 (CE_state, &CE_context,
1032 			  &transfer_context, &buf, &nbytes,
1033 			  &id, &sw_idx, &hw_idx,
1034 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1035 			qdf_spin_unlock(&CE_state->ce_index_lock);
1036 			CE_state->send_cb((struct CE_handle *)CE_state,
1037 				  CE_context, transfer_context, buf,
1038 				  nbytes, id, sw_idx, hw_idx,
1039 				  toeplitz_hash_result);
1040 			qdf_spin_lock(&CE_state->ce_index_lock);
1041 		}
1042 #endif /*ATH_11AC_TXCOMPACT */
1043 	}
1044 
1045 more_watermarks:
1046 	if (CE_state->misc_cbs) {
1047 		if (CE_state->watermark_cb &&
1048 				hif_state->ce_services->watermark_int(CE_state,
1049 					&flags)) {
1050 			qdf_spin_unlock(&CE_state->ce_index_lock);
1051 			/* Convert HW IS bits to software flags */
1052 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1053 					CE_state->wm_context, flags);
1054 			qdf_spin_lock(&CE_state->ce_index_lock);
1055 		}
1056 	}
1057 
1058 	/*
1059 	 * Clear the misc interrupts (watermark) that were handled above,
1060 	 * and that will be checked again below.
1061 	 * Clear and check for copy-complete interrupts again, just in case
1062 	 * more copy completions happened while the misc interrupts were being
1063 	 * handled.
1064 	 */
1065 	if (!ce_srng_based(scn)) {
1066 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1067 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1068 					   CE_WATERMARK_MASK |
1069 					   HOST_IS_COPY_COMPLETE_MASK);
1070 		} else {
1071 			qdf_atomic_set(&CE_state->rx_pending, 0);
1072 			hif_err_rl("%s: target access is not allowed",
1073 				   __func__);
1074 			return;
1075 		}
1076 	}
1077 
1078 	/*
1079 	 * Now that per-engine interrupts are cleared, verify that
1080 	 * no recv interrupts arrive while processing send interrupts,
1081 	 * and no recv or send interrupts happened while processing
1082 	 * misc interrupts.Go back and check again.Keep checking until
1083 	 * misc interrupts. Go back and check again. Keep checking until
1084 	 */
1085 	if (CE_state->recv_cb &&
1086 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1087 				CE_state)) {
1088 		if (QDF_IS_EPPING_ENABLED(mode) ||
1089 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1090 			goto more_completions;
1091 		} else {
1092 			if (!ce_srng_based(scn)) {
1093 				HIF_ERROR(
1094 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1095 					__func__,
1096 					CE_state->dest_ring->nentries_mask,
1097 					CE_state->dest_ring->sw_index,
1098 					CE_DEST_RING_READ_IDX_GET(scn,
1099 							  CE_state->ctrl_addr));
1100 			}
1101 		}
1102 	}
1103 
1104 	if (CE_state->send_cb &&
1105 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1106 				CE_state)) {
1107 		if (QDF_IS_EPPING_ENABLED(mode) ||
1108 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1109 			goto more_completions;
1110 		} else {
1111 			if (!ce_srng_based(scn)) {
1112 				HIF_ERROR(
1113 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1114 					__func__,
1115 					CE_state->src_ring->nentries_mask,
1116 					CE_state->src_ring->sw_index,
1117 					CE_SRC_RING_READ_IDX_GET(scn,
1118 							 CE_state->ctrl_addr));
1119 			}
1120 		}
1121 	}
1122 
1123 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1124 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1125 			goto more_watermarks;
1126 	}
1127 
1128 	qdf_atomic_set(&CE_state->rx_pending, 0);
1129 }
1130 
1131 /*
1132  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1133  *
1134  * Invokes registered callbacks for recv_complete,
1135  * send_complete, and watermarks.
1136  *
1137  * Returns: number of messages processed
1138  */
1139 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1140 {
1141 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1142 
1143 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1144 		return CE_state->receive_count;
1145 
1146 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1147 		HIF_ERROR("[premature rc=0]");
1148 		return 0; /* no work done */
1149 	}
1150 
1151 	/* Clear force_break flag and re-initialize receive_count to 0 */
1152 	CE_state->receive_count = 0;
1153 	CE_state->force_break = 0;
1154 	CE_state->ce_service_start_time = sched_clock();
1155 	CE_state->ce_service_yield_time =
1156 		CE_state->ce_service_start_time +
1157 		hif_get_ce_service_max_yield_time(
1158 			(struct hif_opaque_softc *)scn);
1159 
1160 	qdf_spin_lock(&CE_state->ce_index_lock);
1161 
1162 	CE_state->service(scn, CE_id);
1163 
1164 	qdf_spin_unlock(&CE_state->ce_index_lock);
1165 
1166 	if (Q_TARGET_ACCESS_END(scn) < 0)
1167 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
1168 	return CE_state->receive_count;
1169 }
1170 qdf_export_symbol(ce_per_engine_service);
1171 
1172 /*
1173  * Handler for per-engine interrupts on ALL active CEs.
1174  * This is used in cases where the system is sharing a
1175  * single interrupt for all CEs
1176  */
1177 
1178 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1179 {
1180 	int CE_id;
1181 	uint32_t intr_summary;
1182 
1183 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1184 		return;
1185 
1186 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1187 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1188 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1189 
1190 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1191 				qdf_atomic_set(&CE_state->rx_pending, 0);
1192 				ce_per_engine_service(scn, CE_id);
1193 			}
1194 		}
1195 
1196 		Q_TARGET_ACCESS_END(scn);
1197 		return;
1198 	}
1199 
1200 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1201 
1202 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1203 		if (intr_summary & (1 << CE_id))
1204 			intr_summary &= ~(1 << CE_id);
1205 		else
1206 			continue;       /* no intr pending on this CE */
1207 
1208 		ce_per_engine_service(scn, CE_id);
1209 	}
1210 
1211 	Q_TARGET_ACCESS_END(scn);
1212 }
1213 
1214 /* Iterate the CE_state list and disable the compl interrupt
1215  * if it has been registered already.
1216  */
1217 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1218 {
1219 	int CE_id;
1220 
1221 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1222 		return;
1223 
1224 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1225 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1226 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1227 
1228 		/* if the interrupt is currently enabled, disable it */
1229 		if (!CE_state->disable_copy_compl_intr
1230 		    && (CE_state->send_cb || CE_state->recv_cb))
1231 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1232 
1233 		if (CE_state->watermark_cb)
1234 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1235 	}
1236 	Q_TARGET_ACCESS_END(scn);
1237 }
1238 
1239 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1240 {
1241 	int CE_id;
1242 
1243 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1244 		return;
1245 
1246 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1247 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1248 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1249 
1250 		/*
1251 		 * If the CE is supposed to have copy complete interrupts
1252 		 * enabled (i.e. there a callback registered, and the
1253 		 * "disable" flag is not set), then re-enable the interrupt.
1254 		 */
1255 		if (!CE_state->disable_copy_compl_intr
1256 		    && (CE_state->send_cb || CE_state->recv_cb))
1257 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1258 
1259 		if (CE_state->watermark_cb)
1260 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1261 	}
1262 	Q_TARGET_ACCESS_END(scn);
1263 }
1264 
1265 /**
1266  * ce_send_cb_register(): register completion handler
1267  * @copyeng: CE_state representing the ce we are adding the behavior to
1268  * @fn_ptr: callback that the ce should use when processing tx completions
1269  * @disable_interrupts: whether completion interrupts should be disabled
1270  *
1271  * Caller should guarantee that no transactions are in progress before
1272  * switching the callback function.
1273  *
1274  * Registers the send context before the fn pointer so that if the cb is valid
1275  * the context should be valid.
1276  *
1277  * Beware that currently this function will enable completion interrupts.
1278  */
1279 void
1280 ce_send_cb_register(struct CE_handle *copyeng,
1281 		    ce_send_cb fn_ptr,
1282 		    void *ce_send_context, int disable_interrupts)
1283 {
1284 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1285 	struct hif_softc *scn;
1286 	struct HIF_CE_state *hif_state;
1287 
1288 	if (!CE_state) {
1289 		HIF_ERROR("%s: Error CE state = NULL", __func__);
1290 		return;
1291 	}
1292 	scn = CE_state->scn;
1293 	hif_state = HIF_GET_CE_STATE(scn);
1294 	if (!hif_state) {
1295 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1296 		return;
1297 	}
1298 	CE_state->send_context = ce_send_context;
1299 	CE_state->send_cb = fn_ptr;
1300 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1301 							disable_interrupts);
1302 }
1303 qdf_export_symbol(ce_send_cb_register);
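
/*
 * Illustrative sketch (not part of the driver): registering a TX completion
 * handler for a pipe. The callback my_tx_done_cb and context my_pipe_info
 * are hypothetical; the callback must match the ce_send_cb prototype.
 *
 *	ce_send_cb_register(ce_hdl, my_tx_done_cb, my_pipe_info,
 *			    disable_interrupts);
 */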
1304 
1305 /**
1306  * ce_recv_cb_register(): register completion handler
1307  * @copyeng: CE_state representing the ce we are adding the behavior to
1308  * @fn_ptr: callback that the ce should use when processing rx completions
1309  * @disable_interrupts: whether completion interrupts should be disabled
1310  *
1311  * Registers the send context before the fn pointer so that if the cb is valid
1312  * the context should be valid.
1313  *
1314  * Caller should guarantee that no transactions are in progress before
1315  * switching the callback function.
1316  */
1317 void
1318 ce_recv_cb_register(struct CE_handle *copyeng,
1319 		    CE_recv_cb fn_ptr,
1320 		    void *CE_recv_context, int disable_interrupts)
1321 {
1322 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1323 	struct hif_softc *scn;
1324 	struct HIF_CE_state *hif_state;
1325 
1326 	if (!CE_state) {
1327 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
1328 		return;
1329 	}
1330 	scn = CE_state->scn;
1331 	hif_state = HIF_GET_CE_STATE(scn);
1332 	if (!hif_state) {
1333 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1334 		return;
1335 	}
1336 	CE_state->recv_context = CE_recv_context;
1337 	CE_state->recv_cb = fn_ptr;
1338 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1339 							disable_interrupts);
1340 }
1341 qdf_export_symbol(ce_recv_cb_register);
1342 
1343 /**
1344  * ce_watermark_cb_register(): register completion handler
1345  * @copyeng: CE_state representing the ce we are adding the behavior to
1346  * @fn_ptr: callback that the ce should use when processing watermark events
1347  *
1348  * Caller should guarantee that no watermark events are being processed before
1349  * switching the callback function.
1350  */
1351 void
1352 ce_watermark_cb_register(struct CE_handle *copyeng,
1353 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1354 {
1355 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1356 	struct hif_softc *scn = CE_state->scn;
1357 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1358 
1359 	CE_state->watermark_cb = fn_ptr;
1360 	CE_state->wm_context = CE_wm_context;
1361 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1362 							0);
1363 	if (fn_ptr)
1364 		CE_state->misc_cbs = 1;
1365 }
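
/*
 * Illustrative sketch (not part of the driver): arming source-ring watermark
 * interrupts together with a matching callback. The callback my_wm_cb and
 * context my_wm_ctx are hypothetical.
 *
 *	ce_send_watermarks_set(ce_hdl, low_entries, high_entries);
 *	ce_watermark_cb_register(ce_hdl, my_wm_cb, my_wm_ctx);
 */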
1366 
1367 bool ce_get_rx_pending(struct hif_softc *scn)
1368 {
1369 	int CE_id;
1370 
1371 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1372 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1373 
1374 		if (qdf_atomic_read(&CE_state->rx_pending))
1375 			return true;
1376 	}
1377 
1378 	return false;
1379 }
1380 
1381 /**
1382  * ce_check_rx_pending() - ce_check_rx_pending
1383  * @CE_state: context of the copy engine to check
1384  *
1385  * Return: true if the per_engine_service
1386  *	didn't process all the rx descriptors.
1387  */
1388 bool ce_check_rx_pending(struct CE_state *CE_state)
1389 {
1390 	if (qdf_atomic_read(&CE_state->rx_pending))
1391 		return true;
1392 	else
1393 		return false;
1394 }
1395 qdf_export_symbol(ce_check_rx_pending);
1396 
1397 #ifdef IPA_OFFLOAD
1398 #ifdef QCN7605_SUPPORT
1399 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1400 {
1401 	u_int32_t ctrl_addr = CE_state->ctrl_addr;
1402 	struct hif_softc *scn = CE_state->scn;
1403 	qdf_dma_addr_t wr_index_addr;
1404 
1405 	wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
1406 	return wr_index_addr;
1407 }
1408 #else
1409 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1410 {
1411 	struct hif_softc *scn = CE_state->scn;
1412 	qdf_dma_addr_t wr_index_addr;
1413 
1414 	wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
1415 			SR_WR_INDEX_ADDRESS;
1416 	return wr_index_addr;
1417 }
1418 #endif
1419 
1420 /**
1421  * ce_ipa_get_resource() - get uc resource on copyengine
1422  * @ce: copyengine context
1423  * @ce_sr: copyengine source ring resource info
1424  * @ce_sr_ring_size: copyengine source ring size
1425  * @ce_reg_paddr: copyengine register physical address
1426  *
1427  * Copy engine should release resources to the micro controller.
1428  * The micro controller needs:
1429  *  - Copy engine source descriptor base address
1430  *  - Copy engine source descriptor size
1431  *  - PCI BAR address to access the copy engine register
1432  *
1433  * Return: None
1434  */
1435 void ce_ipa_get_resource(struct CE_handle *ce,
1436 			 qdf_shared_mem_t **ce_sr,
1437 			 uint32_t *ce_sr_ring_size,
1438 			 qdf_dma_addr_t *ce_reg_paddr)
1439 {
1440 	struct CE_state *CE_state = (struct CE_state *)ce;
1441 	uint32_t ring_loop;
1442 	struct CE_src_desc *ce_desc;
1443 	qdf_dma_addr_t phy_mem_base;
1444 	struct hif_softc *scn = CE_state->scn;
1445 
1446 	if (CE_UNUSED == CE_state->state) {
1447 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1448 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1449 		*ce_sr_ring_size = 0;
1450 		return;
1451 	}
1452 
1453 	/* Update default value for descriptor */
1454 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1455 	     ring_loop++) {
1456 		ce_desc = (struct CE_src_desc *)
1457 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1458 			   ring_loop * (sizeof(struct CE_src_desc)));
1459 		CE_IPA_RING_INIT(ce_desc);
1460 	}
1461 
1462 	/* Get BAR address */
1463 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1464 
1465 	*ce_sr = CE_state->scn->ipa_ce_ring;
1466 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1467 		sizeof(struct CE_src_desc));
1468 	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);
1469 
1470 }
1471 
1472 #endif /* IPA_OFFLOAD */
1473 
1474 #ifdef HIF_CE_DEBUG_DATA_BUF
1475 /**
1476  * hif_dump_desc_data_buf() - record ce descriptor events
1477  * @buf: buffer to copy to
1478  * @pos: Current position till which the buf is filled
1479  * @data: Data to be copied
1480  * @data_len: Length of the data to be copied
1481  */
1482 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1483 					uint8_t *data, uint32_t data_len)
1484 {
1485 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1486 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1487 
1488 	if ((data_len > 0) && data) {
1489 		if (data_len < 16) {
1490 			hex_dump_to_buffer(data,
1491 						CE_DEBUG_DATA_PER_ROW,
1492 						16, 1, buf + pos,
1493 						(ssize_t)PAGE_SIZE - pos,
1494 						false);
1495 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1496 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1497 		} else {
1498 			uint32_t rows = (data_len / 16) + 1;
1499 			uint32_t row = 0;
1500 
1501 			for (row = 0; row < rows; row++) {
1502 				hex_dump_to_buffer(data + (row * 16),
1503 							CE_DEBUG_DATA_PER_ROW,
1504 							16, 1, buf + pos,
1505 							(ssize_t)PAGE_SIZE
1506 							- pos, false);
1507 				pos +=
1508 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1509 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1510 						"\n");
1511 			}
1512 		}
1513 	}
1514 
1515 	return pos;
1516 }
1517 #endif
1518 
1519 /*
1520  * Note: For MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) check is needed
1521  * for the definitions below as well
1522  */
1523 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1524 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1525 {
1526 	switch (type) {
1527 	case HIF_RX_DESC_POST:
1528 		return "HIF_RX_DESC_POST";
1529 	case HIF_RX_DESC_COMPLETION:
1530 		return "HIF_RX_DESC_COMPLETION";
1531 	case HIF_TX_GATHER_DESC_POST:
1532 		return "HIF_TX_GATHER_DESC_POST";
1533 	case HIF_TX_DESC_POST:
1534 		return "HIF_TX_DESC_POST";
1535 	case HIF_TX_DESC_SOFTWARE_POST:
1536 		return "HIF_TX_DESC_SOFTWARE_POST";
1537 	case HIF_TX_DESC_COMPLETION:
1538 		return "HIF_TX_DESC_COMPLETION";
1539 	case FAST_RX_WRITE_INDEX_UPDATE:
1540 		return "FAST_RX_WRITE_INDEX_UPDATE";
1541 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1542 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1543 	case FAST_TX_WRITE_INDEX_UPDATE:
1544 		return "FAST_TX_WRITE_INDEX_UPDATE";
1545 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1546 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1547 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1548 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1549 	case RESUME_WRITE_INDEX_UPDATE:
1550 		return "RESUME_WRITE_INDEX_UPDATE";
1551 	case HIF_IRQ_EVENT:
1552 		return "HIF_IRQ_EVENT";
1553 	case HIF_CE_TASKLET_ENTRY:
1554 		return "HIF_CE_TASKLET_ENTRY";
1555 	case HIF_CE_TASKLET_RESCHEDULE:
1556 		return "HIF_CE_TASKLET_RESCHEDULE";
1557 	case HIF_CE_TASKLET_EXIT:
1558 		return "HIF_CE_TASKLET_EXIT";
1559 	case HIF_CE_REAP_ENTRY:
1560 		return "HIF_CE_REAP_ENTRY";
1561 	case HIF_CE_REAP_EXIT:
1562 		return "HIF_CE_REAP_EXIT";
1563 	case NAPI_SCHEDULE:
1564 		return "NAPI_SCHEDULE";
1565 	case NAPI_POLL_ENTER:
1566 		return "NAPI_POLL_ENTER";
1567 	case NAPI_COMPLETE:
1568 		return "NAPI_COMPLETE";
1569 	case NAPI_POLL_EXIT:
1570 		return "NAPI_POLL_EXIT";
1571 	case HIF_RX_NBUF_ALLOC_FAILURE:
1572 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1573 	case HIF_RX_NBUF_MAP_FAILURE:
1574 		return "HIF_RX_NBUF_MAP_FAILURE";
1575 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1576 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1577 	default:
1578 		return "invalid";
1579 	}
1580 }
1581 
1582 /**
1583  * hif_dump_desc_event() - dump the selected ce descriptor event
1584  * @scn: hif context
1585  * @buf: buffer into which the event record is printed
1586  * Return: number of characters written to @buf, or a negative error code
1587  */
1588 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1589 {
1590 	struct hif_ce_desc_event *event;
1591 	uint64_t secs, usecs;
1592 	ssize_t len = 0;
1593 	struct ce_desc_hist *ce_hist = NULL;
1594 	struct hif_ce_desc_event *hist_ev = NULL;
1595 
1596 	if (!scn)
1597 		return -EINVAL;
1598 
1599 	ce_hist = &scn->hif_ce_desc_hist;
1600 
1601 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1602 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1603 		qdf_print("Invalid values");
1604 		return -EINVAL;
1605 	}
1606 
1607 	hist_ev =
1608 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1609 
1610 	if (!hist_ev) {
1611 		qdf_print("Low Memory");
1612 		return -EINVAL;
1613 	}
1614 
1615 	event = &hist_ev[ce_hist->hist_index];
1616 
1617 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1618 
1619 	len += snprintf(buf, PAGE_SIZE - len,
1620 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1621 			secs, usecs, ce_hist->hist_id,
1622 			ce_event_type_to_str(event->type),
1623 			event->index, event->memory);
1624 #ifdef HIF_CE_DEBUG_DATA_BUF
1625 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
1626 			event->actual_data_len);
1627 #endif
1628 
1629 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1630 
1631 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1632 				16, 1, buf + len,
1633 				(ssize_t)PAGE_SIZE - len, false);
1634 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1635 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1636 
1637 #ifdef HIF_CE_DEBUG_DATA_BUF
1638 	if (ce_hist->data_enable[ce_hist->hist_id])
1639 		len = hif_dump_desc_data_buf(buf, len, event->data,
1640 						(event->actual_data_len <
1641 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1642 						event->actual_data_len :
1643 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1644 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1645 
1646 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1647 
1648 	return len;
1649 }
1650 
1651 /*
1652  * hif_input_desc_trace_buf_index() -
1653  * API to set the CE id and CE debug storage buffer index
1654  *
1655  * @scn: hif context
1656  * @buf: data obtained from the user
1657  * @size: size of the user input
1658  *
1659  * Return: total length consumed on success
1660  */
1661 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1662 					const char *buf, size_t size)
1663 {
1664 	struct ce_desc_hist *ce_hist = NULL;
1665 
1666 	if (!scn)
1667 		return -EINVAL;
1668 
1669 	ce_hist = &scn->hif_ce_desc_hist;
1670 
1671 	if (!size) {
1672 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1673 		return -EINVAL;
1674 	}
1675 
1676 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1677 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1678 		qdf_nofl_err("%s: Invalid input value.", __func__);
1679 		return -EINVAL;
1680 	}
1681 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1682 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1683 		qdf_print("Invalid values");
1684 		return -EINVAL;
1685 	}
1686 
1687 	return size;
1688 }
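
/*
 * Illustrative sketch (not part of the driver): the buffer parsed above is
 * expected to carry "<CE id> <history index>". For example, selecting CE 2,
 * history slot 10 for the next hif_dump_desc_event() call:
 *
 *	hif_input_desc_trace_buf_index(scn, "2 10", sizeof("2 10"));
 */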
1689 
1690 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1691 
1692 #ifdef HIF_CE_DEBUG_DATA_BUF
1693 /*
1694  * hif_ce_en_desc_hist() -
1695  * API to enable recording the CE desc history
1696  *
1697  * @scn: hif context
1698  * @buf: user input of the form "<CE id> <1/0>"
1699  * @size: size of the user input
1700  *
1701  * Starts or stops recording of the ce desc data history
1702  *
1703  * Return: total length consumed on success
1704  */
1705 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1706 {
1707 	struct ce_desc_hist *ce_hist = NULL;
1708 	uint32_t cfg = 0;
1709 	uint32_t ce_id = 0;
1710 
1711 	if (!scn)
1712 		return -EINVAL;
1713 
1714 	ce_hist = &scn->hif_ce_desc_hist;
1715 
1716 	if (!size) {
1717 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1718 		return -EINVAL;
1719 	}
1720 
1721 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1722 		   (unsigned int *)&cfg) != 2) {
1723 		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
1724 			     __func__);
1725 		return -EINVAL;
1726 	}
1727 	if (ce_id >= CE_COUNT_MAX) {
1728 		qdf_print("Invalid value CE Id");
1729 		return -EINVAL;
1730 	}
1731 
1732 	if (cfg > 1) {
1733 		qdf_print("Invalid values: enter 0 or 1");
1734 		return -EINVAL;
1735 	}
1736 
1737 	if (!ce_hist->hist_ev[ce_id])
1738 		return -EINVAL;
1739 
1740 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1741 	if (cfg == 1) {
1742 		if (ce_hist->data_enable[ce_id] == 1) {
1743 			qdf_debug("Already Enabled");
1744 		} else {
1745 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
1746 							== QDF_STATUS_E_NOMEM) {
1747 				ce_hist->data_enable[ce_id] = 0;
1748 				qdf_err("%s: Memory alloc failed", __func__);
1749 			} else
1750 				ce_hist->data_enable[ce_id] = 1;
1751 		}
1752 	} else if (cfg == 0) {
1753 		if (ce_hist->data_enable[ce_id] == 0) {
1754 			qdf_debug("Already Disabled");
1755 		} else {
1756 			ce_hist->data_enable[ce_id] = 0;
1757 			free_mem_ce_debug_hist_data(scn, ce_id);
1758 		}
1759 	}
1760 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1761 
1762 	return size;
1763 }
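
/*
 * Usage sketch (illustrative only, not part of the driver): the input is
 * "<CE id> <1/0>"; "1" allocates the per-entry data buffers and starts
 * recording, "0" stops recording and frees them.  The hif_softc pointer is
 * hypothetical.
 */
#if 0
static void ce_hist_data_enable_sketch(struct hif_softc *scn)
{
	/* Start recording descriptor data for CE 5 ... */
	hif_ce_en_desc_hist(scn, "5 1", 3);

	/* ... and stop again, releasing the data buffers */
	hif_ce_en_desc_hist(scn, "5 0", 3);
}
#endif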
1764 
1765 /*
1766  * hif_disp_ce_enable_desc_data_hist() -
1767  * API to display the value of data_enable for each copy engine,
1768  * printed as one " CE<n>: <0/1>" line per CE
1769  *
1770  * @scn: HIF context
1771  * @buf: buffer to copy the output into
1772  *
1773  * Return: total length copied
1774  */
1775 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1776 {
1777 	ssize_t len = 0;
1778 	uint32_t ce_id = 0;
1779 	struct ce_desc_hist *ce_hist = NULL;
1780 
1781 	if (!scn)
1782 		return -EINVAL;
1783 
1784 	ce_hist = &scn->hif_ce_desc_hist;
1785 
1786 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1787 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1788 				ce_id, ce_hist->data_enable[ce_id]);
1789 	}
1790 
1791 	return len;
1792 }
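
/*
 * Usage sketch (illustrative only, not part of the driver): dumping the
 * per-CE enable state into a page-sized buffer, as a debugfs/sysfs "show"
 * handler would.  The wrapper name and buffer are hypothetical.
 */
#if 0
static ssize_t ce_hist_data_enable_show_sketch(struct hif_softc *scn,
					       char *page_buf)
{
	/* Produces one " CE<n>: <0/1>" line per copy engine */
	return hif_disp_ce_enable_desc_data_hist(scn, page_buf);
}
#endif
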
1793 #endif /* HIF_CE_DEBUG_DATA_BUF */
1794 
1795 #ifdef OL_ATH_SMART_LOGGING
1796 #define GUARD_SPACE 10
1797 #define LOG_ID_SZ 4
1798 /*
1799  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
1800  * @src_ring: SRC ring state
1801  * @buf_cur: Current pointer in ring buffer
1802  * @buf_init: Start of the ring buffer
1803  * @buf_sz: Size of the ring buffer
1804  * @skb_sz: Max size of the SKB buffer to be copied
1805  *
1806  * Dumps all the CE SRC ring descriptors and the buffers they point to into
1807  * the given buf; skb_sz is the maximum number of SKB bytes copied per entry
1808  *
1809  * Return: Current pointer in ring buffer
1810  */
1811 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
1812 				    uint8_t *buf_cur, uint8_t *buf_init,
1813 				    uint32_t buf_sz, uint32_t skb_sz)
1814 {
1815 	struct CE_src_desc *src_ring_base;
1816 	uint32_t len, entry;
1817 	struct CE_src_desc  *src_desc;
1818 	qdf_nbuf_t nbuf;
1819 	uint32_t available_buf;
1820 
1821 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
1822 	len = sizeof(struct CE_ring_state);
1823 	available_buf = buf_sz - (buf_cur - buf_init);
1824 	if (available_buf < (len + GUARD_SPACE)) {
1825 		buf_cur = buf_init;
1826 	}
1827 
1828 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
1829 	buf_cur += sizeof(struct CE_ring_state);
1830 
1831 	for (entry = 0; entry < src_ring->nentries; entry++) {
1832 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
1833 		nbuf = src_ring->per_transfer_context[entry];
1834 		if (nbuf) {
1835 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1836 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1837 
1838 			len = sizeof(struct CE_src_desc) + skb_cp_len
1839 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1840 			available_buf = buf_sz - (buf_cur - buf_init);
1841 			if (available_buf < (len + GUARD_SPACE)) {
1842 				buf_cur = buf_init;
1843 			}
1844 			qdf_mem_copy(buf_cur, src_desc,
1845 				     sizeof(struct CE_src_desc));
1846 			buf_cur += sizeof(struct CE_src_desc);
1847 
1848 			available_buf = buf_sz - (buf_cur - buf_init);
1849 			buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
1850 						skb_cp_len);
1851 
1852 			if (skb_cp_len) {
1853 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1854 					     skb_cp_len);
1855 				buf_cur += skb_cp_len;
1856 			}
1857 		} else {
1858 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
1859 			available_buf = buf_sz - (buf_cur - buf_init);
1860 			if (available_buf < (len + GUARD_SPACE)) {
1861 				buf_cur = buf_init;
1862 			}
1863 			qdf_mem_copy(buf_cur, src_desc,
1864 				     sizeof(struct CE_src_desc));
1865 			buf_cur += sizeof(struct CE_src_desc);
1866 			available_buf = buf_sz - (buf_cur - buf_init);
1867 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1868 		}
1869 	}
1870 
1871 	return buf_cur;
1872 }
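
/*
 * The wrap handling above repeats for every record type: if the space left
 * in the smart-logging buffer cannot hold the next record plus GUARD_SPACE,
 * writing restarts from buf_init.  A minimal sketch of that check factored
 * into a helper (hypothetical, not used by the driver):
 */
#if 0
static uint8_t *ce_dump_wrap_if_needed(uint8_t *buf_cur, uint8_t *buf_init,
				       uint32_t buf_sz, uint32_t rec_len)
{
	uint32_t available_buf = buf_sz - (buf_cur - buf_init);

	/* Not enough room for this record: wrap to the start of the buffer */
	if (available_buf < (rec_len + GUARD_SPACE))
		return buf_init;

	return buf_cur;
}
#endif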
1873 
1874 /*
1875  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
1876  * @dest_ring: DEST ring state
1877  * @buf_cur: Current pointer in ring buffer
1878  * @buf_init: Start of the ring buffer
1879  * @buf_sz: Size of the ring buffer
1880  * @skb_sz: Max size of the SKB buffer to be copied
1881  *
1882  * Dumps all the CE DEST ring descriptors and the buffers they point to into
1883  * the given buf; skb_sz is the maximum number of SKB bytes copied per entry
1884  *
1885  * Return: Current pointer in ring buffer
1886  */
1887 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
1888 				     uint8_t *buf_cur, uint8_t *buf_init,
1889 				     uint32_t buf_sz, uint32_t skb_sz)
1890 {
1891 	struct CE_dest_desc *dest_ring_base;
1892 	uint32_t len, entry;
1893 	struct CE_dest_desc  *dest_desc;
1894 	qdf_nbuf_t nbuf;
1895 	uint32_t available_buf;
1896 
1897 	dest_ring_base =
1898 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1899 
1900 	len = sizeof(struct CE_ring_state);
1901 	available_buf = buf_sz - (buf_cur - buf_init);
1902 	if (available_buf < (len + GUARD_SPACE)) {
1903 		buf_cur = buf_init;
1904 	}
1905 
1906 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
1907 	buf_cur += sizeof(struct CE_ring_state);
1908 
1909 	for (entry = 0; entry < dest_ring->nentries; entry++) {
1910 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
1911 
1912 		nbuf = dest_ring->per_transfer_context[entry];
1913 		if (nbuf) {
1914 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1915 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1916 
1917 			len = sizeof(struct CE_dest_desc) + skb_cp_len
1918 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1919 
1920 			available_buf = buf_sz - (buf_cur - buf_init);
1921 			if (available_buf < (len + GUARD_SPACE)) {
1922 				buf_cur = buf_init;
1923 			}
1924 
1925 			qdf_mem_copy(buf_cur, dest_desc,
1926 				     sizeof(struct CE_dest_desc));
1927 			buf_cur += sizeof(struct CE_dest_desc);
1928 			available_buf = buf_sz - (buf_cur - buf_init);
1929 			buf_cur += snprintf(buf_cur, available_buf, "SKB%u",
1930 						skb_cp_len);
1931 			if (skb_cp_len) {
1932 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1933 					     skb_cp_len);
1934 				buf_cur += skb_cp_len;
1935 			}
1936 		} else {
1937 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
1938 			available_buf = buf_sz - (buf_cur - buf_init);
1939 			if (available_buf < (len + GUARD_SPACE)) {
1940 				buf_cur = buf_init;
1941 			}
1942 			qdf_mem_copy(buf_cur, dest_desc,
1943 				     sizeof(struct CE_dest_desc));
1944 			buf_cur += sizeof(struct CE_dest_desc);
1945 			available_buf = buf_sz - (buf_cur - buf_init);
1946 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1947 		}
1948 	}
1949 	return buf_cur;
1950 }
1951 
1952 /**
1953  * hif_log_dump_ce() - Copy the CE SRC or DEST ring for the given CE to buf
1954  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1955  * and the buffers they point to into the given buf
1956  */
1957 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1958 			 uint8_t *buf_init, uint32_t buf_sz,
1959 			 uint32_t ce, uint32_t skb_sz)
1960 {
1961 	struct CE_state *ce_state;
1962 	struct CE_ring_state *src_ring;
1963 	struct CE_ring_state *dest_ring;
1964 
1965 	ce_state = scn->ce_id_to_state[ce];
1966 	src_ring = ce_state->src_ring;
1967 	dest_ring = ce_state->dest_ring;
1968 
1969 	if (src_ring) {
1970 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
1971 					      buf_init, buf_sz, skb_sz);
1972 	} else if (dest_ring) {
1973 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
1974 					       buf_init, buf_sz, skb_sz);
1975 	}
1976 
1977 	return buf_cur;
1978 }
1979 
1980 qdf_export_symbol(hif_log_dump_ce);
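
/*
 * Usage sketch (illustrative only, not part of the driver): walking every
 * copy engine and appending its ring dump to a caller-provided smart-logging
 * buffer.  The helper name and the 64-byte SKB copy limit are hypothetical.
 */
#if 0
static uint8_t *ce_dump_all_rings_sketch(struct hif_softc *scn,
					 uint8_t *buf, uint32_t buf_sz)
{
	uint8_t *buf_cur = buf;
	uint32_t ce;

	for (ce = 0; ce < CE_COUNT_MAX; ce++) {
		if (!scn->ce_id_to_state[ce])
			continue;
		/* Copy at most 64 bytes of each SKB payload */
		buf_cur = hif_log_dump_ce(scn, buf_cur, buf, buf_sz, ce, 64);
	}

	return buf_cur;
}
#endif
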
1981 #endif /* OL_ATH_SMART_LOGGING */
1982 
1983