xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 static int war1_allow_sleep;
65 /* io32 write workaround */
66 static int hif_ce_war1;
67 
68 /**
69  * hif_ce_war_disable() - disable ce war globally
70  */
71 void hif_ce_war_disable(void)
72 {
73 	hif_ce_war1 = 0;
74 }
75 
76 /**
77  * hif_ce_war_enable() - enable ce war globally
78  */
79 void hif_ce_war_enable(void)
80 {
81 	hif_ce_war1 = 1;
82 }
83 
84 /*
85  * Note: For MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) condition also
86  * needs to be checked here
87  */
88 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
89 
90 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
91 #define CE_DEBUG_DATA_PER_ROW 16
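/*
 * Worked example for the sizing macro above (an explanatory note, not driver
 * logic): hex_dump_to_buffer() emits roughly three characters per byte
 * ("xx " - two hex digits plus a separator) with no separator after the last
 * byte, so a row of x bytes needs (x * 3) - 1 characters; e.g. a full
 * CE_DEBUG_DATA_PER_ROW of 16 bytes needs 47 characters.
 */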
92 
93 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
94 
95 /**
96  * get_next_record_index() - get the next record index
97  * @table_index: atomic index variable to increment
98  * @array_size: array size of the circular buffer
99  *
100  * Increment the atomic index and reserve the value.
101  * Takes care of buffer wrap.
102  * Guaranteed to be thread safe as long as fewer than array_size contexts
103  * try to access the array.  If there are more than array_size contexts
104  * trying to access the array, full locking of the recording process would
105  * be needed to have sane logging.
106  */
107 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
108 {
109 	int record_index = qdf_atomic_inc_return(table_index);
110 
111 	if (record_index == array_size)
112 		qdf_atomic_sub(array_size, table_index);
113 
114 	while (record_index >= array_size)
115 		record_index -= array_size;
116 	return record_index;
117 }
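/*
 * Illustrative sketch (not part of the driver) of how the helper above is
 * meant to be used by the history recording path; ce_hist and hist_ev stand
 * for the per-CE history bookkeeping used further below:
 *
 *	record_index = get_next_record_index(&ce_hist->history_index[ce_id],
 *					     HIF_CE_HISTORY_MAX);
 *	event = &hist_ev[record_index];	// always within [0, HIF_CE_HISTORY_MAX)
 *
 * The qdf_atomic_sub() only rebases the shared counter when it reaches
 * array_size; the local while loop keeps the value returned to this caller
 * in range even if several contexts raced past the wrap point.
 */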
118 
119 #ifdef HIF_CE_DEBUG_DATA_BUF
120 /**
121  * hif_ce_desc_data_record() - Record data pointed to by the CE descriptor
122  * @event: structure detailing a ce event
123  * @len: length of the data
124  * Return: None
125  */
126 static inline
127 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
128 {
129 	uint8_t *data = NULL;
130 
131 	if (!event->data) {
132 		hif_err("No memory allocated");
133 		return;
134 	}
135 
136 	if (event->memory && len > 0)
137 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
138 
139 	event->actual_data_len = 0;
140 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
141 
142 	if (data && len > 0) {
143 		qdf_mem_copy(event->data, data,
144 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
145 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
146 		event->actual_data_len = len;
147 	}
148 }
149 #else
150 static inline
151 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
152 {
153 }
154 #endif /* HIF_CE_DEBUG_DATA_BUF */
155 
156 /**
157  * hif_record_ce_desc_event() - record ce descriptor events
158  * @scn: hif_softc
159  * @ce_id: which ce is the event occurring on
160  * @type: what happened
161  * @descriptor: pointer to the descriptor posted/completed
162  * @memory: virtual address of buffer related to the descriptor
163  * @index: index that the descriptor was/will be at.
164  */
165 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
166 				enum hif_ce_event_type type,
167 				union ce_desc *descriptor,
168 				void *memory, int index,
169 				int len)
170 {
171 	int record_index;
172 	struct hif_ce_desc_event *event;
173 
174 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
175 	struct hif_ce_desc_event *hist_ev = NULL;
176 
177 	if (ce_id < CE_COUNT_MAX)
178 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
179 	else
180 		return;
181 
185 	if (!ce_hist->enable[ce_id])
186 		return;
187 
188 	if (!hist_ev)
189 		return;
190 
191 	record_index = get_next_record_index(
192 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
193 
194 	event = &hist_ev[record_index];
195 
196 	event->type = type;
197 	event->time = qdf_get_log_timestamp();
198 
199 	if (descriptor) {
200 		qdf_mem_copy(&event->descriptor, descriptor, sizeof(union ce_desc));
201 	} else {
202 		qdf_mem_zero(&event->descriptor, sizeof(union ce_desc));
203 	}
204 
205 	event->memory = memory;
206 	event->index = index;
207 
208 	if (ce_hist->data_enable[ce_id])
209 		hif_ce_desc_data_record(event, len);
210 }
211 qdf_export_symbol(hif_record_ce_desc_event);
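/*
 * Illustrative usage sketch, mirroring the call made from ce_send_single()
 * later in this file; src_desc, msdu, write_index and len stand for whatever
 * the caller just posted to the ring:
 *
 *	hif_record_ce_desc_event(scn, CE_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc, msdu,
 *				 write_index, len);
 */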
212 
213 /**
214  * ce_init_ce_desc_event_log() - initialize the ce event log
215  * @ce_id: copy engine id for which we are initializing the log
216  * @size: size of array to dedicate
217  *
218  * Currently the passed size is ignored in favor of a precompiled value.
219  */
220 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
221 {
222 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
223 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
224 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
225 }
226 
227 /**
228  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
229  * @ce_id: copy engine id for which we are deinitializing the log
230  *
231  */
232 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
233 {
234 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
235 
236 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
237 }
238 
239 #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
240 void hif_record_ce_desc_event(struct hif_softc *scn,
241 		int ce_id, enum hif_ce_event_type type,
242 		union ce_desc *descriptor, void *memory,
243 		int index, int len)
244 {
245 }
246 qdf_export_symbol(hif_record_ce_desc_event);
247 
248 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
249 					int size)
250 {
251 }
252 
253 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
254 {
255 }
256 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
257 
258 #ifdef NAPI_YIELD_BUDGET_BASED
259 bool hif_ce_service_should_yield(struct hif_softc *scn,
260 				 struct CE_state *ce_state)
261 {
262 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
263 
264 	/* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
265 	 * beyond MAX_NUM_OF_RECEIVES for the NAPI bucket calculation issue.
266 	 * This can happen in fast path handling as processing is happening in
267 	 * batches.
268 	 */
269 	if (yield)
270 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
271 
272 	return yield;
273 }
274 #else
275 /**
276  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
277  * @scn: hif context
278  * @ce_state: context of the copy engine being serviced
279  *
280  * Return: true if the service should yield
281  */
282 bool hif_ce_service_should_yield(struct hif_softc *scn,
283 				 struct CE_state *ce_state)
284 {
285 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
286 
287 	time_limit_reached =
288 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
289 
290 	if (!time_limit_reached)
291 		rxpkt_thresh_reached = hif_max_num_receives_reached
292 					(scn, ce_state->receive_count);
293 
294 	/* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
295 	 * beyond MAX_NUM_OF_RECEIVES for the NAPI bucket calculation issue.
296 	 * This can happen in fast path handling as processing is happening in
297 	 * batches.
298 	 */
299 	if (rxpkt_thresh_reached)
300 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
301 
302 	yield =  time_limit_reached || rxpkt_thresh_reached;
303 
304 	if (yield &&
305 	    ce_state->htt_rx_data &&
306 	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
307 		hif_napi_update_yield_stats(ce_state,
308 					    time_limit_reached,
309 					    rxpkt_thresh_reached);
310 	}
311 
312 	return yield;
313 }
314 qdf_export_symbol(hif_ce_service_should_yield);
315 #endif
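/*
 * Illustrative sketch of how a CE service loop is expected to consult the
 * yield check above (modelled loosely on the fast-path receive loop;
 * more_data is assumed to be the loop's own "work remaining" flag):
 *
 *	do {
 *		...reap completed descriptors, bump ce_state->receive_count...
 *		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
 *			ce_state->force_break = 1;
 *			break;
 *		}
 *	} while (more_data);
 */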
316 
317 /*
318  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
319  * The caller takes responsibility for any needed locking.
320  */
321 
322 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
323 				   u32 ctrl_addr, unsigned int write_index)
324 {
325 	if (hif_ce_war1) {
326 		void __iomem *indicator_addr;
327 
328 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
329 
330 		if (!war1_allow_sleep
331 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
332 			hif_write32_mb(scn, indicator_addr,
333 				       (CDC_WAR_MAGIC_STR | write_index));
334 		} else {
335 			unsigned long irq_flags;
336 
337 			local_irq_save(irq_flags);
338 			hif_write32_mb(scn, indicator_addr, 1);
339 
340 			/*
341 			 * PCIE write waits for ACK in IPQ8K, there is no
342 			 * need to read back value.
343 			 */
344 			(void)hif_read32_mb(scn, indicator_addr);
345 			/* conservative */
346 			(void)hif_read32_mb(scn, indicator_addr);
347 
348 			CE_SRC_RING_WRITE_IDX_SET(scn,
349 						  ctrl_addr, write_index);
350 
351 			hif_write32_mb(scn, indicator_addr, 0);
352 			local_irq_restore(irq_flags);
353 		}
354 	} else {
355 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
356 	}
357 }
358 
359 qdf_export_symbol(war_ce_src_ring_write_idx_set);
360 
361 int
362 ce_send(struct CE_handle *copyeng,
363 		void *per_transfer_context,
364 		qdf_dma_addr_t buffer,
365 		uint32_t nbytes,
366 		uint32_t transfer_id,
367 		uint32_t flags,
368 		uint32_t user_flag)
369 {
370 	struct CE_state *CE_state = (struct CE_state *)copyeng;
371 	int status;
372 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
373 
374 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
375 	status = hif_state->ce_services->ce_send_nolock(copyeng,
376 			per_transfer_context, buffer, nbytes,
377 			transfer_id, flags, user_flag);
378 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
379 
380 	return status;
381 }
382 qdf_export_symbol(ce_send);
383 
384 unsigned int ce_sendlist_sizeof(void)
385 {
386 	return sizeof(struct ce_sendlist);
387 }
388 
389 void ce_sendlist_init(struct ce_sendlist *sendlist)
390 {
391 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
392 
393 	sl->num_items = 0;
394 }
395 
396 int
397 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
398 					qdf_dma_addr_t buffer,
399 					uint32_t nbytes,
400 					uint32_t flags,
401 					uint32_t user_flags)
402 {
403 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
404 	unsigned int num_items = sl->num_items;
405 	struct ce_sendlist_item *item;
406 
407 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
408 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
409 		return QDF_STATUS_E_RESOURCES;
410 	}
411 
412 	item = &sl->item[num_items];
413 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
414 	item->data = buffer;
415 	item->u.nbytes = nbytes;
416 	item->flags = flags;
417 	item->user_flags = user_flags;
418 	sl->num_items = num_items + 1;
419 	return QDF_STATUS_SUCCESS;
420 }
421 
422 int
423 ce_sendlist_send(struct CE_handle *copyeng,
424 		 void *per_transfer_context,
425 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
426 {
427 	struct CE_state *CE_state = (struct CE_state *)copyeng;
428 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
429 
430 	return hif_state->ce_services->ce_sendlist_send(copyeng,
431 			per_transfer_context, sendlist, transfer_id);
432 }
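/*
 * Illustrative sketch of the sendlist API usage (error handling trimmed);
 * frag_paddr/frag_bytes, last_paddr/last_bytes and the gather flag are
 * assumptions standing in for whatever the transport layer has prepared:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag_paddr, frag_bytes,
 *			    CE_SEND_FLAG_GATHER, 0);
 *	ce_sendlist_buf_add(&sendlist, last_paddr, last_bytes, 0, 0);
 *	ce_sendlist_send(ce_hdl, per_transfer_context, &sendlist, transfer_id);
 */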
433 
434 #ifndef AH_NEED_TX_DATA_SWAP
435 #define AH_NEED_TX_DATA_SWAP 0
436 #endif
437 
438 /**
439  * ce_batch_send() - sends a bunch of msdus at once
440  * @ce_tx_hdl : pointer to CE handle
441  * @msdu : list of msdus to be sent
442  * @transfer_id : transfer id
443  * @len : Downloaded length
444  * @sendhead : sendhead
445  *
446  * Assumption: Called with an array of MSDUs
447  * Function:
448  * For each msdu in the array
449  * 1. Send each msdu
450  * 2. Increment write index accordingly.
451  *
452  * Return: list of msdus not sent
453  */
454 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
455 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
456 {
457 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
458 	struct hif_softc *scn = ce_state->scn;
459 	struct CE_ring_state *src_ring = ce_state->src_ring;
460 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
461 	/*  A_target_id_t targid = TARGID(scn);*/
462 
463 	uint32_t nentries_mask = src_ring->nentries_mask;
464 	uint32_t sw_index, write_index;
465 
466 	struct CE_src_desc *src_desc_base =
467 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
468 	uint32_t *src_desc;
469 
470 	struct CE_src_desc lsrc_desc = {0};
471 	int deltacount = 0;
472 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
473 
474 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
475 	sw_index = src_ring->sw_index;
476 	write_index = src_ring->write_index;
477 
478 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
479 
480 	while (msdu) {
481 		tempnext = qdf_nbuf_next(msdu);
482 
483 		if (deltacount < 2) {
484 			if (sendhead)
485 				return msdu;
486 			HIF_ERROR("%s: Out of descriptors", __func__);
487 			src_ring->write_index = write_index;
488 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
489 					write_index);
490 
491 			sw_index = src_ring->sw_index;
492 			write_index = src_ring->write_index;
493 
494 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
495 					sw_index-1);
496 			if (!freelist) {
497 				freelist = msdu;
498 				hfreelist = msdu;
499 			} else {
500 				qdf_nbuf_set_next(freelist, msdu);
501 				freelist = msdu;
502 			}
503 			qdf_nbuf_set_next(msdu, NULL);
504 			msdu = tempnext;
505 			continue;
506 		}
507 
508 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
509 				write_index);
510 
511 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
512 
513 		lsrc_desc.meta_data = transfer_id;
514 		if (len  > msdu->len)
515 			len =  msdu->len;
516 		lsrc_desc.nbytes = len;
517 		/*  Data packet is a byte stream, so disable byte swap */
518 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
519 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
520 
521 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
522 
523 
524 		src_ring->per_transfer_context[write_index] = msdu;
525 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
526 
527 		if (sendhead)
528 			break;
529 		qdf_nbuf_set_next(msdu, NULL);
530 		msdu = tempnext;
531 
532 	}
533 
534 
535 	src_ring->write_index = write_index;
536 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
537 
538 	return hfreelist;
539 }
540 
541 /**
542  * ce_update_tx_ring() - Advance sw index.
543  * @ce_tx_hdl : pointer to CE handle
544  * @num_htt_cmpls : htt completions received.
545  *
546  * Function:
547  * Increment the value of sw index of src ring
548  * according to number of htt completions
549  * received.
550  *
551  * Return: void
552  */
553 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
554 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
555 {
556 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
557 	struct CE_ring_state *src_ring = ce_state->src_ring;
558 	uint32_t nentries_mask = src_ring->nentries_mask;
559 	/*
560 	 * Advance the s/w index:
561 	 * This effectively simulates completing the CE ring descriptors
562 	 */
563 	src_ring->sw_index =
564 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
565 				num_htt_cmpls);
566 }
567 #else
568 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
569 {}
570 #endif
571 
572 /**
573  * ce_send_single() - sends a single msdu
574  * @ce_tx_hdl : pointer to CE handle
575  * @msdu : msdu to be sent
576  * @transfer_id : transfer id
577  * @len : Downloaded length
578  *
579  * Function:
580  * 1. Send one msdu
581  * 2. Increment write index of src ring accordinlgy.
582  *
583  * Return: int: CE sent status
584  */
585 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
586 		uint32_t transfer_id, u_int32_t len)
587 {
588 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
589 	struct hif_softc *scn = ce_state->scn;
590 	struct CE_ring_state *src_ring = ce_state->src_ring;
591 	uint32_t ctrl_addr = ce_state->ctrl_addr;
592 	/*A_target_id_t targid = TARGID(scn);*/
593 
594 	uint32_t nentries_mask = src_ring->nentries_mask;
595 	uint32_t sw_index, write_index;
596 
597 	struct CE_src_desc *src_desc_base =
598 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
599 	uint32_t *src_desc;
600 
601 	struct CE_src_desc lsrc_desc = {0};
602 	enum hif_ce_event_type event_type;
603 
604 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
605 	sw_index = src_ring->sw_index;
606 	write_index = src_ring->write_index;
607 
608 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
609 					sw_index-1) < 1)) {
610 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
611 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
612 			  write_index, sw_index);
613 		return 1;
614 	}
615 
616 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
617 
618 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
619 
620 	lsrc_desc.meta_data = transfer_id;
621 	lsrc_desc.nbytes = len;
622 	/*  Data packet is a byte stream, so disable byte swap */
623 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
624 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
625 
626 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
627 
628 
629 	src_ring->per_transfer_context[write_index] = msdu;
630 
631 	if (((struct CE_src_desc *)src_desc)->gather)
632 		event_type = HIF_TX_GATHER_DESC_POST;
633 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
634 		event_type = HIF_TX_DESC_SOFTWARE_POST;
635 	else
636 		event_type = HIF_TX_DESC_POST;
637 
638 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
639 				(union ce_desc *)src_desc, msdu,
640 				write_index, len);
641 
642 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
643 
644 	src_ring->write_index = write_index;
645 
646 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
647 
648 	return QDF_STATUS_SUCCESS;
649 }
650 
651 /**
652  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
653  * @copyeng: copy engine handle
654  * @per_recv_context: virtual address of the nbuf
655  * @buffer: physical address of the nbuf
656  *
657  * Return: 0 if the buffer is enqueued
658  */
659 int
660 ce_recv_buf_enqueue(struct CE_handle *copyeng,
661 		    void *per_recv_context, qdf_dma_addr_t buffer)
662 {
663 	struct CE_state *CE_state = (struct CE_state *)copyeng;
664 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
665 
666 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
667 			per_recv_context, buffer);
668 }
669 qdf_export_symbol(ce_recv_buf_enqueue);
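/*
 * Minimal sketch of posting a receive buffer, assuming the usual HIF refill
 * pattern; buf_sz, ce_hdl and the error codes are illustrative only:
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
 *	qdf_dma_addr_t paddr;
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 *	if (qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE) !=
 *	    QDF_STATUS_SUCCESS) {
 *		qdf_nbuf_free(nbuf);
 *		return QDF_STATUS_E_FAULT;
 *	}
 *	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *	status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, paddr);
 */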
670 
671 void
672 ce_send_watermarks_set(struct CE_handle *copyeng,
673 		       unsigned int low_alert_nentries,
674 		       unsigned int high_alert_nentries)
675 {
676 	struct CE_state *CE_state = (struct CE_state *)copyeng;
677 	uint32_t ctrl_addr = CE_state->ctrl_addr;
678 	struct hif_softc *scn = CE_state->scn;
679 
680 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
681 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
682 }
683 
684 void
685 ce_recv_watermarks_set(struct CE_handle *copyeng,
686 		       unsigned int low_alert_nentries,
687 		       unsigned int high_alert_nentries)
688 {
689 	struct CE_state *CE_state = (struct CE_state *)copyeng;
690 	uint32_t ctrl_addr = CE_state->ctrl_addr;
691 	struct hif_softc *scn = CE_state->scn;
692 
693 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
694 				low_alert_nentries);
695 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
696 				high_alert_nentries);
697 }
698 
699 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
700 {
701 	struct CE_state *CE_state = (struct CE_state *)copyeng;
702 	struct CE_ring_state *src_ring = CE_state->src_ring;
703 	unsigned int nentries_mask = src_ring->nentries_mask;
704 	unsigned int sw_index;
705 	unsigned int write_index;
706 
707 	qdf_spin_lock(&CE_state->ce_index_lock);
708 	sw_index = src_ring->sw_index;
709 	write_index = src_ring->write_index;
710 	qdf_spin_unlock(&CE_state->ce_index_lock);
711 
712 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
713 }
714 
715 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
716 {
717 	struct CE_state *CE_state = (struct CE_state *)copyeng;
718 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
719 	unsigned int nentries_mask = dest_ring->nentries_mask;
720 	unsigned int sw_index;
721 	unsigned int write_index;
722 
723 	qdf_spin_lock(&CE_state->ce_index_lock);
724 	sw_index = dest_ring->sw_index;
725 	write_index = dest_ring->write_index;
726 	qdf_spin_unlock(&CE_state->ce_index_lock);
727 
728 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
729 }
730 
731 /*
732  * Guts of ce_completed_recv_next.
733  * The caller takes responsibility for any necessary locking.
734  */
735 int
736 ce_completed_recv_next(struct CE_handle *copyeng,
737 		       void **per_CE_contextp,
738 		       void **per_transfer_contextp,
739 		       qdf_dma_addr_t *bufferp,
740 		       unsigned int *nbytesp,
741 		       unsigned int *transfer_idp, unsigned int *flagsp)
742 {
743 	struct CE_state *CE_state = (struct CE_state *)copyeng;
744 	int status;
745 	struct hif_softc *scn = CE_state->scn;
746 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
747 	struct ce_ops *ce_services;
748 
749 	ce_services = hif_state->ce_services;
750 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
751 	status =
752 		ce_services->ce_completed_recv_next_nolock(CE_state,
753 				per_CE_contextp, per_transfer_contextp, bufferp,
754 					      nbytesp, transfer_idp, flagsp);
755 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
756 
757 	return status;
758 }
759 
760 QDF_STATUS
761 ce_revoke_recv_next(struct CE_handle *copyeng,
762 		    void **per_CE_contextp,
763 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
764 {
765 	struct CE_state *CE_state = (struct CE_state *)copyeng;
766 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
767 
768 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
769 			per_CE_contextp, per_transfer_contextp, bufferp);
770 }
771 
772 QDF_STATUS
773 ce_cancel_send_next(struct CE_handle *copyeng,
774 		void **per_CE_contextp,
775 		void **per_transfer_contextp,
776 		qdf_dma_addr_t *bufferp,
777 		unsigned int *nbytesp,
778 		unsigned int *transfer_idp,
779 		uint32_t *toeplitz_hash_result)
780 {
781 	struct CE_state *CE_state = (struct CE_state *)copyeng;
782 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
783 
784 	return hif_state->ce_services->ce_cancel_send_next
785 		(copyeng, per_CE_contextp, per_transfer_contextp,
786 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
787 }
788 qdf_export_symbol(ce_cancel_send_next);
789 
790 int
791 ce_completed_send_next(struct CE_handle *copyeng,
792 		       void **per_CE_contextp,
793 		       void **per_transfer_contextp,
794 		       qdf_dma_addr_t *bufferp,
795 		       unsigned int *nbytesp,
796 		       unsigned int *transfer_idp,
797 		       unsigned int *sw_idx,
798 		       unsigned int *hw_idx,
799 		       unsigned int *toeplitz_hash_result)
800 {
801 	struct CE_state *CE_state = (struct CE_state *)copyeng;
802 	struct hif_softc *scn = CE_state->scn;
803 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
804 	struct ce_ops *ce_services;
805 	int status;
806 
807 	ce_services = hif_state->ce_services;
808 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
809 	status =
810 		ce_services->ce_completed_send_next_nolock(CE_state,
811 					per_CE_contextp, per_transfer_contextp,
812 					bufferp, nbytesp, transfer_idp, sw_idx,
813 					      hw_idx, toeplitz_hash_result);
814 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
815 
816 	return status;
817 }
818 
819 #ifdef ATH_11AC_TXCOMPACT
820 /* CE engine descriptor reap
821  * Similar to ce_per_engine_service; the only difference is that
822  * ce_per_engine_service handles both receive and reaping of completed
823  * descriptors, while this function only handles reaping of Tx complete
824  * descriptors. It is called from the threshold reap poll routine
825  * hif_send_complete_check, so it should not contain any receive
826  * functionality.
827  */
828 
829 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
830 {
831 	void *CE_context;
832 	void *transfer_context;
833 	qdf_dma_addr_t buf;
834 	unsigned int nbytes;
835 	unsigned int id;
836 	unsigned int sw_idx, hw_idx;
837 	uint32_t toeplitz_hash_result;
838 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
839 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
840 
841 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
842 		return;
843 
844 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
845 			NULL, NULL, 0, 0);
846 
847 	/* Since this function is called from both user context and
848 	 * tasklet context, the spinlock has to lock out the bottom halves.
849 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
850 	 * enabled in TX polling mode. If this is not the case, more
851 	 * bottom-half spin lock changes are needed. Due to data path
852 	 * performance concerns, after internal discussion we decided
853 	 * to make the minimum change, i.e., only address the issue seen
854 	 * in this function. The possible negative effect of this minimum
855 	 * change is that, in the future, if some other function is also
856 	 * opened up to user context, those cases will need to be
857 	 * addressed by changing spin_lock to spin_lock_bh as well.
858 	 */
859 
860 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
861 
862 	if (CE_state->send_cb) {
863 		{
864 			struct ce_ops *ce_services = hif_state->ce_services;
865 			/* Pop completed send buffers and call the
866 			 * registered send callback for each
867 			 */
868 			while (ce_services->ce_completed_send_next_nolock
869 				 (CE_state, &CE_context,
870 				  &transfer_context, &buf,
871 				  &nbytes, &id, &sw_idx, &hw_idx,
872 				  &toeplitz_hash_result) ==
873 				  QDF_STATUS_SUCCESS) {
874 				if (ce_id != CE_HTT_H2T_MSG) {
875 					qdf_spin_unlock_bh(
876 						&CE_state->ce_index_lock);
877 					CE_state->send_cb(
878 						(struct CE_handle *)
879 						CE_state, CE_context,
880 						transfer_context, buf,
881 						nbytes, id, sw_idx, hw_idx,
882 						toeplitz_hash_result);
883 					qdf_spin_lock_bh(
884 						&CE_state->ce_index_lock);
885 				} else {
886 					struct HIF_CE_pipe_info *pipe_info =
887 						(struct HIF_CE_pipe_info *)
888 						CE_context;
889 
890 					qdf_spin_lock_bh(&pipe_info->
891 						 completion_freeq_lock);
892 					pipe_info->num_sends_allowed++;
893 					qdf_spin_unlock_bh(&pipe_info->
894 						   completion_freeq_lock);
895 				}
896 			}
897 		}
898 	}
899 
900 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
901 
902 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
903 			NULL, NULL, 0, 0);
904 	Q_TARGET_ACCESS_END(scn);
905 }
906 
907 #endif /*ATH_11AC_TXCOMPACT */
908 
909 /*
910  * ce_engine_service_reg:
911  *
912  * Called from ce_per_engine_service and goes through the regular interrupt
913  * handling that does not involve the WLAN fast path feature.
914  *
915  * Returns void
916  */
917 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
918 {
919 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
920 	uint32_t ctrl_addr = CE_state->ctrl_addr;
921 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
922 	void *CE_context;
923 	void *transfer_context;
924 	qdf_dma_addr_t buf;
925 	unsigned int nbytes;
926 	unsigned int id;
927 	unsigned int flags;
928 	unsigned int more_comp_cnt = 0;
929 	unsigned int more_snd_comp_cnt = 0;
930 	unsigned int sw_idx, hw_idx;
931 	uint32_t toeplitz_hash_result;
932 	uint32_t mode = hif_get_conparam(scn);
933 
934 more_completions:
935 	if (CE_state->recv_cb) {
936 
937 		/* Pop completed recv buffers and call
938 		 * the registered recv callback for each
939 		 */
940 		while (hif_state->ce_services->ce_completed_recv_next_nolock
941 				(CE_state, &CE_context, &transfer_context,
942 				&buf, &nbytes, &id, &flags) ==
943 				QDF_STATUS_SUCCESS) {
944 			qdf_spin_unlock(&CE_state->ce_index_lock);
945 			CE_state->recv_cb((struct CE_handle *)CE_state,
946 					  CE_context, transfer_context, buf,
947 					  nbytes, id, flags);
948 
949 			qdf_spin_lock(&CE_state->ce_index_lock);
950 			/*
951 			 * EV #112693 -
952 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
953 			 * BSoD_0x133 occurred in VHT80 UDP_DL
954 			 * Break out DPC by force if number of loops in
955 			 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
956 			 * to avoid spending too long time in
957 			 * DPC for each interrupt handling. Schedule another
958 			 * DPC to avoid data loss if a force-break action
959 			 * was taken earlier. Currently this applies to
960 			 * Windows OS only; Linux/MAC OS can extend it to
961 			 * their platforms if necessary.
962 			 */
963 
964 			/* Break the receive processes by
965 			 * force if force_break set up
966 			 */
967 			if (qdf_unlikely(CE_state->force_break)) {
968 				qdf_atomic_set(&CE_state->rx_pending, 1);
969 				return;
970 			}
971 		}
972 	}
973 
974 	/*
975 	 * Attention: We may experience potential infinite loop for below
976 	 * While Loop during Sending Stress test.
977 	 * Resolve the same way as Receive Case (Refer to EV #112693)
978 	 */
979 
980 	if (CE_state->send_cb) {
981 		/* Pop completed send buffers and call
982 		 * the registered send callback for each
983 		 */
984 
985 #ifdef ATH_11AC_TXCOMPACT
986 		while (hif_state->ce_services->ce_completed_send_next_nolock
987 			 (CE_state, &CE_context,
988 			 &transfer_context, &buf, &nbytes,
989 			 &id, &sw_idx, &hw_idx,
990 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
991 
992 			if (CE_id != CE_HTT_H2T_MSG ||
993 			    QDF_IS_EPPING_ENABLED(mode)) {
994 				qdf_spin_unlock(&CE_state->ce_index_lock);
995 				CE_state->send_cb((struct CE_handle *)CE_state,
996 						  CE_context, transfer_context,
997 						  buf, nbytes, id, sw_idx,
998 						  hw_idx, toeplitz_hash_result);
999 				qdf_spin_lock(&CE_state->ce_index_lock);
1000 			} else {
1001 				struct HIF_CE_pipe_info *pipe_info =
1002 					(struct HIF_CE_pipe_info *)CE_context;
1003 
1004 				qdf_spin_lock_bh(&pipe_info->
1005 					      completion_freeq_lock);
1006 				pipe_info->num_sends_allowed++;
1007 				qdf_spin_unlock_bh(&pipe_info->
1008 						completion_freeq_lock);
1009 			}
1010 		}
1011 #else                           /*ATH_11AC_TXCOMPACT */
1012 		while (hif_state->ce_services->ce_completed_send_next_nolock
1013 			 (CE_state, &CE_context,
1014 			  &transfer_context, &buf, &nbytes,
1015 			  &id, &sw_idx, &hw_idx,
1016 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1017 			qdf_spin_unlock(&CE_state->ce_index_lock);
1018 			CE_state->send_cb((struct CE_handle *)CE_state,
1019 				  CE_context, transfer_context, buf,
1020 				  nbytes, id, sw_idx, hw_idx,
1021 				  toeplitz_hash_result);
1022 			qdf_spin_lock(&CE_state->ce_index_lock);
1023 		}
1024 #endif /*ATH_11AC_TXCOMPACT */
1025 	}
1026 
1027 more_watermarks:
1028 	if (CE_state->misc_cbs) {
1029 		if (CE_state->watermark_cb &&
1030 				hif_state->ce_services->watermark_int(CE_state,
1031 					&flags)) {
1032 			qdf_spin_unlock(&CE_state->ce_index_lock);
1033 			/* Convert HW IS bits to software flags */
1034 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1035 					CE_state->wm_context, flags);
1036 			qdf_spin_lock(&CE_state->ce_index_lock);
1037 		}
1038 	}
1039 
1040 	/*
1041 	 * Clear the misc interrupts (watermark) that were handled above,
1042 	 * and that will be checked again below.
1043 	 * Clear and check for copy-complete interrupts again, just in case
1044 	 * more copy completions happened while the misc interrupts were being
1045 	 * handled.
1046 	 */
1047 	if (!ce_srng_based(scn)) {
1048 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1049 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1050 					   CE_WATERMARK_MASK |
1051 					   HOST_IS_COPY_COMPLETE_MASK);
1052 		} else {
1053 			qdf_atomic_set(&CE_state->rx_pending, 0);
1054 			hif_err_rl("%s: target access is not allowed",
1055 				   __func__);
1056 			return;
1057 		}
1058 	}
1059 
1060 	/*
1061 	 * Now that per-engine interrupts are cleared, verify that
1062 	 * no recv interrupts arrive while processing send interrupts,
1063 	 * and no recv or send interrupts happened while processing
1064 	 * misc interrupts. Go back and check again. Keep checking until
1065 	 * we find no more events to process.
1066 	 */
1067 	if (CE_state->recv_cb &&
1068 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1069 				CE_state)) {
1070 		if (QDF_IS_EPPING_ENABLED(mode) ||
1071 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1072 			goto more_completions;
1073 		} else {
1074 			if (!ce_srng_based(scn)) {
1075 				HIF_ERROR(
1076 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1077 					__func__,
1078 					CE_state->dest_ring->nentries_mask,
1079 					CE_state->dest_ring->sw_index,
1080 					CE_DEST_RING_READ_IDX_GET(scn,
1081 							  CE_state->ctrl_addr));
1082 			}
1083 		}
1084 	}
1085 
1086 	if (CE_state->send_cb &&
1087 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1088 				CE_state)) {
1089 		if (QDF_IS_EPPING_ENABLED(mode) ||
1090 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1091 			goto more_completions;
1092 		} else {
1093 			if (!ce_srng_based(scn)) {
1094 				HIF_ERROR(
1095 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1096 					__func__,
1097 					CE_state->src_ring->nentries_mask,
1098 					CE_state->src_ring->sw_index,
1099 					CE_SRC_RING_READ_IDX_GET(scn,
1100 							 CE_state->ctrl_addr));
1101 			}
1102 		}
1103 	}
1104 
1105 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1106 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1107 			goto more_watermarks;
1108 	}
1109 
1110 	qdf_atomic_set(&CE_state->rx_pending, 0);
1111 }
1112 
1113 /*
1114  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1115  *
1116  * Invokes registered callbacks for recv_complete,
1117  * send_complete, and watermarks.
1118  *
1119  * Returns: number of messages processed
1120  */
1121 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1122 {
1123 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1124 
1125 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1126 		return CE_state->receive_count;
1127 
1128 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1129 		HIF_ERROR("[premature rc=0]");
1130 		return 0; /* no work done */
1131 	}
1132 
1133 	/* Clear force_break flag and re-initialize receive_count to 0 */
1134 	CE_state->receive_count = 0;
1135 	CE_state->force_break = 0;
1136 	CE_state->ce_service_start_time = sched_clock();
1137 	CE_state->ce_service_yield_time =
1138 		CE_state->ce_service_start_time +
1139 		hif_get_ce_service_max_yield_time(
1140 			(struct hif_opaque_softc *)scn);
1141 
1142 	qdf_spin_lock(&CE_state->ce_index_lock);
1143 
1144 	CE_state->service(scn, CE_id);
1145 
1146 	qdf_spin_unlock(&CE_state->ce_index_lock);
1147 
1148 	if (Q_TARGET_ACCESS_END(scn) < 0)
1149 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
1150 	return CE_state->receive_count;
1151 }
1152 qdf_export_symbol(ce_per_engine_service);
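/*
 * Illustrative sketch of the typical caller (the per-CE tasklet in
 * ce_tasklet.c); tasklet_entry stands for the bottom-half context that
 * carries the CE id:
 *
 *	static void ce_tasklet(unsigned long data)
 *	{
 *		struct ce_tasklet_entry *tasklet_entry =
 *			(struct ce_tasklet_entry *)data;
 *		struct hif_softc *scn = ...;	// derived from the entry
 *
 *		ce_per_engine_service(scn, tasklet_entry->ce_id);
 *	}
 */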
1153 
1154 /*
1155  * Handler for per-engine interrupts on ALL active CEs.
1156  * This is used in cases where the system is sharing a
1157  * single interrupt for all CEs
1158  */
1159 
1160 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1161 {
1162 	int CE_id;
1163 	uint32_t intr_summary;
1164 
1165 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1166 		return;
1167 
1168 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1169 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1170 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1171 
1172 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1173 				qdf_atomic_set(&CE_state->rx_pending, 0);
1174 				ce_per_engine_service(scn, CE_id);
1175 			}
1176 		}
1177 
1178 		Q_TARGET_ACCESS_END(scn);
1179 		return;
1180 	}
1181 
1182 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1183 
1184 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1185 		if (intr_summary & (1 << CE_id))
1186 			intr_summary &= ~(1 << CE_id);
1187 		else
1188 			continue;       /* no intr pending on this CE */
1189 
1190 		ce_per_engine_service(scn, CE_id);
1191 	}
1192 
1193 	Q_TARGET_ACCESS_END(scn);
1194 }
1195 
1196 /* Iterate the CE_state list and disable the compl interrupt
1197  * if it has been registered already.
1198  */
1199 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1200 {
1201 	int CE_id;
1202 
1203 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1204 		return;
1205 
1206 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1207 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1208 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1209 
1210 		/* if the interrupt is currently enabled, disable it */
1211 		if (!CE_state->disable_copy_compl_intr
1212 		    && (CE_state->send_cb || CE_state->recv_cb))
1213 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1214 
1215 		if (CE_state->watermark_cb)
1216 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1217 	}
1218 	Q_TARGET_ACCESS_END(scn);
1219 }
1220 
1221 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1222 {
1223 	int CE_id;
1224 
1225 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1226 		return;
1227 
1228 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1229 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1230 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1231 
1232 		/*
1233 		 * If the CE is supposed to have copy complete interrupts
1234 		 * enabled (i.e. there a callback registered, and the
1235 		 * "disable" flag is not set), then re-enable the interrupt.
1236 		 */
1237 		if (!CE_state->disable_copy_compl_intr
1238 		    && (CE_state->send_cb || CE_state->recv_cb))
1239 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1240 
1241 		if (CE_state->watermark_cb)
1242 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1243 	}
1244 	Q_TARGET_ACCESS_END(scn);
1245 }
1246 
1247 /**
1248  * ce_send_cb_register(): register completion handler
1249  * @copyeng: CE_state representing the ce we are adding the behavior to
1250  * @fn_ptr: callback that the ce should use when processing tx completions
1251  * @disable_interrupts: if the interrupts should be enabled or not.
1252  *
1253  * Caller should guarantee that no transactions are in progress before
1254  * switching the callback function.
1255  *
1256  * Registers the send context before the fn pointer so that if the cb is valid
1257  * the context should be valid.
1258  *
1259  * Beware that currently this function will enable completion interrupts.
1260  */
1261 void
1262 ce_send_cb_register(struct CE_handle *copyeng,
1263 		    ce_send_cb fn_ptr,
1264 		    void *ce_send_context, int disable_interrupts)
1265 {
1266 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1267 	struct hif_softc *scn;
1268 	struct HIF_CE_state *hif_state;
1269 
1270 	if (!CE_state) {
1271 		HIF_ERROR("%s: Error CE state = NULL", __func__);
1272 		return;
1273 	}
1274 	scn = CE_state->scn;
1275 	hif_state = HIF_GET_CE_STATE(scn);
1276 	if (!hif_state) {
1277 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1278 		return;
1279 	}
1280 	CE_state->send_context = ce_send_context;
1281 	CE_state->send_cb = fn_ptr;
1282 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1283 							disable_interrupts);
1284 }
1285 qdf_export_symbol(ce_send_cb_register);
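/*
 * Illustrative registration sketch; hif_pci_ce_send_done and pipe_info stand
 * for the caller's completion handler and its context, and the final flag
 * asks for completion interrupts to be left disabled when the CE attribute
 * says so:
 *
 *	ce_send_cb_register(pipe_info->ce_hdl, hif_pci_ce_send_done, pipe_info,
 *			    attr->flags & CE_ATTR_DISABLE_INTR);
 */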
1286 
1287 /**
1288  * ce_recv_cb_register(): register completion handler
1289  * @copyeng: CE_state representing the ce we are adding the behavior to
1290  * @fn_ptr: callback that the ce should use when processing rx completions
1291  * @disable_interrupts: if the interrupts should be enabled or not.
1292  *
1293  * Registers the send context before the fn pointer so that if the cb is valid
1294  * the context should be valid.
1295  *
1296  * Caller should guarantee that no transactions are in progress before
1297  * switching the callback function.
1298  */
1299 void
1300 ce_recv_cb_register(struct CE_handle *copyeng,
1301 		    CE_recv_cb fn_ptr,
1302 		    void *CE_recv_context, int disable_interrupts)
1303 {
1304 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1305 	struct hif_softc *scn;
1306 	struct HIF_CE_state *hif_state;
1307 
1308 	if (!CE_state) {
1309 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
1310 		return;
1311 	}
1312 	scn = CE_state->scn;
1313 	hif_state = HIF_GET_CE_STATE(scn);
1314 	if (!hif_state) {
1315 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1316 		return;
1317 	}
1318 	CE_state->recv_context = CE_recv_context;
1319 	CE_state->recv_cb = fn_ptr;
1320 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1321 							disable_interrupts);
1322 }
1323 qdf_export_symbol(ce_recv_cb_register);
1324 
1325 /**
1326  * ce_watermark_cb_register(): register completion handler
1327  * @copyeng: CE_state representing the ce we are adding the behavior to
1328  * @fn_ptr: callback that the ce should use when processing watermark events
1329  *
1330  * Caller should guarantee that no watermark events are being processed before
1331  * switching the callback function.
1332  */
1333 void
1334 ce_watermark_cb_register(struct CE_handle *copyeng,
1335 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1336 {
1337 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1338 	struct hif_softc *scn = CE_state->scn;
1339 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1340 
1341 	CE_state->watermark_cb = fn_ptr;
1342 	CE_state->wm_context = CE_wm_context;
1343 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1344 							0);
1345 	if (fn_ptr)
1346 		CE_state->misc_cbs = 1;
1347 }
1348 
1349 bool ce_get_rx_pending(struct hif_softc *scn)
1350 {
1351 	int CE_id;
1352 
1353 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1354 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1355 
1356 		if (qdf_atomic_read(&CE_state->rx_pending))
1357 			return true;
1358 	}
1359 
1360 	return false;
1361 }
1362 
1363 /**
1364  * ce_check_rx_pending() - check whether rx processing is pending on a CE
1365  * @CE_state: context of the copy engine to check
1366  *
1367  * Return: true if the per_engine_service
1368  *	didn't process all the rx descriptors.
1369  */
1370 bool ce_check_rx_pending(struct CE_state *CE_state)
1371 {
1372 	if (qdf_atomic_read(&CE_state->rx_pending))
1373 		return true;
1374 	else
1375 		return false;
1376 }
1377 qdf_export_symbol(ce_check_rx_pending);
1378 
1379 #ifdef IPA_OFFLOAD
1380 /**
1381  * ce_ipa_get_resource() - get uc resource on copyengine
1382  * @ce: copyengine context
1383  * @ce_sr: copyengine source ring resource info
1384  * @ce_sr_ring_size: copyengine source ring size
1385  * @ce_reg_paddr: copyengine register physical address
1386  *
1387  * Copy engine should release resource to micro controller
1388  * Micro controller needs
1389  *  - Copy engine source descriptor base address
1390  *  - Copy engine source descriptor size
1391  *  - PCI BAR address to access copy engine register
1392  *
1393  * Return: None
1394  */
1395 void ce_ipa_get_resource(struct CE_handle *ce,
1396 			 qdf_shared_mem_t **ce_sr,
1397 			 uint32_t *ce_sr_ring_size,
1398 			 qdf_dma_addr_t *ce_reg_paddr)
1399 {
1400 	struct CE_state *CE_state = (struct CE_state *)ce;
1401 	uint32_t ring_loop;
1402 	struct CE_src_desc *ce_desc;
1403 	qdf_dma_addr_t phy_mem_base;
1404 	struct hif_softc *scn = CE_state->scn;
1405 
1406 	if (CE_UNUSED == CE_state->state) {
1407 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1408 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1409 		*ce_sr_ring_size = 0;
1410 		return;
1411 	}
1412 
1413 	/* Update default value for descriptor */
1414 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1415 	     ring_loop++) {
1416 		ce_desc = (struct CE_src_desc *)
1417 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1418 			   ring_loop * (sizeof(struct CE_src_desc)));
1419 		CE_IPA_RING_INIT(ce_desc);
1420 	}
1421 
1422 	/* Get BAR address */
1423 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1424 
1425 	*ce_sr = CE_state->scn->ipa_ce_ring;
1426 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1427 		sizeof(struct CE_src_desc));
1428 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1429 			SR_WR_INDEX_ADDRESS;
1430 }
1431 #endif /* IPA_OFFLOAD */
1432 
1433 #ifdef HIF_CE_DEBUG_DATA_BUF
1434 /**
1435  * hif_dump_desc_data_buf() - record ce descriptor events
1436  * @buf: buffer to copy to
1437  * @pos: Current position till which the buf is filled
1438  * @data: Data to be copied
1439  * @data_len: Length of the data to be copied
1440  */
1441 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1442 					uint8_t *data, uint32_t data_len)
1443 {
1444 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1445 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1446 
1447 	if ((data_len > 0) && data) {
1448 		if (data_len < 16) {
1449 			hex_dump_to_buffer(data,
1450 						CE_DEBUG_DATA_PER_ROW,
1451 						16, 1, buf + pos,
1452 						(ssize_t)PAGE_SIZE - pos,
1453 						false);
1454 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1455 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1456 		} else {
1457 			uint32_t rows = (data_len / 16) + 1;
1458 			uint32_t row = 0;
1459 
1460 			for (row = 0; row < rows; row++) {
1461 				hex_dump_to_buffer(data + (row * 16),
1462 							CE_DEBUG_DATA_PER_ROW,
1463 							16, 1, buf + pos,
1464 							(ssize_t)PAGE_SIZE
1465 							- pos, false);
1466 				pos +=
1467 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1468 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1469 						"\n");
1470 			}
1471 		}
1472 	}
1473 
1474 	return pos;
1475 }
1476 #endif
1477 
1478 /*
1479  * Note: For MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) condition also
1480  * needs to be checked here
1481  */
1482 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1483 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1484 {
1485 	switch (type) {
1486 	case HIF_RX_DESC_POST:
1487 		return "HIF_RX_DESC_POST";
1488 	case HIF_RX_DESC_COMPLETION:
1489 		return "HIF_RX_DESC_COMPLETION";
1490 	case HIF_TX_GATHER_DESC_POST:
1491 		return "HIF_TX_GATHER_DESC_POST";
1492 	case HIF_TX_DESC_POST:
1493 		return "HIF_TX_DESC_POST";
1494 	case HIF_TX_DESC_SOFTWARE_POST:
1495 		return "HIF_TX_DESC_SOFTWARE_POST";
1496 	case HIF_TX_DESC_COMPLETION:
1497 		return "HIF_TX_DESC_COMPLETION";
1498 	case FAST_RX_WRITE_INDEX_UPDATE:
1499 		return "FAST_RX_WRITE_INDEX_UPDATE";
1500 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1501 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1502 	case FAST_TX_WRITE_INDEX_UPDATE:
1503 		return "FAST_TX_WRITE_INDEX_UPDATE";
1504 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1505 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1506 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1507 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1508 	case RESUME_WRITE_INDEX_UPDATE:
1509 		return "RESUME_WRITE_INDEX_UPDATE";
1510 	case HIF_IRQ_EVENT:
1511 		return "HIF_IRQ_EVENT";
1512 	case HIF_CE_TASKLET_ENTRY:
1513 		return "HIF_CE_TASKLET_ENTRY";
1514 	case HIF_CE_TASKLET_RESCHEDULE:
1515 		return "HIF_CE_TASKLET_RESCHEDULE";
1516 	case HIF_CE_TASKLET_EXIT:
1517 		return "HIF_CE_TASKLET_EXIT";
1518 	case HIF_CE_REAP_ENTRY:
1519 		return "HIF_CE_REAP_ENTRY";
1520 	case HIF_CE_REAP_EXIT:
1521 		return "HIF_CE_REAP_EXIT";
1522 	case NAPI_SCHEDULE:
1523 		return "NAPI_SCHEDULE";
1524 	case NAPI_POLL_ENTER:
1525 		return "NAPI_POLL_ENTER";
1526 	case NAPI_COMPLETE:
1527 		return "NAPI_COMPLETE";
1528 	case NAPI_POLL_EXIT:
1529 		return "NAPI_POLL_EXIT";
1530 	case HIF_RX_NBUF_ALLOC_FAILURE:
1531 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1532 	case HIF_RX_NBUF_MAP_FAILURE:
1533 		return "HIF_RX_NBUF_MAP_FAILURE";
1534 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1535 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1536 	default:
1537 		return "invalid";
1538 	}
1539 }
1540 
1541 /**
1542  * hif_dump_desc_event() - dump the selected ce descriptor event
1543  * @scn: hif context
1544  * @buf: buffer into which the event description is copied
1545  * Return: number of bytes written to buf, or a negative error code
1546  */
1547 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1548 {
1549 	struct hif_ce_desc_event *event;
1550 	uint64_t secs, usecs;
1551 	ssize_t len = 0;
1552 	struct ce_desc_hist *ce_hist = NULL;
1553 	struct hif_ce_desc_event *hist_ev = NULL;
1554 
1555 	if (!scn)
1556 		return -EINVAL;
1557 
1558 	ce_hist = &scn->hif_ce_desc_hist;
1559 
1560 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1561 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1562 		qdf_print("Invalid values");
1563 		return -EINVAL;
1564 	}
1565 
1566 	hist_ev =
1567 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1568 
1569 	if (!hist_ev) {
1570 		qdf_print("Low Memory");
1571 		return -EINVAL;
1572 	}
1573 
1574 	event = &hist_ev[ce_hist->hist_index];
1575 
1576 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1577 
1578 	len += snprintf(buf, PAGE_SIZE - len,
1579 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1580 			secs, usecs, ce_hist->hist_id,
1581 			ce_event_type_to_str(event->type),
1582 			event->index, event->memory);
1583 #ifdef HIF_CE_DEBUG_DATA_BUF
1584 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%lu",
1585 			event->actual_data_len);
1586 #endif
1587 
1588 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1589 
1590 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1591 				16, 1, buf + len,
1592 				(ssize_t)PAGE_SIZE - len, false);
1593 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1594 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1595 
1596 #ifdef HIF_CE_DEBUG_DATA_BUF
1597 	if (ce_hist->data_enable[ce_hist->hist_id])
1598 		len = hif_dump_desc_data_buf(buf, len, event->data,
1599 						(event->actual_data_len <
1600 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1601 						event->actual_data_len :
1602 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1603 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1604 
1605 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1606 
1607 	return len;
1608 }
1609 
1610 /*
1611  * hif_input_desc_trace_buf_index() -
1612  * API to set the CE id and CE debug storage buffer index
1613  *
1614  * @scn: hif context
1615  * @buf: data received from the user
1616  * @size: size of the user input
1617  *
1618  * Return total length
1619  */
1620 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1621 					const char *buf, size_t size)
1622 {
1623 	struct ce_desc_hist *ce_hist = NULL;
1624 
1625 	if (!scn)
1626 		return -EINVAL;
1627 
1628 	ce_hist = &scn->hif_ce_desc_hist;
1629 
1630 	if (!size) {
1631 		pr_err("%s: Invalid input buffer.\n", __func__);
1632 		return -EINVAL;
1633 	}
1634 
1635 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1636 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1637 		pr_err("%s: Invalid input value.\n", __func__);
1638 		return -EINVAL;
1639 	}
1640 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1641 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1642 		qdf_print("Invalid values");
1643 		return -EINVAL;
1644 	}
1645 
1646 	return size;
1647 }
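/*
 * Illustrative sketch of feeding this handler: the string is expected to hold
 * "<ce_id> <index>", so a sysfs store callback (a hypothetical wrapper, not
 * part of this file) could simply forward its arguments:
 *
 *	return hif_input_desc_trace_buf_index(scn, buf, size);
 */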
1648 
1649 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1650 
1651 #ifdef HIF_CE_DEBUG_DATA_BUF
1652 /*
1653  * hif_ce_en_desc_hist() -
1654  * API to enable recording the CE desc history
1655  *
1656  * @scn: hif context
1657  * @buf: data received from the user
1658  * @size: size of the user input
1659  *
1660  * Starts recording the ce desc history
1661  *
1662  * Return total length copied
1663  */
1664 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1665 {
1666 	struct ce_desc_hist *ce_hist = NULL;
1667 	uint32_t cfg = 0;
1668 	uint32_t ce_id = 0;
1669 
1670 	if (!scn)
1671 		return -EINVAL;
1672 
1673 	ce_hist = &scn->hif_ce_desc_hist;
1674 
1675 	if (!size) {
1676 		pr_err("%s: Invalid input buffer.\n", __func__);
1677 		return -EINVAL;
1678 	}
1679 
1680 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1681 		   (unsigned int *)&cfg) != 2) {
1682 		pr_err("%s: Invalid input: Enter CE Id<sp><1/0>.\n", __func__);
1683 		return -EINVAL;
1684 	}
1685 	if (ce_id >= CE_COUNT_MAX) {
1686 		qdf_print("Invalid value CE Id");
1687 		return -EINVAL;
1688 	}
1689 
1690 	if (cfg > 1) {
1691 		qdf_print("Invalid values: enter 0 or 1");
1692 		return -EINVAL;
1693 	}
1694 
1695 	if (!ce_hist->hist_ev[ce_id])
1696 		return -EINVAL;
1697 
1698 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1699 	if (cfg == 1) {
1700 		if (ce_hist->data_enable[ce_id] == 1) {
1701 			qdf_debug("Already Enabled");
1702 		} else {
1703 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
1704 							== QDF_STATUS_E_NOMEM){
1705 				ce_hist->data_enable[ce_id] = 0;
1706 				qdf_err("%s:Memory Alloc failed", __func__);
1707 			} else
1708 				ce_hist->data_enable[ce_id] = 1;
1709 		}
1710 	} else if (cfg == 0) {
1711 		if (ce_hist->data_enable[ce_id] == 0) {
1712 			qdf_debug("Already Disabled");
1713 		} else {
1714 			ce_hist->data_enable[ce_id] = 0;
1715 			free_mem_ce_debug_hist_data(scn, ce_id);
1716 		}
1717 	}
1718 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1719 
1720 	return size;
1721 }
1722 
1723 /*
1724  * hif_disp_ce_enable_desc_data_hist() -
1725  * API to display value of data_enable
1726  *
1727  * @scn: hif context
1728  * @buf: buffer into which the per-CE data_enable values are copied
1730  *
1731  * Return total length copied
1732  */
1733 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1734 {
1735 	ssize_t len = 0;
1736 	uint32_t ce_id = 0;
1737 	struct ce_desc_hist *ce_hist = NULL;
1738 
1739 	if (!scn)
1740 		return -EINVAL;
1741 
1742 	ce_hist = &scn->hif_ce_desc_hist;
1743 
1744 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1745 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1746 				ce_id, ce_hist->data_enable[ce_id]);
1747 	}
1748 
1749 	return len;
1750 }
1751 #endif /* HIF_CE_DEBUG_DATA_BUF */
1752 
1753 #ifdef OL_ATH_SMART_LOGGING
1754 #define GUARD_SPACE 10
1755 #define LOG_ID_SZ 4
1756 /*
1757  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
1758  * @src_ring: SRC ring state
1759  * @buf_cur: Current pointer in ring buffer
1760  * @buf_init: Start of the ring buffer
1761  * @buf_sz: Size of the ring buffer
1762  * @skb_sz: Max size of the SKB buffer to be copied
1763  *
1764  * Dumps all the CE SRC ring descriptors and the buffers they point to into
1765  * the given buf; skb_sz is the max buffer size to be copied
1766  *
1767  * Return: Current pointer in ring buffer
1768  */
1769 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
1770 				    uint8_t *buf_cur, uint8_t *buf_init,
1771 				    uint32_t buf_sz, uint32_t skb_sz)
1772 {
1773 	struct CE_src_desc *src_ring_base;
1774 	uint32_t len, entry;
1775 	struct CE_src_desc  *src_desc;
1776 	qdf_nbuf_t nbuf;
1777 	uint32_t available_buf;
1778 
1779 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
1780 	len = sizeof(struct CE_ring_state);
1781 	available_buf = buf_sz - (buf_cur - buf_init);
1782 	if (available_buf < (len + GUARD_SPACE)) {
1783 		buf_cur = buf_init;
1784 	}
1785 
1786 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
1787 	buf_cur += sizeof(struct CE_ring_state);
1788 
1789 	for (entry = 0; entry < src_ring->nentries; entry++) {
1790 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
1791 		nbuf = src_ring->per_transfer_context[entry];
1792 		if (nbuf) {
1793 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1794 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1795 
1796 			len = sizeof(struct CE_src_desc) + skb_cp_len
1797 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1798 			available_buf = buf_sz - (buf_cur - buf_init);
1799 			if (available_buf < (len + GUARD_SPACE)) {
1800 				buf_cur = buf_init;
1801 			}
1802 			qdf_mem_copy(buf_cur, src_desc,
1803 				     sizeof(struct CE_src_desc));
1804 			buf_cur += sizeof(struct CE_src_desc);
1805 
1806 			available_buf = buf_sz - (buf_cur - buf_init);
1807 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
1808 						skb_cp_len);
1809 
1810 			if (skb_cp_len) {
1811 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1812 					     skb_cp_len);
1813 				buf_cur += skb_cp_len;
1814 			}
1815 		} else {
1816 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
1817 			available_buf = buf_sz - (buf_cur - buf_init);
1818 			if (available_buf < (len + GUARD_SPACE)) {
1819 				buf_cur = buf_init;
1820 			}
1821 			qdf_mem_copy(buf_cur, src_desc,
1822 				     sizeof(struct CE_src_desc));
1823 			buf_cur += sizeof(struct CE_src_desc);
1824 			available_buf = buf_sz - (buf_cur - buf_init);
1825 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1826 		}
1827 	}
1828 
1829 	return buf_cur;
1830 }
1831 
1832 /*
1833  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
1834  * @dest_ring: DEST ring state
1835  * @buf_cur: Current pointer in ring buffer
1836  * @buf_init: Start of the ring buffer
1837  * @buf_sz: Size of the ring buffer
1838  * @skb_sz: Max size of the SKB buffer to be copied
1839  *
1840  * Dumps all the CE DEST ring descriptors and the buffers they point to into
1841  * the given buf; skb_sz is the max buffer size to be copied
1842  *
1843  * Return: Current pointer in ring buffer
1844  */
1845 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
1846 				     uint8_t *buf_cur, uint8_t *buf_init,
1847 				     uint32_t buf_sz, uint32_t skb_sz)
1848 {
1849 	struct CE_dest_desc *dest_ring_base;
1850 	uint32_t len, entry;
1851 	struct CE_dest_desc  *dest_desc;
1852 	qdf_nbuf_t nbuf;
1853 	uint32_t available_buf;
1854 
1855 	dest_ring_base =
1856 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1857 
1858 	len = sizeof(struct CE_ring_state);
1859 	available_buf = buf_sz - (buf_cur - buf_init);
1860 	if (available_buf < (len + GUARD_SPACE)) {
1861 		buf_cur = buf_init;
1862 	}
1863 
1864 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
1865 	buf_cur += sizeof(struct CE_ring_state);
1866 
1867 	for (entry = 0; entry < dest_ring->nentries; entry++) {
1868 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
1869 
1870 		nbuf = dest_ring->per_transfer_context[entry];
1871 		if (nbuf) {
1872 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1873 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1874 
1875 			len = sizeof(struct CE_dest_desc) + skb_cp_len
1876 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1877 
1878 			available_buf = buf_sz - (buf_cur - buf_init);
1879 			if (available_buf < (len + GUARD_SPACE)) {
1880 				buf_cur = buf_init;
1881 			}
1882 
1883 			qdf_mem_copy(buf_cur, dest_desc,
1884 				     sizeof(struct CE_dest_desc));
1885 			buf_cur += sizeof(struct CE_dest_desc);
1886 			available_buf = buf_sz - (buf_cur - buf_init);
1887 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
1888 						skb_cp_len);
1889 			if (skb_cp_len) {
1890 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1891 					     skb_cp_len);
1892 				buf_cur += skb_cp_len;
1893 			}
1894 		} else {
1895 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
1896 			available_buf = buf_sz - (buf_cur - buf_init);
1897 			if (available_buf < (len + GUARD_SPACE)) {
1898 				buf_cur = buf_init;
1899 			}
1900 			qdf_mem_copy(buf_cur, dest_desc,
1901 				     sizeof(struct CE_dest_desc));
1902 			buf_cur += sizeof(struct CE_dest_desc);
1903 			available_buf = buf_sz - (buf_cur - buf_init);
1904 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1905 		}
1906 	}
1907 	return buf_cur;
1908 }
1909 
1910 /**
1911  * hif_log_dump_ce() - Copy the CE SRC/DEST ring to buf
1912  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1913  * and the buffers they point to into the given buf
1914  */
1915 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1916 			 uint8_t *buf_init, uint32_t buf_sz,
1917 			 uint32_t ce, uint32_t skb_sz)
1918 {
1919 	struct CE_state *ce_state;
1920 	struct CE_ring_state *src_ring;
1921 	struct CE_ring_state *dest_ring;
1922 
1923 	ce_state = scn->ce_id_to_state[ce];
1924 	src_ring = ce_state->src_ring;
1925 	dest_ring = ce_state->dest_ring;
1926 
1927 	if (src_ring) {
1928 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
1929 					      buf_init, buf_sz, skb_sz);
1930 	} else if (dest_ring) {
1931 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
1932 					       buf_init, buf_sz, skb_sz);
1933 	}
1934 
1935 	return buf_cur;
1936 }
1937 
1938 qdf_export_symbol(hif_log_dump_ce);
1939 #endif /* OL_ATH_SMART_LOGGING */
1940 
1941