xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 static int war1_allow_sleep;
65 /* io32 write workaround */
66 static int hif_ce_war1;
67 
68 /**
69  * hif_ce_war_disable() - disable ce war globally
70  */
71 void hif_ce_war_disable(void)
72 {
73 	hif_ce_war1 = 0;
74 }
75 
76 /**
77  * hif_ce_war_enable() - enable ce war globally
78  */
79 void hif_ce_war_enable(void)
80 {
81 	hif_ce_war1 = 1;
82 }
83 
84 /*
85  * Note: For MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) check needs to
86  * be present here
87  */
88 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
89 
90 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
91 #define CE_DEBUG_DATA_PER_ROW 16
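
/*
 * CE_DEBUG_PRINT_BUF_SIZE(x) is the number of characters hex_dump_to_buffer()
 * needs for x bytes rendered as "xx xx ... xx" with a group size of 1: three
 * characters per byte minus the trailing space. For example, one full
 * 16-byte row takes (16 * 3) - 1 = 47 characters (excluding the NUL).
 */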
92 
93 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
94 
95 int get_next_record_index(qdf_atomic_t *table_index, int array_size)
96 {
97 	int record_index = qdf_atomic_inc_return(table_index);
98 
99 	if (record_index == array_size)
100 		qdf_atomic_sub(array_size, table_index);
101 
102 	while (record_index >= array_size)
103 		record_index -= array_size;
104 
105 	return record_index;
106 }
107 
108 #ifdef HIF_CE_DEBUG_DATA_BUF
109 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
110 {
111 	uint8_t *data = NULL;
112 
113 	if (!event->data) {
114 		hif_err_rl("No ce debug memory allocated");
115 		return;
116 	}
117 
118 	if (event->memory && len > 0)
119 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
120 
121 	event->actual_data_len = 0;
122 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
123 
124 	if (data && len > 0) {
125 		qdf_mem_copy(event->data, data,
126 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
127 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
128 		event->actual_data_len = len;
129 	}
130 }
131 
132 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
133 {
134 	qdf_mem_zero(event,
135 		     offsetof(struct hif_ce_desc_event, data));
136 }
137 #else
138 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
139 {
140 	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
141 }
142 #endif /* HIF_CE_DEBUG_DATA_BUF */
143 
144 #if defined(HIF_RECORD_PADDR)
145 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
146 				 struct hif_ce_desc_event *event,
147 				 qdf_nbuf_t memory)
148 {
149 	if (memory) {
150 		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
151 		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
152 					scn->qdf_dev,
153 					event->dma_addr);
154 
155 		event->virt_to_phy =
156 			virt_to_phys(qdf_nbuf_data(memory));
157 	}
158 }
159 #endif /* HIF_RECORD_PADDR */
160 
161 /**
162  * hif_record_ce_desc_event() - record ce descriptor events
163  * @scn: hif_softc
164  * @ce_id: which ce is the event occurring on
165  * @type: what happened
166  * @descriptor: pointer to the descriptor posted/completed
167  * @memory: virtual address of buffer related to the descriptor
168  * @index: index that the descriptor was/will be at.
 * @len: length of the data to be recorded for the event, if any
169  */
170 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
171 				enum hif_ce_event_type type,
172 				union ce_desc *descriptor,
173 				void *memory, int index,
174 				int len)
175 {
176 	int record_index;
177 	struct hif_ce_desc_event *event;
178 
179 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
180 	struct hif_ce_desc_event *hist_ev = NULL;
181 
182 	if (ce_id < CE_COUNT_MAX)
183 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
184 	else
185 		return;
186 
190 	if (!ce_hist->enable[ce_id])
191 		return;
192 
193 	if (!hist_ev)
194 		return;
195 
196 	record_index = get_next_record_index(
197 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
198 
199 	event = &hist_ev[record_index];
200 
201 	hif_clear_ce_desc_debug_data(event);
202 
203 	event->type = type;
204 	event->time = qdf_get_log_timestamp();
205 
206 	if (descriptor)
207 		qdf_mem_copy(&event->descriptor, descriptor,
208 			     sizeof(union ce_desc));
209 
210 	event->memory = memory;
211 	event->index = index;
212 
213 	if (event->type == HIF_RX_DESC_POST ||
214 	    event->type == HIF_RX_DESC_COMPLETION)
215 		hif_ce_desc_record_rx_paddr(scn, event, memory);
216 
217 	if (ce_hist->data_enable[ce_id])
218 		hif_ce_desc_data_record(event, len);
219 }
220 qdf_export_symbol(hif_record_ce_desc_event);
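
/*
 * Illustrative sketch (not driver code): callers record an event right after
 * touching the ring, e.g. when posting an RX buffer. "desc", "nbuf" and
 * "write_index" below are placeholders for the caller's own state:
 *
 *	hif_record_ce_desc_event(scn, ce_id, HIF_RX_DESC_POST,
 *				 (union ce_desc *)desc, nbuf,
 *				 write_index, 0);
 */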
221 
222 /**
223  * ce_init_ce_desc_event_log() - initialize the ce event log
224  * @ce_id: copy engine id for which we are initializing the log
225  * @size: size of array to dedicate
226  *
227  * Currently the passed size is ignored in favor of a precompiled value.
228  */
229 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
230 {
231 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
232 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
233 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
234 }
235 
236 /**
237  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
238  * @ce_id: copy engine id for which we are deinitializing the log
239  *
240  */
241 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
242 {
243 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
244 
245 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
246 }
247 
248 #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
249 void hif_record_ce_desc_event(struct hif_softc *scn,
250 		int ce_id, enum hif_ce_event_type type,
251 		union ce_desc *descriptor, void *memory,
252 		int index, int len)
253 {
254 }
255 qdf_export_symbol(hif_record_ce_desc_event);
256 
257 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
258 					int size)
259 {
260 }
261 
262 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
263 {
264 }
265 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
266 
267 #ifdef NAPI_YIELD_BUDGET_BASED
268 bool hif_ce_service_should_yield(struct hif_softc *scn,
269 				 struct CE_state *ce_state)
270 {
271 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
272 
273 	/* Set receive_count to MAX_NUM_OF_RECEIVES when this count goes
274 	 * beyond MAX_NUM_OF_RECEIVES, to avoid the NAPI bucket calculation
275 	 * issue. This can happen in fast path handling as processing happens
276 	 * in batches.
277 	 */
278 	if (yield)
279 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
280 
281 	return yield;
282 }
283 #else
284 /**
285  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
286  * @scn: hif context
287  * @ce_state: context of the copy engine being serviced
288  *
289  * Return: true if the service should yield
290  */
291 bool hif_ce_service_should_yield(struct hif_softc *scn,
292 				 struct CE_state *ce_state)
293 {
294 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
295 
296 	time_limit_reached =
297 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
298 
299 	if (!time_limit_reached)
300 		rxpkt_thresh_reached = hif_max_num_receives_reached
301 					(scn, ce_state->receive_count);
302 
303 	/* Set receive_count to MAX_NUM_OF_RECEIVES when this count goes
304 	 * beyond MAX_NUM_OF_RECEIVES, to avoid the NAPI bucket calculation
305 	 * issue. This can happen in fast path handling as processing happens
306 	 * in batches.
307 	 */
308 	if (rxpkt_thresh_reached)
309 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
310 
311 	yield =  time_limit_reached || rxpkt_thresh_reached;
312 
313 	if (yield &&
314 	    ce_state->htt_rx_data &&
315 	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
316 		hif_napi_update_yield_stats(ce_state,
317 					    time_limit_reached,
318 					    rxpkt_thresh_reached);
319 	}
320 
321 	return yield;
322 }
323 qdf_export_symbol(hif_ce_service_should_yield);
324 #endif
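
/*
 * Illustrative sketch (not driver code): a CE reap loop is expected to poll
 * hif_ce_service_should_yield() and stop once it returns true, e.g.:
 *
 *	while (more_completions(ce_state)) {
 *		process_one_completion(ce_state);
 *		if (hif_ce_service_should_yield(scn, ce_state))
 *			break;
 *	}
 *
 * more_completions() and process_one_completion() are placeholders for the
 * caller's own reap logic.
 */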
325 
326 /*
327  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
328  * The caller takes responsibility for any needed locking.
329  */
330 
331 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
332 				   u32 ctrl_addr, unsigned int write_index)
333 {
334 	if (hif_ce_war1) {
335 		void __iomem *indicator_addr;
336 
337 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
338 
339 		if (!war1_allow_sleep
340 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
341 			hif_write32_mb(scn, indicator_addr,
342 				       (CDC_WAR_MAGIC_STR | write_index));
343 		} else {
344 			unsigned long irq_flags;
345 
346 			local_irq_save(irq_flags);
347 			hif_write32_mb(scn, indicator_addr, 1);
348 
349 			/*
350 			 * PCIE write waits for ACK in IPQ8K, there is no
351 			 * need to read back value.
352 			 */
353 			(void)hif_read32_mb(scn, indicator_addr);
354 			/* conservative */
355 			(void)hif_read32_mb(scn, indicator_addr);
356 
357 			CE_SRC_RING_WRITE_IDX_SET(scn,
358 						  ctrl_addr, write_index);
359 
360 			hif_write32_mb(scn, indicator_addr, 0);
361 			local_irq_restore(irq_flags);
362 		}
363 	} else {
364 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
365 	}
366 }
367 
368 qdf_export_symbol(war_ce_src_ring_write_idx_set);
369 
370 int
371 ce_send(struct CE_handle *copyeng,
372 		void *per_transfer_context,
373 		qdf_dma_addr_t buffer,
374 		uint32_t nbytes,
375 		uint32_t transfer_id,
376 		uint32_t flags,
377 		uint32_t user_flag)
378 {
379 	struct CE_state *CE_state = (struct CE_state *)copyeng;
380 	int status;
381 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
382 
383 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
384 	status = hif_state->ce_services->ce_send_nolock(copyeng,
385 			per_transfer_context, buffer, nbytes,
386 			transfer_id, flags, user_flag);
387 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
388 
389 	return status;
390 }
391 qdf_export_symbol(ce_send);
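
/*
 * Illustrative sketch (not driver code): ce_send() takes the CE index lock
 * itself, so a caller only needs a DMA-mapped buffer. "ce_hdl" and "nbuf"
 * are placeholders and error handling is omitted:
 *
 *	qdf_dma_addr_t paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *
 *	if (ce_send(ce_hdl, nbuf, paddr, qdf_nbuf_len(nbuf),
 *		    transfer_id, 0, 0) != QDF_STATUS_SUCCESS)
 *		handle_tx_post_failure(nbuf);
 */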
392 
393 unsigned int ce_sendlist_sizeof(void)
394 {
395 	return sizeof(struct ce_sendlist);
396 }
397 
398 void ce_sendlist_init(struct ce_sendlist *sendlist)
399 {
400 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
401 
402 	sl->num_items = 0;
403 }
404 
405 int
406 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
407 					qdf_dma_addr_t buffer,
408 					uint32_t nbytes,
409 					uint32_t flags,
410 					uint32_t user_flags)
411 {
412 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
413 	unsigned int num_items = sl->num_items;
414 	struct ce_sendlist_item *item;
415 
416 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
417 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
418 		return QDF_STATUS_E_RESOURCES;
419 	}
420 
421 	item = &sl->item[num_items];
422 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
423 	item->data = buffer;
424 	item->u.nbytes = nbytes;
425 	item->flags = flags;
426 	item->user_flags = user_flags;
427 	sl->num_items = num_items + 1;
428 	return QDF_STATUS_SUCCESS;
429 }
430 
431 int
432 ce_sendlist_send(struct CE_handle *copyeng,
433 		 void *per_transfer_context,
434 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
435 {
436 	struct CE_state *CE_state = (struct CE_state *)copyeng;
437 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
438 
439 	return hif_state->ce_services->ce_sendlist_send(copyeng,
440 			per_transfer_context, sendlist, transfer_id);
441 }
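
/*
 * Illustrative sketch (not driver code): typical use of the sendlist API is
 * to initialize a list on the stack, add one or more DMA-mapped fragments,
 * and hand the whole list to ce_sendlist_send(). Buffer names are
 * placeholders; a gather flag (e.g. CE_SEND_FLAG_GATHER) would normally mark
 * all but the last fragment:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag_paddr, frag_len,
 *			    CE_SEND_FLAG_GATHER, 0);
 *	ce_sendlist_buf_add(&sendlist, last_paddr, last_len, 0, 0);
 *	ce_sendlist_send(copyeng, per_transfer_context, &sendlist,
 *			 transfer_id);
 */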
442 
443 #ifndef AH_NEED_TX_DATA_SWAP
444 #define AH_NEED_TX_DATA_SWAP 0
445 #endif
446 
447 /**
448  * ce_batch_send() - send a batch of msdus at once
449  * @ce_tx_hdl : pointer to CE handle
450  * @msdu : list of msdus to be sent
451  * @transfer_id : transfer id
452  * @len : Downloaded length
453  * @sendhead : when set, only the first msdu in the list is sent
454  *
455  * Assumption : Called with a list of MSDUs
456  * Function:
457  * For each msdu in the list
458  * 1. Send each msdu
459  * 2. Increment the write index accordingly.
460  *
461  * Return: list of msdus not sent
462  */
463 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
464 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
465 {
466 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
467 	struct hif_softc *scn = ce_state->scn;
468 	struct CE_ring_state *src_ring = ce_state->src_ring;
469 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
470 	/*  A_target_id_t targid = TARGID(scn);*/
471 
472 	uint32_t nentries_mask = src_ring->nentries_mask;
473 	uint32_t sw_index, write_index;
474 
475 	struct CE_src_desc *src_desc_base =
476 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
477 	uint32_t *src_desc;
478 
479 	struct CE_src_desc lsrc_desc = {0};
480 	int deltacount = 0;
481 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
482 
483 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
484 	sw_index = src_ring->sw_index;
485 	write_index = src_ring->write_index;
486 
487 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
488 
489 	while (msdu) {
490 		tempnext = qdf_nbuf_next(msdu);
491 
492 		if (deltacount < 2) {
493 			if (sendhead)
494 				return msdu;
495 			HIF_ERROR("%s: Out of descriptors", __func__);
496 			src_ring->write_index = write_index;
497 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
498 					write_index);
499 
500 			sw_index = src_ring->sw_index;
501 			write_index = src_ring->write_index;
502 
503 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
504 					sw_index-1);
505 			if (!freelist) {
506 				freelist = msdu;
507 				hfreelist = msdu;
508 			} else {
509 				qdf_nbuf_set_next(freelist, msdu);
510 				freelist = msdu;
511 			}
512 			qdf_nbuf_set_next(msdu, NULL);
513 			msdu = tempnext;
514 			continue;
515 		}
516 
517 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
518 				write_index);
519 
520 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
521 
522 		lsrc_desc.meta_data = transfer_id;
523 		if (len  > msdu->len)
524 			len =  msdu->len;
525 		lsrc_desc.nbytes = len;
526 		/*  Data packet is a byte stream, so disable byte swap */
527 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
528 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
529 
530 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
531 
532 
533 		src_ring->per_transfer_context[write_index] = msdu;
534 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
535 
536 		if (sendhead)
537 			break;
538 		qdf_nbuf_set_next(msdu, NULL);
539 		msdu = tempnext;
540 
541 	}
542 
543 
544 	src_ring->write_index = write_index;
545 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
546 
547 	return hfreelist;
548 }
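
/*
 * Illustrative sketch (not driver code): since ce_batch_send() returns the
 * chain of msdus it could not post, a caller re-queues or frees that
 * remainder. "requeue_msdu_list" is a placeholder:
 *
 *	qdf_nbuf_t not_sent;
 *
 *	not_sent = ce_batch_send(ce_tx_hdl, msdu_list, transfer_id,
 *				 download_len, 0);
 *	if (not_sent)
 *		requeue_msdu_list(not_sent);
 */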
549 
550 /**
551  * ce_update_tx_ring() - Advance sw index.
552  * @ce_tx_hdl : pointer to CE handle
553  * @num_htt_cmpls : htt completions received.
554  *
555  * Function:
556  * Increment the value of sw index of src ring
557  * according to number of htt completions
558  * received.
559  *
560  * Return: void
561  */
562 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
563 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
564 {
565 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
566 	struct CE_ring_state *src_ring = ce_state->src_ring;
567 	uint32_t nentries_mask = src_ring->nentries_mask;
568 	/*
569 	 * Advance the s/w index:
570 	 * This effectively simulates completing the CE ring descriptors
571 	 */
572 	src_ring->sw_index =
573 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
574 				num_htt_cmpls);
575 }
576 #else
577 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
578 {}
579 #endif
580 
581 /**
582  * ce_send_single() - send a single msdu
583  * @ce_tx_hdl : pointer to CE handle
584  * @msdu : msdu to be sent
585  * @transfer_id : transfer id
586  * @len : Downloaded length
587  *
588  * Function:
589  * 1. Send one msdu
590  * 2. Increment write index of src ring accordingly.
591  *
592  * Return: QDF_STATUS: CE sent status
593  */
594 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
595 			  uint32_t transfer_id, u_int32_t len)
596 {
597 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
598 	struct hif_softc *scn = ce_state->scn;
599 	struct CE_ring_state *src_ring = ce_state->src_ring;
600 	uint32_t ctrl_addr = ce_state->ctrl_addr;
601 	/*A_target_id_t targid = TARGID(scn);*/
602 
603 	uint32_t nentries_mask = src_ring->nentries_mask;
604 	uint32_t sw_index, write_index;
605 
606 	struct CE_src_desc *src_desc_base =
607 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
608 	uint32_t *src_desc;
609 
610 	struct CE_src_desc lsrc_desc = {0};
611 	enum hif_ce_event_type event_type;
612 
613 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
614 	sw_index = src_ring->sw_index;
615 	write_index = src_ring->write_index;
616 
617 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
618 					sw_index-1) < 1)) {
619 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
620 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
621 			  write_index, sw_index);
622 		return QDF_STATUS_E_RESOURCES;
623 	}
624 
625 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
626 
627 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
628 
629 	lsrc_desc.meta_data = transfer_id;
630 	lsrc_desc.nbytes = len;
631 	/*  Data packet is a byte stream, so disable byte swap */
632 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
633 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
634 
635 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
636 
637 
638 	src_ring->per_transfer_context[write_index] = msdu;
639 
640 	if (((struct CE_src_desc *)src_desc)->gather)
641 		event_type = HIF_TX_GATHER_DESC_POST;
642 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
643 		event_type = HIF_TX_DESC_SOFTWARE_POST;
644 	else
645 		event_type = HIF_TX_DESC_POST;
646 
647 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
648 				(union ce_desc *)src_desc, msdu,
649 				write_index, len);
650 
651 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
652 
653 	src_ring->write_index = write_index;
654 
655 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
656 
657 	return QDF_STATUS_SUCCESS;
658 }
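
/*
 * Illustrative sketch (not driver code): unlike ce_send(), ce_send_single()
 * reads the fragment physical address from the nbuf itself, so the caller
 * only supplies the nbuf and the download length:
 *
 *	if (ce_send_single(ce_tx_hdl, nbuf, transfer_id, download_len) !=
 *	    QDF_STATUS_SUCCESS)
 *		handle_ring_full(nbuf);
 */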
659 
660 /**
661  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
662  * @copyeng: copy engine handle
663  * @per_recv_context: virtual address of the nbuf
664  * @buffer: physical address of the nbuf
665  *
666  * Return: 0 if the buffer is enqueued
667  */
668 int
669 ce_recv_buf_enqueue(struct CE_handle *copyeng,
670 		    void *per_recv_context, qdf_dma_addr_t buffer)
671 {
672 	struct CE_state *CE_state = (struct CE_state *)copyeng;
673 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
674 
675 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
676 			per_recv_context, buffer);
677 }
678 qdf_export_symbol(ce_recv_buf_enqueue);
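
/*
 * Illustrative sketch (not driver code): an RX replenish path allocates an
 * nbuf, DMA-maps it and posts its physical address; names are placeholders
 * and error handling is omitted:
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
 *
 *	qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE);
 *	ce_recv_buf_enqueue(copyeng, nbuf, QDF_NBUF_CB_PADDR(nbuf));
 */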
679 
680 void
681 ce_send_watermarks_set(struct CE_handle *copyeng,
682 		       unsigned int low_alert_nentries,
683 		       unsigned int high_alert_nentries)
684 {
685 	struct CE_state *CE_state = (struct CE_state *)copyeng;
686 	uint32_t ctrl_addr = CE_state->ctrl_addr;
687 	struct hif_softc *scn = CE_state->scn;
688 
689 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
690 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
691 }
692 
693 void
694 ce_recv_watermarks_set(struct CE_handle *copyeng,
695 		       unsigned int low_alert_nentries,
696 		       unsigned int high_alert_nentries)
697 {
698 	struct CE_state *CE_state = (struct CE_state *)copyeng;
699 	uint32_t ctrl_addr = CE_state->ctrl_addr;
700 	struct hif_softc *scn = CE_state->scn;
701 
702 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
703 				low_alert_nentries);
704 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
705 				high_alert_nentries);
706 }
707 
708 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
709 {
710 	struct CE_state *CE_state = (struct CE_state *)copyeng;
711 	struct CE_ring_state *src_ring = CE_state->src_ring;
712 	unsigned int nentries_mask = src_ring->nentries_mask;
713 	unsigned int sw_index;
714 	unsigned int write_index;
715 
716 	qdf_spin_lock(&CE_state->ce_index_lock);
717 	sw_index = src_ring->sw_index;
718 	write_index = src_ring->write_index;
719 	qdf_spin_unlock(&CE_state->ce_index_lock);
720 
721 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
722 }
723 
724 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
725 {
726 	struct CE_state *CE_state = (struct CE_state *)copyeng;
727 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
728 	unsigned int nentries_mask = dest_ring->nentries_mask;
729 	unsigned int sw_index;
730 	unsigned int write_index;
731 
732 	qdf_spin_lock(&CE_state->ce_index_lock);
733 	sw_index = dest_ring->sw_index;
734 	write_index = dest_ring->write_index;
735 	qdf_spin_unlock(&CE_state->ce_index_lock);
736 
737 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
738 }
739 
740 /*
741  * Guts of ce_completed_recv_next.
742  * The caller takes responsibility for any necessary locking.
743  */
744 int
745 ce_completed_recv_next(struct CE_handle *copyeng,
746 		       void **per_CE_contextp,
747 		       void **per_transfer_contextp,
748 		       qdf_dma_addr_t *bufferp,
749 		       unsigned int *nbytesp,
750 		       unsigned int *transfer_idp, unsigned int *flagsp)
751 {
752 	struct CE_state *CE_state = (struct CE_state *)copyeng;
753 	int status;
754 	struct hif_softc *scn = CE_state->scn;
755 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
756 	struct ce_ops *ce_services;
757 
758 	ce_services = hif_state->ce_services;
759 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
760 	status =
761 		ce_services->ce_completed_recv_next_nolock(CE_state,
762 				per_CE_contextp, per_transfer_contextp, bufferp,
763 					      nbytesp, transfer_idp, flagsp);
764 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
765 
766 	return status;
767 }
768 
769 QDF_STATUS
770 ce_revoke_recv_next(struct CE_handle *copyeng,
771 		    void **per_CE_contextp,
772 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
773 {
774 	struct CE_state *CE_state = (struct CE_state *)copyeng;
775 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
776 
777 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
778 			per_CE_contextp, per_transfer_contextp, bufferp);
779 }
780 
781 QDF_STATUS
782 ce_cancel_send_next(struct CE_handle *copyeng,
783 		void **per_CE_contextp,
784 		void **per_transfer_contextp,
785 		qdf_dma_addr_t *bufferp,
786 		unsigned int *nbytesp,
787 		unsigned int *transfer_idp,
788 		uint32_t *toeplitz_hash_result)
789 {
790 	struct CE_state *CE_state = (struct CE_state *)copyeng;
791 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
792 
793 	return hif_state->ce_services->ce_cancel_send_next
794 		(copyeng, per_CE_contextp, per_transfer_contextp,
795 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
796 }
797 qdf_export_symbol(ce_cancel_send_next);
798 
799 int
800 ce_completed_send_next(struct CE_handle *copyeng,
801 		       void **per_CE_contextp,
802 		       void **per_transfer_contextp,
803 		       qdf_dma_addr_t *bufferp,
804 		       unsigned int *nbytesp,
805 		       unsigned int *transfer_idp,
806 		       unsigned int *sw_idx,
807 		       unsigned int *hw_idx,
808 		       unsigned int *toeplitz_hash_result)
809 {
810 	struct CE_state *CE_state = (struct CE_state *)copyeng;
811 	struct hif_softc *scn = CE_state->scn;
812 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
813 	struct ce_ops *ce_services;
814 	int status;
815 
816 	ce_services = hif_state->ce_services;
817 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
818 	status =
819 		ce_services->ce_completed_send_next_nolock(CE_state,
820 					per_CE_contextp, per_transfer_contextp,
821 					bufferp, nbytesp, transfer_idp, sw_idx,
822 					      hw_idx, toeplitz_hash_result);
823 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
824 
825 	return status;
826 }
827 
828 #ifdef ATH_11AC_TXCOMPACT
829 /* CE engine descriptor reap
830  * Similar to ce_per_engine_service; the only difference is that
831  * ce_per_engine_service handles both receive and reaping of completed
832  * descriptors, while this function only reaps Tx completion descriptors.
833  * The function is called from the threshold reap poll routine
834  * hif_send_complete_check, so it must not contain any receive
835  * functionality.
836  */
837 
838 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
839 {
840 	void *CE_context;
841 	void *transfer_context;
842 	qdf_dma_addr_t buf;
843 	unsigned int nbytes;
844 	unsigned int id;
845 	unsigned int sw_idx, hw_idx;
846 	uint32_t toeplitz_hash_result;
847 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
848 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
849 
850 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
851 		return;
852 
853 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
854 			NULL, NULL, 0, 0);
855 
856 	/* Since this function is called from both user context and
857 	 * tasklet context the spinlock has to lock the bottom halves.
858 	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
859 	 * enabled in TX polling mode. If this is not the case, more
860 	 * bottom halve spin lock changes are needed. Due to data path
861 	 * bottom-half spin lock changes are needed. Due to data path
862 	 * to make minimum change, i.e., only address the issue occurred
863 	 * in this function. The possible negative effect of this minimum
864 	 * change is that, in the future, if some other function will also
865 	 * be opened to let the user context to use, those cases need to be
866 	 * be opened up to the user context, those cases need to be
867 	 * addressed by changing spin_lock to spin_lock_bh as well.
868 
869 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
870 
871 	if (CE_state->send_cb) {
872 		{
873 			struct ce_ops *ce_services = hif_state->ce_services;
874 			/* Pop completed send buffers and call the
875 			 * registered send callback for each
876 			 */
877 			while (ce_services->ce_completed_send_next_nolock
878 				 (CE_state, &CE_context,
879 				  &transfer_context, &buf,
880 				  &nbytes, &id, &sw_idx, &hw_idx,
881 				  &toeplitz_hash_result) ==
882 				  QDF_STATUS_SUCCESS) {
883 				if (ce_id != CE_HTT_H2T_MSG) {
884 					qdf_spin_unlock_bh(
885 						&CE_state->ce_index_lock);
886 					CE_state->send_cb(
887 						(struct CE_handle *)
888 						CE_state, CE_context,
889 						transfer_context, buf,
890 						nbytes, id, sw_idx, hw_idx,
891 						toeplitz_hash_result);
892 					qdf_spin_lock_bh(
893 						&CE_state->ce_index_lock);
894 				} else {
895 					struct HIF_CE_pipe_info *pipe_info =
896 						(struct HIF_CE_pipe_info *)
897 						CE_context;
898 
899 					qdf_spin_lock_bh(&pipe_info->
900 						 completion_freeq_lock);
901 					pipe_info->num_sends_allowed++;
902 					qdf_spin_unlock_bh(&pipe_info->
903 						   completion_freeq_lock);
904 				}
905 			}
906 		}
907 	}
908 
909 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
910 
911 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
912 			NULL, NULL, 0, 0);
913 	Q_TARGET_ACCESS_END(scn);
914 }
915 
916 #endif /*ATH_11AC_TXCOMPACT */
917 
918 /*
919  * ce_engine_service_reg:
920  *
921  * Called from ce_per_engine_service and goes through the regular interrupt
922  * handling that does not involve the WLAN fast path feature.
923  *
924  * Returns void
925  */
926 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
927 {
928 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
929 	uint32_t ctrl_addr = CE_state->ctrl_addr;
930 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
931 	void *CE_context;
932 	void *transfer_context;
933 	qdf_dma_addr_t buf;
934 	unsigned int nbytes;
935 	unsigned int id;
936 	unsigned int flags;
937 	unsigned int more_comp_cnt = 0;
938 	unsigned int more_snd_comp_cnt = 0;
939 	unsigned int sw_idx, hw_idx;
940 	uint32_t toeplitz_hash_result;
941 	uint32_t mode = hif_get_conparam(scn);
942 
943 more_completions:
944 	if (CE_state->recv_cb) {
945 
946 		/* Pop completed recv buffers and call
947 		 * the registered recv callback for each
948 		 */
949 		while (hif_state->ce_services->ce_completed_recv_next_nolock
950 				(CE_state, &CE_context, &transfer_context,
951 				&buf, &nbytes, &id, &flags) ==
952 				QDF_STATUS_SUCCESS) {
953 			qdf_spin_unlock(&CE_state->ce_index_lock);
954 			CE_state->recv_cb((struct CE_handle *)CE_state,
955 					  CE_context, transfer_context, buf,
956 					  nbytes, id, flags);
957 
958 			qdf_spin_lock(&CE_state->ce_index_lock);
959 			/*
960 			 * EV #112693 -
961 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
962 			 * BSoD_0x133 occurred in VHT80 UDP_DL
963 			 * Break out DPC by force if number of loops in
964 			 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
965 			 * to avoid spending too much time in the
966 			 * DPC for each interrupt handling. Schedule another
967 			 * DPC to avoid data loss if we had taken
968 			 * force-break action before. This applies to Windows
969 			 * OS only currently; Linux/MAC OS can extend it to
970 			 * their platform if necessary
971 			 */
972 
973 			/* Break the receive processes by
974 			 * force if force_break set up
975 			 */
976 			if (qdf_unlikely(CE_state->force_break)) {
977 				qdf_atomic_set(&CE_state->rx_pending, 1);
978 				return;
979 			}
980 		}
981 	}
982 
983 	/*
984 	 * Attention: We may experience a potential infinite loop in the below
985 	 * while loop during a sending stress test.
986 	 * Resolve it the same way as the receive case (refer to EV #112693).
987 	 */
988 
989 	if (CE_state->send_cb) {
990 		/* Pop completed send buffers and call
991 		 * the registered send callback for each
992 		 */
993 
994 #ifdef ATH_11AC_TXCOMPACT
995 		while (hif_state->ce_services->ce_completed_send_next_nolock
996 			 (CE_state, &CE_context,
997 			 &transfer_context, &buf, &nbytes,
998 			 &id, &sw_idx, &hw_idx,
999 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1000 
1001 			if (CE_id != CE_HTT_H2T_MSG ||
1002 			    QDF_IS_EPPING_ENABLED(mode)) {
1003 				qdf_spin_unlock(&CE_state->ce_index_lock);
1004 				CE_state->send_cb((struct CE_handle *)CE_state,
1005 						  CE_context, transfer_context,
1006 						  buf, nbytes, id, sw_idx,
1007 						  hw_idx, toeplitz_hash_result);
1008 				qdf_spin_lock(&CE_state->ce_index_lock);
1009 			} else {
1010 				struct HIF_CE_pipe_info *pipe_info =
1011 					(struct HIF_CE_pipe_info *)CE_context;
1012 
1013 				qdf_spin_lock_bh(&pipe_info->
1014 					      completion_freeq_lock);
1015 				pipe_info->num_sends_allowed++;
1016 				qdf_spin_unlock_bh(&pipe_info->
1017 						completion_freeq_lock);
1018 			}
1019 		}
1020 #else                           /*ATH_11AC_TXCOMPACT */
1021 		while (hif_state->ce_services->ce_completed_send_next_nolock
1022 			 (CE_state, &CE_context,
1023 			  &transfer_context, &buf, &nbytes,
1024 			  &id, &sw_idx, &hw_idx,
1025 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1026 			qdf_spin_unlock(&CE_state->ce_index_lock);
1027 			CE_state->send_cb((struct CE_handle *)CE_state,
1028 				  CE_context, transfer_context, buf,
1029 				  nbytes, id, sw_idx, hw_idx,
1030 				  toeplitz_hash_result);
1031 			qdf_spin_lock(&CE_state->ce_index_lock);
1032 		}
1033 #endif /*ATH_11AC_TXCOMPACT */
1034 	}
1035 
1036 more_watermarks:
1037 	if (CE_state->misc_cbs) {
1038 		if (CE_state->watermark_cb &&
1039 				hif_state->ce_services->watermark_int(CE_state,
1040 					&flags)) {
1041 			qdf_spin_unlock(&CE_state->ce_index_lock);
1042 			/* Convert HW IS bits to software flags */
1043 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1044 					CE_state->wm_context, flags);
1045 			qdf_spin_lock(&CE_state->ce_index_lock);
1046 		}
1047 	}
1048 
1049 	/*
1050 	 * Clear the misc interrupts (watermark) that were handled above,
1051 	 * and that will be checked again below.
1052 	 * Clear and check for copy-complete interrupts again, just in case
1053 	 * more copy completions happened while the misc interrupts were being
1054 	 * handled.
1055 	 */
1056 	if (!ce_srng_based(scn)) {
1057 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1058 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1059 					   CE_WATERMARK_MASK |
1060 					   HOST_IS_COPY_COMPLETE_MASK);
1061 		} else {
1062 			qdf_atomic_set(&CE_state->rx_pending, 0);
1063 			hif_err_rl("%s: target access is not allowed",
1064 				   __func__);
1065 			return;
1066 		}
1067 	}
1068 
1069 	/*
1070 	 * Now that per-engine interrupts are cleared, verify that
1071 	 * no recv interrupts arrive while processing send interrupts,
1072 	 * and no recv or send interrupts happened while processing
1073 	 * misc interrupts. Go back and check again. Keep checking until
1074 	 * we find no more events to process.
1075 	 */
1076 	if (CE_state->recv_cb &&
1077 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1078 				CE_state)) {
1079 		if (QDF_IS_EPPING_ENABLED(mode) ||
1080 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1081 			goto more_completions;
1082 		} else {
1083 			if (!ce_srng_based(scn)) {
1084 				HIF_ERROR(
1085 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1086 					__func__,
1087 					CE_state->dest_ring->nentries_mask,
1088 					CE_state->dest_ring->sw_index,
1089 					CE_DEST_RING_READ_IDX_GET(scn,
1090 							  CE_state->ctrl_addr));
1091 			}
1092 		}
1093 	}
1094 
1095 	if (CE_state->send_cb &&
1096 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1097 				CE_state)) {
1098 		if (QDF_IS_EPPING_ENABLED(mode) ||
1099 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1100 			goto more_completions;
1101 		} else {
1102 			if (!ce_srng_based(scn)) {
1103 				HIF_ERROR(
1104 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1105 					__func__,
1106 					CE_state->src_ring->nentries_mask,
1107 					CE_state->src_ring->sw_index,
1108 					CE_SRC_RING_READ_IDX_GET(scn,
1109 							 CE_state->ctrl_addr));
1110 			}
1111 		}
1112 	}
1113 
1114 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1115 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1116 			goto more_watermarks;
1117 	}
1118 
1119 	qdf_atomic_set(&CE_state->rx_pending, 0);
1120 }
1121 
1122 /*
1123  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1124  *
1125  * Invokes registered callbacks for recv_complete,
1126  * send_complete, and watermarks.
1127  *
1128  * Returns: number of messages processed
1129  */
1130 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1131 {
1132 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1133 
1134 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1135 		return CE_state->receive_count;
1136 
1137 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1138 		HIF_ERROR("[premature rc=0]");
1139 		return 0; /* no work done */
1140 	}
1141 
1142 	/* Clear force_break flag and re-initialize receive_count to 0 */
1143 	CE_state->receive_count = 0;
1144 	CE_state->force_break = 0;
1145 	CE_state->ce_service_start_time = sched_clock();
1146 	CE_state->ce_service_yield_time =
1147 		CE_state->ce_service_start_time +
1148 		hif_get_ce_service_max_yield_time(
1149 			(struct hif_opaque_softc *)scn);
1150 
1151 	qdf_spin_lock(&CE_state->ce_index_lock);
1152 
1153 	CE_state->service(scn, CE_id);
1154 
1155 	qdf_spin_unlock(&CE_state->ce_index_lock);
1156 
1157 	if (Q_TARGET_ACCESS_END(scn) < 0)
1158 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
1159 	return CE_state->receive_count;
1160 }
1161 qdf_export_symbol(ce_per_engine_service);
1162 
1163 /*
1164  * Handler for per-engine interrupts on ALL active CEs.
1165  * This is used in cases where the system is sharing a
1166  * single interrupt for all CEs
1167  */
1168 
1169 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1170 {
1171 	int CE_id;
1172 	uint32_t intr_summary;
1173 
1174 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1175 		return;
1176 
1177 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1178 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1179 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1180 
1181 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1182 				qdf_atomic_set(&CE_state->rx_pending, 0);
1183 				ce_per_engine_service(scn, CE_id);
1184 			}
1185 		}
1186 
1187 		Q_TARGET_ACCESS_END(scn);
1188 		return;
1189 	}
1190 
1191 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1192 
1193 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1194 		if (intr_summary & (1 << CE_id))
1195 			intr_summary &= ~(1 << CE_id);
1196 		else
1197 			continue;       /* no intr pending on this CE */
1198 
1199 		ce_per_engine_service(scn, CE_id);
1200 	}
1201 
1202 	Q_TARGET_ACCESS_END(scn);
1203 }
1204 
1205 /* Iterate the CE_state list and disable the compl interrupt
1206  * if it has been registered already.
1207  */
1208 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1209 {
1210 	int CE_id;
1211 
1212 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1213 		return;
1214 
1215 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1216 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1217 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1218 
1219 		/* if the interrupt is currently enabled, disable it */
1220 		if (!CE_state->disable_copy_compl_intr
1221 		    && (CE_state->send_cb || CE_state->recv_cb))
1222 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1223 
1224 		if (CE_state->watermark_cb)
1225 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1226 	}
1227 	Q_TARGET_ACCESS_END(scn);
1228 }
1229 
1230 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1231 {
1232 	int CE_id;
1233 
1234 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1235 		return;
1236 
1237 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1238 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1239 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1240 
1241 		/*
1242 		 * If the CE is supposed to have copy complete interrupts
1243 		 * enabled (i.e. there is a callback registered, and the
1244 		 * "disable" flag is not set), then re-enable the interrupt.
1245 		 */
1246 		if (!CE_state->disable_copy_compl_intr
1247 		    && (CE_state->send_cb || CE_state->recv_cb))
1248 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1249 
1250 		if (CE_state->watermark_cb)
1251 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1252 	}
1253 	Q_TARGET_ACCESS_END(scn);
1254 }
1255 
1256 /**
1257  * ce_send_cb_register(): register completion handler
1258  * @copyeng: CE_state representing the ce we are adding the behavior to
1259  * @fn_ptr: callback that the ce should use when processing tx completions
1260  * @disable_interrupts: whether the completion interrupts should be disabled
1261  *
1262  * Caller should guarantee that no transactions are in progress before
1263  * switching the callback function.
1264  *
1265  * Registers the send context before the fn pointer so that if the cb is valid
1266  * the context should be valid.
1267  *
1268  * Beware that currently this function will enable completion interrupts.
1269  */
1270 void
1271 ce_send_cb_register(struct CE_handle *copyeng,
1272 		    ce_send_cb fn_ptr,
1273 		    void *ce_send_context, int disable_interrupts)
1274 {
1275 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1276 	struct hif_softc *scn;
1277 	struct HIF_CE_state *hif_state;
1278 
1279 	if (!CE_state) {
1280 		HIF_ERROR("%s: Error CE state = NULL", __func__);
1281 		return;
1282 	}
1283 	scn = CE_state->scn;
1284 	hif_state = HIF_GET_CE_STATE(scn);
1285 	if (!hif_state) {
1286 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1287 		return;
1288 	}
1289 	CE_state->send_context = ce_send_context;
1290 	CE_state->send_cb = fn_ptr;
1291 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1292 							disable_interrupts);
1293 }
1294 qdf_export_symbol(ce_send_cb_register);
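
/*
 * Illustrative sketch (not driver code): a send completion handler matching
 * the arguments passed by ce_engine_service_reg() above, registered with
 * interrupts left enabled. "my_tx_done" and "my_ctx" are placeholders:
 *
 *	static void my_tx_done(struct CE_handle *copyeng, void *ce_context,
 *			       void *transfer_context, qdf_dma_addr_t buf,
 *			       unsigned int nbytes, unsigned int transfer_id,
 *			       unsigned int sw_idx, unsigned int hw_idx,
 *			       uint32_t toeplitz_hash_result)
 *	{
 *		reclaim transfer_context (e.g. free the nbuf) here
 *	}
 *
 *	ce_send_cb_register(copyeng, my_tx_done, my_ctx, 0);
 */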
1295 
1296 /**
1297  * ce_recv_cb_register(): register completion handler
1298  * @copyeng: CE_state representing the ce we are adding the behavior to
1299  * @fn_ptr: callback that the ce should use when processing rx completions
1300  * @disable_interrupts: whether the completion interrupts should be disabled
1301  *
1302  * Registers the send context before the fn pointer so that if the cb is valid
1303  * the context should be valid.
1304  *
1305  * Caller should guarantee that no transactions are in progress before
1306  * switching the callback function.
1307  */
1308 void
1309 ce_recv_cb_register(struct CE_handle *copyeng,
1310 		    CE_recv_cb fn_ptr,
1311 		    void *CE_recv_context, int disable_interrupts)
1312 {
1313 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1314 	struct hif_softc *scn;
1315 	struct HIF_CE_state *hif_state;
1316 
1317 	if (!CE_state) {
1318 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
1319 		return;
1320 	}
1321 	scn = CE_state->scn;
1322 	hif_state = HIF_GET_CE_STATE(scn);
1323 	if (!hif_state) {
1324 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1325 		return;
1326 	}
1327 	CE_state->recv_context = CE_recv_context;
1328 	CE_state->recv_cb = fn_ptr;
1329 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1330 							disable_interrupts);
1331 }
1332 qdf_export_symbol(ce_recv_cb_register);
1333 
1334 /**
1335  * ce_watermark_cb_register(): register completion handler
1336  * @copyeng: CE_state representing the ce we are adding the behavior to
1337  * @fn_ptr: callback that the ce should use when processing watermark events
1338  *
1339  * Caller should guarantee that no watermark events are being processed before
1340  * switching the callback function.
1341  */
1342 void
1343 ce_watermark_cb_register(struct CE_handle *copyeng,
1344 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1345 {
1346 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1347 	struct hif_softc *scn = CE_state->scn;
1348 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1349 
1350 	CE_state->watermark_cb = fn_ptr;
1351 	CE_state->wm_context = CE_wm_context;
1352 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1353 							0);
1354 	if (fn_ptr)
1355 		CE_state->misc_cbs = 1;
1356 }
1357 
1358 bool ce_get_rx_pending(struct hif_softc *scn)
1359 {
1360 	int CE_id;
1361 
1362 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1363 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1364 
1365 		if (qdf_atomic_read(&CE_state->rx_pending))
1366 			return true;
1367 	}
1368 
1369 	return false;
1370 }
1371 
1372 /**
1373  * ce_check_rx_pending() - ce_check_rx_pending
1374  * @CE_state: context of the copy engine to check
1375  *
1376  * Return: true if the per_engine_service
1377  *	didn't process all the rx descriptors.
1378  */
1379 bool ce_check_rx_pending(struct CE_state *CE_state)
1380 {
1381 	if (qdf_atomic_read(&CE_state->rx_pending))
1382 		return true;
1383 	else
1384 		return false;
1385 }
1386 qdf_export_symbol(ce_check_rx_pending);
1387 
1388 #ifdef IPA_OFFLOAD
1389 /**
1390  * ce_ipa_get_resource() - get uc resource on copyengine
1391  * @ce: copyengine context
1392  * @ce_sr: copyengine source ring resource info
1393  * @ce_sr_ring_size: copyengine source ring size
1394  * @ce_reg_paddr: copyengine register physical address
1395  *
1396  * The copy engine should release its resources to the micro controller,
1397  * which needs:
1398  *  - Copy engine source descriptor base address
1399  *  - Copy engine source descriptor size
1400  *  - PCI BAR address to access the copy engine registers
1401  *
1402  * Return: None
1403  */
1404 void ce_ipa_get_resource(struct CE_handle *ce,
1405 			 qdf_shared_mem_t **ce_sr,
1406 			 uint32_t *ce_sr_ring_size,
1407 			 qdf_dma_addr_t *ce_reg_paddr)
1408 {
1409 	struct CE_state *CE_state = (struct CE_state *)ce;
1410 	uint32_t ring_loop;
1411 	struct CE_src_desc *ce_desc;
1412 	qdf_dma_addr_t phy_mem_base;
1413 	struct hif_softc *scn = CE_state->scn;
1414 
1415 	if (CE_UNUSED == CE_state->state) {
1416 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1417 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1418 		*ce_sr_ring_size = 0;
1419 		return;
1420 	}
1421 
1422 	/* Update default value for descriptor */
1423 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1424 	     ring_loop++) {
1425 		ce_desc = (struct CE_src_desc *)
1426 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1427 			   ring_loop * (sizeof(struct CE_src_desc)));
1428 		CE_IPA_RING_INIT(ce_desc);
1429 	}
1430 
1431 	/* Get BAR address */
1432 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1433 
1434 	*ce_sr = CE_state->scn->ipa_ce_ring;
1435 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1436 		sizeof(struct CE_src_desc));
1437 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1438 			SR_WR_INDEX_ADDRESS;
1439 }
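
/*
 * Illustrative sketch (not driver code): the IPA uC setup path fetches the
 * ring resources and forwards them to the micro controller; variable names
 * are placeholders:
 *
 *	qdf_shared_mem_t *ce_sr;
 *	uint32_t ce_sr_ring_size;
 *	qdf_dma_addr_t ce_reg_paddr;
 *
 *	ce_ipa_get_resource(ce_hdl, &ce_sr, &ce_sr_ring_size, &ce_reg_paddr);
 */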
1440 #endif /* IPA_OFFLOAD */
1441 
1442 #ifdef HIF_CE_DEBUG_DATA_BUF
1443 /**
1444  * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
1445  * @buf: buffer to copy to
1446  * @pos: Current position till which the buf is filled
1447  * @data: Data to be copied
1448  * @data_len: Length of the data to be copied
1449  */
1450 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1451 					uint8_t *data, uint32_t data_len)
1452 {
1453 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1454 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1455 
1456 	if ((data_len > 0) && data) {
1457 		if (data_len < 16) {
1458 			hex_dump_to_buffer(data,
1459 						CE_DEBUG_DATA_PER_ROW,
1460 						16, 1, buf + pos,
1461 						(ssize_t)PAGE_SIZE - pos,
1462 						false);
1463 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1464 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1465 		} else {
1466 			uint32_t rows = (data_len / 16) + 1;
1467 			uint32_t row = 0;
1468 
1469 			for (row = 0; row < rows; row++) {
1470 				hex_dump_to_buffer(data + (row * 16),
1471 							CE_DEBUG_DATA_PER_ROW,
1472 							16, 1, buf + pos,
1473 							(ssize_t)PAGE_SIZE
1474 							- pos, false);
1475 				pos +=
1476 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1477 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1478 						"\n");
1479 			}
1480 		}
1481 	}
1482 
1483 	return pos;
1484 }
1485 #endif
1486 
1487 /*
1488  * Note: For MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) check needs to
1489  * be present here
1490  */
1491 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1492 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1493 {
1494 	switch (type) {
1495 	case HIF_RX_DESC_POST:
1496 		return "HIF_RX_DESC_POST";
1497 	case HIF_RX_DESC_COMPLETION:
1498 		return "HIF_RX_DESC_COMPLETION";
1499 	case HIF_TX_GATHER_DESC_POST:
1500 		return "HIF_TX_GATHER_DESC_POST";
1501 	case HIF_TX_DESC_POST:
1502 		return "HIF_TX_DESC_POST";
1503 	case HIF_TX_DESC_SOFTWARE_POST:
1504 		return "HIF_TX_DESC_SOFTWARE_POST";
1505 	case HIF_TX_DESC_COMPLETION:
1506 		return "HIF_TX_DESC_COMPLETION";
1507 	case FAST_RX_WRITE_INDEX_UPDATE:
1508 		return "FAST_RX_WRITE_INDEX_UPDATE";
1509 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1510 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1511 	case FAST_TX_WRITE_INDEX_UPDATE:
1512 		return "FAST_TX_WRITE_INDEX_UPDATE";
1513 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1514 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1515 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1516 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1517 	case RESUME_WRITE_INDEX_UPDATE:
1518 		return "RESUME_WRITE_INDEX_UPDATE";
1519 	case HIF_IRQ_EVENT:
1520 		return "HIF_IRQ_EVENT";
1521 	case HIF_CE_TASKLET_ENTRY:
1522 		return "HIF_CE_TASKLET_ENTRY";
1523 	case HIF_CE_TASKLET_RESCHEDULE:
1524 		return "HIF_CE_TASKLET_RESCHEDULE";
1525 	case HIF_CE_TASKLET_EXIT:
1526 		return "HIF_CE_TASKLET_EXIT";
1527 	case HIF_CE_REAP_ENTRY:
1528 		return "HIF_CE_REAP_ENTRY";
1529 	case HIF_CE_REAP_EXIT:
1530 		return "HIF_CE_REAP_EXIT";
1531 	case NAPI_SCHEDULE:
1532 		return "NAPI_SCHEDULE";
1533 	case NAPI_POLL_ENTER:
1534 		return "NAPI_POLL_ENTER";
1535 	case NAPI_COMPLETE:
1536 		return "NAPI_COMPLETE";
1537 	case NAPI_POLL_EXIT:
1538 		return "NAPI_POLL_EXIT";
1539 	case HIF_RX_NBUF_ALLOC_FAILURE:
1540 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1541 	case HIF_RX_NBUF_MAP_FAILURE:
1542 		return "HIF_RX_NBUF_MAP_FAILURE";
1543 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1544 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1545 	default:
1546 		return "invalid";
1547 	}
1548 }
1549 
1550 /**
1551  * hif_dump_desc_event() - dump the selected ce descriptor event
1552  * @scn: hif_softc
1553  * @buf: buffer to which the event is dumped
1554  *
 * Return: number of bytes written to buf, or a negative value on error
1555  */
1556 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1557 {
1558 	struct hif_ce_desc_event *event;
1559 	uint64_t secs, usecs;
1560 	ssize_t len = 0;
1561 	struct ce_desc_hist *ce_hist = NULL;
1562 	struct hif_ce_desc_event *hist_ev = NULL;
1563 
1564 	if (!scn)
1565 		return -EINVAL;
1566 
1567 	ce_hist = &scn->hif_ce_desc_hist;
1568 
1569 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1570 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1571 		qdf_print("Invalid values");
1572 		return -EINVAL;
1573 	}
1574 
1575 	hist_ev =
1576 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1577 
1578 	if (!hist_ev) {
1579 		qdf_print("Low Memory");
1580 		return -EINVAL;
1581 	}
1582 
1583 	event = &hist_ev[ce_hist->hist_index];
1584 
1585 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1586 
1587 	len += snprintf(buf, PAGE_SIZE - len,
1588 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1589 			secs, usecs, ce_hist->hist_id,
1590 			ce_event_type_to_str(event->type),
1591 			event->index, event->memory);
1592 #ifdef HIF_CE_DEBUG_DATA_BUF
1593 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%lu",
1594 			event->actual_data_len);
1595 #endif
1596 
1597 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1598 
1599 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1600 				16, 1, buf + len,
1601 				(ssize_t)PAGE_SIZE - len, false);
1602 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1603 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1604 
1605 #ifdef HIF_CE_DEBUG_DATA_BUF
1606 	if (ce_hist->data_enable[ce_hist->hist_id])
1607 		len = hif_dump_desc_data_buf(buf, len, event->data,
1608 						(event->actual_data_len <
1609 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1610 						event->actual_data_len :
1611 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1612 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1613 
1614 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1615 
1616 	return len;
1617 }
1618 
1619 /*
1620  * hif_input_desc_trace_buf_index() -
1621  * API to set the CE id and CE debug storage buffer index
1622  *
1623  * @scn: hif_softc
1624  * @buf: data received from the user
1625  * @size: size of the input buffer
1626  *
1627  * Return: total length consumed
1628  */
1629 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1630 					const char *buf, size_t size)
1631 {
1632 	struct ce_desc_hist *ce_hist = NULL;
1633 
1634 	if (!scn)
1635 		return -EINVAL;
1636 
1637 	ce_hist = &scn->hif_ce_desc_hist;
1638 
1639 	if (!size) {
1640 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1641 		return -EINVAL;
1642 	}
1643 
1644 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1645 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1646 		qdf_nofl_err("%s: Invalid input value.", __func__);
1647 		return -EINVAL;
1648 	}
1649 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1650 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1651 		qdf_print("Invalid values");
1652 		return -EINVAL;
1653 	}
1654 
1655 	return size;
1656 }
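
/*
 * Illustrative sketch (not driver code): the expected input is two decimal
 * numbers, "<ce_id> <index>", e.g. selecting event index 10 of CE 2:
 *
 *	hif_input_desc_trace_buf_index(scn, "2 10", strlen("2 10"));
 */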
1657 
1658 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1659 
1660 #ifdef HIF_CE_DEBUG_DATA_BUF
1661 /*
1662  * hif_ce_en_desc_hist() -
1663  * API to enable recording the CE desc history
1664  *
1665  * @scn: hif_softc
1666  * @buf: data received from the user
1667  * @size: size of the input buffer
1668  *
1669  * Starts recording the ce desc history
1670  *
1671  * Return: total length consumed
1672  */
1673 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1674 {
1675 	struct ce_desc_hist *ce_hist = NULL;
1676 	uint32_t cfg = 0;
1677 	uint32_t ce_id = 0;
1678 
1679 	if (!scn)
1680 		return -EINVAL;
1681 
1682 	ce_hist = &scn->hif_ce_desc_hist;
1683 
1684 	if (!size) {
1685 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1686 		return -EINVAL;
1687 	}
1688 
1689 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1690 		   (unsigned int *)&cfg) != 2) {
1691 		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
1692 			     __func__);
1693 		return -EINVAL;
1694 	}
1695 	if (ce_id >= CE_COUNT_MAX) {
1696 		qdf_print("Invalid value CE Id");
1697 		return -EINVAL;
1698 	}
1699 
1700 	if (cfg > 1) {
1701 		qdf_print("Invalid values: enter 0 or 1");
1702 		return -EINVAL;
1703 	}
1704 
1705 	if (!ce_hist->hist_ev[ce_id])
1706 		return -EINVAL;
1707 
1708 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1709 	if (cfg == 1) {
1710 		if (ce_hist->data_enable[ce_id] == 1) {
1711 			qdf_debug("Already Enabled");
1712 		} else {
1713 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
1714 							== QDF_STATUS_E_NOMEM){
1715 				ce_hist->data_enable[ce_id] = 0;
1716 				qdf_err("%s:Memory Alloc failed", __func__);
1717 			} else
1718 				ce_hist->data_enable[ce_id] = 1;
1719 		}
1720 	} else if (cfg == 0) {
1721 		if (ce_hist->data_enable[ce_id] == 0) {
1722 			qdf_debug("Already Disabled");
1723 		} else {
1724 			ce_hist->data_enable[ce_id] = 0;
1725 			free_mem_ce_debug_hist_data(scn, ce_id);
1726 		}
1727 	}
1728 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1729 
1730 	return size;
1731 }
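
/*
 * Illustrative sketch (not driver code): the expected input is
 * "<ce_id> <1|0>", e.g. enabling data recording on CE 5:
 *
 *	hif_ce_en_desc_hist(scn, "5 1", strlen("5 1"));
 */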
1732 
1733 /*
1734  * hif_disp_ce_enable_desc_data_hist() -
1735  * API to display value of data_enable
1736  *
1737  * @scn: hif_softc
1738  * @buf: buffer into which the data is copied
1739  *
1740  * Return: total length copied
1742  */
1743 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1744 {
1745 	ssize_t len = 0;
1746 	uint32_t ce_id = 0;
1747 	struct ce_desc_hist *ce_hist = NULL;
1748 
1749 	if (!scn)
1750 		return -EINVAL;
1751 
1752 	ce_hist = &scn->hif_ce_desc_hist;
1753 
1754 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
1755 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
1756 				ce_id, ce_hist->data_enable[ce_id]);
1757 	}
1758 
1759 	return len;
1760 }
1761 #endif /* HIF_CE_DEBUG_DATA_BUF */
1762 
1763 #ifdef OL_ATH_SMART_LOGGING
1764 #define GUARD_SPACE 10
1765 #define LOG_ID_SZ 4
1766 /*
1767  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
1768  * @src_ring: SRC ring state
1769  * @buf_cur: Current pointer in ring buffer
1770  * @buf_init: Start of the ring buffer
1771  * @buf_sz: Size of the ring buffer
1772  * @skb_sz: Max size of the SKB buffer to be copied
1773  *
1774  * Dumps all the CE SRC ring descriptors and the buffers pointed to by them
1775  * into the given buf; skb_sz is the max buffer size to be copied
1776  *
1777  * Return: Current pointer in ring buffer
1778  */
1779 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
1780 				    uint8_t *buf_cur, uint8_t *buf_init,
1781 				    uint32_t buf_sz, uint32_t skb_sz)
1782 {
1783 	struct CE_src_desc *src_ring_base;
1784 	uint32_t len, entry;
1785 	struct CE_src_desc  *src_desc;
1786 	qdf_nbuf_t nbuf;
1787 	uint32_t available_buf;
1788 
1789 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
1790 	len = sizeof(struct CE_ring_state);
1791 	available_buf = buf_sz - (buf_cur - buf_init);
1792 	if (available_buf < (len + GUARD_SPACE)) {
1793 		buf_cur = buf_init;
1794 	}
1795 
1796 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
1797 	buf_cur += sizeof(struct CE_ring_state);
1798 
1799 	for (entry = 0; entry < src_ring->nentries; entry++) {
1800 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
1801 		nbuf = src_ring->per_transfer_context[entry];
1802 		if (nbuf) {
1803 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1804 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1805 
1806 			len = sizeof(struct CE_src_desc) + skb_cp_len
1807 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1808 			available_buf = buf_sz - (buf_cur - buf_init);
1809 			if (available_buf < (len + GUARD_SPACE)) {
1810 				buf_cur = buf_init;
1811 			}
1812 			qdf_mem_copy(buf_cur, src_desc,
1813 				     sizeof(struct CE_src_desc));
1814 			buf_cur += sizeof(struct CE_src_desc);
1815 
1816 			available_buf = buf_sz - (buf_cur - buf_init);
1817 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
1818 						skb_cp_len);
1819 
1820 			if (skb_cp_len) {
1821 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1822 					     skb_cp_len);
1823 				buf_cur += skb_cp_len;
1824 			}
1825 		} else {
1826 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
1827 			available_buf = buf_sz - (buf_cur - buf_init);
1828 			if (available_buf < (len + GUARD_SPACE)) {
1829 				buf_cur = buf_init;
1830 			}
1831 			qdf_mem_copy(buf_cur, src_desc,
1832 				     sizeof(struct CE_src_desc));
1833 			buf_cur += sizeof(struct CE_src_desc);
1834 			available_buf = buf_sz - (buf_cur - buf_init);
1835 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1836 		}
1837 	}
1838 
1839 	return buf_cur;
1840 }
1841 
1842 /*
1843  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
1844  * @dest_ring: DEST ring state
1845  * @buf_cur: Current pointer in ring buffer
1846  * @buf_init: Start of the ring buffer
1847  * @buf_sz: Size of the ring buffer
1848  * @skb_sz: Max size of the SKB buffer to be copied
1849  *
1850  * Dumps all the CE DEST ring descriptors and the buffers pointed to by them
1851  * into the given buf; skb_sz is the max buffer size to be copied
1852  *
1853  * Return: Current pointer in ring buffer
1854  */
1855 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
1856 				     uint8_t *buf_cur, uint8_t *buf_init,
1857 				     uint32_t buf_sz, uint32_t skb_sz)
1858 {
1859 	struct CE_dest_desc *dest_ring_base;
1860 	uint32_t len, entry;
1861 	struct CE_dest_desc  *dest_desc;
1862 	qdf_nbuf_t nbuf;
1863 	uint32_t available_buf;
1864 
1865 	dest_ring_base =
1866 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1867 
1868 	len = sizeof(struct CE_ring_state);
1869 	available_buf = buf_sz - (buf_cur - buf_init);
1870 	if (available_buf < (len + GUARD_SPACE)) {
1871 		buf_cur = buf_init;
1872 	}
1873 
1874 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
1875 	buf_cur += sizeof(struct CE_ring_state);
1876 
1877 	for (entry = 0; entry < dest_ring->nentries; entry++) {
1878 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
1879 
1880 		nbuf = dest_ring->per_transfer_context[entry];
1881 		if (nbuf) {
1882 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
1883 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
1884 
1885 			len = sizeof(struct CE_dest_desc) + skb_cp_len
1886 				+ LOG_ID_SZ + sizeof(skb_cp_len);
1887 
1888 			available_buf = buf_sz - (buf_cur - buf_init);
1889 			if (available_buf < (len + GUARD_SPACE)) {
1890 				buf_cur = buf_init;
1891 			}
1892 
1893 			qdf_mem_copy(buf_cur, dest_desc,
1894 				     sizeof(struct CE_dest_desc));
1895 			buf_cur += sizeof(struct CE_dest_desc);
1896 			available_buf = buf_sz - (buf_cur - buf_init);
1897 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
1898 						skb_cp_len);
1899 			if (skb_cp_len) {
1900 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
1901 					     skb_cp_len);
1902 				buf_cur += skb_cp_len;
1903 			}
1904 		} else {
1905 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
1906 			available_buf = buf_sz - (buf_cur - buf_init);
1907 			if (available_buf < (len + GUARD_SPACE)) {
1908 				buf_cur = buf_init;
1909 			}
1910 			qdf_mem_copy(buf_cur, dest_desc,
1911 				     sizeof(struct CE_dest_desc));
1912 			buf_cur += sizeof(struct CE_dest_desc);
1913 			available_buf = buf_sz - (buf_cur - buf_init);
1914 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
1915 		}
1916 	}
1917 	return buf_cur;
1918 }
1919 
1920 /**
1921  * hif_log_dump_ce() - Copy the CE SRC/DEST ring to buf
1922  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1923  * and buffers pointed to by them into the given buf
1924  */
1925 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1926 			 uint8_t *buf_init, uint32_t buf_sz,
1927 			 uint32_t ce, uint32_t skb_sz)
1928 {
1929 	struct CE_state *ce_state;
1930 	struct CE_ring_state *src_ring;
1931 	struct CE_ring_state *dest_ring;
1932 
1933 	ce_state = scn->ce_id_to_state[ce];
1934 	src_ring = ce_state->src_ring;
1935 	dest_ring = ce_state->dest_ring;
1936 
1937 	if (src_ring) {
1938 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
1939 					      buf_init, buf_sz, skb_sz);
1940 	} else if (dest_ring) {
1941 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
1942 					       buf_init, buf_sz, skb_sz);
1943 	}
1944 
1945 	return buf_cur;
1946 }
1947 
1948 qdf_export_symbol(hif_log_dump_ce);
1949 #endif /* OL_ATH_SMART_LOGGING */
1950 
1951