xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision dae10a5fbc53d54c53c4ba24fa018ad8b1e7c008)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 #ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
65 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
66 	do {                                            		\
67 		x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr); 	\
68 	} while (0)
69 #else
70 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
71 #endif
72 
73 static int war1_allow_sleep;
74 /* io32 write workaround */
75 static int hif_ce_war1;
76 
77 /**
78  * hif_ce_war_disable() - disable the CE WAR globally
79  */
80 void hif_ce_war_disable(void)
81 {
82 	hif_ce_war1 = 0;
83 }
84 
85 /**
86  * hif_ce_war_enable() - enable the CE WAR globally
87  */
88 void hif_ce_war_enable(void)
89 {
90 	hif_ce_war1 = 1;
91 }
92 
93 /*
94  * Note: for MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) condition is the
95  * one that needs to be checked here
96  */
97 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
98 
99 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
100 #define CE_DEBUG_DATA_PER_ROW 16
101 
102 qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
103 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
104 
105 /**
106  * get_next_record_index() - get the next record index
107  * @table_index: atomic index variable to increment
108  * @array_size: array size of the circular buffer
109  *
110  * Increment the atomic index and reserve the value.
111  * Takes care of buffer wrap.
112  * Guaranteed to be thread safe as long as fewer than array_size contexts
113  * try to access the array.  If there are more than array_size contexts
114  * trying to access the array, full locking of the recording process would
115  * be needed to have sane logging.
116  */
117 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
118 {
119 	int record_index = qdf_atomic_inc_return(table_index);
120 
121 	if (record_index == array_size)
122 		qdf_atomic_sub(array_size, table_index);
123 
124 	while (record_index >= array_size)
125 		record_index -= array_size;
126 	return record_index;
127 }
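
/*
 * Illustrative usage sketch (comment only, not compiled): how a circular
 * history buffer is typically indexed with get_next_record_index(). The
 * my_hist array and my_index atomic below are hypothetical names.
 *
 *	static struct hif_ce_desc_event my_hist[HIF_CE_HISTORY_MAX];
 *	static qdf_atomic_t my_index;
 *
 *	int slot = get_next_record_index(&my_index, HIF_CE_HISTORY_MAX);
 *
 *	my_hist[slot].time = qdf_get_log_timestamp();
 */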
128 
129 #if HIF_CE_DEBUG_DATA_BUF
130 /**
131  * hif_ce_desc_data_record() - Record data pointed by the CE descriptor
132  * @event: structure detailing a ce event
133  * @len: length of the data
134  * Return: None
135  */
136 static void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
137 {
138 	uint8_t *data = NULL;
139 
140 	if (!event->data)
141 		return;
142 
143 	if (event->memory && len > 0)
144 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
145 
146 	event->actual_data_len = 0;
147 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
148 
149 	if (data && len > 0) {
150 		qdf_mem_copy(event->data, data,
151 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
152 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
153 		event->actual_data_len = len;
154 	}
155 }
156 #endif
157 
158 /**
159  * hif_record_ce_desc_event() - record ce descriptor events
160  * @scn: hif_softc
161  * @ce_id: which ce is the event occurring on
162  * @type: what happened
163  * @descriptor: pointer to the descriptor posted/completed
164  * @memory: virtual address of buffer related to the descriptor
165  * @index: index that the descriptor was/will be at.
166  */
167 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
168 				enum hif_ce_event_type type,
169 				union ce_desc *descriptor,
170 				void *memory, int index,
171 				int len)
172 {
173 	int record_index;
174 	struct hif_ce_desc_event *event;
175 
176 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
177 	struct hif_ce_desc_event *hist_ev = NULL;
178 
179 	if (ce_id >= CE_COUNT_MAX)
180 		return;
181 
182 	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
186 
187 	if (!ce_hist->enable[ce_id])
188 		return;
189 
190 	if (!hist_ev)
191 		return;
192 
193 	record_index = get_next_record_index(
194 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
195 
196 	event = &hist_ev[record_index];
197 
198 	event->type = type;
199 	event->time = qdf_get_log_timestamp();
200 
201 	if (descriptor)
202 		qdf_mem_copy(&event->descriptor, descriptor,
203 			     sizeof(union ce_desc));
204 	else
205 		qdf_mem_zero(&event->descriptor, sizeof(union ce_desc));
206 
207 	event->memory = memory;
208 	event->index = index;
209 
210 #if HIF_CE_DEBUG_DATA_BUF
211 	if (ce_hist->data_enable[ce_id])
212 		hif_ce_desc_data_record(event, len);
213 #endif
214 }
215 qdf_export_symbol(hif_record_ce_desc_event);
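
/*
 * Illustrative caller sketch (comment only): recording an index-update
 * event with no descriptor, mirroring the calls made elsewhere in this
 * file; ce_state and sw_index are assumed to exist in the caller.
 *
 *	hif_record_ce_desc_event(scn, ce_state->id,
 *				 FAST_TX_SOFTWARE_INDEX_UPDATE,
 *				 NULL, NULL, sw_index, 0);
 */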
216 
217 /**
218  * ce_init_ce_desc_event_log() - initialize the ce event log
219  * @ce_id: copy engine id for which we are initializing the log
220  * @size: size of array to dedicate
221  *
222  * Currently the passed size is ignored in favor of a precompiled value.
223  */
224 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
225 {
226 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
227 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
228 	qdf_mutex_create(&ce_dbg_datamem_lock[ce_id]);
229 }
230 
231 /**
232  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
233  * @ce_id: copy engine id for which we are deinitializing the log
234  *
235  */
236 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
237 {
238 	qdf_mutex_destroy(&ce_dbg_datamem_lock[ce_id]);
239 }
240 
241 #else /* Note: For MCL, (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
242 void hif_record_ce_desc_event(struct hif_softc *scn,
243 		int ce_id, enum hif_ce_event_type type,
244 		union ce_desc *descriptor, void *memory,
245 		int index, int len)
246 {
247 }
248 qdf_export_symbol(hif_record_ce_desc_event);
249 
250 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
251 					int size)
252 {
253 }
254 
255 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
256 {
257 }
258 #endif /* Note: for MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
259 
260 #ifdef NAPI_YIELD_BUDGET_BASED
261 bool hif_ce_service_should_yield(struct hif_softc *scn,
262 				 struct CE_state *ce_state)
263 {
264 	bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count);
265 
266 	return yield;
266 }
267 #else
268 /**
269  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
270  * @scn: hif context
271  * @ce_state: context of the copy engine being serviced
272  *
273  * Return: true if the service should yield
274  */
275 bool hif_ce_service_should_yield(struct hif_softc *scn,
276 				 struct CE_state *ce_state)
277 {
278 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
279 
280 	time_limit_reached =
281 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
282 
283 	if (!time_limit_reached)
284 		rxpkt_thresh_reached = hif_max_num_receives_reached
285 					(scn, ce_state->receive_count);
286 
287 	yield =  time_limit_reached || rxpkt_thresh_reached;
288 
289 	if (yield && ce_state->htt_rx_data)
290 		hif_napi_update_yield_stats(ce_state,
291 					    time_limit_reached,
292 					    rxpkt_thresh_reached);
293 	return yield;
294 }
295 #endif
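
/*
 * Illustrative use (comment only), matching the Rx service loops below:
 *
 *	if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
 *		ce_state->force_break = 1;
 *		qdf_atomic_set(&ce_state->rx_pending, 1);
 *		return;
 *	}
 */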
296 
297 /*
298  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
299  * The caller takes responsibility for any needed locking.
300  */
301 
302 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
303 				   u32 ctrl_addr, unsigned int write_index)
304 {
305 	if (hif_ce_war1) {
306 		void __iomem *indicator_addr;
307 
308 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
309 
310 		if (!war1_allow_sleep
311 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
312 			hif_write32_mb(scn, indicator_addr,
313 				       (CDC_WAR_MAGIC_STR | write_index));
314 		} else {
315 			unsigned long irq_flags;
316 
317 			local_irq_save(irq_flags);
318 			hif_write32_mb(scn, indicator_addr, 1);
319 
320 			/*
321 			 * PCIE write waits for ACK in IPQ8K, there is no
322 			 * need to read back value.
323 			 */
324 			(void)hif_read32_mb(scn, indicator_addr);
325 			/* conservative */
326 			(void)hif_read32_mb(scn, indicator_addr);
327 
328 			CE_SRC_RING_WRITE_IDX_SET(scn,
329 						  ctrl_addr, write_index);
330 
331 			hif_write32_mb(scn, indicator_addr, 0);
332 			local_irq_restore(irq_flags);
333 		}
334 	} else {
335 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
336 	}
337 }
338 
339 qdf_export_symbol(war_ce_src_ring_write_idx_set);
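
/*
 * Illustrative use (comment only), as done by the Tx post paths below:
 * publish the shadow write index to hardware only after the ring entry
 * has been fully written.
 *
 *	src_ring->write_index = write_index;
 *	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
 */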
340 
341 int
342 ce_send(struct CE_handle *copyeng,
343 		void *per_transfer_context,
344 		qdf_dma_addr_t buffer,
345 		uint32_t nbytes,
346 		uint32_t transfer_id,
347 		uint32_t flags,
348 		uint32_t user_flag)
349 {
350 	struct CE_state *CE_state = (struct CE_state *)copyeng;
351 	int status;
352 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
353 
354 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
355 	status = hif_state->ce_services->ce_send_nolock(copyeng,
356 			per_transfer_context, buffer, nbytes,
357 			transfer_id, flags, user_flag);
358 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
359 
360 	return status;
361 }
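
/*
 * Illustrative caller sketch (comment only); the handle, context and DMA
 * address names are hypothetical, and flags/user_flag of 0 are assumed.
 * Freeing the nbuf on failure is only one possible recovery choice.
 *
 *	status = ce_send(ce_hdl, per_transfer_ctx, frag_paddr, frag_len,
 *			 transfer_id, 0, 0);
 *	if (status != QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free(nbuf);
 */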
362 
363 unsigned int ce_sendlist_sizeof(void)
364 {
365 	return sizeof(struct ce_sendlist);
366 }
367 
368 void ce_sendlist_init(struct ce_sendlist *sendlist)
369 {
370 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
371 
372 	sl->num_items = 0;
373 }
374 
375 int
376 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
377 					qdf_dma_addr_t buffer,
378 					uint32_t nbytes,
379 					uint32_t flags,
380 					uint32_t user_flags)
381 {
382 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
383 	unsigned int num_items = sl->num_items;
384 	struct ce_sendlist_item *item;
385 
386 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
387 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
388 		return QDF_STATUS_E_RESOURCES;
389 	}
390 
391 	item = &sl->item[num_items];
392 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
393 	item->data = buffer;
394 	item->u.nbytes = nbytes;
395 	item->flags = flags;
396 	item->user_flags = user_flags;
397 	sl->num_items = num_items + 1;
398 	return QDF_STATUS_SUCCESS;
399 }
400 
401 int
402 ce_sendlist_send(struct CE_handle *copyeng,
403 		 void *per_transfer_context,
404 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
405 {
406 	struct CE_state *CE_state = (struct CE_state *)copyeng;
407 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
408 
409 	return hif_state->ce_services->ce_sendlist_send(copyeng,
410 			per_transfer_context, sendlist, transfer_id);
411 }
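
/*
 * Illustrative sendlist usage sketch (comment only); the fragment and
 * handle variables are hypothetical:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag_paddr, frag_len, 0, 0);
 *	ce_sendlist_send(ce_hdl, per_transfer_ctx, &sendlist, transfer_id);
 */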
412 
413 /*
414  * TODO: Fast path implementation must be decoupled from the generic service
415  * APIs shared between SRNG and Legacy CE implementations and must be moved
416  * to ce_service_legacy.c.
417  * CR-2315620
418  */
419 #ifdef WLAN_FEATURE_FASTPATH
420 #ifdef QCA_WIFI_3_0
421 static inline void
422 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
423 		      uint64_t dma_addr,
424 		      uint32_t user_flags)
425 {
426 	shadow_src_desc->buffer_addr_hi =
427 			(uint32_t)((dma_addr >> 32) & 0x1F);
428 	user_flags |= shadow_src_desc->buffer_addr_hi;
429 	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
430 			sizeof(uint32_t));
431 }
432 #else
433 static inline void
434 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
435 		      uint64_t dma_addr,
436 		      uint32_t user_flags)
437 {
438 }
439 #endif
440 
441 #define SLOTS_PER_DATAPATH_TX 2
442 
443 /**
444  * ce_send_fast() - CE layer Tx buffer posting function
445  * @copyeng: copy engine handle
446  * @msdu: msdu to be sent
447  * @transfer_id: transfer_id
448  * @download_len: packet download length
449  *
450  * Assumption : Called with an array of MSDU's
451  * Function:
452  * For each msdu in the array
453  * 1. Check no. of available entries
454  * 2. Create src ring entries (allocated in consistent memory)
455  * 3. Write index to h/w
456  *
457  * Return: No. of packets that could be sent
458  */
459 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
460 		 unsigned int transfer_id, uint32_t download_len)
461 {
462 	struct CE_state *ce_state = (struct CE_state *)copyeng;
463 	struct hif_softc *scn = ce_state->scn;
464 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
465 	struct CE_ring_state *src_ring = ce_state->src_ring;
466 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
467 	unsigned int nentries_mask = src_ring->nentries_mask;
468 	unsigned int write_index;
469 	unsigned int sw_index;
470 	unsigned int frag_len;
471 	uint64_t dma_addr;
472 	uint32_t user_flags;
473 	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
474 	bool ok_to_send = true;
475 
476 	/*
477 	 * Create a log assuming the call will go through, and if not, we would
478 	 * add an error trace as well.
479 	 * Please add the same failure log for any additional error paths.
480 	 */
481 	DPTRACE(qdf_dp_trace(msdu,
482 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
483 			QDF_TRACE_DEFAULT_PDEV_ID,
484 			qdf_nbuf_data_addr(msdu),
485 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
486 
487 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
488 
489 	/*
490 	 * Request runtime PM resume if it has already suspended and make
491 	 * sure there is no PCIe link access.
492 	 */
493 	if (hif_pm_runtime_get(hif_hdl) != 0)
494 		ok_to_send = false;
495 
496 	if (ok_to_send) {
497 		Q_TARGET_ACCESS_BEGIN(scn);
498 		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
499 	}
500 
501 	write_index = src_ring->write_index;
502 	sw_index = src_ring->sw_index;
503 	hif_record_ce_desc_event(scn, ce_state->id,
504 				FAST_TX_SOFTWARE_INDEX_UPDATE,
505 				NULL, NULL, sw_index, 0);
506 
507 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
508 			 < SLOTS_PER_DATAPATH_TX)) {
509 		hif_err_rl("Source ring full, required %d, available %d",
510 			   SLOTS_PER_DATAPATH_TX,
511 			   CE_RING_DELTA(nentries_mask, write_index,
512 					 sw_index - 1));
513 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
514 		if (ok_to_send)
515 			Q_TARGET_ACCESS_END(scn);
516 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
517 
518 		DPTRACE(qdf_dp_trace(NULL,
519 				QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
520 				QDF_TRACE_DEFAULT_PDEV_ID,
521 				NULL, 0, QDF_TX));
522 
523 		return 0;
524 	}
525 
526 	{
527 		struct CE_src_desc *src_ring_base =
528 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
529 		struct CE_src_desc *shadow_base =
530 			(struct CE_src_desc *)src_ring->shadow_base;
531 		struct CE_src_desc *src_desc =
532 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
533 		struct CE_src_desc *shadow_src_desc =
534 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
535 
536 		hif_pm_runtime_get_noresume(hif_hdl);
537 
538 		/*
539 		 * First fill out the ring descriptor for the HTC HTT frame
540 		 * header. These are uncached writes. Should we use a local
541 		 * structure instead?
542 		 */
543 		/* The HTT/HTC header can be passed as an argument */
544 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
545 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
546 							  0xFFFFFFFF);
547 		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
548 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
549 		shadow_src_desc->meta_data = transfer_id;
550 		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
551 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
552 		download_len -= shadow_src_desc->nbytes;
553 		/*
554 		 * HTC HTT header is a word stream, so byte swap if CE byte
555 		 * swap enabled
556 		 */
557 		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
558 					CE_ATTR_BYTE_SWAP_DATA) != 0);
559 		/* For the first one, it still does not need to write */
560 		shadow_src_desc->gather = 1;
561 		*src_desc = *shadow_src_desc;
562 		/* By default we could initialize the transfer context to this
563 		 * value
564 		 */
565 		src_ring->per_transfer_context[write_index] =
566 			CE_SENDLIST_ITEM_CTXT;
567 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
568 
569 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
570 		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
571 		/*
572 		 * Now fill out the ring descriptor for the actual data
573 		 * packet
574 		 */
575 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
576 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
577 							  0xFFFFFFFF);
578 		/*
579 		 * Clear packet offset for all but the first CE desc.
580 		 */
581 		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
582 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
583 		shadow_src_desc->meta_data = transfer_id;
584 
585 		/* get actual packet length */
586 		frag_len = qdf_nbuf_get_frag_len(msdu, 1);
587 
588 		/* download remaining bytes of payload */
589 		shadow_src_desc->nbytes =  download_len;
590 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
591 		if (shadow_src_desc->nbytes > frag_len)
592 			shadow_src_desc->nbytes = frag_len;
593 
594 		/*  Data packet is a byte stream, so disable byte swap */
595 		shadow_src_desc->byte_swap = 0;
596 		/* For the last one, gather is not set */
597 		shadow_src_desc->gather    = 0;
598 		*src_desc = *shadow_src_desc;
599 		src_ring->per_transfer_context[write_index] = msdu;
600 
601 		hif_record_ce_desc_event(scn, ce_state->id, type,
602 					(union ce_desc *)src_desc,
603 				src_ring->per_transfer_context[write_index],
604 					write_index, shadow_src_desc->nbytes);
605 
606 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
607 
608 		DPTRACE(qdf_dp_trace(msdu,
609 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
610 			QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(msdu),
611 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
612 	}
613 
614 	src_ring->write_index = write_index;
615 
616 	if (ok_to_send) {
617 		if (qdf_likely(ce_state->state == CE_RUNNING)) {
618 			type = FAST_TX_WRITE_INDEX_UPDATE;
619 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
620 				write_index);
621 			Q_TARGET_ACCESS_END(scn);
622 		} else
623 			ce_state->state = CE_PENDING;
624 		hif_pm_runtime_put(hif_hdl);
625 	}
626 
627 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
628 
629 	/* sent 1 packet */
630 	return 1;
631 }
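
/*
 * Illustrative caller sketch (comment only): ce_send_fast() returns the
 * number of packets posted (1 or 0); on a 0 return the packet was not
 * posted (source ring full) and the caller keeps ownership of the msdu.
 * Freeing it here is a hypothetical recovery choice.
 *
 *	if (ce_send_fast(ce_hdl, msdu, transfer_id, download_len) == 0)
 *		qdf_nbuf_free(msdu);
 */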
632 
633 /**
634  * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
635  * fastpath is enabled.
636  * @ce_state: handle to copy engine
637  *
638  * Return: true if fastpath handler is registered for datapath CE.
639  */
640 static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
641 {
642 	if (ce_state->fastpath_handler)
643 		return true;
644 	else
645 		return false;
646 }
647 
648 #else
649 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
650 {
651 	return false;
652 }
653 #endif /* WLAN_FEATURE_FASTPATH */
654 
655 #ifndef AH_NEED_TX_DATA_SWAP
656 #define AH_NEED_TX_DATA_SWAP 0
657 #endif
658 
659 /**
660  * ce_batch_send() - send a bunch of msdus at once
661  * @ce_tx_hdl : pointer to CE handle
662  * @msdu : list of msdus to be sent
663  * @transfer_id : transfer id
664  * @len : Downloaded length
665  * @sendhead : sendhead
666  *
667  * Assumption : Called with an array of MSDU's
668  * Function:
669  * For each msdu in the array
670  * 1. Send each msdu
671  * 2. Increment the write index accordingly.
672  *
673  * Return: list of msdus not sent
674  */
675 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
676 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
677 {
678 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
679 	struct hif_softc *scn = ce_state->scn;
680 	struct CE_ring_state *src_ring = ce_state->src_ring;
681 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
682 	/*  A_target_id_t targid = TARGID(scn);*/
683 
684 	uint32_t nentries_mask = src_ring->nentries_mask;
685 	uint32_t sw_index, write_index;
686 
687 	struct CE_src_desc *src_desc_base =
688 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
689 	uint32_t *src_desc;
690 
691 	struct CE_src_desc lsrc_desc = {0};
692 	int deltacount = 0;
693 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
694 
695 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
696 	sw_index = src_ring->sw_index;
697 	write_index = src_ring->write_index;
698 
699 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
700 
701 	while (msdu) {
702 		tempnext = qdf_nbuf_next(msdu);
703 
704 		if (deltacount < 2) {
705 			if (sendhead)
706 				return msdu;
707 			HIF_ERROR("%s: Out of descriptors", __func__);
708 			src_ring->write_index = write_index;
709 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
710 					write_index);
711 
712 			sw_index = src_ring->sw_index;
713 			write_index = src_ring->write_index;
714 
715 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
716 					sw_index-1);
717 			if (freelist == NULL) {
718 				freelist = msdu;
719 				hfreelist = msdu;
720 			} else {
721 				qdf_nbuf_set_next(freelist, msdu);
722 				freelist = msdu;
723 			}
724 			qdf_nbuf_set_next(msdu, NULL);
725 			msdu = tempnext;
726 			continue;
727 		}
728 
729 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
730 				write_index);
731 
732 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
733 
734 		lsrc_desc.meta_data = transfer_id;
735 		if (len  > msdu->len)
736 			len =  msdu->len;
737 		lsrc_desc.nbytes = len;
738 		/*  Data packet is a byte stream, so disable byte swap */
739 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
740 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
741 
742 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
743 
744 
745 		src_ring->per_transfer_context[write_index] = msdu;
746 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
747 
748 		if (sendhead)
749 			break;
750 		qdf_nbuf_set_next(msdu, NULL);
751 		msdu = tempnext;
752 
753 	}
754 
755 
756 	src_ring->write_index = write_index;
757 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
758 
759 	return hfreelist;
760 }
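
/*
 * Illustrative caller sketch (comment only): any msdus that could not be
 * posted come back as a linked list; requeue_msdu() is a hypothetical
 * caller-side helper.
 *
 *	unsent = ce_batch_send(ce_tx_hdl, msdu_list, transfer_id,
 *			       download_len, 0);
 *	while (unsent) {
 *		next = qdf_nbuf_next(unsent);
 *		requeue_msdu(unsent);
 *		unsent = next;
 *	}
 */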
761 
762 /**
763  * ce_update_tx_ring() - Advance sw index.
764  * @ce_tx_hdl : pointer to CE handle
765  * @num_htt_cmpls : htt completions received.
766  *
767  * Function:
768  * Increment the value of sw index of src ring
769  * according to number of htt completions
770  * received.
771  *
772  * Return: void
773  */
774 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
775 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
776 {
777 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
778 	struct CE_ring_state *src_ring = ce_state->src_ring;
779 	uint32_t nentries_mask = src_ring->nentries_mask;
780 	/*
781 	 * Advance the s/w index:
782 	 * This effectively simulates completing the CE ring descriptors
783 	 */
784 	src_ring->sw_index =
785 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
786 				num_htt_cmpls);
787 }
788 #else
789 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
790 {}
791 #endif
792 
793 /**
794  * ce_send_single() - send a single msdu
795  * @ce_tx_hdl : pointer to CE handle
796  * @msdu : msdu to be sent
797  * @transfer_id : transfer id
798  * @len : Downloaded length
799  *
800  * Function:
801  * 1. Send one msdu
802  * 2. Increment the write index of the src ring accordingly.
803  *
804  * Return: int: CE sent status
805  */
806 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
807 		uint32_t transfer_id, u_int32_t len)
808 {
809 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
810 	struct hif_softc *scn = ce_state->scn;
811 	struct CE_ring_state *src_ring = ce_state->src_ring;
812 	uint32_t ctrl_addr = ce_state->ctrl_addr;
813 	/*A_target_id_t targid = TARGID(scn);*/
814 
815 	uint32_t nentries_mask = src_ring->nentries_mask;
816 	uint32_t sw_index, write_index;
817 
818 	struct CE_src_desc *src_desc_base =
819 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
820 	uint32_t *src_desc;
821 
822 	struct CE_src_desc lsrc_desc = {0};
823 	enum hif_ce_event_type event_type;
824 
825 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
826 	sw_index = src_ring->sw_index;
827 	write_index = src_ring->write_index;
828 
829 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
830 					sw_index-1) < 1)) {
831 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
832 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
833 			  write_index, sw_index);
834 		return 1;
835 	}
836 
837 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
838 
839 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
840 
841 	lsrc_desc.meta_data = transfer_id;
842 	lsrc_desc.nbytes = len;
843 	/*  Data packet is a byte stream, so disable byte swap */
844 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
845 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
846 
847 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
848 
849 
850 	src_ring->per_transfer_context[write_index] = msdu;
851 
852 	if (((struct CE_src_desc *)src_desc)->gather)
853 		event_type = HIF_TX_GATHER_DESC_POST;
854 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
855 		event_type = HIF_TX_DESC_SOFTWARE_POST;
856 	else
857 		event_type = HIF_TX_DESC_POST;
858 
859 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
860 				(union ce_desc *)src_desc, msdu,
861 				write_index, len);
862 
863 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
864 
865 	src_ring->write_index = write_index;
866 
867 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
868 
869 	return QDF_STATUS_SUCCESS;
870 }
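
/*
 * Illustrative caller sketch (comment only): a non-zero return means no
 * source ring entry was available and the caller still owns the msdu;
 * freeing it is a hypothetical recovery choice.
 *
 *	if (ce_send_single(ce_tx_hdl, msdu, transfer_id, pkt_len) !=
 *	    QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free(msdu);
 */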
871 
872 /**
873  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
874  * @copyeng: copy engine handle
875  * @per_recv_context: virtual address of the nbuf
876  * @buffer: physical address of the nbuf
877  *
878  * Return: 0 if the buffer is enqueued
879  */
880 int
881 ce_recv_buf_enqueue(struct CE_handle *copyeng,
882 		    void *per_recv_context, qdf_dma_addr_t buffer)
883 {
884 	struct CE_state *CE_state = (struct CE_state *)copyeng;
885 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
886 
887 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
888 			per_recv_context, buffer);
889 }
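
/*
 * Illustrative caller sketch (comment only): posting an Rx nbuf that has
 * already been DMA-mapped; the nbuf variable is hypothetical.
 *
 *	status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf,
 *				     qdf_nbuf_get_frag_paddr(nbuf, 0));
 */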
890 
891 void
892 ce_send_watermarks_set(struct CE_handle *copyeng,
893 		       unsigned int low_alert_nentries,
894 		       unsigned int high_alert_nentries)
895 {
896 	struct CE_state *CE_state = (struct CE_state *)copyeng;
897 	uint32_t ctrl_addr = CE_state->ctrl_addr;
898 	struct hif_softc *scn = CE_state->scn;
899 
900 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
901 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
902 }
903 
904 void
905 ce_recv_watermarks_set(struct CE_handle *copyeng,
906 		       unsigned int low_alert_nentries,
907 		       unsigned int high_alert_nentries)
908 {
909 	struct CE_state *CE_state = (struct CE_state *)copyeng;
910 	uint32_t ctrl_addr = CE_state->ctrl_addr;
911 	struct hif_softc *scn = CE_state->scn;
912 
913 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
914 				low_alert_nentries);
915 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
916 				high_alert_nentries);
917 }
918 
919 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
920 {
921 	struct CE_state *CE_state = (struct CE_state *)copyeng;
922 	struct CE_ring_state *src_ring = CE_state->src_ring;
923 	unsigned int nentries_mask = src_ring->nentries_mask;
924 	unsigned int sw_index;
925 	unsigned int write_index;
926 
927 	qdf_spin_lock(&CE_state->ce_index_lock);
928 	sw_index = src_ring->sw_index;
929 	write_index = src_ring->write_index;
930 	qdf_spin_unlock(&CE_state->ce_index_lock);
931 
932 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
933 }
934 
935 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
936 {
937 	struct CE_state *CE_state = (struct CE_state *)copyeng;
938 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
939 	unsigned int nentries_mask = dest_ring->nentries_mask;
940 	unsigned int sw_index;
941 	unsigned int write_index;
942 
943 	qdf_spin_lock(&CE_state->ce_index_lock);
944 	sw_index = dest_ring->sw_index;
945 	write_index = dest_ring->write_index;
946 	qdf_spin_unlock(&CE_state->ce_index_lock);
947 
948 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
949 }
950 
951 /*
952  * Guts of ce_send_entries_done.
953  * The caller takes responsibility for any necessary locking.
954  */
955 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
956 {
957 	struct CE_state *CE_state = (struct CE_state *)copyeng;
958 	unsigned int nentries;
959 	struct hif_softc *scn = CE_state->scn;
960 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
961 
962 	qdf_spin_lock(&CE_state->ce_index_lock);
963 	nentries = hif_state->ce_services->ce_send_entries_done_nolock(
964 						CE_state->scn, CE_state);
965 	qdf_spin_unlock(&CE_state->ce_index_lock);
966 
967 	return nentries;
968 }
969 
970 /*
971  * Guts of ce_recv_entries_done.
972  * The caller takes responsibility for any necessary locking.
973  */
974 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
975 {
976 	struct CE_state *CE_state = (struct CE_state *)copyeng;
977 	unsigned int nentries;
978 	struct hif_softc *scn = CE_state->scn;
979 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
980 
981 	qdf_spin_lock(&CE_state->ce_index_lock);
982 	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
983 						CE_state->scn, CE_state);
984 	qdf_spin_unlock(&CE_state->ce_index_lock);
985 
986 	return nentries;
987 }
988 
989 /*
990  * Guts of ce_completed_recv_next.
991  * The caller takes responsibility for any necessary locking.
992  */
993 int
994 ce_completed_recv_next(struct CE_handle *copyeng,
995 		       void **per_CE_contextp,
996 		       void **per_transfer_contextp,
997 		       qdf_dma_addr_t *bufferp,
998 		       unsigned int *nbytesp,
999 		       unsigned int *transfer_idp, unsigned int *flagsp)
1000 {
1001 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1002 	int status;
1003 	struct hif_softc *scn = CE_state->scn;
1004 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1005 	struct ce_ops *ce_services;
1006 
1007 	ce_services = hif_state->ce_services;
1008 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1009 	status =
1010 		ce_services->ce_completed_recv_next_nolock(CE_state,
1011 				per_CE_contextp, per_transfer_contextp, bufferp,
1012 					      nbytesp, transfer_idp, flagsp);
1013 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1014 
1015 	return status;
1016 }
1017 
1018 QDF_STATUS
1019 ce_revoke_recv_next(struct CE_handle *copyeng,
1020 		    void **per_CE_contextp,
1021 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1022 {
1023 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1024 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1025 
1026 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
1027 			per_CE_contextp, per_transfer_contextp, bufferp);
1028 }
1029 
1030 QDF_STATUS
1031 ce_cancel_send_next(struct CE_handle *copyeng,
1032 		void **per_CE_contextp,
1033 		void **per_transfer_contextp,
1034 		qdf_dma_addr_t *bufferp,
1035 		unsigned int *nbytesp,
1036 		unsigned int *transfer_idp,
1037 		uint32_t *toeplitz_hash_result)
1038 {
1039 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1040 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1041 
1042 	return hif_state->ce_services->ce_cancel_send_next
1043 		(copyeng, per_CE_contextp, per_transfer_contextp,
1044 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
1045 }
1046 
1047 int
1048 ce_completed_send_next(struct CE_handle *copyeng,
1049 		       void **per_CE_contextp,
1050 		       void **per_transfer_contextp,
1051 		       qdf_dma_addr_t *bufferp,
1052 		       unsigned int *nbytesp,
1053 		       unsigned int *transfer_idp,
1054 		       unsigned int *sw_idx,
1055 		       unsigned int *hw_idx,
1056 		       unsigned int *toeplitz_hash_result)
1057 {
1058 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1059 	struct hif_softc *scn = CE_state->scn;
1060 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1061 	struct ce_ops *ce_services;
1062 	int status;
1063 
1064 	ce_services = hif_state->ce_services;
1065 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1066 	status =
1067 		ce_services->ce_completed_send_next_nolock(CE_state,
1068 					per_CE_contextp, per_transfer_contextp,
1069 					bufferp, nbytesp, transfer_idp, sw_idx,
1070 					      hw_idx, toeplitz_hash_result);
1071 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1072 
1073 	return status;
1074 }
1075 
1076 #ifdef ATH_11AC_TXCOMPACT
1077 /* CE engine descriptor reap
1078  * Similar to ce_per_engine_service(); the only difference is that
1079  * ce_per_engine_service() handles both receive and reaping of completed
1080  * descriptors, while this function only handles reaping of Tx completion
1081  * descriptors.
1082  * It is called from the threshold reap poll routine
1083  * hif_send_complete_check(), so it must not contain any receive
1084  * functionality.
1085  */
1085 
1086 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
1087 {
1088 	void *CE_context;
1089 	void *transfer_context;
1090 	qdf_dma_addr_t buf;
1091 	unsigned int nbytes;
1092 	unsigned int id;
1093 	unsigned int sw_idx, hw_idx;
1094 	uint32_t toeplitz_hash_result;
1095 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1096 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1097 
1098 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1099 		return;
1100 
1101 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
1102 			NULL, NULL, 0, 0);
1103 
1104 	/* Since this function is called from both user context and
1105 	 * tasklet context, the spinlock has to disable bottom halves.
1106 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
1107 	 * enabled in TX polling mode. If that is not the case, more
1108 	 * bottom-half spinlock changes are needed. Due to data path
1109 	 * performance concerns, after internal discussion we decided
1110 	 * to make the minimum change, i.e., only address the issue seen
1111 	 * in this function. The possible downside of this minimum
1112 	 * change is that, in the future, if some other function is also
1113 	 * opened up to user context, those cases will need to be
1114 	 * addressed by changing spin_lock to spin_lock_bh as well.
1115 	 */
1116 
1117 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1118 
1119 	if (CE_state->send_cb) {
1120 		{
1121 			struct ce_ops *ce_services = hif_state->ce_services;
1122 			/* Pop completed send buffers and call the
1123 			 * registered send callback for each
1124 			 */
1125 			while (ce_services->ce_completed_send_next_nolock
1126 				 (CE_state, &CE_context,
1127 				  &transfer_context, &buf,
1128 				  &nbytes, &id, &sw_idx, &hw_idx,
1129 				  &toeplitz_hash_result) ==
1130 				  QDF_STATUS_SUCCESS) {
1131 				if (ce_id != CE_HTT_H2T_MSG) {
1132 					qdf_spin_unlock_bh(
1133 						&CE_state->ce_index_lock);
1134 					CE_state->send_cb(
1135 						(struct CE_handle *)
1136 						CE_state, CE_context,
1137 						transfer_context, buf,
1138 						nbytes, id, sw_idx, hw_idx,
1139 						toeplitz_hash_result);
1140 					qdf_spin_lock_bh(
1141 						&CE_state->ce_index_lock);
1142 				} else {
1143 					struct HIF_CE_pipe_info *pipe_info =
1144 						(struct HIF_CE_pipe_info *)
1145 						CE_context;
1146 
1147 					qdf_spin_lock_bh(&pipe_info->
1148 						 completion_freeq_lock);
1149 					pipe_info->num_sends_allowed++;
1150 					qdf_spin_unlock_bh(&pipe_info->
1151 						   completion_freeq_lock);
1152 				}
1153 			}
1154 		}
1155 	}
1156 
1157 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1158 
1159 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
1160 			NULL, NULL, 0, 0);
1161 	Q_TARGET_ACCESS_END(scn);
1162 }
1163 
1164 #endif /*ATH_11AC_TXCOMPACT */
1165 
1166 /*
1167  * Number of times to check for any pending Tx/Rx completion on
1168  * a copy engine; this count should be big enough. Once we hit
1169  * this threshold we will not check for any Tx/Rx completion in the
1170  * same interrupt handling pass. Note that this threshold is only used
1171  * for Rx interrupt processing; it can be used for Tx as well if we
1172  * suspect an infinite loop in checking for pending Tx completions.
1173  */
1174 #define CE_TXRX_COMP_CHECK_THRESHOLD 20
1175 
1176 /*
1177  * TODO: Fast path implementation must be decoupled from the generic service
1178  * APIs shared between SRNG and Legacy CE implementations and must be moved
1179  * to ce_service_legacy.c.
1180  * CR-2315620
1181  */
1182 #ifdef WLAN_FEATURE_FASTPATH
1183 /**
1184  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1185  * @ce_state: handle to copy engine state
1186  * @cmpl_msdus: Rx msdus
1187  * @num_cmpls: number of Rx msdus
1188  * @ctrl_addr: CE control address
1189  *
1190  * Return: None
1191  */
1192 static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1193 				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1194 				  uint32_t ctrl_addr)
1195 {
1196 	struct hif_softc *scn = ce_state->scn;
1197 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1198 	uint32_t nentries_mask = dest_ring->nentries_mask;
1199 	uint32_t write_index;
1200 
1201 	qdf_spin_unlock(&ce_state->ce_index_lock);
1202 	(ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
1203 	qdf_spin_lock(&ce_state->ce_index_lock);
1204 
1205 	/* Update Destination Ring Write Index */
1206 	write_index = dest_ring->write_index;
1207 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1208 
1209 	hif_record_ce_desc_event(scn, ce_state->id,
1210 			FAST_RX_WRITE_INDEX_UPDATE,
1211 			NULL, NULL, write_index, 0);
1212 
1213 	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1214 	dest_ring->write_index = write_index;
1215 }
1216 
1217 /**
1218  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
1219  * @scn: hif_context
1220  * @ce_id: Copy engine ID
1221  * 1) Go through the CE ring, and find the completions
1222  * 2) For valid completions retrieve context (nbuf) from per_transfer_context[]
1223  * 3) Unmap buffer & accumulate in an array.
1224  * 4) Call message handler when array is full or when exiting the handler
1225  *
1226  * Return: void
1227  */
1228 
1229 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1230 {
1231 	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1232 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1233 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1234 	struct CE_dest_desc *dest_ring_base =
1235 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1236 
1237 	uint32_t nentries_mask = dest_ring->nentries_mask;
1238 	uint32_t sw_index = dest_ring->sw_index;
1239 	uint32_t nbytes;
1240 	qdf_nbuf_t nbuf;
1241 	dma_addr_t paddr;
1242 	struct CE_dest_desc *dest_desc;
1243 	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1244 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1245 	uint32_t nbuf_cmpl_idx = 0;
1246 	unsigned int more_comp_cnt = 0;
1247 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1248 	struct ce_ops *ce_services = hif_state->ce_services;
1249 
1250 more_data:
1251 	for (;;) {
1252 
1253 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1254 						 sw_index);
1255 
1256 		/*
1257 		 * The following 2 reads are from non-cached memory
1258 		 */
1259 		nbytes = dest_desc->nbytes;
1260 
1261 		/* If completion is invalid, break */
1262 		if (qdf_unlikely(nbytes == 0))
1263 			break;
1264 
1265 
1266 		/*
1267 		 * Build the nbuf list from valid completions
1268 		 */
1269 		nbuf = dest_ring->per_transfer_context[sw_index];
1270 
1271 		/*
1272 		 * No lock is needed here, since this is the only thread
1273 		 * that accesses the sw_index
1274 		 */
1275 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1276 
1277 		/*
1278 		 * CAREFUL : Uncached write, but still less expensive,
1279 		 * since most modern caches use "write-combining" to
1280 		 * flush multiple cache-writes all at once.
1281 		 */
1282 		dest_desc->nbytes = 0;
1283 
1284 		/*
1285 		 * Per our understanding this is not required on our
1286 		 * platform, since we are doing the same cache invalidation
1287 		 * operation on the same buffer twice in succession,
1288 		 * without any modification to this buffer by the CPU in
1289 		 * between.
1290 		 * However, this code with 2 syncs in succession has
1291 		 * been undergoing some testing at a customer site,
1292 		 * and has shown no problems so far. We would like to
1293 		 * confirm with the customer that this line is really
1294 		 * not required before we remove it completely.
1296 		 */
1297 		paddr = QDF_NBUF_CB_PADDR(nbuf);
1298 
1299 		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
1300 				(skb_end_pointer(nbuf) - (nbuf)->data),
1301 				DMA_FROM_DEVICE);
1302 
1303 		qdf_nbuf_put_tail(nbuf, nbytes);
1304 
1305 		qdf_assert_always(nbuf->data != NULL);
1306 
1307 		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
1308 				hif_get_rx_ctx_id(ce_state->id, hif_hdl);
1309 		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1310 
1311 		/*
1312 		 * we are not posting the buffers back; instead we are
1313 		 * reusing the buffers
1314 		 */
1315 		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
1316 			hif_record_ce_desc_event(scn, ce_state->id,
1317 						 FAST_RX_SOFTWARE_INDEX_UPDATE,
1318 						 NULL, NULL, sw_index, 0);
1319 			dest_ring->sw_index = sw_index;
1320 			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1321 						nbuf_cmpl_idx, ctrl_addr);
1322 
1323 			ce_state->receive_count += nbuf_cmpl_idx;
1324 			if (qdf_unlikely(hif_ce_service_should_yield(
1325 						scn, ce_state))) {
1326 				ce_state->force_break = 1;
1327 				qdf_atomic_set(&ce_state->rx_pending, 1);
1328 				return;
1329 			}
1330 
1331 			nbuf_cmpl_idx = 0;
1332 			more_comp_cnt = 0;
1333 		}
1334 	}
1335 
1336 	hif_record_ce_desc_event(scn, ce_state->id,
1337 				 FAST_RX_SOFTWARE_INDEX_UPDATE,
1338 				 NULL, NULL, sw_index, 0);
1339 
1340 	dest_ring->sw_index = sw_index;
1341 
1342 	/*
1343 	 * If there are not enough completions to fill the array,
1344 	 * just call the message handler here
1345 	 */
1346 	if (nbuf_cmpl_idx) {
1347 		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1348 				      nbuf_cmpl_idx, ctrl_addr);
1349 
1350 		ce_state->receive_count += nbuf_cmpl_idx;
1351 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1352 			ce_state->force_break = 1;
1353 			qdf_atomic_set(&ce_state->rx_pending, 1);
1354 			return;
1355 		}
1356 
1357 		/* check for more packets after upper layer processing */
1358 		nbuf_cmpl_idx = 0;
1359 		more_comp_cnt = 0;
1360 		goto more_data;
1361 	}
1362 
1363 	hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());
1364 
1365 	qdf_atomic_set(&ce_state->rx_pending, 0);
1366 	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1367 		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1368 					   HOST_IS_COPY_COMPLETE_MASK);
1369 	} else {
1370 		hif_err_rl("%s: target access is not allowed", __func__);
1371 		return;
1372 	}
1373 
1374 	if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
1375 		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1376 			goto more_data;
1377 		} else {
1378 			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1379 				  __func__, nentries_mask,
1380 				  ce_state->dest_ring->sw_index,
1381 				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
1382 		}
1383 	}
1384 #ifdef NAPI_YIELD_BUDGET_BASED
1385 	/*
1386 	 * Caution: before you modify this code, please refer to the
1387 	 * hif_napi_poll() function to understand how napi_complete() gets
1388 	 * called and make the necessary changes. Force break has to be done
1389 	 * until WIN disables the interrupt at the source.
1390 	 */
1388 	ce_state->force_break = 1;
1389 #endif
1390 }
1391 
1392 #else
1393 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1394 {
1395 }
1396 #endif /* WLAN_FEATURE_FASTPATH */
1397 
1398 /*
1399  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1400  *
1401  * Invokes registered callbacks for recv_complete,
1402  * send_complete, and watermarks.
1403  *
1404  * Returns: number of messages processed
1405  */
1406 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1407 {
1408 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1409 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1410 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1411 	void *CE_context;
1412 	void *transfer_context;
1413 	qdf_dma_addr_t buf;
1414 	unsigned int nbytes;
1415 	unsigned int id;
1416 	unsigned int flags;
1417 	unsigned int more_comp_cnt = 0;
1418 	unsigned int more_snd_comp_cnt = 0;
1419 	unsigned int sw_idx, hw_idx;
1420 	uint32_t toeplitz_hash_result;
1421 	uint32_t mode = hif_get_conparam(scn);
1422 
1423 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1424 		return CE_state->receive_count;
1425 
1426 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1427 		HIF_ERROR("[premature rc=0]");
1428 		return 0; /* no work done */
1429 	}
1430 
1431 	/* Clear force_break flag and re-initialize receive_count to 0 */
1432 	CE_state->receive_count = 0;
1433 	CE_state->force_break = 0;
1434 	CE_state->ce_service_start_time = sched_clock();
1435 	CE_state->ce_service_yield_time =
1436 		CE_state->ce_service_start_time +
1437 		hif_get_ce_service_max_yield_time(
1438 			(struct hif_opaque_softc *)scn);
1439 
1440 	qdf_spin_lock(&CE_state->ce_index_lock);
1441 	/*
1442 	 * With the below check we make sure the CE we are handling is a
1443 	 * datapath CE and fastpath is enabled.
1444 	 */
1445 	if (ce_is_fastpath_handler_registered(CE_state)) {
1446 		/* For datapath only Rx CEs */
1447 		ce_per_engine_service_fast(scn, CE_id);
1448 		goto unlock_end;
1449 	}
1450 
1451 more_completions:
1452 	if (CE_state->recv_cb) {
1453 
1454 		/* Pop completed recv buffers and call
1455 		 * the registered recv callback for each
1456 		 */
1457 		while (hif_state->ce_services->ce_completed_recv_next_nolock
1458 				(CE_state, &CE_context, &transfer_context,
1459 				&buf, &nbytes, &id, &flags) ==
1460 				QDF_STATUS_SUCCESS) {
1461 			qdf_spin_unlock(&CE_state->ce_index_lock);
1462 			CE_state->recv_cb((struct CE_handle *)CE_state,
1463 					  CE_context, transfer_context, buf,
1464 					  nbytes, id, flags);
1465 
1466 			/*
1467 			 * EV #112693 -
1468 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
1469 			 * BSoD_0x133 occurred in VHT80 UDP_DL
1470 			 * Break out DPC by force if number of loops in
1471 			 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
1472 			 * to avoid spending too long time in
1473 			 * DPC for each interrupt handling. Schedule another
1474 			 * DPC to avoid data loss if we had taken
1475 			 * force-break action before apply to Windows OS
1476 			 * only currently, Linux/MAC os can expand to their
1477 			 * platform if necessary
1478 			 */
1479 
1480 			/* Break the receive processes by
1481 			 * force if force_break set up
1482 			 */
1483 			if (qdf_unlikely(CE_state->force_break)) {
1484 				qdf_atomic_set(&CE_state->rx_pending, 1);
1485 				goto target_access_end;
1486 			}
1487 			qdf_spin_lock(&CE_state->ce_index_lock);
1488 		}
1489 	}
1490 
1491 	/*
1492 	 * Attention: we may experience a potential infinite loop in the
1493 	 * below while loop during a send stress test.
1494 	 * Resolve it the same way as the receive case (refer to EV #112693).
1495 	 */
1496 
1497 	if (CE_state->send_cb) {
1498 		/* Pop completed send buffers and call
1499 		 * the registered send callback for each
1500 		 */
1501 
1502 #ifdef ATH_11AC_TXCOMPACT
1503 		while (hif_state->ce_services->ce_completed_send_next_nolock
1504 			 (CE_state, &CE_context,
1505 			 &transfer_context, &buf, &nbytes,
1506 			 &id, &sw_idx, &hw_idx,
1507 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1508 
1509 			if (CE_id != CE_HTT_H2T_MSG ||
1510 			    QDF_IS_EPPING_ENABLED(mode)) {
1511 				qdf_spin_unlock(&CE_state->ce_index_lock);
1512 				CE_state->send_cb((struct CE_handle *)CE_state,
1513 						  CE_context, transfer_context,
1514 						  buf, nbytes, id, sw_idx,
1515 						  hw_idx, toeplitz_hash_result);
1516 				qdf_spin_lock(&CE_state->ce_index_lock);
1517 			} else {
1518 				struct HIF_CE_pipe_info *pipe_info =
1519 					(struct HIF_CE_pipe_info *)CE_context;
1520 
1521 				qdf_spin_lock_bh(&pipe_info->
1522 					      completion_freeq_lock);
1523 				pipe_info->num_sends_allowed++;
1524 				qdf_spin_unlock_bh(&pipe_info->
1525 						completion_freeq_lock);
1526 			}
1527 		}
1528 #else                           /*ATH_11AC_TXCOMPACT */
1529 		while (hif_state->ce_services->ce_completed_send_next_nolock
1530 			 (CE_state, &CE_context,
1531 			  &transfer_context, &buf, &nbytes,
1532 			  &id, &sw_idx, &hw_idx,
1533 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1534 			qdf_spin_unlock(&CE_state->ce_index_lock);
1535 			CE_state->send_cb((struct CE_handle *)CE_state,
1536 				  CE_context, transfer_context, buf,
1537 				  nbytes, id, sw_idx, hw_idx,
1538 				  toeplitz_hash_result);
1539 			qdf_spin_lock(&CE_state->ce_index_lock);
1540 		}
1541 #endif /*ATH_11AC_TXCOMPACT */
1542 	}
1543 
1544 more_watermarks:
1545 	if (CE_state->misc_cbs) {
1546 		if (CE_state->watermark_cb &&
1547 				hif_state->ce_services->watermark_int(CE_state,
1548 					&flags)) {
1549 			qdf_spin_unlock(&CE_state->ce_index_lock);
1550 			/* Convert HW IS bits to software flags */
1551 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1552 					CE_state->wm_context, flags);
1553 			qdf_spin_lock(&CE_state->ce_index_lock);
1554 		}
1555 	}
1556 
1557 	/*
1558 	 * Clear the misc interrupts (watermark) that were handled above,
1559 	 * and that will be checked again below.
1560 	 * Clear and check for copy-complete interrupts again, just in case
1561 	 * more copy completions happened while the misc interrupts were being
1562 	 * handled.
1563 	 */
1564 	if (!ce_srng_based(scn)) {
1565 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1566 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1567 					   CE_WATERMARK_MASK |
1568 					   HOST_IS_COPY_COMPLETE_MASK);
1569 		} else {
1570 			hif_err_rl("%s: target access is not allowed",
1571 				   __func__);
1572 			goto unlock_end;
1573 		}
1574 	}
1575 
1576 	/*
1577 	 * Now that per-engine interrupts are cleared, verify that
1578 	 * no recv interrupts arrive while processing send interrupts,
1579 	 * and no recv or send interrupts happened while processing
1580 	 * misc interrupts. Go back and check again. Keep checking until
1581 	 * we find no more events to process.
1582 	 */
1583 	if (CE_state->recv_cb &&
1584 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1585 				CE_state)) {
1586 		if (QDF_IS_EPPING_ENABLED(mode) ||
1587 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1588 			goto more_completions;
1589 		} else {
1590 			if (!ce_srng_based(scn)) {
1591 				HIF_ERROR(
1592 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1593 					__func__,
1594 					CE_state->dest_ring->nentries_mask,
1595 					CE_state->dest_ring->sw_index,
1596 					CE_DEST_RING_READ_IDX_GET(scn,
1597 							  CE_state->ctrl_addr));
1598 			}
1599 		}
1600 	}
1601 
1602 	if (CE_state->send_cb &&
1603 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1604 				CE_state)) {
1605 		if (QDF_IS_EPPING_ENABLED(mode) ||
1606 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1607 			goto more_completions;
1608 		} else {
1609 			if (!ce_srng_based(scn)) {
1610 				HIF_ERROR(
1611 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1612 					__func__,
1613 					CE_state->src_ring->nentries_mask,
1614 					CE_state->src_ring->sw_index,
1615 					CE_SRC_RING_READ_IDX_GET(scn,
1616 							 CE_state->ctrl_addr));
1617 			}
1618 		}
1619 	}
1620 
1621 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1622 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1623 			goto more_watermarks;
1624 	}
1625 
1626 	qdf_atomic_set(&CE_state->rx_pending, 0);
1627 
1628 unlock_end:
1629 	qdf_spin_unlock(&CE_state->ce_index_lock);
1630 target_access_end:
1631 	if (Q_TARGET_ACCESS_END(scn) < 0)
1632 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
1633 	return CE_state->receive_count;
1634 }
1635 qdf_export_symbol(ce_per_engine_service);
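
/*
 * Illustrative use (comment only): the interrupt/tasklet bottom half for a
 * single CE simply calls this and gets back the number of Rx messages
 * processed, e.g.:
 *
 *	work_done = ce_per_engine_service(scn, ce_id);
 */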
1636 
1637 /*
1638  * Handler for per-engine interrupts on ALL active CEs.
1639  * This is used in cases where the system is sharing a
1640  * single interrupt for all CEs.
1641  */
1642 
1643 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1644 {
1645 	int CE_id;
1646 	uint32_t intr_summary;
1647 
1648 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1649 		return;
1650 
1651 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1652 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1653 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1654 
1655 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1656 				qdf_atomic_set(&CE_state->rx_pending, 0);
1657 				ce_per_engine_service(scn, CE_id);
1658 			}
1659 		}
1660 
1661 		Q_TARGET_ACCESS_END(scn);
1662 		return;
1663 	}
1664 
1665 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1666 
1667 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1668 		if (intr_summary & (1 << CE_id))
1669 			intr_summary &= ~(1 << CE_id);
1670 		else
1671 			continue;       /* no intr pending on this CE */
1672 
1673 		ce_per_engine_service(scn, CE_id);
1674 	}
1675 
1676 	Q_TARGET_ACCESS_END(scn);
1677 }
1678 
1679 /* Iterate the CE_state list and disable the compl interrupt
1680  * if it has been registered already.
1681  */
1682 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1683 {
1684 	int CE_id;
1685 
1686 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1687 		return;
1688 
1689 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1690 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1691 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1692 
1693 		/* if the interrupt is currently enabled, disable it */
1694 		if (!CE_state->disable_copy_compl_intr
1695 		    && (CE_state->send_cb || CE_state->recv_cb))
1696 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1697 
1698 		if (CE_state->watermark_cb)
1699 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1700 	}
1701 	Q_TARGET_ACCESS_END(scn);
1702 }
1703 
1704 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1705 {
1706 	int CE_id;
1707 
1708 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1709 		return;
1710 
1711 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1712 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1713 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1714 
1715 		/*
1716 		 * If the CE is supposed to have copy complete interrupts
1717 		 * enabled (i.e. there is a callback registered, and the
1718 		 * "disable" flag is not set), then re-enable the interrupt.
1719 		 */
1720 		if (!CE_state->disable_copy_compl_intr
1721 		    && (CE_state->send_cb || CE_state->recv_cb))
1722 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1723 
1724 		if (CE_state->watermark_cb)
1725 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1726 	}
1727 	Q_TARGET_ACCESS_END(scn);
1728 }
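
/*
 * Example (illustrative sketch): a caller that wants to reap completions by
 * polling (for instance while flushing pipes) can bracket the polled section
 * with the two helpers above.  Serialization against the regular interrupt
 * path is the caller's responsibility and is omitted here.
 *
 *	ce_disable_any_copy_compl_intr_nolock(scn);
 *	... poll ce_per_engine_service(scn, ce_id) for the CEs of interest ...
 *	ce_enable_any_copy_compl_intr_nolock(scn);
 */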
1729 
1730 /**
1731  * ce_send_cb_register(): register completion handler
1732  * @copyeng: CE_state representing the ce we are adding the behavior to
1733  * @fn_ptr: callback that the ce should use when processing tx completions
1734  * @disable_interrupts: whether completion interrupts should be disabled
1735  *
1736  * Caller should guarantee that no transactions are in progress before
1737  * switching the callback function.
1738  *
1739  * Registers the send context before the fn pointer so that whenever the cb
1740  * is seen as valid, the context is valid as well.
1741  *
1742  * Beware that currently this function will enable completion interrupts.
1743  */
1744 void
1745 ce_send_cb_register(struct CE_handle *copyeng,
1746 		    ce_send_cb fn_ptr,
1747 		    void *ce_send_context, int disable_interrupts)
1748 {
1749 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1750 	struct hif_softc *scn;
1751 	struct HIF_CE_state *hif_state;
1752 
1753 	if (CE_state == NULL) {
1754 		HIF_ERROR("%s: Error CE state = NULL", __func__);
1755 		return;
1756 	}
1757 	scn = CE_state->scn;
1758 	hif_state = HIF_GET_CE_STATE(scn);
1759 	if (hif_state == NULL) {
1760 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1761 		return;
1762 	}
1763 	CE_state->send_context = ce_send_context;
1764 	CE_state->send_cb = fn_ptr;
1765 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1766 							disable_interrupts);
1767 }
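
/*
 * Example (illustrative sketch): a pipe owner registering its TX completion
 * handler.  The callback and pipe structure names are hypothetical; the
 * callback must match the ce_send_cb prototype declared in ce_api.h.
 * ce_recv_cb_register() below follows the same pattern for RX completions.
 *
 *	ce_send_cb_register(my_pipe->ce_hdl, my_send_done_cb, my_pipe, 0);
 */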
1768 
1769 /**
1770  * ce_recv_cb_register(): register completion handler
1771  * @copyeng: CE_state representing the ce we are adding the behavior to
1772  * @fn_ptr: callback that the ce should use when processing rx completions
1773  * @disable_interrupts: whether completion interrupts should be disabled
1774  *
1775  * Registers the recv context before the fn pointer so that whenever the cb
1776  * is seen as valid, the context is valid as well.
1777  *
1778  * Caller should guarantee that no transactions are in progress before
1779  * switching the callback function.
1780  */
1781 void
1782 ce_recv_cb_register(struct CE_handle *copyeng,
1783 		    CE_recv_cb fn_ptr,
1784 		    void *CE_recv_context, int disable_interrupts)
1785 {
1786 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1787 	struct hif_softc *scn;
1788 	struct HIF_CE_state *hif_state;
1789 
1790 	if (CE_state == NULL) {
1791 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
1792 		return;
1793 	}
1794 	scn = CE_state->scn;
1795 	hif_state = HIF_GET_CE_STATE(scn);
1796 	if (hif_state == NULL) {
1797 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
1798 		return;
1799 	}
1800 	CE_state->recv_context = CE_recv_context;
1801 	CE_state->recv_cb = fn_ptr;
1802 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1803 							disable_interrupts);
1804 }
1805 
1806 /**
1807  * ce_watermark_cb_register(): register completion handler
1808  * @copyeng: CE_state representing the ce we are adding the behavior to
1809  * @fn_ptr: callback that the ce should use when processing watermark events
1810  *
1811  * Caller should guarantee that no watermark events are being processed before
1812  * switching the callback function.
1813  */
1814 void
1815 ce_watermark_cb_register(struct CE_handle *copyeng,
1816 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1817 {
1818 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1819 	struct hif_softc *scn = CE_state->scn;
1820 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1821 
1822 	CE_state->watermark_cb = fn_ptr;
1823 	CE_state->wm_context = CE_wm_context;
1824 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1825 							0);
1826 	if (fn_ptr)
1827 		CE_state->misc_cbs = 1;
1828 }
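
/*
 * Example (illustrative sketch): enabling watermark notifications on a copy
 * engine.  The callback and context names are hypothetical and the callback
 * must match the CE_watermark_cb prototype in ce_api.h; the watermark levels
 * themselves are programmed separately through the CE watermark APIs.
 *
 *	ce_watermark_cb_register(my_pipe->ce_hdl, my_watermark_cb, my_pipe);
 */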
1829 
1830 bool ce_get_rx_pending(struct hif_softc *scn)
1831 {
1832 	int CE_id;
1833 
1834 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1835 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1836 
1837 		if (qdf_atomic_read(&CE_state->rx_pending))
1838 			return true;
1839 	}
1840 
1841 	return false;
1842 }
1843 
1844 /**
1845  * ce_check_rx_pending() - check whether rx descriptors are pending on a CE
1846  * @CE_state: context of the copy engine to check
1847  *
1848  * Return: true if the per-engine service did not
1849  *	process all of the rx descriptors for this copy engine.
1850  */
1851 bool ce_check_rx_pending(struct CE_state *CE_state)
1852 {
1853 	if (qdf_atomic_read(&CE_state->rx_pending))
1854 		return true;
1855 	else
1856 		return false;
1857 }
1858 qdf_export_symbol(ce_check_rx_pending);
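
/*
 * Example (illustrative sketch): a poll loop (NAPI style) can use
 * ce_check_rx_pending() to decide whether to keep polling or to re-arm the
 * CE interrupt; everything other than the CE service calls is hypothetical.
 *
 *	ce_per_engine_service(scn, ce_id);
 *	if (ce_check_rx_pending(scn->ce_id_to_state[ce_id]))
 *		my_reschedule_poll();
 *	else
 *		my_reenable_ce_interrupt(ce_id);
 */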
1859 
1860 #ifdef IPA_OFFLOAD
1861 /**
1862  * ce_ipa_get_resource() - get uc resource on copyengine
1863  * @ce: copyengine context
1864  * @ce_sr: copyengine source ring resource info
1865  * @ce_sr_ring_size: copyengine source ring size
1866  * @ce_reg_paddr: copyengine register physical address
1867  *
1868  * Copy engine should release these resources to the micro controller.
1869  * The micro controller needs:
1870  *  - Copy engine source descriptor base address
1871  *  - Copy engine source descriptor size
1872  *  - PCI BAR address to access the copy engine register
1873  *
1874  * Return: None
1875  */
1876 void ce_ipa_get_resource(struct CE_handle *ce,
1877 			 qdf_shared_mem_t **ce_sr,
1878 			 uint32_t *ce_sr_ring_size,
1879 			 qdf_dma_addr_t *ce_reg_paddr)
1880 {
1881 	struct CE_state *CE_state = (struct CE_state *)ce;
1882 	uint32_t ring_loop;
1883 	struct CE_src_desc *ce_desc;
1884 	qdf_dma_addr_t phy_mem_base;
1885 	struct hif_softc *scn = CE_state->scn;
1886 
1887 	if (CE_UNUSED == CE_state->state) {
1888 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1889 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1890 		*ce_sr_ring_size = 0;
1891 		return;
1892 	}
1893 
1894 	/* Update default value for descriptor */
1895 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1896 	     ring_loop++) {
1897 		ce_desc = (struct CE_src_desc *)
1898 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1899 			   ring_loop * (sizeof(struct CE_src_desc)));
1900 		CE_IPA_RING_INIT(ce_desc);
1901 	}
1902 
1903 	/* Get BAR address */
1904 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1905 
1906 	*ce_sr = CE_state->scn->ipa_ce_ring;
1907 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1908 		sizeof(struct CE_src_desc));
1909 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1910 			SR_WR_INDEX_ADDRESS;
1911 }
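
/*
 * Example (illustrative sketch): an IPA uC setup path could pull the copy
 * engine resources it has to hand over to the micro controller.  The
 * variable names are hypothetical; ce_hdl is the CE_handle of the IPA pipe.
 *
 *	qdf_shared_mem_t *ce_sr;
 *	uint32_t ce_sr_ring_size;
 *	qdf_dma_addr_t ce_reg_paddr;
 *
 *	ce_ipa_get_resource(ce_hdl, &ce_sr, &ce_sr_ring_size, &ce_reg_paddr);
 */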
1912 #endif /* IPA_OFFLOAD */
1913 
1914 #if HIF_CE_DEBUG_DATA_BUF
1915 /**
1916  * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
1917  * @buf: buffer to copy the hex dump into
1918  * @pos: current position up to which @buf is already filled
1919  * @data: data to be dumped
1920  * @data_len: length of the data to be dumped
1921  */
1922 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1923 					uint8_t *data, uint32_t data_len)
1924 {
1925 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1926 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1927 
1928 	if ((data_len > 0) && data) {
1929 		if (data_len < 16) {
1930 			hex_dump_to_buffer(data,
1931 						data_len,
1932 						16, 1, buf + pos,
1933 						(ssize_t)PAGE_SIZE - pos,
1934 						false);
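			/*
			 * hex_dump_to_buffer() with groupsize 1 emits "xx "
			 * per byte without a trailing space, so a row of n
			 * bytes occupies (3 * n) - 1 characters, which is
			 * what CE_DEBUG_PRINT_BUF_SIZE() accounts for below.
			 */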
1935 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1936 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1937 		} else {
1938 			uint32_t rows = (data_len / 16) + 1;
1939 			uint32_t row = 0;
1940 
1941 			for (row = 0; row < rows; row++) {
1942 				hex_dump_to_buffer(data + (row * 16),
1943 							CE_DEBUG_DATA_PER_ROW,
1944 							16, 1, buf + pos,
1945 							(ssize_t)PAGE_SIZE
1946 							- pos, false);
1947 				pos +=
1948 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1949 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1950 						"\n");
1951 			}
1952 		}
1953 	}
1954 
1955 	return pos;
1956 }
1957 #endif
1958 
1959 /*
1960  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1961  * for defined here
1962  */
1963 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1964 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1965 {
1966 	switch (type) {
1967 	case HIF_RX_DESC_POST:
1968 		return "HIF_RX_DESC_POST";
1969 	case HIF_RX_DESC_COMPLETION:
1970 		return "HIF_RX_DESC_COMPLETION";
1971 	case HIF_TX_GATHER_DESC_POST:
1972 		return "HIF_TX_GATHER_DESC_POST";
1973 	case HIF_TX_DESC_POST:
1974 		return "HIF_TX_DESC_POST";
1975 	case HIF_TX_DESC_SOFTWARE_POST:
1976 		return "HIF_TX_DESC_SOFTWARE_POST";
1977 	case HIF_TX_DESC_COMPLETION:
1978 		return "HIF_TX_DESC_COMPLETION";
1979 	case FAST_RX_WRITE_INDEX_UPDATE:
1980 		return "FAST_RX_WRITE_INDEX_UPDATE";
1981 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1982 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1983 	case FAST_TX_WRITE_INDEX_UPDATE:
1984 		return "FAST_TX_WRITE_INDEX_UPDATE";
1985 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1986 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1987 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1988 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1989 	case RESUME_WRITE_INDEX_UPDATE:
1990 		return "RESUME_WRITE_INDEX_UPDATE";
1991 	case HIF_IRQ_EVENT:
1992 		return "HIF_IRQ_EVENT";
1993 	case HIF_CE_TASKLET_ENTRY:
1994 		return "HIF_CE_TASKLET_ENTRY";
1995 	case HIF_CE_TASKLET_RESCHEDULE:
1996 		return "HIF_CE_TASKLET_RESCHEDULE";
1997 	case HIF_CE_TASKLET_EXIT:
1998 		return "HIF_CE_TASKLET_EXIT";
1999 	case HIF_CE_REAP_ENTRY:
2000 		return "HIF_CE_REAP_ENTRY";
2001 	case HIF_CE_REAP_EXIT:
2002 		return "HIF_CE_REAP_EXIT";
2003 	case NAPI_SCHEDULE:
2004 		return "NAPI_SCHEDULE";
2005 	case NAPI_POLL_ENTER:
2006 		return "NAPI_POLL_ENTER";
2007 	case NAPI_COMPLETE:
2008 		return "NAPI_COMPLETE";
2009 	case NAPI_POLL_EXIT:
2010 		return "NAPI_POLL_EXIT";
2011 	case HIF_RX_NBUF_ALLOC_FAILURE:
2012 		return "HIF_RX_NBUF_ALLOC_FAILURE";
2013 	case HIF_RX_NBUF_MAP_FAILURE:
2014 		return "HIF_RX_NBUF_MAP_FAILURE";
2015 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
2016 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
2017 	default:
2018 		return "invalid";
2019 	}
2020 }
2021 
2022 /**
2023  * hif_dump_desc_event() - dump the selected ce descriptor event into a buffer
2024  * @scn: hif context
2025  * @buf: buffer into which the event is dumped
2026  * Return: number of bytes written to @buf, or a negative error code
2027  */
2028 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
2029 {
2030 	struct hif_ce_desc_event *event;
2031 	uint64_t secs, usecs;
2032 	ssize_t len = 0;
2033 	struct ce_desc_hist *ce_hist = NULL;
2034 	struct hif_ce_desc_event *hist_ev = NULL;
2035 
2036 	if (!scn)
2037 		return -EINVAL;
2038 
2039 	ce_hist = &scn->hif_ce_desc_hist;
2040 
2041 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2042 	    (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2043 		qdf_print("Invalid values");
2044 		return -EINVAL;
2045 	}
2046 
2047 	hist_ev =
2048 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
2049 
2050 	if (!hist_ev) {
2051 		qdf_print("Low Memory");
2052 		return -EINVAL;
2053 	}
2054 
2055 	event = &hist_ev[ce_hist->hist_index];
2056 
2057 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
2058 
2059 	len += snprintf(buf, PAGE_SIZE - len,
2060 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
2061 			secs, usecs, ce_hist->hist_id,
2062 			ce_event_type_to_str(event->type),
2063 			event->index, event->memory);
2064 #if HIF_CE_DEBUG_DATA_BUF
2065 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%d",
2066 			event->actual_data_len);
2067 #endif
2068 
2069 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
2070 
2071 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
2072 				16, 1, buf + len,
2073 				(ssize_t)PAGE_SIZE - len, false);
2074 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
2075 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
2076 
2077 #if HIF_CE_DEBUG_DATA_BUF
2078 	if (ce_hist->data_enable[ce_hist->hist_id])
2079 		len = hif_dump_desc_data_buf(buf, len, event->data,
2080 						(event->actual_data_len <
2081 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
2082 						event->actual_data_len :
2083 						CE_DEBUG_MAX_DATA_BUF_SIZE);
2084 #endif /*HIF_CE_DEBUG_DATA_BUF*/
2085 
2086 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
2087 
2088 	return len;
2089 }
2090 
2091 /*
2092  * hif_input_desc_trace_buf_index() -
2093  * API to set the CE id and CE debug history index to be dumped next
2094  *
2095  * @scn: hif context
2096  * @buf: data received from the user ("<ce_id> <index>")
2097  * @size: size of @buf
2098  *
2099  * Return: total length consumed on success, negative error code otherwise
2100  */
2101 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2102 					const char *buf, size_t size)
2103 {
2104 	struct ce_desc_hist *ce_hist = NULL;
2105 
2106 	if (!scn)
2107 		return -EINVAL;
2108 
2109 	ce_hist = &scn->hif_ce_desc_hist;
2110 
2111 	if (!size) {
2112 		pr_err("%s: Invalid input buffer.\n", __func__);
2113 		return -EINVAL;
2114 	}
2115 
2116 	if (sscanf(buf, "%d %d", &ce_hist->hist_id,
2117 			&ce_hist->hist_index) != 2) {
2118 		pr_err("%s: Invalid input value.\n", __func__);
2119 		return -EINVAL;
2120 	}
2121 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2122 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2123 		qdf_print("Invalid values");
2124 		return -EINVAL;
2125 	}
2126 
2127 	return size;
2128 }
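
/*
 * Example (illustrative sketch): a sysfs store/show pair could be wired to
 * hif_input_desc_trace_buf_index() and hif_dump_desc_event() roughly as
 * below.  The wrapper names and the way the hif_softc is recovered from the
 * device are hypothetical.
 *
 *	static ssize_t ce_desc_index_store(struct device *dev,
 *					   struct device_attribute *attr,
 *					   const char *buf, size_t size)
 *	{
 *		return hif_input_desc_trace_buf_index(my_dev_to_scn(dev),
 *						      buf, size);
 *	}
 *
 *	static ssize_t ce_desc_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		return hif_dump_desc_event(my_dev_to_scn(dev), buf);
 *	}
 */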
2129 
2130 #endif  /*For MCL,  HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
2131 
2132 #if HIF_CE_DEBUG_DATA_BUF
2133 /*
2134  * hif_ce_en_desc_hist() -
2135  * API to enable/disable recording of CE descriptor data
2136  *
2137  * @scn: hif context
2138  * @buf: data received from the user ("<ce_id> <1/0>")
2139  * @size: size of @buf
2140  *
2141  * Starts or stops recording the data payload along with the CE desc history
2142  *
2143  * Return: total length consumed on success, negative error code otherwise
2144  */
2145 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
2146 {
2147 	struct ce_desc_hist *ce_hist = NULL;
2148 	uint32_t cfg = 0;
2149 	uint32_t ce_id = 0;
2150 
2151 	if (!scn)
2152 		return -EINVAL;
2153 
2154 	ce_hist = &scn->hif_ce_desc_hist;
2155 
2156 	if (!size) {
2157 		pr_err("%s: Invalid input buffer.\n", __func__);
2158 		return -EINVAL;
2159 	}
2160 
2161 	if (sscanf(buf, "%u %u", &ce_id, &cfg) != 2) {
2162 		pr_err("%s: Invalid input: Enter CE Id<sp><1/0>.\n", __func__);
2163 		return -EINVAL;
2164 	}
2165 	if (ce_id >= CE_COUNT_MAX) {
2166 		qdf_print("Invalid value CE Id");
2167 		return -EINVAL;
2168 	}
2169 
2170 	if (cfg > 1) {
2171 		qdf_print("Invalid values: enter 0 or 1");
2172 		return -EINVAL;
2173 	}
2174 
2175 	if (!ce_hist->hist_ev[ce_id])
2176 		return -EINVAL;
2177 
2178 	qdf_mutex_acquire(&ce_dbg_datamem_lock[ce_id]);
2179 	if (cfg == 1) {
2180 		if (ce_hist->data_enable[ce_id] == 1) {
2181 			qdf_print("\nAlready Enabled");
2182 		} else {
2183 			if (alloc_mem_ce_debug_hist_data(scn, ce_id) ==
2184 			    QDF_STATUS_E_NOMEM) {
2185 				ce_hist->data_enable[ce_id] = 0;
2186 				qdf_print("%s: Memory Alloc failed", __func__);
2187 			} else
2188 				ce_hist->data_enable[ce_id] = 1;
2189 		}
2190 	} else if (cfg == 0) {
2191 		if (ce_hist->data_enable[ce_id] == 0) {
2192 			qdf_print("\nAlready Disabled");
2193 		} else {
2194 			ce_hist->data_enable[ce_id] = 0;
2195 			free_mem_ce_debug_hist_data(scn, ce_id);
2196 		}
2197 	}
2198 	qdf_mutex_release(&ce_dbg_datamem_lock[ce_id]);
2199 
2200 	return size;
2201 }
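
/*
 * Example (illustrative): writing "2 1" through a store hook wired to
 * hif_ce_en_desc_hist() allocates the data buffers and starts recording the
 * payload bytes along with the descriptor history for CE 2; writing "2 0"
 * stops recording and frees those buffers again.
 */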
2202 
2203 /*
2204  * hif_disp_ce_enable_desc_data_hist() -
2205  * API to display the value of data_enable for each copy engine
2206  *
2207  * @scn: hif context
2208  * @buf: buffer into which the per-CE values are printed
2209  *
2210  * Prints one line per copy engine.
2211  * Return: total length copied into @buf
2212  */
2213 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
2214 {
2215 	ssize_t len = 0;
2216 	uint32_t ce_id = 0;
2217 	struct ce_desc_hist *ce_hist = NULL;
2218 
2219 	if (!scn)
2220 		return -EINVAL;
2221 
2222 	ce_hist = &scn->hif_ce_desc_hist;
2223 
2224 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
2225 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
2226 				ce_id, ce_hist->data_enable[ce_id]);
2227 	}
2228 
2229 	return len;
2230 }
2231 #endif /* HIF_CE_DEBUG_DATA_BUF */
2232 
2233 #ifdef OL_ATH_SMART_LOGGING
2234 #define GUARD_SPACE 10
2235 #define LOG_ID_SZ 4
2236 /*
2237  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
2238  * @src_ring: SRC ring state
2239  * @buf_cur: Current pointer in ring buffer
2240  * @buf_init: Start of the ring buffer
2241  * @buf_sz: Size of the ring buffer
2242  * @skb_sz: Max size of the SKB buffer to be copied
2243  *
2244  * Dumps all the CE SRC ring descriptors and the buffers they point to into
2245  * the given buf; skb_sz is the max per-buffer size to be copied
2246  *
2247  * Return: Current pointer in ring buffer
2248  */
2249 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
2250 				    uint8_t *buf_cur, uint8_t *buf_init,
2251 				    uint32_t buf_sz, uint32_t skb_sz)
2252 {
2253 	struct CE_src_desc *src_ring_base;
2254 	uint32_t len, entry;
2255 	struct CE_src_desc  *src_desc;
2256 	qdf_nbuf_t nbuf;
2257 	uint32_t available_buf;
2258 
2259 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
2260 	len = sizeof(struct CE_ring_state);
2261 	available_buf = buf_sz - (buf_cur - buf_init);
2262 	if (available_buf < (len + GUARD_SPACE)) {
2263 		buf_cur = buf_init;
2264 	}
2265 
2266 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
2267 	buf_cur += sizeof(struct CE_ring_state);
2268 
2269 	for (entry = 0; entry < src_ring->nentries; entry++) {
2270 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
2271 		nbuf = src_ring->per_transfer_context[entry];
2272 		if (nbuf) {
2273 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
2274 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2275 
2276 			len = sizeof(struct CE_src_desc) + skb_cp_len
2277 				+ LOG_ID_SZ + sizeof(skb_cp_len);
2278 			available_buf = buf_sz - (buf_cur - buf_init);
2279 			if (available_buf < (len + GUARD_SPACE)) {
2280 				buf_cur = buf_init;
2281 			}
2282 			qdf_mem_copy(buf_cur, src_desc,
2283 				     sizeof(struct CE_src_desc));
2284 			buf_cur += sizeof(struct CE_src_desc);
2285 
2286 			available_buf = buf_sz - (buf_cur - buf_init);
2287 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
2288 						skb_cp_len);
2289 
2290 			if (skb_cp_len) {
2291 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2292 					     skb_cp_len);
2293 				buf_cur += skb_cp_len;
2294 			}
2295 		} else {
2296 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
2297 			available_buf = buf_sz - (buf_cur - buf_init);
2298 			if (available_buf < (len + GUARD_SPACE)) {
2299 				buf_cur = buf_init;
2300 			}
2301 			qdf_mem_copy(buf_cur, src_desc,
2302 				     sizeof(struct CE_src_desc));
2303 			buf_cur += sizeof(struct CE_src_desc);
2304 			available_buf = buf_sz - (buf_cur - buf_init);
2305 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
2306 		}
2307 	}
2308 
2309 	return buf_cur;
2310 }
2311 
2312 /*
2313  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
2314  * @dest_ring: DEST ring state
2315  * @buf_cur: Current pointer in ring buffer
2316  * @buf_init: Start of the ring buffer
2317  * @buf_sz: Size of the ring buffer
2318  * @skb_sz: Max size of the SKB buffer to be copied
2319  *
2320  * Dumps all the CE DEST ring descriptors and the buffers they point to into
2321  * the given buf; skb_sz is the max per-buffer size to be copied
2322  *
2323  * Return: Current pointer in ring buffer
2324  */
2325 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
2326 				     uint8_t *buf_cur, uint8_t *buf_init,
2327 				     uint32_t buf_sz, uint32_t skb_sz)
2328 {
2329 	struct CE_dest_desc *dest_ring_base;
2330 	uint32_t len, entry;
2331 	struct CE_dest_desc  *dest_desc;
2332 	qdf_nbuf_t nbuf;
2333 	uint32_t available_buf;
2334 
2335 	dest_ring_base =
2336 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
2337 
2338 	len = sizeof(struct CE_ring_state);
2339 	available_buf = buf_sz - (buf_cur - buf_init);
2340 	if (available_buf < (len + GUARD_SPACE)) {
2341 		buf_cur = buf_init;
2342 	}
2343 
2344 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
2345 	buf_cur += sizeof(struct CE_ring_state);
2346 
2347 	for (entry = 0; entry < dest_ring->nentries; entry++) {
2348 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
2349 
2350 		nbuf = dest_ring->per_transfer_context[entry];
2351 		if (nbuf) {
2352 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
2353 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2354 
2355 			len = sizeof(struct CE_dest_desc) + skb_cp_len
2356 				+ LOG_ID_SZ + sizeof(skb_cp_len);
2357 
2358 			available_buf = buf_sz - (buf_cur - buf_init);
2359 			if (available_buf < (len + GUARD_SPACE)) {
2360 				buf_cur = buf_init;
2361 			}
2362 
2363 			qdf_mem_copy(buf_cur, dest_desc,
2364 				     sizeof(struct CE_dest_desc));
2365 			buf_cur += sizeof(struct CE_dest_desc);
2366 			available_buf = buf_sz - (buf_cur - buf_init);
2367 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
2368 						skb_cp_len);
2369 			if (skb_cp_len) {
2370 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2371 					     skb_cp_len);
2372 				buf_cur += skb_cp_len;
2373 			}
2374 		} else {
2375 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
2376 			available_buf = buf_sz - (buf_cur - buf_init);
2377 			if (available_buf < (len + GUARD_SPACE)) {
2378 				buf_cur = buf_init;
2379 			}
2380 			qdf_mem_copy(buf_cur, dest_desc,
2381 				     sizeof(struct CE_dest_desc));
2382 			buf_cur += sizeof(struct CE_dest_desc);
2383 			available_buf = buf_sz - (buf_cur - buf_init);
2384 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
2385 		}
2386 	}
2387 	return buf_cur;
2388 }
2389 
2390 /**
2391  * hif_log_dump_ce() - Copy the given CE's SRC or DEST ring to buf
2392  * Calls the respective helper to dump the CE SRC/DEST ring descriptors
2393  * and the buffers they point to into the given buf
2394  */
2395 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2396 			 uint8_t *buf_init, uint32_t buf_sz,
2397 			 uint32_t ce, uint32_t skb_sz)
2398 {
2399 	struct CE_state *ce_state;
2400 	struct CE_ring_state *src_ring;
2401 	struct CE_ring_state *dest_ring;
2402 
2403 	ce_state = scn->ce_id_to_state[ce];
2404 	src_ring = ce_state->src_ring;
2405 	dest_ring = ce_state->dest_ring;
2406 
2407 	if (src_ring) {
2408 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
2409 					      buf_init, buf_sz, skb_sz);
2410 	} else if (dest_ring) {
2411 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
2412 					       buf_init, buf_sz, skb_sz);
2413 	}
2414 
2415 	return buf_cur;
2416 }
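
/*
 * Example (illustrative sketch): a smart-logging consumer with a
 * preallocated wrap-around buffer could snapshot every copy engine like
 * this.  The buffer names and sizes are hypothetical.
 *
 *	uint8_t *cur = log_buf;
 *	uint32_t ce;
 *
 *	for (ce = 0; ce < scn->ce_count; ce++)
 *		cur = hif_log_dump_ce(scn, cur, log_buf, log_buf_sz,
 *				      ce, skb_copy_sz);
 */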
2417 #endif /* OL_ATH_SMART_LOGGING */
2418 
2419