xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision 1397a33f48ea6455be40871470b286e535820eb8)
1 /*
2  * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  *
21  * Permission to use, copy, modify, and/or distribute this software for any
22  * purpose with or without fee is hereby granted, provided that the above
23  * copyright notice and this permission notice appear in all copies.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
26  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
28  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
29  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
30  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
31  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
32  */
33 
34 #ifndef REMOVE_PKT_LOG
35 
36 #include "qdf_mem.h"
37 #include "athdefs.h"
38 #include "pktlog_ac_i.h"
39 #include "cds_api.h"
40 #include "wma_types.h"
41 #include "htc.h"
42 #include <cdp_txrx_cmn_struct.h>
43 #include <cdp_txrx_ctrl.h>
44 
/*
 * WDI subscriber objects for each pktlog event stream. Their .callback
 * members are bound in pktlog_init() according to pl_dev->callback_type
 * (default full-pktlog vs pktlog-lite registration).
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER;

/* Architecture-dependent pktlog entry points exported via pl_dev. */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

/* Global pktlog device instance published by pktlog_sethandle(). */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
65 
66 void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
67 		     struct hif_opaque_softc *scn)
68 {
69 	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
70 	*pl_handle = &pl_dev;
71 }
72 
73 void pktlog_set_callback_regtype(
74 		enum pktlog_callback_regtype callback_type)
75 {
76 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
77 
78 	if (!pl_dev) {
79 		qdf_print("Invalid pl_dev");
80 		return;
81 	}
82 
83 	pl_dev->callback_type = callback_type;
84 }
85 
86 #ifdef CONFIG_MCL
87 struct pktlog_dev_t *get_pktlog_handle(void)
88 {
89 	struct cdp_pdev *pdev_txrx_handle =
90 				cds_get_context(QDF_MODULE_ID_TXRX);
91 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
92 
93 	return cdp_get_pldev(soc, pdev_txrx_handle);
94 }
95 
96 /*
97  * Get current txrx context
98  */
99 void *get_txrx_context(void)
100 {
101 	return cds_get_context(QDF_MODULE_ID_TXRX);
102 }
103 
104 #else
/* TODO: Need to use WIN implementation to return pktlog_dev handle */
static inline struct pktlog_dev_t *get_pktlog_handle(void)
{
	/* Non-MCL builds have no pktlog device lookup yet. */
	return NULL;
}
110 static struct pktlog_dev_t *get_txrx_context(void) { }
111 #endif
112 
113 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
114 				    WMI_CMD_ID cmd_id, bool ini_triggered,
115 				    uint8_t user_triggered)
116 {
117 	struct scheduler_msg msg = { 0 };
118 	QDF_STATUS status;
119 	struct ath_pktlog_wmi_params *param;
120 
121 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
122 
123 	if (!param)
124 		return A_NO_MEMORY;
125 
126 	param->cmd_id = cmd_id;
127 	param->pktlog_event = event_types;
128 	param->ini_triggered = ini_triggered;
129 	param->user_triggered = user_triggered;
130 
131 	msg.type = WMA_PKTLOG_ENABLE_REQ;
132 	msg.bodyptr = param;
133 	msg.bodyval = 0;
134 
135 	status = scheduler_post_message(QDF_MODULE_ID_WMA,
136 					QDF_MODULE_ID_WMA,
137 					QDF_MODULE_ID_WMA, &msg);
138 
139 	if (status != QDF_STATUS_SUCCESS) {
140 		qdf_mem_free(param);
141 		return A_ERROR;
142 	}
143 
144 	return A_OK;
145 }
146 
147 static inline A_STATUS
148 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
149 		 bool ini_triggered, uint8_t user_triggered)
150 {
151 	uint32_t types = 0;
152 
153 	if (log_state & ATH_PKTLOG_TX)
154 		types |= WMI_PKTLOG_EVENT_TX;
155 
156 	if (log_state & ATH_PKTLOG_RX)
157 		types |= WMI_PKTLOG_EVENT_RX;
158 
159 	if (log_state & ATH_PKTLOG_RCFIND)
160 		types |= WMI_PKTLOG_EVENT_RCF;
161 
162 	if (log_state & ATH_PKTLOG_RCUPDATE)
163 		types |= WMI_PKTLOG_EVENT_RCU;
164 
165 	if (log_state & ATH_PKTLOG_SW_EVENT)
166 		types |= WMI_PKTLOG_EVENT_SW;
167 
168 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
169 		  "%s: Pktlog events: %d", __func__, types);
170 
171 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
172 				   ini_triggered, user_triggered);
173 }
174 
175 #ifdef HELIUMPLUS
176 /**
177  * wdi_pktlog_subscribe() - Subscribe pktlog callbacks
178  * @cdp_pdev: abstract pdev handle
179  * @log_state: Pktlog registration
180  *
181  * Return: zero on success, non-zero on failure
182  */
183 static inline A_STATUS
184 wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
185 {
186 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
187 
188 	if (!cdp_pdev) {
189 		qdf_print("Invalid pdev in %s", __func__);
190 		return A_ERROR;
191 	}
192 
193 	if (log_state & ATH_PKTLOG_TX) {
194 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_TX_SUBSCRIBER,
195 				WDI_EVENT_TX_STATUS)) {
196 			return A_ERROR;
197 		}
198 	}
199 	if (log_state & ATH_PKTLOG_RX) {
200 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER,
201 				WDI_EVENT_RX_DESC)) {
202 			return A_ERROR;
203 		}
204 		if (cdp_wdi_event_sub(soc, cdp_pdev,
205 				&PKTLOG_RX_REMOTE_SUBSCRIBER,
206 				WDI_EVENT_RX_DESC_REMOTE)) {
207 			return A_ERROR;
208 		}
209 	}
210 	if (log_state & ATH_PKTLOG_RCFIND) {
211 		if (cdp_wdi_event_sub(soc, cdp_pdev,
212 				  &PKTLOG_RCFIND_SUBSCRIBER,
213 				  WDI_EVENT_RATE_FIND)) {
214 			return A_ERROR;
215 		}
216 	}
217 	if (log_state & ATH_PKTLOG_RCUPDATE) {
218 		if (cdp_wdi_event_sub(soc, cdp_pdev,
219 				  &PKTLOG_RCUPDATE_SUBSCRIBER,
220 				  WDI_EVENT_RATE_UPDATE)) {
221 			return A_ERROR;
222 		}
223 	}
224 	if (log_state & ATH_PKTLOG_SW_EVENT) {
225 		if (cdp_wdi_event_sub(soc, cdp_pdev,
226 				  &PKTLOG_SW_EVENT_SUBSCRIBER,
227 				  WDI_EVENT_SW_EVENT)) {
228 			return A_ERROR;
229 		}
230 	}
231 
232 	return A_OK;
233 }
234 #else
235 static inline A_STATUS
236 wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
237 {
238 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
239 
240 	if (!cdp_pdev) {
241 		qdf_print("Invalid pdev in %s", __func__);
242 		return A_ERROR;
243 	}
244 
245 	if ((log_state & ATH_PKTLOG_TX) ||
246 	    (log_state  & ATH_PKTLOG_RCFIND) ||
247 	    (log_state & ATH_PKTLOG_RCUPDATE) ||
248 	    (log_state & ATH_PKTLOG_RX)) {
249 		if (cdp_wdi_event_sub(soc,
250 				      cdp_pdev,
251 				      &PKTLOG_OFFLOAD_SUBSCRIBER,
252 				      WDI_EVENT_OFFLOAD_ALL)) {
253 			return A_ERROR;
254 		}
255 	}
256 
257 	if (log_state & ATH_PKTLOG_RX) {
258 		if (cdp_wdi_event_sub(soc, cdp_pdev,
259 				      &PKTLOG_RX_SUBSCRIBER,
260 				      WDI_EVENT_RX_DESC)) {
261 			return A_ERROR;
262 		}
263 	}
264 
265 	if (log_state & ATH_PKTLOG_SW_EVENT) {
266 		if (cdp_wdi_event_sub(soc, cdp_pdev,
267 				      &PKTLOG_SW_EVENT_SUBSCRIBER,
268 				      WDI_EVENT_SW_EVENT)) {
269 			return A_ERROR;
270 		}
271 	}
272 
273 	if (log_state & ATH_PKTLOG_LITE_T2H) {
274 		if (cdp_wdi_event_sub(soc, cdp_pdev,
275 				      &PKTLOG_LITE_T2H_SUBSCRIBER,
276 				      WDI_EVENT_LITE_T2H)) {
277 			return A_ERROR;
278 		}
279 	}
280 
281 	if (log_state & ATH_PKTLOG_LITE_RX) {
282 		if (cdp_wdi_event_sub(soc, cdp_pdev,
283 				      &PKTLOG_LITE_RX_SUBSCRIBER,
284 				      WDI_EVENT_LITE_RX)) {
285 			return A_ERROR;
286 		}
287 	}
288 
289 	return A_OK;
290 }
291 #endif
292 
293 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
294 		u_int16_t peer_id, uint32_t status)
295 {
296 	switch (event) {
297 	case WDI_EVENT_OFFLOAD_ALL:
298 	{
299 		if (process_offload_pktlog(pdev, log_data)) {
300 			qdf_print("Unable to process offload info");
301 			return;
302 		}
303 		break;
304 	}
305 	case WDI_EVENT_TX_STATUS:
306 	{
307 		/*
308 		 * process TX message
309 		 */
310 		if (process_tx_info(pdev, log_data)) {
311 			qdf_print("Unable to process TX info");
312 			return;
313 		}
314 		break;
315 	}
316 	case WDI_EVENT_RX_DESC:
317 	{
318 		/*
319 		 * process RX message for local frames
320 		 */
321 		if (process_rx_info(pdev, log_data)) {
322 			qdf_print("Unable to process RX info");
323 			return;
324 		}
325 		break;
326 	}
327 	case WDI_EVENT_RX_DESC_REMOTE:
328 	{
329 		/*
330 		 * process RX message for remote frames
331 		 */
332 		if (process_rx_info_remote(pdev, log_data)) {
333 			qdf_print("Unable to process RX info");
334 			return;
335 		}
336 		break;
337 	}
338 	case WDI_EVENT_RATE_FIND:
339 	{
340 		/*
341 		 * process RATE_FIND message
342 		 */
343 		if (process_rate_find(pdev, log_data)) {
344 			qdf_print("Unable to process RC_FIND info");
345 			return;
346 		}
347 		break;
348 	}
349 	case WDI_EVENT_RATE_UPDATE:
350 	{
351 		/*
352 		 * process RATE_UPDATE message
353 		 */
354 		if (process_rate_update(pdev, log_data)) {
355 			qdf_print("Unable to process RC_UPDATE");
356 			return;
357 		}
358 		break;
359 	}
360 	case WDI_EVENT_SW_EVENT:
361 	{
362 		/*
363 		 * process SW EVENT message
364 		 */
365 		if (process_sw_event(pdev, log_data)) {
366 			qdf_print("Unable to process SW_EVENT");
367 			return;
368 		}
369 		break;
370 	}
371 	default:
372 		break;
373 	}
374 }
375 
376 void
377 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
378 			u_int16_t peer_id, uint32_t status)
379 {
380 	switch (event) {
381 	case WDI_EVENT_RX_DESC:
382 	{
383 		if (process_rx_desc_remote(context, log_data)) {
384 			qdf_print("Unable to process RX info");
385 			return;
386 		}
387 		break;
388 	}
389 	case WDI_EVENT_LITE_T2H:
390 	{
391 		if (process_pktlog_lite(context, log_data,
392 					PKTLOG_TYPE_LITE_T2H)) {
393 			qdf_print("Unable to process lite_t2h");
394 			return;
395 		}
396 		break;
397 	}
398 	case WDI_EVENT_LITE_RX:
399 	{
400 		if (process_pktlog_lite(context, log_data,
401 					PKTLOG_TYPE_LITE_RX)) {
402 			qdf_print("Unable to process lite_rx");
403 			return;
404 		}
405 		break;
406 	}
407 	default:
408 		break;
409 	}
410 }
411 
412 #ifdef HELIUMPLUS
413 /**
414  * wdi_pktlog_unsubscribe() - Unsubscribe pktlog callbacks
415  * @cdp_pdev: abstract pdev handle
416  * @log_state: Pktlog registration
417  *
418  * Return: zero on success, non-zero on failure
419  */
420 A_STATUS
421 wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
422 {
423 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
424 	/* TODO: WIN implementation to get soc */
425 
426 	if (log_state & ATH_PKTLOG_TX) {
427 		if (cdp_wdi_event_unsub(soc, pdev,
428 				    &PKTLOG_TX_SUBSCRIBER,
429 				    WDI_EVENT_TX_STATUS)) {
430 			return A_ERROR;
431 		}
432 	}
433 	if (log_state & ATH_PKTLOG_RX) {
434 		if (cdp_wdi_event_unsub(soc, pdev,
435 				    &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
436 			return A_ERROR;
437 		}
438 		if (cdp_wdi_event_unsub(soc, pdev,
439 				    &PKTLOG_RX_REMOTE_SUBSCRIBER,
440 				    WDI_EVENT_RX_DESC_REMOTE)) {
441 			return A_ERROR;
442 		}
443 	}
444 
445 	if (log_state & ATH_PKTLOG_RCFIND) {
446 		if (cdp_wdi_event_unsub(soc, pdev,
447 				    &PKTLOG_RCFIND_SUBSCRIBER,
448 				    WDI_EVENT_RATE_FIND)) {
449 			return A_ERROR;
450 		}
451 	}
452 	if (log_state & ATH_PKTLOG_RCUPDATE) {
453 		if (cdp_wdi_event_unsub(soc, pdev,
454 				    &PKTLOG_RCUPDATE_SUBSCRIBER,
455 				    WDI_EVENT_RATE_UPDATE)) {
456 			return A_ERROR;
457 		}
458 	}
459 	if (log_state & ATH_PKTLOG_RCUPDATE) {
460 		if (cdp_wdi_event_unsub(soc, pdev,
461 				    &PKTLOG_SW_EVENT_SUBSCRIBER,
462 				    WDI_EVENT_SW_EVENT)) {
463 			return A_ERROR;
464 		}
465 	}
466 
467 	return A_OK;
468 }
469 #else
470 A_STATUS
471 wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
472 {
473 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
474 
475 	if ((log_state & ATH_PKTLOG_TX) ||
476 	    (log_state  & ATH_PKTLOG_RCFIND) ||
477 	    (log_state & ATH_PKTLOG_RCUPDATE) ||
478 	    (log_state & ATH_PKTLOG_RX)) {
479 		if (cdp_wdi_event_unsub(soc,
480 					pdev,
481 					&PKTLOG_OFFLOAD_SUBSCRIBER,
482 					WDI_EVENT_OFFLOAD_ALL)) {
483 			return A_ERROR;
484 		}
485 	}
486 	if (log_state & ATH_PKTLOG_RX) {
487 		if (cdp_wdi_event_unsub(soc, pdev,
488 					&PKTLOG_RX_SUBSCRIBER,
489 					WDI_EVENT_RX_DESC)) {
490 			return A_ERROR;
491 		}
492 	}
493 	if (log_state & ATH_PKTLOG_LITE_T2H) {
494 		if (cdp_wdi_event_unsub(soc, pdev,
495 					&PKTLOG_LITE_T2H_SUBSCRIBER,
496 					WDI_EVENT_LITE_T2H)) {
497 			return A_ERROR;
498 		}
499 	}
500 	if (log_state & ATH_PKTLOG_LITE_RX) {
501 		if (cdp_wdi_event_unsub(soc, pdev,
502 					&PKTLOG_LITE_RX_SUBSCRIBER,
503 					WDI_EVENT_LITE_RX)) {
504 			return A_ERROR;
505 		}
506 	}
507 
508 	return A_OK;
509 }
510 #endif
511 
/**
 * pktlog_disable() - disable pktlog in the firmware and detach the
 * host-side WDI callbacks
 * @scn: opaque HIF handle (not dereferenced here; part of the
 *       pl_funcs->pktlog_disable interface)
 *
 * Return: 0 on success; -EINVAL on invalid handles or command failure;
 *         -EBUSY while another pktlog operation is in progress
 */
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	struct cdp_pdev *txrx_pdev = get_txrx_context();

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}

	if (!txrx_pdev) {
		qdf_print("Invalid cdp_pdev");
		return -EINVAL;
	}

	/* refuse while another pktlog operation (enable/read/clear) is
	 * mid-flight; the state machine allows only one at a time
	 */
	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	/* remember the pre-disable state; a read-in-progress must survive */
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* stop event generation in the firmware first */
	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target");
		return -EINVAL;
	}

	/* then detach host-side WDI callbacks, if they were attached */
	if (pl_dev->is_pktlog_cb_subscribed &&
		wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;
	/* an in-flight read continues in the "pktlog disabled" state */
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
569 
/**
 * pktlog_init() - reset pktlog bookkeeping to defaults and bind the WDI
 * subscriber callbacks for the configured registration type
 * @scn: opaque HIF handle (unused here; part of the pl_funcs interface)
 *
 * Note: the memzero must precede the lock/mutex initialization, and
 * pl_dev->callback_type must be set (pktlog_set_callback_regtype())
 * before this runs for the right callbacks to be bound.
 */
void pktlog_init(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;

	if (pl_dev == NULL || pl_dev->pl_info == NULL) {
		qdf_print("pl_dev or pl_info is invalid");
		return;
	}

	pl_info = pl_dev->pl_info;

	/* wipe first, then (re)initialize the embedded lock and mutex */
	OS_MEMZERO(pl_info, sizeof(*pl_info));
	PKTLOG_LOCK_INIT(pl_info);
	mutex_init(&pl_info->pktlog_mutex);

	/* default buffer size and trigger thresholds; buffer itself is
	 * allocated lazily in __pktlog_enable()
	 */
	pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
	pl_info->buf = NULL;
	pl_info->log_state = 0;
	pl_info->init_saved_state = 0;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
	pl_info->pktlen = 0;
	pl_info->start_time_thruput = 0;
	pl_info->start_time_per = 0;
	pl_dev->vendor_cmd_send = false;

	/* bind subscriber callbacks per the chosen registration scheme */
	if (pl_dev->callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
	} else if (pl_dev->callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback;
	}
}
615 
/**
 * __pktlog_enable() - enable or disable pktlog (caller must hold
 * pl_info->pktlog_mutex; see pktlog_enable())
 * @scn: opaque HIF handle
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: true when triggered by an INI setting
 * @user_triggered: non-zero when triggered from user space
 * @is_iwpriv_command: 1 for iwpriv, 0 for vendor-command origin
 *
 * Allocates the log buffer on first use, subscribes the WDI callbacks,
 * and asks the firmware to start emitting events. log_state == 0 routes
 * to pl_funcs->pktlog_disable() instead.
 *
 * Return: 0 on success; -EINVAL/-ENOMEM on failure; -EBUSY while
 *         another pktlog operation is in progress
 */
static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		 bool ini_triggered, uint8_t user_triggered,
		 uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *cdp_pdev;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	cdp_pdev = get_txrx_context();
	if (!cdp_pdev) {
		qdf_print("%s: Invalid txrx context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	/* states below CLEARBUFF_COMPLETE mean another op is mid-flight */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress", __func__);
		return 0;
	}

	/* lazily allocate and stamp the ring buffer on first enable */
	if (!pl_dev->tgt_pktlog_alloced) {
		if (pl_info->buf == NULL) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed",
					  __func__);
				return -ENOMEM;
			}

			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed",
					  __func__);
				ASSERT(0);
				return -ENOMEM;
			}

		}

		qdf_spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		qdf_spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(cdp_pdev, log_state);
			if (error) {
				pl_info->curr_pkt_state =
						PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s",
					  __func__);
				return -EINVAL;
			}
		} else {
			/* NOTE(review): already-subscribed is treated as an
			 * error here rather than a no-op — confirm intended
			 */
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Unable to subscribe %d to the WDI %s",
				  log_state, __func__);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: route to the full disable path */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
746 
747 int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
748 		 bool ini_triggered, uint8_t user_triggered,
749 		 uint32_t is_iwpriv_command)
750 {
751 	struct pktlog_dev_t *pl_dev;
752 	struct ath_pktlog_info *pl_info;
753 	int err;
754 
755 	pl_dev = get_pktlog_handle();
756 
757 	if (!pl_dev) {
758 		qdf_print("%s: invalid pl_dev handle", __func__);
759 		return -EINVAL;
760 	}
761 
762 	pl_info = pl_dev->pl_info;
763 
764 	if (!pl_info) {
765 		qdf_print("%s: invalid pl_info handle", __func__);
766 		return -EINVAL;
767 	}
768 
769 	mutex_lock(&pl_info->pktlog_mutex);
770 	err = __pktlog_enable(scn, log_state, ini_triggered,
771 				user_triggered, is_iwpriv_command);
772 	mutex_unlock(&pl_info->pktlog_mutex);
773 	return err;
774 }
775 
/* pktlog ring buffer size bounds: 1 MB minimum, 16 MB maximum */
#define ONE_MEGABYTE (1024 * 1024)
#define MAX_ALLOWED_PKTLOG_SIZE (16 * ONE_MEGABYTE)

/**
 * __pktlog_setsize() - change the pktlog ring buffer size (caller must
 * hold pl_info->pktlog_mutex; see pktlog_setsize())
 * @scn: opaque HIF handle, passed through to pktlog_release_buf()
 * @size: requested buffer size in bytes [ONE_MEGABYTE,
 *        MAX_ALLOWED_PKTLOG_SIZE]
 *
 * Requires logging to be disabled; frees any existing buffer (the new
 * one is allocated lazily on the next enable).
 *
 * Return: 0 on success; -EINVAL/-EFAULT on failure; -EBUSY while
 *         another pktlog operation is in progress
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *pdev;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pdev = get_txrx_context();

	if (!pdev) {
		qdf_print("%s: invalid pdev handle", __func__);
		return -EINVAL;
	}

	/* states below NOT_IN_PROGRESS mean another op is mid-flight */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) {
		qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB.",
			  __func__, size, (ONE_MEGABYTE / ONE_MEGABYTE),
			  (MAX_ALLOWED_PKTLOG_SIZE / ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid requested buff size", __func__);
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}

	/* drop the old buffer under the log lock; the WDI callbacks are
	 * detached first so nothing writes into the freed buffer
	 */
	qdf_spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		if (pl_dev->is_pktlog_cb_subscribed &&
			wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_spin_unlock_bh(&pl_info->log_lock);
			qdf_print("Cannot unsubscribe pktlog from the WDI");
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	/* record the new size; allocation happens on the next enable */
	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	qdf_spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
859 
860 int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
861 {
862 	struct pktlog_dev_t *pl_dev;
863 	struct ath_pktlog_info *pl_info;
864 	int status;
865 
866 	pl_dev = get_pktlog_handle();
867 
868 	if (!pl_dev) {
869 		qdf_print("%s: invalid pl_dev handle", __func__);
870 		return -EINVAL;
871 	}
872 
873 	pl_info = pl_dev->pl_info;
874 
875 	if (!pl_info) {
876 		qdf_print("%s: invalid pl_dev handle", __func__);
877 		return -EINVAL;
878 	}
879 
880 	mutex_lock(&pl_info->pktlog_mutex);
881 	status = __pktlog_setsize(scn, size);
882 	mutex_unlock(&pl_info->pktlog_mutex);
883 
884 	return status;
885 }
886 
/**
 * pktlog_clearbuff() - zero the pktlog ring buffer contents
 * @scn: opaque HIF handle (unused here)
 * @clear_buff: must be true; false is rejected
 *
 * Requires logging to be disabled and a prior read to have completed.
 *
 * Return: 0 on success; -EINVAL/-EFAULT on failure; -EBUSY while
 *         another pktlog operation is in progress
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	if (!clear_buff)
		return -EINVAL;

	/* only legal after a read completed (and not already cleared) */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing "
			  "pktlog buffer.", __func__);
		return -EINVAL;
	}

	if (pl_info->buf != NULL) {
		if (pl_info->buf_size > 0) {
			qdf_print("%s: pktlog buffer is cleared.", __func__);
			/* wipe contents and force re-init on next enable */
			memset(pl_info->buf, 0, pl_info->buf_size);
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. "
				  "Existing Buf size %d", __func__,
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}

	/* mark clear-complete only when entered from read-complete */
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
953 
954 /**
955  * pktlog_process_fw_msg() - process packetlog message
956  * @buff: buffer
957  *
958  * Return: None
959  */
960 void pktlog_process_fw_msg(uint32_t *buff, uint32_t len)
961 {
962 	uint32_t *pl_hdr;
963 	uint32_t log_type;
964 	struct cdp_pdev *pdev = get_txrx_context();
965 	struct ol_fw_data pl_fw_data;
966 
967 	if (!pdev) {
968 		qdf_print("%s: txrx_pdev is NULL", __func__);
969 		return;
970 	}
971 	pl_hdr = buff;
972 	pl_fw_data.data = pl_hdr;
973 	pl_fw_data.len = len;
974 
975 	log_type =
976 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
977 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
978 
979 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
980 		|| (log_type == PKTLOG_TYPE_TX_STAT)
981 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
982 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
983 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
984 		wdi_event_handler(WDI_EVENT_TX_STATUS,
985 				  pdev, &pl_fw_data);
986 	else if (log_type == PKTLOG_TYPE_RC_FIND)
987 		wdi_event_handler(WDI_EVENT_RATE_FIND,
988 				  pdev, &pl_fw_data);
989 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
990 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
991 				  pdev, &pl_fw_data);
992 	else if (log_type == PKTLOG_TYPE_RX_STAT)
993 		wdi_event_handler(WDI_EVENT_RX_DESC,
994 				  pdev, &pl_fw_data);
995 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
996 		wdi_event_handler(WDI_EVENT_SW_EVENT,
997 				  pdev, &pl_fw_data);
998 }
999 
1000 #if defined(QCA_WIFI_3_0_ADRASTEA)
1001 static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
1002 {
1003 	int rc = 0; /* sane */
1004 
1005 	if ((!nbuf) ||
1006 	    (nbuf->data < nbuf->head) ||
1007 	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
1008 		rc = -EINVAL;
1009 
1010 	return rc;
1011 }
1012 /**
1013  * pktlog_t2h_msg_handler() - Target to host message handler
1014  * @context: pdev context
1015  * @pkt: HTC packet
1016  *
1017  * Return: None
1018  */
1019 static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
1020 {
1021 	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
1022 	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
1023 	uint32_t *msg_word;
1024 	uint32_t msg_len;
1025 
1026 	/* check for sanity of the packet, have seen corrupted pkts */
1027 	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
1028 		qdf_print("%s: packet 0x%pK corrupted? Leaking...",
1029 			  __func__, pktlog_t2h_msg);
1030 		/* do not free; may crash! */
1031 		QDF_ASSERT(0);
1032 		return;
1033 	}
1034 
1035 	/* check for successful message reception */
1036 	if (pkt->Status != QDF_STATUS_SUCCESS) {
1037 		if (pkt->Status != QDF_STATUS_E_CANCELED)
1038 			pdev->htc_err_cnt++;
1039 		qdf_nbuf_free(pktlog_t2h_msg);
1040 		return;
1041 	}
1042 
1043 	/* confirm alignment */
1044 	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
1045 
1046 	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
1047 	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
1048 	pktlog_process_fw_msg(msg_word, msg_len);
1049 
1050 	qdf_nbuf_free(pktlog_t2h_msg);
1051 }
1052 
1053 /**
1054  * pktlog_tx_resume_handler() - resume callback
1055  * @context: pdev context
1056  *
1057  * Return: None
1058  */
1059 static void pktlog_tx_resume_handler(void *context)
1060 {
1061 	qdf_print("%s: Not expected", __func__);
1062 	qdf_assert(0);
1063 }
1064 
1065 /**
1066  * pktlog_h2t_send_complete() - send complete indication
1067  * @context: pdev context
1068  * @htc_pkt: HTC packet
1069  *
1070  * Return: None
1071  */
1072 static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
1073 {
1074 	qdf_print("%s: Not expected", __func__);
1075 	qdf_assert(0);
1076 }
1077 
1078 /**
1079  * pktlog_h2t_full() - queue full indication
1080  * @context: pdev context
1081  * @pkt: HTC packet
1082  *
1083  * Return: HTC action
1084  */
1085 static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
1086 {
1087 	return HTC_SEND_FULL_KEEP;
1088 }
1089 
1090 /**
1091  * pktlog_htc_connect_service() - create new endpoint for packetlog
1092  * @pdev - pktlog pdev
1093  *
1094  * Return: 0 for success/failure
1095  */
1096 static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
1097 {
1098 	struct htc_service_connect_req connect;
1099 	struct htc_service_connect_resp response;
1100 	QDF_STATUS status;
1101 
1102 	qdf_mem_set(&connect, sizeof(connect), 0);
1103 	qdf_mem_set(&response, sizeof(response), 0);
1104 
1105 	connect.pMetaData = NULL;
1106 	connect.MetaDataLength = 0;
1107 	connect.EpCallbacks.pContext = pdev;
1108 	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
1109 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
1110 	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
1111 	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
1112 
1113 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
1114 	connect.EpCallbacks.EpRecvRefill = NULL;
1115 	connect.EpCallbacks.RecvRefillWaterMark = 1;
1116 	/* N/A, fill is done by HIF */
1117 
1118 	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
1119 	/*
1120 	 * Specify how deep to let a queue get before htc_send_pkt will
1121 	 * call the EpSendFull function due to excessive send queue depth.
1122 	 */
1123 	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
1124 
1125 	/* disable flow control for HTT data message service */
1126 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
1127 
1128 	/* connect to control service */
1129 	connect.service_id = PACKET_LOG_SVC;
1130 
1131 	status = htc_connect_service(pdev->htc_pdev, &connect, &response);
1132 
1133 	if (status != QDF_STATUS_SUCCESS) {
1134 		pdev->mt_pktlog_enabled = false;
1135 		return -EIO;       /* failure */
1136 	}
1137 
1138 	pdev->htc_endpoint = response.Endpoint;
1139 	pdev->mt_pktlog_enabled = true;
1140 
1141 	return 0;               /* success */
1142 }
1143 
1144 /**
1145  * pktlog_htc_attach() - attach pktlog HTC service
1146  *
1147  * Return: 0 for success/failure
1148  */
1149 int pktlog_htc_attach(void)
1150 {
1151 	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1152 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1153 
1154 	if ((!pl_pdev) || (!htc_pdev)) {
1155 		qdf_print("Invalid pl_dev or htc_pdev handle");
1156 		return -EINVAL;
1157 	}
1158 
1159 	pl_pdev->htc_pdev = htc_pdev;
1160 	return pktlog_htc_connect_service(pl_pdev);
1161 }
1162 #else
1163 int pktlog_htc_attach(void)
1164 {
1165 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
1166 
1167 	if (!pl_dev) {
1168 		qdf_print("Invalid pl_dev handle");
1169 		return -EINVAL;
1170 	}
1171 
1172 	pl_dev->mt_pktlog_enabled = false;
1173 	return 0;
1174 }
1175 #endif
1176 #endif /* REMOVE_PKT_LOG */
1177