xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  *
21  * Permission to use, copy, modify, and/or distribute this software for any
22  * purpose with or without fee is hereby granted, provided that the above
23  * copyright notice and this permission notice appear in all copies.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
26  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
28  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
29  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
30  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
31  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
32  */
33 
34 #ifndef REMOVE_PKT_LOG
35 
36 #include "qdf_mem.h"
37 #include "athdefs.h"
38 #include "pktlog_ac_i.h"
39 #include "cds_api.h"
40 #include "wma_types.h"
41 #include "htc.h"
42 #include <cdp_txrx_cmn_struct.h>
43 #include <cdp_txrx_ctrl.h>
44 #ifdef PKTLOG_LEGACY
45 #include "pktlog_wifi2.h"
46 #else
47 #include "pktlog_wifi3.h"
48 #endif /* PKTLOG_LEGACY */
49 
/*
 * WDI event subscriber objects, one per pktlog event stream. Their
 * .callback members are filled in by pktlog_callback_registration()
 * and they are attached/detached to the data path through
 * cdp_wdi_event_sub() / cdp_wdi_event_unsub().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER;

/* Architecture-dependent pktlog entry points exported via pl_dev */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

/* Global pktlog device instance, published to callers by pktlog_sethandle() */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
70 
71 void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
72 		     struct hif_opaque_softc *scn)
73 {
74 	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
75 	*pl_handle = &pl_dev;
76 }
77 
/**
 * pktlog_set_pdev_id() - record which physical device this pktlog serves
 * @pl_dev: pktlog device handle
 * @pdev_id: physical device id to associate with @pl_dev
 */
void pktlog_set_pdev_id(struct pktlog_dev_t *pl_dev, uint8_t pdev_id)
{
	pl_dev->pdev_id = pdev_id;
}
82 
83 void pktlog_set_callback_regtype(
84 		enum pktlog_callback_regtype callback_type)
85 {
86 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
87 
88 	if (!pl_dev) {
89 		qdf_print("Invalid pl_dev");
90 		return;
91 	}
92 
93 	pl_dev->callback_type = callback_type;
94 }
95 
96 struct pktlog_dev_t *get_pktlog_handle(void)
97 {
98 	uint8_t pdev_id = WMI_PDEV_ID_SOC;
99 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
100 
101 	return cdp_get_pldev(soc, pdev_id);
102 }
103 
104 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
105 				    WMI_CMD_ID cmd_id, bool ini_triggered,
106 				    uint8_t user_triggered)
107 {
108 	struct scheduler_msg msg = { 0 };
109 	QDF_STATUS status;
110 	struct ath_pktlog_wmi_params *param;
111 
112 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
113 
114 	if (!param)
115 		return A_NO_MEMORY;
116 
117 	param->cmd_id = cmd_id;
118 	param->pktlog_event = event_types;
119 	param->ini_triggered = ini_triggered;
120 	param->user_triggered = user_triggered;
121 
122 	msg.type = WMA_PKTLOG_ENABLE_REQ;
123 	msg.bodyptr = param;
124 	msg.bodyval = 0;
125 
126 	status = scheduler_post_message(QDF_MODULE_ID_WMA,
127 					QDF_MODULE_ID_WMA,
128 					QDF_MODULE_ID_WMA, &msg);
129 
130 	if (status != QDF_STATUS_SUCCESS) {
131 		qdf_mem_free(param);
132 		return A_ERROR;
133 	}
134 
135 	return A_OK;
136 }
137 
138 static inline A_STATUS
139 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
140 		 bool ini_triggered, uint8_t user_triggered)
141 {
142 	uint32_t types = 0;
143 
144 	if (log_state & ATH_PKTLOG_TX)
145 		types |= WMI_PKTLOG_EVENT_TX;
146 
147 	if (log_state & ATH_PKTLOG_RX)
148 		types |= WMI_PKTLOG_EVENT_RX;
149 
150 	if (log_state & ATH_PKTLOG_RCFIND)
151 		types |= WMI_PKTLOG_EVENT_RCF;
152 
153 	if (log_state & ATH_PKTLOG_RCUPDATE)
154 		types |= WMI_PKTLOG_EVENT_RCU;
155 
156 	if (log_state & ATH_PKTLOG_SW_EVENT)
157 		types |= WMI_PKTLOG_EVENT_SW;
158 
159 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
160 		  "%s: Pktlog events: %d", __func__, types);
161 
162 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
163 				   ini_triggered, user_triggered);
164 }
165 
166 #ifdef PKTLOG_LEGACY
167 /**
168  * wdi_pktlog_subscribe() - Subscribe pktlog callbacks
169  * @pdev_id: pdev id
170  * @log_state: Pktlog registration
171  *
172  * Return: zero on success, non-zero on failure
173  */
174 static inline A_STATUS
175 wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state)
176 {
177 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
178 
179 	if (pdev_id < 0) {
180 		qdf_print("Invalid pdev");
181 		return A_ERROR;
182 	}
183 
184 	if (log_state & ATH_PKTLOG_TX) {
185 		if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_TX_SUBSCRIBER,
186 				      WDI_EVENT_TX_STATUS)) {
187 			return A_ERROR;
188 		}
189 	}
190 	if (log_state & ATH_PKTLOG_RX) {
191 		if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_RX_SUBSCRIBER,
192 				      WDI_EVENT_RX_DESC)) {
193 			return A_ERROR;
194 		}
195 		if (cdp_wdi_event_sub(soc, pdev_id,
196 				      &PKTLOG_RX_REMOTE_SUBSCRIBER,
197 				      WDI_EVENT_RX_DESC_REMOTE)) {
198 			return A_ERROR;
199 		}
200 	}
201 	if (log_state & ATH_PKTLOG_RCFIND) {
202 		if (cdp_wdi_event_sub(soc, pdev_id,
203 				      &PKTLOG_RCFIND_SUBSCRIBER,
204 				      WDI_EVENT_RATE_FIND)) {
205 			return A_ERROR;
206 		}
207 	}
208 	if (log_state & ATH_PKTLOG_RCUPDATE) {
209 		if (cdp_wdi_event_sub(soc, pdev_id,
210 				      &PKTLOG_RCUPDATE_SUBSCRIBER,
211 				      WDI_EVENT_RATE_UPDATE)) {
212 			return A_ERROR;
213 		}
214 	}
215 	if (log_state & ATH_PKTLOG_SW_EVENT) {
216 		if (cdp_wdi_event_sub(soc, pdev_id,
217 				      &PKTLOG_SW_EVENT_SUBSCRIBER,
218 				      WDI_EVENT_SW_EVENT)) {
219 			return A_ERROR;
220 		}
221 	}
222 
223 	return A_OK;
224 }
225 #else
226 static inline A_STATUS
227 wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state)
228 {
229 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
230 
231 	if (pdev_id < 0) {
232 		qdf_print("Invalid pdev");
233 		return A_ERROR;
234 	}
235 
236 	if ((log_state & ATH_PKTLOG_TX) ||
237 	    (log_state  & ATH_PKTLOG_RCFIND) ||
238 	    (log_state & ATH_PKTLOG_RCUPDATE) ||
239 	    (log_state & ATH_PKTLOG_SW_EVENT)) {
240 		if (cdp_wdi_event_sub(soc,
241 				      pdev_id,
242 				      &PKTLOG_OFFLOAD_SUBSCRIBER,
243 				      WDI_EVENT_OFFLOAD_ALL)) {
244 			return A_ERROR;
245 		}
246 	}
247 
248 	if (log_state & ATH_PKTLOG_RX) {
249 		if (cdp_wdi_event_sub(soc, pdev_id,
250 				      &PKTLOG_RX_SUBSCRIBER,
251 				      WDI_EVENT_RX_DESC)) {
252 			return A_ERROR;
253 		}
254 	}
255 
256 	if (log_state & ATH_PKTLOG_SW_EVENT) {
257 		if (cdp_wdi_event_sub(soc, pdev_id,
258 				      &PKTLOG_SW_EVENT_SUBSCRIBER,
259 				      WDI_EVENT_SW_EVENT)) {
260 			return A_ERROR;
261 		}
262 	}
263 
264 	if (log_state & ATH_PKTLOG_LITE_T2H) {
265 		if (cdp_wdi_event_sub(soc, pdev_id,
266 				      &PKTLOG_LITE_T2H_SUBSCRIBER,
267 				      WDI_EVENT_LITE_T2H)) {
268 			return A_ERROR;
269 		}
270 	}
271 
272 	if (log_state & ATH_PKTLOG_LITE_RX) {
273 		if (cdp_wdi_event_sub(soc, pdev_id,
274 				      &PKTLOG_LITE_RX_SUBSCRIBER,
275 				      WDI_EVENT_LITE_RX)) {
276 			return A_ERROR;
277 		}
278 	}
279 
280 	return A_OK;
281 }
282 #endif
283 
284 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
285 		u_int16_t peer_id, uint32_t status)
286 {
287 	switch (event) {
288 	case WDI_EVENT_OFFLOAD_ALL:
289 	{
290 		if (process_offload_pktlog_wifi3(pdev, log_data)) {
291 			qdf_print("Unable to process offload info");
292 			return;
293 		}
294 		break;
295 	}
296 	case WDI_EVENT_TX_STATUS:
297 	{
298 		/*
299 		 * process TX message
300 		 */
301 		if (process_tx_info(pdev, log_data)) {
302 			qdf_print("Unable to process TX info");
303 			return;
304 		}
305 		break;
306 	}
307 	case WDI_EVENT_RX_DESC:
308 	{
309 		/*
310 		 * process RX message for local frames
311 		 */
312 		if (process_rx_info(pdev, log_data)) {
313 			qdf_print("Unable to process RX info");
314 			return;
315 		}
316 		break;
317 	}
318 	case WDI_EVENT_RX_DESC_REMOTE:
319 	{
320 		/*
321 		 * process RX message for remote frames
322 		 */
323 		if (process_rx_info_remote(pdev, log_data)) {
324 			qdf_print("Unable to process RX info");
325 			return;
326 		}
327 		break;
328 	}
329 	case WDI_EVENT_RATE_FIND:
330 	{
331 		/*
332 		 * process RATE_FIND message
333 		 */
334 		if (process_rate_find(pdev, log_data)) {
335 			qdf_print("Unable to process RC_FIND info");
336 			return;
337 		}
338 		break;
339 	}
340 	case WDI_EVENT_RATE_UPDATE:
341 	{
342 		/*
343 		 * process RATE_UPDATE message
344 		 */
345 		if (process_rate_update(pdev, log_data)) {
346 			qdf_print("Unable to process RC_UPDATE");
347 			return;
348 		}
349 		break;
350 	}
351 	case WDI_EVENT_SW_EVENT:
352 	{
353 		/*
354 		 * process SW EVENT message
355 		 */
356 		if (process_sw_event(pdev, log_data)) {
357 			qdf_print("Unable to process SW_EVENT");
358 			return;
359 		}
360 		break;
361 	}
362 	default:
363 		break;
364 	}
365 }
366 
367 void
368 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
369 			u_int16_t peer_id, uint32_t status)
370 {
371 	switch (event) {
372 	case WDI_EVENT_RX_DESC:
373 	{
374 		if (process_rx_desc_remote_wifi3(context, log_data)) {
375 			qdf_print("Unable to process RX info");
376 			return;
377 		}
378 		break;
379 	}
380 	case WDI_EVENT_LITE_T2H:
381 	{
382 		if (process_pktlog_lite_wifi3(context, log_data,
383 					      PKTLOG_TYPE_LITE_T2H)) {
384 			qdf_print("Unable to process lite_t2h");
385 			return;
386 		}
387 		break;
388 	}
389 	case WDI_EVENT_LITE_RX:
390 	{
391 		if (process_pktlog_lite_wifi3(context, log_data,
392 					      PKTLOG_TYPE_LITE_RX)) {
393 			qdf_print("Unable to process lite_rx");
394 			return;
395 		}
396 		break;
397 	}
398 	default:
399 		break;
400 	}
401 }
402 
403 #ifdef PKTLOG_LEGACY
404 A_STATUS
405 wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state)
406 {
407 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
408 	/* TODO: WIN implementation to get soc */
409 
410 	if (log_state & ATH_PKTLOG_TX) {
411 		if (cdp_wdi_event_unsub(soc, pdev_id,
412 					&PKTLOG_TX_SUBSCRIBER,
413 					WDI_EVENT_TX_STATUS)) {
414 			return A_ERROR;
415 		}
416 	}
417 	if (log_state & ATH_PKTLOG_RX) {
418 		if (cdp_wdi_event_unsub(soc, pdev_id,
419 					&PKTLOG_RX_SUBSCRIBER,
420 					WDI_EVENT_RX_DESC)) {
421 			return A_ERROR;
422 		}
423 		if (cdp_wdi_event_unsub(soc, pdev_id,
424 					&PKTLOG_RX_REMOTE_SUBSCRIBER,
425 					WDI_EVENT_RX_DESC_REMOTE)) {
426 			return A_ERROR;
427 		}
428 	}
429 
430 	if (log_state & ATH_PKTLOG_RCFIND) {
431 		if (cdp_wdi_event_unsub(soc, pdev_id,
432 					&PKTLOG_RCFIND_SUBSCRIBER,
433 					WDI_EVENT_RATE_FIND)) {
434 			return A_ERROR;
435 		}
436 	}
437 	if (log_state & ATH_PKTLOG_RCUPDATE) {
438 		if (cdp_wdi_event_unsub(soc, pdev_id,
439 					&PKTLOG_RCUPDATE_SUBSCRIBER,
440 					WDI_EVENT_RATE_UPDATE)) {
441 			return A_ERROR;
442 		}
443 	}
444 	if (log_state & ATH_PKTLOG_RCUPDATE) {
445 		if (cdp_wdi_event_unsub(soc, pdev_id,
446 					&PKTLOG_SW_EVENT_SUBSCRIBER,
447 					WDI_EVENT_SW_EVENT)) {
448 			return A_ERROR;
449 		}
450 	}
451 
452 	return A_OK;
453 }
454 #else
455 A_STATUS
456 wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state)
457 {
458 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
459 
460 	if ((log_state & ATH_PKTLOG_TX) ||
461 	    (log_state  & ATH_PKTLOG_RCFIND) ||
462 	    (log_state & ATH_PKTLOG_RCUPDATE) ||
463 	    (log_state & ATH_PKTLOG_SW_EVENT)) {
464 		if (cdp_wdi_event_unsub(soc,
465 					pdev_id,
466 					&PKTLOG_OFFLOAD_SUBSCRIBER,
467 					WDI_EVENT_OFFLOAD_ALL)) {
468 			return A_ERROR;
469 		}
470 	}
471 	if (log_state & ATH_PKTLOG_RX) {
472 		if (cdp_wdi_event_unsub(soc, pdev_id,
473 					&PKTLOG_RX_SUBSCRIBER,
474 					WDI_EVENT_RX_DESC)) {
475 			return A_ERROR;
476 		}
477 	}
478 	if (log_state & ATH_PKTLOG_LITE_T2H) {
479 		if (cdp_wdi_event_unsub(soc, pdev_id,
480 					&PKTLOG_LITE_T2H_SUBSCRIBER,
481 					WDI_EVENT_LITE_T2H)) {
482 			return A_ERROR;
483 		}
484 	}
485 	if (log_state & ATH_PKTLOG_LITE_RX) {
486 		if (cdp_wdi_event_unsub(soc, pdev_id,
487 					&PKTLOG_LITE_RX_SUBSCRIBER,
488 					WDI_EVENT_LITE_RX)) {
489 			return A_ERROR;
490 		}
491 	}
492 
493 	return A_OK;
494 }
495 #endif
496 
497 int pktlog_disable(struct hif_opaque_softc *scn)
498 {
499 	struct pktlog_dev_t *pl_dev;
500 	struct ath_pktlog_info *pl_info;
501 	uint8_t save_pktlog_state;
502 	uint8_t pdev_id = WMI_PDEV_ID_SOC;
503 
504 	pl_dev = get_pktlog_handle();
505 
506 	if (!pl_dev) {
507 		qdf_print("Invalid pl_dev");
508 		return -EINVAL;
509 	}
510 
511 	pl_info = pl_dev->pl_info;
512 
513 	if (!pl_dev->pl_info) {
514 		qdf_print("Invalid pl_info");
515 		return -EINVAL;
516 	}
517 
518 	if (pdev_id < 0) {
519 		qdf_print("Invalid pdev");
520 		return -EINVAL;
521 	}
522 
523 	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
524 	    pl_info->curr_pkt_state ==
525 			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
526 	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
527 	    pl_info->curr_pkt_state ==
528 			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
529 		return -EBUSY;
530 
531 	save_pktlog_state = pl_info->curr_pkt_state;
532 	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
533 
534 	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
535 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
536 		qdf_print("Failed to disable pktlog in target");
537 		return -EINVAL;
538 	}
539 
540 	if (pl_dev->is_pktlog_cb_subscribed &&
541 		wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) {
542 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
543 		qdf_print("Cannot unsubscribe pktlog from the WDI");
544 		return -EINVAL;
545 	}
546 	pl_dev->is_pktlog_cb_subscribed = false;
547 	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
548 		pl_info->curr_pkt_state =
549 			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
550 	else
551 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
552 	return 0;
553 }
554 
#ifdef PKTLOG_LEGACY
/**
 * pktlog_callback_registration() - Register pktlog handlers based on
 *                                  on callback type
 * @callback_type: pktlog full or lite registration
 *
 * Legacy (wifi2) targets: every stream uses the full pktlog_callback.
 *
 * Return: None
 */
static void pktlog_callback_registration(uint8_t callback_type)
{
	if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
	}
}
#else
/**
 * pktlog_callback_registration() - Register pktlog handlers based on
 *                                  callback type
 * @callback_type: pktlog full or lite registration
 *
 * Offload (wifi3) targets: the default registration mixes lite handlers
 * (RX, LITE_T2H) with the full handler for OFFLOAD_ALL; lite registration
 * uses only the lite handlers.
 *
 * Return: None
 */
static void pktlog_callback_registration(uint8_t callback_type)
{
	if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
		PKTLOG_RX_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback;
	} else if (callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
	}
}
#endif
587 
588 #define ONE_MEGABYTE (1024 * 1024)
589 
/**
 * pktlog_init() - initialize the pktlog info structure and register
 *                 the WDI callbacks selected by pl_dev->callback_type
 * @scn: HIF opaque context (not dereferenced here)
 *
 * Zeroes and re-initializes pl_info, sizes the log buffer from the
 * cfg_dp_pktlog_buffer_size config item (in MB, falling back to 1 MB
 * when the config yields 0), and wires up the subscriber callbacks.
 *
 * Return: None
 */
void pktlog_init(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	uint32_t buff_size;

	if (!pl_dev || !pl_dev->pl_info) {
		qdf_print("pl_dev or pl_info is invalid");
		return;
	}

	pl_info = pl_dev->pl_info;

	/* Reset all state, then re-create the locks the memset wiped */
	OS_MEMZERO(pl_info, sizeof(*pl_info));
	PKTLOG_LOCK_INIT(pl_info);
	mutex_init(&pl_info->pktlog_mutex);

	/* Configured size is in megabytes; 0 means "use the 1 MB default" */
	buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE;

	pl_info->buf_size = (buff_size ? buff_size : ONE_MEGABYTE);
	pl_info->buf = NULL;
	pl_info->log_state = 0;
	pl_info->init_saved_state = 0;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
	pl_info->pktlen = 0;
	pl_info->start_time_thruput = 0;
	pl_info->start_time_per = 0;
	pl_dev->vendor_cmd_send = false;

	pktlog_callback_registration(pl_dev->callback_type);
}
628 
629 int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
630 		    bool ini_triggered, uint8_t user_triggered,
631 		    uint32_t is_iwpriv_command)
632 {
633 	struct pktlog_dev_t *pl_dev;
634 	struct ath_pktlog_info *pl_info;
635 	uint8_t pdev_id;
636 	int error;
637 
638 	if (!scn) {
639 		qdf_print("Invalid scn context");
640 		ASSERT(0);
641 		return -EINVAL;
642 	}
643 
644 	pl_dev = get_pktlog_handle();
645 	if (!pl_dev) {
646 		qdf_print("Invalid pktlog context");
647 		ASSERT(0);
648 		return -EINVAL;
649 	}
650 
651 	pdev_id = WMI_PDEV_ID_SOC;
652 	if (pdev_id < 0) {
653 		qdf_print("Invalid txrx context");
654 		ASSERT(0);
655 		return -EINVAL;
656 	}
657 
658 	pl_info = pl_dev->pl_info;
659 	if (!pl_info) {
660 		qdf_print("Invalid pl_info context");
661 		ASSERT(0);
662 		return -EINVAL;
663 	}
664 
665 	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
666 		return -EBUSY;
667 
668 	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
669 	/* is_iwpriv_command : 0 indicates its a vendor command
670 	 * log_state: 0 indicates pktlog disable command
671 	 * vendor_cmd_send flag; false means no vendor pktlog enable
672 	 * command was sent previously
673 	 */
674 	if (is_iwpriv_command == 0 && log_state == 0 &&
675 	    pl_dev->vendor_cmd_send == false) {
676 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
677 		qdf_print("pktlog operation not in progress");
678 		return 0;
679 	}
680 
681 	if (!pl_dev->tgt_pktlog_alloced) {
682 		if (!pl_info->buf) {
683 			error = pktlog_alloc_buf(scn);
684 
685 			if (error != 0) {
686 				pl_info->curr_pkt_state =
687 					PKTLOG_OPR_NOT_IN_PROGRESS;
688 				qdf_print("pktlog buff alloc failed");
689 				return -ENOMEM;
690 			}
691 
692 			if (!pl_info->buf) {
693 				pl_info->curr_pkt_state =
694 					PKTLOG_OPR_NOT_IN_PROGRESS;
695 				qdf_print("pktlog buf alloc failed");
696 				ASSERT(0);
697 				return -ENOMEM;
698 			}
699 
700 		}
701 
702 		qdf_spin_lock_bh(&pl_info->log_lock);
703 		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
704 		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
705 		pl_info->buf->wr_offset = 0;
706 		pl_info->buf->rd_offset = -1;
707 		/* These below variables are used by per packet stats*/
708 		pl_info->buf->bytes_written = 0;
709 		pl_info->buf->msg_index = 1;
710 		pl_info->buf->offset = PKTLOG_READ_OFFSET;
711 		qdf_spin_unlock_bh(&pl_info->log_lock);
712 
713 		pl_info->start_time_thruput = os_get_timestamp();
714 		pl_info->start_time_per = pl_info->start_time_thruput;
715 
716 		pl_dev->tgt_pktlog_alloced = true;
717 	}
718 	if (log_state != 0) {
719 		/* WDI subscribe */
720 		if (!pl_dev->is_pktlog_cb_subscribed) {
721 			error = wdi_pktlog_subscribe(pdev_id, log_state);
722 			if (error) {
723 				pl_info->curr_pkt_state =
724 						PKTLOG_OPR_NOT_IN_PROGRESS;
725 				qdf_print("Unable to subscribe to the WDI");
726 				return -EINVAL;
727 			}
728 		} else {
729 			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
730 			qdf_print("Unable to subscribe %d to the WDI",
731 				  log_state);
732 			return -EINVAL;
733 		}
734 		/* WMI command to enable pktlog on the firmware */
735 		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
736 				user_triggered)) {
737 			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
738 			qdf_print("Device cannot be enabled");
739 			return -EINVAL;
740 		}
741 		pl_dev->is_pktlog_cb_subscribed = true;
742 
743 		if (is_iwpriv_command == 0)
744 			pl_dev->vendor_cmd_send = true;
745 	} else {
746 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
747 		pl_dev->pl_funcs->pktlog_disable(scn);
748 		if (is_iwpriv_command == 0)
749 			pl_dev->vendor_cmd_send = false;
750 	}
751 
752 	pl_info->log_state = log_state;
753 	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
754 	return 0;
755 }
756 
757 int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
758 		 bool ini_triggered, uint8_t user_triggered,
759 		 uint32_t is_iwpriv_command)
760 {
761 	struct pktlog_dev_t *pl_dev;
762 	struct ath_pktlog_info *pl_info;
763 	int err;
764 
765 	pl_dev = get_pktlog_handle();
766 
767 	if (!pl_dev) {
768 		qdf_print("Invalid pl_dev handle");
769 		return -EINVAL;
770 	}
771 
772 	pl_info = pl_dev->pl_info;
773 
774 	if (!pl_info) {
775 		qdf_print("Invalid pl_info handle");
776 		return -EINVAL;
777 	}
778 
779 	mutex_lock(&pl_info->pktlog_mutex);
780 	err = __pktlog_enable(scn, log_state, ini_triggered,
781 				user_triggered, is_iwpriv_command);
782 	mutex_unlock(&pl_info->pktlog_mutex);
783 	return err;
784 }
785 
786 static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
787 {
788 	struct pktlog_dev_t *pl_dev;
789 	struct ath_pktlog_info *pl_info;
790 	uint8_t pdev_id = WMI_PDEV_ID_SOC;
791 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
792 	uint32_t buff_size;
793 	uint32_t max_allowed_buff_size;
794 
795 	pl_dev = get_pktlog_handle();
796 
797 	if (!pl_dev) {
798 		qdf_print("Invalid pl_dev handle");
799 		return -EINVAL;
800 	}
801 
802 	pl_info = pl_dev->pl_info;
803 
804 	if (!pl_info) {
805 		qdf_print("Invalid pl_dev handle");
806 		return -EINVAL;
807 	}
808 
809 	if (pdev_id < 0) {
810 		qdf_print("Invalid pdev");
811 		return -EINVAL;
812 	}
813 
814 	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
815 		qdf_print("pktlog is not configured");
816 		return -EBUSY;
817 	}
818 
819 	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
820 
821 	buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE;
822 	max_allowed_buff_size = (buff_size ? buff_size : ONE_MEGABYTE);
823 
824 	if (size < ONE_MEGABYTE || size > max_allowed_buff_size) {
825 		qdf_print("Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB",
826 			  size, (ONE_MEGABYTE / ONE_MEGABYTE),
827 			  (max_allowed_buff_size / ONE_MEGABYTE));
828 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
829 		qdf_print("Invalid requested buff size");
830 		return -EINVAL;
831 	}
832 
833 	if (size == pl_info->buf_size) {
834 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
835 		qdf_print("Pktlog Buff Size is already of same size");
836 		return 0;
837 	}
838 
839 	if (pl_info->log_state) {
840 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
841 		qdf_print("Logging should be disabled before changing buffer size");
842 		return -EINVAL;
843 	}
844 
845 	qdf_spin_lock_bh(&pl_info->log_lock);
846 	if (pl_info->buf) {
847 		if (pl_dev->is_pktlog_cb_subscribed &&
848 			wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) {
849 			pl_info->curr_pkt_state =
850 				PKTLOG_OPR_NOT_IN_PROGRESS;
851 			qdf_spin_unlock_bh(&pl_info->log_lock);
852 			qdf_print("Cannot unsubscribe pktlog from the WDI");
853 			return -EFAULT;
854 		}
855 		pktlog_release_buf(scn);
856 		pl_dev->is_pktlog_cb_subscribed = false;
857 		pl_dev->tgt_pktlog_alloced = false;
858 	}
859 
860 	if (size != 0) {
861 		qdf_print("New Pktlog Buff Size is %d", size);
862 		pl_info->buf_size = size;
863 	}
864 	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
865 	qdf_spin_unlock_bh(&pl_info->log_lock);
866 	return 0;
867 }
868 
869 int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
870 {
871 	struct pktlog_dev_t *pl_dev;
872 	struct ath_pktlog_info *pl_info;
873 	int status;
874 
875 	pl_dev = get_pktlog_handle();
876 
877 	if (!pl_dev) {
878 		qdf_print("Invalid pl_dev handle");
879 		return -EINVAL;
880 	}
881 
882 	pl_info = pl_dev->pl_info;
883 
884 	if (!pl_info) {
885 		qdf_print("Invalid pl_dev handle");
886 		return -EINVAL;
887 	}
888 
889 	mutex_lock(&pl_info->pktlog_mutex);
890 	status = __pktlog_setsize(scn, size);
891 	mutex_unlock(&pl_info->pktlog_mutex);
892 
893 	return status;
894 }
895 
/**
 * pktlog_clearbuff() - zero the pktlog buffer contents
 * @scn: HIF opaque context (not dereferenced here)
 * @clear_buff: must be true; false is rejected
 *
 * Only legal after a read has completed and while logging is disabled.
 * Clearing also resets the read offset and forces re-subscription /
 * re-initialization on the next enable.
 *
 * Return: 0 on success; -EINVAL/-EBUSY/-EFAULT on failure
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	if (!clear_buff)
		return -EINVAL;

	/* Only permitted between read-complete and clear-complete states */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Logging should be disabled before clearing pktlog buffer");
		return -EINVAL;
	}

	if (pl_info->buf) {
		if (pl_info->buf_size > 0) {
			qdf_debug("pktlog buffer is cleared");
			memset(pl_info->buf, 0, pl_info->buf_size);
			/* force re-subscribe/re-init on next enable */
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("pktlog buffer size is not proper. "
				  "Existing Buf size %d",
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("pktlog buff is NULL");
		return -EFAULT;
	}

	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
961 
962 void pktlog_process_fw_msg(uint8_t pdev_id, uint32_t *buff, uint32_t len)
963 {
964 	uint32_t *pl_hdr;
965 	uint32_t log_type;
966 	struct ol_fw_data pl_fw_data;
967 
968 	if (pdev_id == OL_TXRX_INVALID_PDEV_ID) {
969 		qdf_print("txrx pdev_id is invalid");
970 		return;
971 	}
972 	pl_hdr = buff;
973 	pl_fw_data.data = pl_hdr;
974 	pl_fw_data.len = len;
975 
976 	log_type =
977 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
978 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
979 
980 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
981 		|| (log_type == PKTLOG_TYPE_TX_STAT)
982 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
983 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
984 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
985 		wdi_event_handler(WDI_EVENT_TX_STATUS,
986 				  pdev_id, &pl_fw_data);
987 	else if (log_type == PKTLOG_TYPE_RC_FIND)
988 		wdi_event_handler(WDI_EVENT_RATE_FIND,
989 				  pdev_id, &pl_fw_data);
990 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
991 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
992 				  pdev_id, &pl_fw_data);
993 	else if (log_type == PKTLOG_TYPE_RX_STAT)
994 		wdi_event_handler(WDI_EVENT_RX_DESC,
995 				  pdev_id, &pl_fw_data);
996 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
997 		wdi_event_handler(WDI_EVENT_SW_EVENT,
998 				  pdev_id, &pl_fw_data);
999 }
1000 
1001 #if defined(QCA_WIFI_3_0_ADRASTEA)
1002 static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
1003 {
1004 	int rc = 0; /* sane */
1005 
1006 	if ((!nbuf) ||
1007 	    (nbuf->data < nbuf->head) ||
1008 	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
1009 		rc = -EINVAL;
1010 
1011 	return rc;
1012 }
/**
 * pktlog_t2h_msg_handler() - Target to host message handler
 * @context: pdev context
 * @pkt: HTC packet
 *
 * HTC receive callback for the PACKET_LOG_SVC endpoint: validates the
 * nbuf, then hands the firmware message words to pktlog_process_fw_msg()
 * and frees the nbuf.
 *
 * Return: None
 */
static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	uint32_t *msg_word;
	uint32_t msg_len;

	/* check for sanity of the packet, have seen corrupted pkts */
	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
		qdf_print("packet 0x%pK corrupted? Leaking...",
			  pktlog_t2h_msg);
		/* do not free; may crash! */
		QDF_ASSERT(0);
		return;
	}

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation is expected at teardown; anything else counts
		 * toward the endpoint error statistics
		 */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			pdev->htc_err_cnt++;
		qdf_nbuf_free(pktlog_t2h_msg);
		return;
	}

	/* confirm alignment */
	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);

	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
	pktlog_process_fw_msg(pdev->pdev_id, msg_word, msg_len);

	qdf_nbuf_free(pktlog_t2h_msg);
}
1053 
/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * The pktlog endpoint never transmits with credit-based flow control,
 * so a TX-resume indication is a protocol violation; assert.
 *
 * Return: None
 */
static void pktlog_tx_resume_handler(void *context)
{
	qdf_print("Not expected");
	qdf_assert(0);
}
1065 
/**
 * pktlog_h2t_send_complete() - send complete indication
 * @context: pdev context
 * @htc_pkt: HTC packet
 *
 * The pktlog service is receive-only on the host; a host-to-target send
 * completion should never arrive, so assert.
 *
 * Return: None
 */
static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	qdf_print("Not expected");
	qdf_assert(0);
}
1078 
/**
 * pktlog_h2t_full() - queue full indication
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Always keeps the packet queued; the pktlog endpoint never drops on a
 * full send queue.
 *
 * Return: HTC action
 */
static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
1090 
1091 /**
1092  * pktlog_htc_connect_service() - create new endpoint for packetlog
1093  * @pdev - pktlog pdev
1094  *
1095  * Return: 0 for success/failure
1096  */
1097 static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
1098 {
1099 	struct htc_service_connect_req connect;
1100 	struct htc_service_connect_resp response;
1101 	QDF_STATUS status;
1102 
1103 	qdf_mem_zero(&connect, sizeof(connect));
1104 	qdf_mem_zero(&response, sizeof(response));
1105 
1106 	connect.pMetaData = NULL;
1107 	connect.MetaDataLength = 0;
1108 	connect.EpCallbacks.pContext = pdev;
1109 	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
1110 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
1111 	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
1112 	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
1113 
1114 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
1115 	connect.EpCallbacks.EpRecvRefill = NULL;
1116 	connect.EpCallbacks.RecvRefillWaterMark = 1;
1117 	/* N/A, fill is done by HIF */
1118 
1119 	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
1120 	/*
1121 	 * Specify how deep to let a queue get before htc_send_pkt will
1122 	 * call the EpSendFull function due to excessive send queue depth.
1123 	 */
1124 	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
1125 
1126 	/* disable flow control for HTT data message service */
1127 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
1128 
1129 	/* connect to control service */
1130 	connect.service_id = PACKET_LOG_SVC;
1131 
1132 	status = htc_connect_service(pdev->htc_pdev, &connect, &response);
1133 
1134 	if (status != QDF_STATUS_SUCCESS) {
1135 		pdev->mt_pktlog_enabled = false;
1136 		return -EIO;       /* failure */
1137 	}
1138 
1139 	pdev->htc_endpoint = response.Endpoint;
1140 	pdev->mt_pktlog_enabled = true;
1141 
1142 	return 0;               /* success */
1143 }
1144 
1145 /**
1146  * pktlog_htc_attach() - attach pktlog HTC service
1147  *
1148  * Return: 0 for success/failure
1149  */
1150 int pktlog_htc_attach(void)
1151 {
1152 	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1153 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1154 
1155 	if ((!pl_pdev) || (!htc_pdev)) {
1156 		qdf_print("Invalid pl_dev or htc_pdev handle");
1157 		return -EINVAL;
1158 	}
1159 
1160 	pl_pdev->htc_pdev = htc_pdev;
1161 	return pktlog_htc_connect_service(pl_pdev);
1162 }
1163 #else
1164 int pktlog_htc_attach(void)
1165 {
1166 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
1167 
1168 	if (!pl_dev) {
1169 		qdf_print("Invalid pl_dev handle");
1170 		return -EINVAL;
1171 	}
1172 
1173 	pl_dev->mt_pktlog_enabled = false;
1174 	return 0;
1175 }
1176 #endif
1177 #endif /* REMOVE_PKT_LOG */
1178