xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  *
21  * Permission to use, copy, modify, and/or distribute this software for any
22  * purpose with or without fee is hereby granted, provided that the above
23  * copyright notice and this permission notice appear in all copies.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
26  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
28  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
29  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
30  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
31  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
32  */
33 
34 #ifndef REMOVE_PKT_LOG
35 
36 #include "qdf_mem.h"
37 #include "athdefs.h"
38 #include "pktlog_ac_i.h"
39 #include "cds_api.h"
40 #include "wma_types.h"
41 #include "htc.h"
42 #include <cdp_txrx_cmn_struct.h>
43 #include <cdp_txrx_ctrl.h>
44 #ifdef PKTLOG_LEGACY
45 #include "pktlog_wifi2.h"
46 #else
47 #include "pktlog_wifi3.h"
48 #endif /* PKTLOG_LEGACY */
49 
/*
 * WDI event subscriber objects. Their .callback members are filled in by
 * pktlog_callback_registration() and each is registered with / removed from
 * the datapath through cdp_wdi_event_sub()/cdp_wdi_event_unsub().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER;
59 
/* Architecture-dependent pktlog operations exported via the pktlog device. */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

/* File-scope singleton pktlog device; handed out by pktlog_sethandle(). */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
70 
71 void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
72 		     struct hif_opaque_softc *scn)
73 {
74 	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
75 	*pl_handle = &pl_dev;
76 }
77 
/* Record the datapath pdev id on the pktlog device for later WDI calls. */
void pktlog_set_pdev_id(struct pktlog_dev_t *pl_dev, uint8_t pdev_id)
{
	pl_dev->pdev_id = pdev_id;
}
82 
83 void pktlog_set_callback_regtype(
84 		enum pktlog_callback_regtype callback_type)
85 {
86 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
87 
88 	if (!pl_dev) {
89 		qdf_print("Invalid pl_dev");
90 		return;
91 	}
92 
93 	pl_dev->callback_type = callback_type;
94 }
95 
96 struct pktlog_dev_t *get_pktlog_handle(void)
97 {
98 	uint8_t pdev_id = WMI_PDEV_ID_SOC;
99 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
100 
101 	return cdp_get_pldev(soc, pdev_id);
102 }
103 
104 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
105 				    WMI_CMD_ID cmd_id, bool ini_triggered,
106 				    uint8_t user_triggered)
107 {
108 	struct scheduler_msg msg = { 0 };
109 	QDF_STATUS status;
110 	struct ath_pktlog_wmi_params *param;
111 
112 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
113 
114 	if (!param)
115 		return A_NO_MEMORY;
116 
117 	param->cmd_id = cmd_id;
118 	param->pktlog_event = event_types;
119 	param->ini_triggered = ini_triggered;
120 	param->user_triggered = user_triggered;
121 
122 	msg.type = WMA_PKTLOG_ENABLE_REQ;
123 	msg.bodyptr = param;
124 	msg.bodyval = 0;
125 
126 	status = scheduler_post_message(QDF_MODULE_ID_WMA,
127 					QDF_MODULE_ID_WMA,
128 					QDF_MODULE_ID_WMA, &msg);
129 
130 	if (status != QDF_STATUS_SUCCESS) {
131 		qdf_mem_free(param);
132 		return A_ERROR;
133 	}
134 
135 	return A_OK;
136 }
137 
138 static inline A_STATUS
139 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
140 		 bool ini_triggered, uint8_t user_triggered)
141 {
142 	uint32_t types = 0;
143 
144 	if (log_state & ATH_PKTLOG_TX)
145 		types |= WMI_PKTLOG_EVENT_TX;
146 
147 	if (log_state & ATH_PKTLOG_RX)
148 		types |= WMI_PKTLOG_EVENT_RX;
149 
150 	if (log_state & ATH_PKTLOG_RCFIND)
151 		types |= WMI_PKTLOG_EVENT_RCF;
152 
153 	if (log_state & ATH_PKTLOG_RCUPDATE)
154 		types |= WMI_PKTLOG_EVENT_RCU;
155 
156 	if (log_state & ATH_PKTLOG_SW_EVENT)
157 		types |= WMI_PKTLOG_EVENT_SW;
158 
159 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
160 		  "%s: Pktlog events: %d", __func__, types);
161 
162 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
163 				   ini_triggered, user_triggered);
164 }
165 
166 #ifdef PKTLOG_LEGACY
167 /**
168  * wdi_pktlog_subscribe() - Subscribe pktlog callbacks
169  * @pdev_id: pdev id
170  * @log_state: Pktlog registration
171  *
172  * Return: zero on success, non-zero on failure
173  */
174 static inline A_STATUS
175 wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state)
176 {
177 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
178 
179 	if (pdev_id < 0) {
180 		qdf_print("Invalid pdev in %s", __func__);
181 		return A_ERROR;
182 	}
183 
184 	if (log_state & ATH_PKTLOG_TX) {
185 		if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_TX_SUBSCRIBER,
186 				      WDI_EVENT_TX_STATUS)) {
187 			return A_ERROR;
188 		}
189 	}
190 	if (log_state & ATH_PKTLOG_RX) {
191 		if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_RX_SUBSCRIBER,
192 				      WDI_EVENT_RX_DESC)) {
193 			return A_ERROR;
194 		}
195 		if (cdp_wdi_event_sub(soc, pdev_id,
196 				      &PKTLOG_RX_REMOTE_SUBSCRIBER,
197 				      WDI_EVENT_RX_DESC_REMOTE)) {
198 			return A_ERROR;
199 		}
200 	}
201 	if (log_state & ATH_PKTLOG_RCFIND) {
202 		if (cdp_wdi_event_sub(soc, pdev_id,
203 				      &PKTLOG_RCFIND_SUBSCRIBER,
204 				      WDI_EVENT_RATE_FIND)) {
205 			return A_ERROR;
206 		}
207 	}
208 	if (log_state & ATH_PKTLOG_RCUPDATE) {
209 		if (cdp_wdi_event_sub(soc, pdev_id,
210 				      &PKTLOG_RCUPDATE_SUBSCRIBER,
211 				      WDI_EVENT_RATE_UPDATE)) {
212 			return A_ERROR;
213 		}
214 	}
215 	if (log_state & ATH_PKTLOG_SW_EVENT) {
216 		if (cdp_wdi_event_sub(soc, pdev_id,
217 				      &PKTLOG_SW_EVENT_SUBSCRIBER,
218 				      WDI_EVENT_SW_EVENT)) {
219 			return A_ERROR;
220 		}
221 	}
222 
223 	return A_OK;
224 }
225 #else
226 static inline A_STATUS
227 wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state)
228 {
229 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
230 
231 	if (pdev_id < 0) {
232 		qdf_print("Invalid pdev in %s", __func__);
233 		return A_ERROR;
234 	}
235 
236 	if ((log_state & ATH_PKTLOG_TX) ||
237 	    (log_state  & ATH_PKTLOG_RCFIND) ||
238 	    (log_state & ATH_PKTLOG_RCUPDATE) ||
239 	    (log_state & ATH_PKTLOG_SW_EVENT)) {
240 		if (cdp_wdi_event_sub(soc,
241 				      pdev_id,
242 				      &PKTLOG_OFFLOAD_SUBSCRIBER,
243 				      WDI_EVENT_OFFLOAD_ALL)) {
244 			return A_ERROR;
245 		}
246 	}
247 
248 	if (log_state & ATH_PKTLOG_RX) {
249 		if (cdp_wdi_event_sub(soc, pdev_id,
250 				      &PKTLOG_RX_SUBSCRIBER,
251 				      WDI_EVENT_RX_DESC)) {
252 			return A_ERROR;
253 		}
254 	}
255 
256 	if (log_state & ATH_PKTLOG_SW_EVENT) {
257 		if (cdp_wdi_event_sub(soc, pdev_id,
258 				      &PKTLOG_SW_EVENT_SUBSCRIBER,
259 				      WDI_EVENT_SW_EVENT)) {
260 			return A_ERROR;
261 		}
262 	}
263 
264 	if (log_state & ATH_PKTLOG_LITE_T2H) {
265 		if (cdp_wdi_event_sub(soc, pdev_id,
266 				      &PKTLOG_LITE_T2H_SUBSCRIBER,
267 				      WDI_EVENT_LITE_T2H)) {
268 			return A_ERROR;
269 		}
270 	}
271 
272 	if (log_state & ATH_PKTLOG_LITE_RX) {
273 		if (cdp_wdi_event_sub(soc, pdev_id,
274 				      &PKTLOG_LITE_RX_SUBSCRIBER,
275 				      WDI_EVENT_LITE_RX)) {
276 			return A_ERROR;
277 		}
278 	}
279 
280 	return A_OK;
281 }
282 #endif
283 
284 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
285 		u_int16_t peer_id, uint32_t status)
286 {
287 	switch (event) {
288 	case WDI_EVENT_OFFLOAD_ALL:
289 	{
290 		if (process_offload_pktlog_wifi3(pdev, log_data)) {
291 			qdf_print("Unable to process offload info");
292 			return;
293 		}
294 		break;
295 	}
296 	case WDI_EVENT_TX_STATUS:
297 	{
298 		/*
299 		 * process TX message
300 		 */
301 		if (process_tx_info(pdev, log_data)) {
302 			qdf_print("Unable to process TX info");
303 			return;
304 		}
305 		break;
306 	}
307 	case WDI_EVENT_RX_DESC:
308 	{
309 		/*
310 		 * process RX message for local frames
311 		 */
312 		if (process_rx_info(pdev, log_data)) {
313 			qdf_print("Unable to process RX info");
314 			return;
315 		}
316 		break;
317 	}
318 	case WDI_EVENT_RX_DESC_REMOTE:
319 	{
320 		/*
321 		 * process RX message for remote frames
322 		 */
323 		if (process_rx_info_remote(pdev, log_data)) {
324 			qdf_print("Unable to process RX info");
325 			return;
326 		}
327 		break;
328 	}
329 	case WDI_EVENT_RATE_FIND:
330 	{
331 		/*
332 		 * process RATE_FIND message
333 		 */
334 		if (process_rate_find(pdev, log_data)) {
335 			qdf_print("Unable to process RC_FIND info");
336 			return;
337 		}
338 		break;
339 	}
340 	case WDI_EVENT_RATE_UPDATE:
341 	{
342 		/*
343 		 * process RATE_UPDATE message
344 		 */
345 		if (process_rate_update(pdev, log_data)) {
346 			qdf_print("Unable to process RC_UPDATE");
347 			return;
348 		}
349 		break;
350 	}
351 	case WDI_EVENT_SW_EVENT:
352 	{
353 		/*
354 		 * process SW EVENT message
355 		 */
356 		if (process_sw_event(pdev, log_data)) {
357 			qdf_print("Unable to process SW_EVENT");
358 			return;
359 		}
360 		break;
361 	}
362 	default:
363 		break;
364 	}
365 }
366 
367 void
368 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
369 			u_int16_t peer_id, uint32_t status)
370 {
371 	switch (event) {
372 	case WDI_EVENT_RX_DESC:
373 	{
374 		if (process_rx_desc_remote_wifi3(context, log_data)) {
375 			qdf_print("Unable to process RX info");
376 			return;
377 		}
378 		break;
379 	}
380 	case WDI_EVENT_LITE_T2H:
381 	{
382 		if (process_pktlog_lite_wifi3(context, log_data,
383 					      PKTLOG_TYPE_LITE_T2H)) {
384 			qdf_print("Unable to process lite_t2h");
385 			return;
386 		}
387 		break;
388 	}
389 	case WDI_EVENT_LITE_RX:
390 	{
391 		if (process_pktlog_lite_wifi3(context, log_data,
392 					      PKTLOG_TYPE_LITE_RX)) {
393 			qdf_print("Unable to process lite_rx");
394 			return;
395 		}
396 		break;
397 	}
398 	default:
399 		break;
400 	}
401 }
402 
403 #ifdef PKTLOG_LEGACY
404 A_STATUS
405 wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state)
406 {
407 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
408 	/* TODO: WIN implementation to get soc */
409 
410 	if (log_state & ATH_PKTLOG_TX) {
411 		if (cdp_wdi_event_unsub(soc, pdev_id,
412 					&PKTLOG_TX_SUBSCRIBER,
413 					WDI_EVENT_TX_STATUS)) {
414 			return A_ERROR;
415 		}
416 	}
417 	if (log_state & ATH_PKTLOG_RX) {
418 		if (cdp_wdi_event_unsub(soc, pdev_id,
419 					&PKTLOG_RX_SUBSCRIBER,
420 					WDI_EVENT_RX_DESC)) {
421 			return A_ERROR;
422 		}
423 		if (cdp_wdi_event_unsub(soc, pdev_id,
424 					&PKTLOG_RX_REMOTE_SUBSCRIBER,
425 					WDI_EVENT_RX_DESC_REMOTE)) {
426 			return A_ERROR;
427 		}
428 	}
429 
430 	if (log_state & ATH_PKTLOG_RCFIND) {
431 		if (cdp_wdi_event_unsub(soc, pdev_id,
432 					&PKTLOG_RCFIND_SUBSCRIBER,
433 					WDI_EVENT_RATE_FIND)) {
434 			return A_ERROR;
435 		}
436 	}
437 	if (log_state & ATH_PKTLOG_RCUPDATE) {
438 		if (cdp_wdi_event_unsub(soc, pdev_id,
439 					&PKTLOG_RCUPDATE_SUBSCRIBER,
440 					WDI_EVENT_RATE_UPDATE)) {
441 			return A_ERROR;
442 		}
443 	}
444 	if (log_state & ATH_PKTLOG_RCUPDATE) {
445 		if (cdp_wdi_event_unsub(soc, pdev_id,
446 					&PKTLOG_SW_EVENT_SUBSCRIBER,
447 					WDI_EVENT_SW_EVENT)) {
448 			return A_ERROR;
449 		}
450 	}
451 
452 	return A_OK;
453 }
454 #else
455 A_STATUS
456 wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state)
457 {
458 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
459 
460 	if ((log_state & ATH_PKTLOG_TX) ||
461 	    (log_state  & ATH_PKTLOG_RCFIND) ||
462 	    (log_state & ATH_PKTLOG_RCUPDATE) ||
463 	    (log_state & ATH_PKTLOG_SW_EVENT)) {
464 		if (cdp_wdi_event_unsub(soc,
465 					pdev_id,
466 					&PKTLOG_OFFLOAD_SUBSCRIBER,
467 					WDI_EVENT_OFFLOAD_ALL)) {
468 			return A_ERROR;
469 		}
470 	}
471 	if (log_state & ATH_PKTLOG_RX) {
472 		if (cdp_wdi_event_unsub(soc, pdev_id,
473 					&PKTLOG_RX_SUBSCRIBER,
474 					WDI_EVENT_RX_DESC)) {
475 			return A_ERROR;
476 		}
477 	}
478 	if (log_state & ATH_PKTLOG_LITE_T2H) {
479 		if (cdp_wdi_event_unsub(soc, pdev_id,
480 					&PKTLOG_LITE_T2H_SUBSCRIBER,
481 					WDI_EVENT_LITE_T2H)) {
482 			return A_ERROR;
483 		}
484 	}
485 	if (log_state & ATH_PKTLOG_LITE_RX) {
486 		if (cdp_wdi_event_unsub(soc, pdev_id,
487 					&PKTLOG_LITE_RX_SUBSCRIBER,
488 					WDI_EVENT_LITE_RX)) {
489 			return A_ERROR;
490 		}
491 	}
492 
493 	return A_OK;
494 }
495 #endif
496 
/**
 * pktlog_disable() - stop pktlog in firmware and detach WDI subscribers
 * @scn: HIF context (unused directly; kept for the pl_funcs interface)
 *
 * Sequence: mark operation in progress, ask firmware to stop via WMI,
 * then unsubscribe from WDI. The previous read state is restored so an
 * in-progress read observes that pktlog was disabled underneath it.
 *
 * Return: 0 on success, -EINVAL on failure, -EBUSY if another pktlog
 *         operation is in flight
 */
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	uint8_t pdev_id = WMI_PDEV_ID_SOC;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}

	/* NOTE(review): pdev_id is uint8_t, so this check can never fire;
	 * it is kept for historical reasons only.
	 */
	if (pdev_id < 0) {
		qdf_print("Invalid pdev");
		return -EINVAL;
	}

	/* Refuse to disable while a read/clear operation is mid-flight. */
	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	/* Remember the read state so it can be restored after disabling. */
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* Tell firmware to stop generating pktlog events. */
	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target");
		return -EINVAL;
	}

	/* Detach WDI subscribers registered during enable. */
	if (pl_dev->is_pktlog_cb_subscribed &&
		wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;
	/* If a read was started, record that pktlog was disabled mid-read. */
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
554 
555 #ifdef PKTLOG_LEGACY
556 /**
557  * pktlog_callback_registration() - Register pktlog handlers based on
558  *                                  on callback type
559  * @callback_type: pktlog full or lite registration
560  *
561  * Return: None
562  */
563 static void pktlog_callback_registration(uint8_t callback_type)
564 {
565 	if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
566 		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
567 		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
568 		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
569 		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
570 		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
571 		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
572 	}
573 }
574 #else
575 static void pktlog_callback_registration(uint8_t callback_type)
576 {
577 	if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
578 		PKTLOG_RX_SUBSCRIBER.callback = lit_pktlog_callback;
579 		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
580 		PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback;
581 	} else if (callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
582 		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
583 		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
584 	}
585 }
586 #endif
587 
/* Pktlog buffer sizes are configured in MB; convert to bytes. */
#define ONE_MEGABYTE (1024 * 1024)
589 
/**
 * pktlog_init() - reset pktlog bookkeeping and install WDI callbacks
 * @scn: HIF context (unused directly; kept for the pl_funcs interface)
 *
 * Zeroes the pktlog info block, initializes its lock and mutex, sizes the
 * log buffer from the datapath config (falling back to 1 MB), and installs
 * the WDI callbacks selected by pl_dev->callback_type.
 */
void pktlog_init(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	uint32_t buff_size;

	if (!pl_dev || !pl_dev->pl_info) {
		qdf_print("pl_dev or pl_info is invalid");
		return;
	}

	pl_info = pl_dev->pl_info;

	/* Start from a clean slate before (re)initializing locks. */
	OS_MEMZERO(pl_info, sizeof(*pl_info));
	PKTLOG_LOCK_INIT(pl_info);
	mutex_init(&pl_info->pktlog_mutex);

	/* Configured size is in MB; a zero config falls back to 1 MB. */
	buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE;

	pl_info->buf_size = (buff_size ? buff_size : ONE_MEGABYTE);
	pl_info->buf = NULL;
	pl_info->log_state = 0;
	pl_info->init_saved_state = 0;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	/* Trigger thresholds below are project defaults from pktlog_ac_i.h. */
	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
	pl_info->pktlen = 0;
	pl_info->start_time_thruput = 0;
	pl_info->start_time_per = 0;
	pl_dev->vendor_cmd_send = false;

	pktlog_callback_registration(pl_dev->callback_type);
}
628 
/**
 * __pktlog_enable() - core pktlog enable/disable (caller holds pktlog_mutex)
 * @scn: HIF context, forwarded to buffer alloc and firmware enable
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: request originates from INI configuration
 * @user_triggered: request originates from user action
 * @is_iwpriv_command: 0 when issued via the vendor command path
 *
 * Allocates the log buffer on first use, subscribes the WDI callbacks,
 * and asks firmware to start logging; with log_state == 0 it delegates
 * to the disable path instead.
 *
 * Return: 0 on success, negative errno on failure
 */
int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		    bool ini_triggered, uint8_t user_triggered,
		    uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t pdev_id;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pdev_id = WMI_PDEV_ID_SOC;
	/* NOTE(review): pdev_id is uint8_t, so this check can never fire;
	 * kept for historical reasons only.
	 */
	if (pdev_id < 0) {
		qdf_print("%s: Invalid txrx context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	/* Any in-progress read/clear state blocks a new enable request. */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		/* Vendor disable without a prior vendor enable: no-op. */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress", __func__);
		return 0;
	}

	if (!pl_dev->tgt_pktlog_alloced) {
		/* First enable since init/resize: allocate the log buffer. */
		if (!pl_info->buf) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed",
					  __func__);
				return -ENOMEM;
			}

			/* Defensive re-check: alloc reported success but the
			 * buffer pointer must also be populated.
			 */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed",
					  __func__);
				ASSERT(0);
				return -ENOMEM;
			}

		}

		/* Reset buffer header/offsets under the log lock. */
		qdf_spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		qdf_spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(pdev_id, log_state);
			if (error) {
				pl_info->curr_pkt_state =
						PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s",
					  __func__);
				return -EINVAL;
			}
		} else {
			/* NOTE(review): an enable while already subscribed is
			 * treated as an error rather than a no-op — confirm
			 * this is the intended contract with callers.
			 */
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Unable to subscribe %d to the WDI %s",
				  log_state, __func__);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: route through the full disable path. */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
759 
760 int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
761 		 bool ini_triggered, uint8_t user_triggered,
762 		 uint32_t is_iwpriv_command)
763 {
764 	struct pktlog_dev_t *pl_dev;
765 	struct ath_pktlog_info *pl_info;
766 	int err;
767 
768 	pl_dev = get_pktlog_handle();
769 
770 	if (!pl_dev) {
771 		qdf_print("%s: invalid pl_dev handle", __func__);
772 		return -EINVAL;
773 	}
774 
775 	pl_info = pl_dev->pl_info;
776 
777 	if (!pl_info) {
778 		qdf_print("%s: invalid pl_info handle", __func__);
779 		return -EINVAL;
780 	}
781 
782 	mutex_lock(&pl_info->pktlog_mutex);
783 	err = __pktlog_enable(scn, log_state, ini_triggered,
784 				user_triggered, is_iwpriv_command);
785 	mutex_unlock(&pl_info->pktlog_mutex);
786 	return err;
787 }
788 
/**
 * __pktlog_setsize() - core buffer resize (caller holds pktlog_mutex)
 * @scn: HIF context, forwarded to pktlog_release_buf()
 * @size: requested buffer size in bytes
 *
 * Validates the size against the configured maximum, releases any existing
 * buffer (after detaching WDI subscribers), and records the new size; the
 * buffer itself is reallocated lazily on the next enable.
 *
 * Return: 0 on success, negative errno on failure
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t pdev_id = WMI_PDEV_ID_SOC;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	uint32_t buff_size;
	uint32_t max_allowed_buff_size;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	/* NOTE(review): pdev_id is uint8_t, so this check can never fire;
	 * kept for historical reasons only.
	 */
	if (pdev_id < 0) {
		qdf_print("%s: invalid pdev", __func__);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* Configured maximum is in MB; zero config falls back to 1 MB. */
	buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE;
	max_allowed_buff_size = (buff_size ? buff_size : ONE_MEGABYTE);

	if (size < ONE_MEGABYTE || size > max_allowed_buff_size) {
		qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB.",
			  __func__, size, (ONE_MEGABYTE / ONE_MEGABYTE),
			  (max_allowed_buff_size / ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid requested buff size", __func__);
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		/* Nothing to do; keep the existing buffer. */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}

	/* Release the old buffer under the log lock so readers can't race. */
	qdf_spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf) {
		if (pl_dev->is_pktlog_cb_subscribed &&
			wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_spin_unlock_bh(&pl_info->log_lock);
			qdf_print("Cannot unsubscribe pktlog from the WDI");
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	qdf_spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
873 
874 int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
875 {
876 	struct pktlog_dev_t *pl_dev;
877 	struct ath_pktlog_info *pl_info;
878 	int status;
879 
880 	pl_dev = get_pktlog_handle();
881 
882 	if (!pl_dev) {
883 		qdf_print("%s: invalid pl_dev handle", __func__);
884 		return -EINVAL;
885 	}
886 
887 	pl_info = pl_dev->pl_info;
888 
889 	if (!pl_info) {
890 		qdf_print("%s: invalid pl_dev handle", __func__);
891 		return -EINVAL;
892 	}
893 
894 	mutex_lock(&pl_info->pktlog_mutex);
895 	status = __pktlog_setsize(scn, size);
896 	mutex_unlock(&pl_info->pktlog_mutex);
897 
898 	return status;
899 }
900 
/**
 * pktlog_clearbuff() - zero the pktlog buffer after a completed read
 * @scn: HIF context (unused directly)
 * @clear_buff: must be true; false is rejected
 *
 * Only permitted once a read has completed (or pktlog was disabled during
 * a read) and logging itself is off. Resets the buffer contents and the
 * read offset, and advances the read state machine.
 *
 * Return: 0 on success, -EINVAL/-EFAULT on failure, -EBUSY if a pktlog
 *         operation is in flight
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	if (!clear_buff)
		return -EINVAL;

	/* Clearing is only valid after a read completed and before a
	 * previous clear has been consumed.
	 */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing "
			  "pktlog buffer.", __func__);
		return -EINVAL;
	}

	if (pl_info->buf) {
		if (pl_info->buf_size > 0) {
			qdf_debug("pktlog buffer is cleared");
			memset(pl_info->buf, 0, pl_info->buf_size);
			/* Force re-subscribe/re-init on the next enable. */
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. "
				  "Existing Buf size %d", __func__,
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}

	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
967 
968 void pktlog_process_fw_msg(uint8_t pdev_id, uint32_t *buff, uint32_t len)
969 {
970 	uint32_t *pl_hdr;
971 	uint32_t log_type;
972 	struct ol_fw_data pl_fw_data;
973 
974 	if (pdev_id == OL_TXRX_INVALID_PDEV_ID) {
975 		qdf_print("%s: txrx pdev_id is invalid", __func__);
976 		return;
977 	}
978 	pl_hdr = buff;
979 	pl_fw_data.data = pl_hdr;
980 	pl_fw_data.len = len;
981 
982 	log_type =
983 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
984 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
985 
986 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
987 		|| (log_type == PKTLOG_TYPE_TX_STAT)
988 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
989 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
990 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
991 		wdi_event_handler(WDI_EVENT_TX_STATUS,
992 				  pdev_id, &pl_fw_data);
993 	else if (log_type == PKTLOG_TYPE_RC_FIND)
994 		wdi_event_handler(WDI_EVENT_RATE_FIND,
995 				  pdev_id, &pl_fw_data);
996 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
997 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
998 				  pdev_id, &pl_fw_data);
999 	else if (log_type == PKTLOG_TYPE_RX_STAT)
1000 		wdi_event_handler(WDI_EVENT_RX_DESC,
1001 				  pdev_id, &pl_fw_data);
1002 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
1003 		wdi_event_handler(WDI_EVENT_SW_EVENT,
1004 				  pdev_id, &pl_fw_data);
1005 }
1006 
1007 #if defined(QCA_WIFI_3_0_ADRASTEA)
1008 static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
1009 {
1010 	int rc = 0; /* sane */
1011 
1012 	if ((!nbuf) ||
1013 	    (nbuf->data < nbuf->head) ||
1014 	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
1015 		rc = -EINVAL;
1016 
1017 	return rc;
1018 }
/**
 * pktlog_t2h_msg_handler() - Target to host message handler
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Validates and consumes a pktlog message received over the HTC packet-log
 * endpoint, forwarding its payload to pktlog_process_fw_msg(). The nbuf is
 * freed on every path except a failed sanity check, where it is leaked
 * deliberately because freeing a corrupted buffer may crash.
 *
 * Return: None
 */
static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	uint32_t *msg_word;
	uint32_t msg_len;

	/* check for sanity of the packet, have seen corrupted pkts */
	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
		qdf_print("%s: packet 0x%pK corrupted? Leaking...",
			  __func__, pktlog_t2h_msg);
		/* do not free; may crash! */
		QDF_ASSERT(0);
		return;
	}

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellations are expected and not counted as errors */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			pdev->htc_err_cnt++;
		qdf_nbuf_free(pktlog_t2h_msg);
		return;
	}

	/* confirm alignment */
	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);

	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
	pktlog_process_fw_msg(pdev->pdev_id, msg_word, msg_len);

	qdf_nbuf_free(pktlog_t2h_msg);
}
1059 
/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * Pktlog never sends on this endpoint, so a TX-resume indication means
 * something is wrong; assert.
 *
 * Return: None
 */
static void pktlog_tx_resume_handler(void *context)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
1071 
1072 /**
1073  * pktlog_h2t_send_complete() - send complete indication
1074  * @context: pdev context
1075  * @htc_pkt: HTC packet
1076  *
1077  * Return: None
1078  */
1079 static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
1080 {
1081 	qdf_print("%s: Not expected", __func__);
1082 	qdf_assert(0);
1083 }
1084 
1085 /**
1086  * pktlog_h2t_full() - queue full indication
1087  * @context: pdev context
1088  * @pkt: HTC packet
1089  *
1090  * Return: HTC action
1091  */
1092 static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
1093 {
1094 	return HTC_SEND_FULL_KEEP;
1095 }
1096 
/**
 * pktlog_htc_connect_service() - create new endpoint for packetlog
 * @pdev - pktlog pdev
 *
 * Connects to the PACKET_LOG_SVC HTC service with receive-only callbacks
 * (pktlog never transmits on this endpoint) and credit flow control
 * disabled. On success records the endpoint and marks multi-target
 * pktlog enabled.
 *
 * Return: 0 for success/failure
 */
static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = pdev;
	/* TX-side callbacks only assert: this endpoint is receive-only. */
	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */

	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = PACKET_LOG_SVC;

	status = htc_connect_service(pdev->htc_pdev, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		pdev->mt_pktlog_enabled = false;
		return -EIO;       /* failure */
	}

	pdev->htc_endpoint = response.Endpoint;
	pdev->mt_pktlog_enabled = true;

	return 0;               /* success */
}
1150 
1151 /**
1152  * pktlog_htc_attach() - attach pktlog HTC service
1153  *
1154  * Return: 0 for success/failure
1155  */
1156 int pktlog_htc_attach(void)
1157 {
1158 	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1159 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1160 
1161 	if ((!pl_pdev) || (!htc_pdev)) {
1162 		qdf_print("Invalid pl_dev or htc_pdev handle");
1163 		return -EINVAL;
1164 	}
1165 
1166 	pl_pdev->htc_pdev = htc_pdev;
1167 	return pktlog_htc_connect_service(pl_pdev);
1168 }
1169 #else
1170 int pktlog_htc_attach(void)
1171 {
1172 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
1173 
1174 	if (!pl_dev) {
1175 		qdf_print("Invalid pl_dev handle");
1176 		return -EINVAL;
1177 	}
1178 
1179 	pl_dev->mt_pktlog_enabled = false;
1180 	return 0;
1181 }
1182 #endif
1183 #endif /* REMOVE_PKT_LOG */
1184