xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  *
21  * Permission to use, copy, modify, and/or distribute this software for any
22  * purpose with or without fee is hereby granted, provided that the above
23  * copyright notice and this permission notice appear in all copies.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
26  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
28  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
29  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
30  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
31  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
32  */
33 
34 #ifndef REMOVE_PKT_LOG
35 
36 #include "qdf_mem.h"
37 #include "athdefs.h"
38 #include "pktlog_ac_i.h"
39 #include "cds_api.h"
40 #include "wma_types.h"
41 #include "htc.h"
42 #include <cdp_txrx_cmn_struct.h>
43 #include <cdp_txrx_ctrl.h>
44 
/*
 * WDI event subscriber objects, one per pktlog event stream.
 * Their .callback members are filled in by pktlog_callback_registration()
 * and the objects are registered/removed with the datapath via
 * wdi_pktlog_subscribe()/wdi_pktlog_unsubscribe().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER;
54 
/*
 * Architecture-dependent pktlog entry points for this implementation;
 * consumed through pl_dev.pl_funcs (see e.g. __pktlog_enable()).
 */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};
61 
/* Default pktlog device instance published by pktlog_sethandle() */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
65 
66 void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
67 		     struct hif_opaque_softc *scn)
68 {
69 	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
70 	*pl_handle = &pl_dev;
71 }
72 
73 void pktlog_set_callback_regtype(
74 		enum pktlog_callback_regtype callback_type)
75 {
76 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
77 
78 	if (!pl_dev) {
79 		qdf_print("Invalid pl_dev");
80 		return;
81 	}
82 
83 	pl_dev->callback_type = callback_type;
84 }
85 
86 struct pktlog_dev_t *get_pktlog_handle(void)
87 {
88 	struct cdp_pdev *pdev_txrx_handle =
89 				cds_get_context(QDF_MODULE_ID_TXRX);
90 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
91 
92 	return cdp_get_pldev(soc, pdev_txrx_handle);
93 }
94 
95 /*
96  * Get current txrx context
97  */
98 void *get_txrx_context(void)
99 {
100 	return cds_get_context(QDF_MODULE_ID_TXRX);
101 }
102 
103 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
104 				    WMI_CMD_ID cmd_id, bool ini_triggered,
105 				    uint8_t user_triggered)
106 {
107 	struct scheduler_msg msg = { 0 };
108 	QDF_STATUS status;
109 	struct ath_pktlog_wmi_params *param;
110 
111 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
112 
113 	if (!param)
114 		return A_NO_MEMORY;
115 
116 	param->cmd_id = cmd_id;
117 	param->pktlog_event = event_types;
118 	param->ini_triggered = ini_triggered;
119 	param->user_triggered = user_triggered;
120 
121 	msg.type = WMA_PKTLOG_ENABLE_REQ;
122 	msg.bodyptr = param;
123 	msg.bodyval = 0;
124 
125 	status = scheduler_post_message(QDF_MODULE_ID_WMA,
126 					QDF_MODULE_ID_WMA,
127 					QDF_MODULE_ID_WMA, &msg);
128 
129 	if (status != QDF_STATUS_SUCCESS) {
130 		qdf_mem_free(param);
131 		return A_ERROR;
132 	}
133 
134 	return A_OK;
135 }
136 
137 static inline A_STATUS
138 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
139 		 bool ini_triggered, uint8_t user_triggered)
140 {
141 	uint32_t types = 0;
142 
143 	if (log_state & ATH_PKTLOG_TX)
144 		types |= WMI_PKTLOG_EVENT_TX;
145 
146 	if (log_state & ATH_PKTLOG_RX)
147 		types |= WMI_PKTLOG_EVENT_RX;
148 
149 	if (log_state & ATH_PKTLOG_RCFIND)
150 		types |= WMI_PKTLOG_EVENT_RCF;
151 
152 	if (log_state & ATH_PKTLOG_RCUPDATE)
153 		types |= WMI_PKTLOG_EVENT_RCU;
154 
155 	if (log_state & ATH_PKTLOG_SW_EVENT)
156 		types |= WMI_PKTLOG_EVENT_SW;
157 
158 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
159 		  "%s: Pktlog events: %d", __func__, types);
160 
161 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
162 				   ini_triggered, user_triggered);
163 }
164 
165 #ifdef HELIUMPLUS
166 /**
167  * wdi_pktlog_subscribe() - Subscribe pktlog callbacks
168  * @cdp_pdev: abstract pdev handle
169  * @log_state: Pktlog registration
170  *
171  * Return: zero on success, non-zero on failure
172  */
173 static inline A_STATUS
174 wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
175 {
176 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
177 
178 	if (!cdp_pdev) {
179 		qdf_print("Invalid pdev in %s", __func__);
180 		return A_ERROR;
181 	}
182 
183 	if (log_state & ATH_PKTLOG_TX) {
184 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_TX_SUBSCRIBER,
185 				WDI_EVENT_TX_STATUS)) {
186 			return A_ERROR;
187 		}
188 	}
189 	if (log_state & ATH_PKTLOG_RX) {
190 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER,
191 				WDI_EVENT_RX_DESC)) {
192 			return A_ERROR;
193 		}
194 		if (cdp_wdi_event_sub(soc, cdp_pdev,
195 				&PKTLOG_RX_REMOTE_SUBSCRIBER,
196 				WDI_EVENT_RX_DESC_REMOTE)) {
197 			return A_ERROR;
198 		}
199 	}
200 	if (log_state & ATH_PKTLOG_RCFIND) {
201 		if (cdp_wdi_event_sub(soc, cdp_pdev,
202 				  &PKTLOG_RCFIND_SUBSCRIBER,
203 				  WDI_EVENT_RATE_FIND)) {
204 			return A_ERROR;
205 		}
206 	}
207 	if (log_state & ATH_PKTLOG_RCUPDATE) {
208 		if (cdp_wdi_event_sub(soc, cdp_pdev,
209 				  &PKTLOG_RCUPDATE_SUBSCRIBER,
210 				  WDI_EVENT_RATE_UPDATE)) {
211 			return A_ERROR;
212 		}
213 	}
214 	if (log_state & ATH_PKTLOG_SW_EVENT) {
215 		if (cdp_wdi_event_sub(soc, cdp_pdev,
216 				  &PKTLOG_SW_EVENT_SUBSCRIBER,
217 				  WDI_EVENT_SW_EVENT)) {
218 			return A_ERROR;
219 		}
220 	}
221 
222 	return A_OK;
223 }
224 #else
225 static inline A_STATUS
226 wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
227 {
228 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
229 
230 	if (!cdp_pdev) {
231 		qdf_print("Invalid pdev in %s", __func__);
232 		return A_ERROR;
233 	}
234 
235 	if ((log_state & ATH_PKTLOG_TX) ||
236 	    (log_state  & ATH_PKTLOG_RCFIND) ||
237 	    (log_state & ATH_PKTLOG_RCUPDATE) ||
238 	    (log_state & ATH_PKTLOG_SW_EVENT)) {
239 		if (cdp_wdi_event_sub(soc,
240 				      cdp_pdev,
241 				      &PKTLOG_OFFLOAD_SUBSCRIBER,
242 				      WDI_EVENT_OFFLOAD_ALL)) {
243 			return A_ERROR;
244 		}
245 	}
246 
247 	if (log_state & ATH_PKTLOG_RX) {
248 		if (cdp_wdi_event_sub(soc, cdp_pdev,
249 					&PKTLOG_RX_SUBSCRIBER,
250 					WDI_EVENT_RX_DESC)) {
251 			return A_ERROR;
252 		}
253 	}
254 
255 	if (log_state & ATH_PKTLOG_SW_EVENT) {
256 		if (cdp_wdi_event_sub(soc, cdp_pdev,
257 				      &PKTLOG_SW_EVENT_SUBSCRIBER,
258 				      WDI_EVENT_SW_EVENT)) {
259 			return A_ERROR;
260 		}
261 	}
262 
263 	if (log_state & ATH_PKTLOG_LITE_T2H) {
264 		if (cdp_wdi_event_sub(soc, cdp_pdev,
265 				      &PKTLOG_LITE_T2H_SUBSCRIBER,
266 				      WDI_EVENT_LITE_T2H)) {
267 			return A_ERROR;
268 		}
269 	}
270 
271 	if (log_state & ATH_PKTLOG_LITE_RX) {
272 		if (cdp_wdi_event_sub(soc, cdp_pdev,
273 				      &PKTLOG_LITE_RX_SUBSCRIBER,
274 				      WDI_EVENT_LITE_RX)) {
275 			return A_ERROR;
276 		}
277 	}
278 
279 	return A_OK;
280 }
281 #endif
282 
283 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
284 		u_int16_t peer_id, uint32_t status)
285 {
286 	switch (event) {
287 	case WDI_EVENT_OFFLOAD_ALL:
288 	{
289 		if (process_offload_pktlog(pdev, log_data)) {
290 			qdf_print("Unable to process offload info");
291 			return;
292 		}
293 		break;
294 	}
295 	case WDI_EVENT_TX_STATUS:
296 	{
297 		/*
298 		 * process TX message
299 		 */
300 		if (process_tx_info(pdev, log_data)) {
301 			qdf_print("Unable to process TX info");
302 			return;
303 		}
304 		break;
305 	}
306 	case WDI_EVENT_RX_DESC:
307 	{
308 		/*
309 		 * process RX message for local frames
310 		 */
311 		if (process_rx_info(pdev, log_data)) {
312 			qdf_print("Unable to process RX info");
313 			return;
314 		}
315 		break;
316 	}
317 	case WDI_EVENT_RX_DESC_REMOTE:
318 	{
319 		/*
320 		 * process RX message for remote frames
321 		 */
322 		if (process_rx_info_remote(pdev, log_data)) {
323 			qdf_print("Unable to process RX info");
324 			return;
325 		}
326 		break;
327 	}
328 	case WDI_EVENT_RATE_FIND:
329 	{
330 		/*
331 		 * process RATE_FIND message
332 		 */
333 		if (process_rate_find(pdev, log_data)) {
334 			qdf_print("Unable to process RC_FIND info");
335 			return;
336 		}
337 		break;
338 	}
339 	case WDI_EVENT_RATE_UPDATE:
340 	{
341 		/*
342 		 * process RATE_UPDATE message
343 		 */
344 		if (process_rate_update(pdev, log_data)) {
345 			qdf_print("Unable to process RC_UPDATE");
346 			return;
347 		}
348 		break;
349 	}
350 	case WDI_EVENT_SW_EVENT:
351 	{
352 		/*
353 		 * process SW EVENT message
354 		 */
355 		if (process_sw_event(pdev, log_data)) {
356 			qdf_print("Unable to process SW_EVENT");
357 			return;
358 		}
359 		break;
360 	}
361 	default:
362 		break;
363 	}
364 }
365 
366 void
367 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
368 			u_int16_t peer_id, uint32_t status)
369 {
370 	switch (event) {
371 	case WDI_EVENT_RX_DESC:
372 	{
373 		if (process_rx_desc_remote(context, log_data)) {
374 			qdf_print("Unable to process RX info");
375 			return;
376 		}
377 		break;
378 	}
379 	case WDI_EVENT_LITE_T2H:
380 	{
381 		if (process_pktlog_lite(context, log_data,
382 					PKTLOG_TYPE_LITE_T2H)) {
383 			qdf_print("Unable to process lite_t2h");
384 			return;
385 		}
386 		break;
387 	}
388 	case WDI_EVENT_LITE_RX:
389 	{
390 		if (process_pktlog_lite(context, log_data,
391 					PKTLOG_TYPE_LITE_RX)) {
392 			qdf_print("Unable to process lite_rx");
393 			return;
394 		}
395 		break;
396 	}
397 	default:
398 		break;
399 	}
400 }
401 
402 #ifdef HELIUMPLUS
403 /**
404  * wdi_pktlog_unsubscribe() - Unsubscribe pktlog callbacks
405  * @cdp_pdev: abstract pdev handle
406  * @log_state: Pktlog registration
407  *
408  * Return: zero on success, non-zero on failure
409  */
410 A_STATUS
411 wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
412 {
413 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
414 	/* TODO: WIN implementation to get soc */
415 
416 	if (log_state & ATH_PKTLOG_TX) {
417 		if (cdp_wdi_event_unsub(soc, pdev,
418 				    &PKTLOG_TX_SUBSCRIBER,
419 				    WDI_EVENT_TX_STATUS)) {
420 			return A_ERROR;
421 		}
422 	}
423 	if (log_state & ATH_PKTLOG_RX) {
424 		if (cdp_wdi_event_unsub(soc, pdev,
425 				    &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
426 			return A_ERROR;
427 		}
428 		if (cdp_wdi_event_unsub(soc, pdev,
429 				    &PKTLOG_RX_REMOTE_SUBSCRIBER,
430 				    WDI_EVENT_RX_DESC_REMOTE)) {
431 			return A_ERROR;
432 		}
433 	}
434 
435 	if (log_state & ATH_PKTLOG_RCFIND) {
436 		if (cdp_wdi_event_unsub(soc, pdev,
437 				    &PKTLOG_RCFIND_SUBSCRIBER,
438 				    WDI_EVENT_RATE_FIND)) {
439 			return A_ERROR;
440 		}
441 	}
442 	if (log_state & ATH_PKTLOG_RCUPDATE) {
443 		if (cdp_wdi_event_unsub(soc, pdev,
444 				    &PKTLOG_RCUPDATE_SUBSCRIBER,
445 				    WDI_EVENT_RATE_UPDATE)) {
446 			return A_ERROR;
447 		}
448 	}
449 	if (log_state & ATH_PKTLOG_RCUPDATE) {
450 		if (cdp_wdi_event_unsub(soc, pdev,
451 				    &PKTLOG_SW_EVENT_SUBSCRIBER,
452 				    WDI_EVENT_SW_EVENT)) {
453 			return A_ERROR;
454 		}
455 	}
456 
457 	return A_OK;
458 }
459 #else
460 A_STATUS
461 wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
462 {
463 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
464 
465 	if ((log_state & ATH_PKTLOG_TX) ||
466 	    (log_state  & ATH_PKTLOG_RCFIND) ||
467 	    (log_state & ATH_PKTLOG_RCUPDATE) ||
468 	    (log_state & ATH_PKTLOG_SW_EVENT)) {
469 		if (cdp_wdi_event_unsub(soc,
470 					pdev,
471 					&PKTLOG_OFFLOAD_SUBSCRIBER,
472 					WDI_EVENT_OFFLOAD_ALL)) {
473 			return A_ERROR;
474 		}
475 	}
476 	if (log_state & ATH_PKTLOG_RX) {
477 		if (cdp_wdi_event_unsub(soc, pdev,
478 					&PKTLOG_RX_SUBSCRIBER,
479 					WDI_EVENT_RX_DESC)) {
480 			return A_ERROR;
481 		}
482 	}
483 	if (log_state & ATH_PKTLOG_LITE_T2H) {
484 		if (cdp_wdi_event_unsub(soc, pdev,
485 					&PKTLOG_LITE_T2H_SUBSCRIBER,
486 					WDI_EVENT_LITE_T2H)) {
487 			return A_ERROR;
488 		}
489 	}
490 	if (log_state & ATH_PKTLOG_LITE_RX) {
491 		if (cdp_wdi_event_unsub(soc, pdev,
492 					&PKTLOG_LITE_RX_SUBSCRIBER,
493 					WDI_EVENT_LITE_RX)) {
494 			return A_ERROR;
495 		}
496 	}
497 
498 	return A_OK;
499 }
500 #endif
501 
/**
 * pktlog_disable() - stop pktlog: disable it in firmware and drop WDI hooks
 * @scn: HIF opaque context (not used directly; handles come via CDS)
 *
 * Refuses to run while another pktlog operation (read/clearbuff) is in
 * flight, posts the WMI disable command to firmware, then removes the
 * WDI subscribers if they were registered.
 *
 * Return: 0 on success; -EINVAL on bad handles or messaging failure,
 *         -EBUSY if another pktlog operation is in progress
 */
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	struct cdp_pdev *txrx_pdev = get_txrx_context();

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}

	if (!txrx_pdev) {
		qdf_print("Invalid cdp_pdev");
		return -EINVAL;
	}

	/* bail out while any other pktlog operation holds the state */
	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	/* remember where we were so an in-progress read can resume */
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target");
		return -EINVAL;
	}

	/* tear down WDI subscribers only if they were registered */
	if (pl_dev->is_pktlog_cb_subscribed &&
		wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
559 
560 #ifdef HELIUMPLUS
561 /**
562  * pktlog_callback_registration() - Register pktlog handlers based on
563  *                                  on callback type
564  * @callback_type: pktlog full or lite registration
565  *
566  * Return: None
567  */
568 static void pktlog_callback_registration(uint8_t callback_type)
569 {
570 	if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
571 		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
572 		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
573 		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
574 		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
575 		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
576 		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
577 	}
578 }
579 #else
580 static void pktlog_callback_registration(uint8_t callback_type)
581 {
582 	if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
583 		PKTLOG_RX_SUBSCRIBER.callback = lit_pktlog_callback;
584 		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
585 		PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback;
586 	} else if (callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
587 		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
588 		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
589 	}
590 }
591 #endif
592 
593 void pktlog_init(struct hif_opaque_softc *scn)
594 {
595 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
596 	struct ath_pktlog_info *pl_info;
597 
598 	if (!pl_dev || !pl_dev->pl_info) {
599 		qdf_print("pl_dev or pl_info is invalid");
600 		return;
601 	}
602 
603 	pl_info = pl_dev->pl_info;
604 
605 	OS_MEMZERO(pl_info, sizeof(*pl_info));
606 	PKTLOG_LOCK_INIT(pl_info);
607 	mutex_init(&pl_info->pktlog_mutex);
608 
609 	pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
610 	pl_info->buf = NULL;
611 	pl_info->log_state = 0;
612 	pl_info->init_saved_state = 0;
613 	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
614 	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
615 	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
616 	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
617 	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
618 	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
619 	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
620 	pl_info->pktlen = 0;
621 	pl_info->start_time_thruput = 0;
622 	pl_info->start_time_per = 0;
623 	pl_dev->vendor_cmd_send = false;
624 
625 	pktlog_callback_registration(pl_dev->callback_type);
626 }
627 
/**
 * __pktlog_enable() - core pktlog enable/disable state machine (unlocked)
 * @scn: HIF opaque context
 * @log_state: ATH_PKTLOG_* bitmap; 0 means "disable"
 * @ini_triggered: request originated from INI configuration
 * @user_triggered: request originated from user action
 * @is_iwpriv_command: 0 for vendor-command origin, non-zero for iwpriv
 *
 * Caller must hold pl_info->pktlog_mutex (see pktlog_enable()).
 * Allocates the log buffer on first use, subscribes the WDI callbacks
 * and posts the WMI enable command; with @log_state == 0 it routes to
 * the disable path instead.
 *
 * Return: 0 on success; -EINVAL/-EBUSY/-ENOMEM on failure
 */
static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		 bool ini_triggered, uint8_t user_triggered,
		 uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *cdp_pdev;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	cdp_pdev = get_txrx_context();
	if (!cdp_pdev) {
		qdf_print("%s: Invalid txrx context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	/* any state below CLEARBUFF_COMPLETE means an operation is live */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		/* vendor "disable" with nothing enabled: nothing to do */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress", __func__);
		return 0;
	}

	if (!pl_dev->tgt_pktlog_alloced) {
		/* first enable since init/setsize: allocate and stamp buffer */
		if (!pl_info->buf) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed",
					  __func__);
				return -ENOMEM;
			}

			/* defensive re-check: alloc reported success */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed",
					  __func__);
				ASSERT(0);
				return -ENOMEM;
			}

		}

		/* initialize ring-buffer header under the log lock */
		qdf_spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		qdf_spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(cdp_pdev, log_state);
			if (error) {
				pl_info->curr_pkt_state =
						PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s",
					  __func__);
				return -EINVAL;
			}
		} else {
			/* already subscribed: re-enable is not supported */
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Unable to subscribe %d to the WDI %s",
				  log_state, __func__);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: route to the architecture disable hook */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
758 
759 int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
760 		 bool ini_triggered, uint8_t user_triggered,
761 		 uint32_t is_iwpriv_command)
762 {
763 	struct pktlog_dev_t *pl_dev;
764 	struct ath_pktlog_info *pl_info;
765 	int err;
766 
767 	pl_dev = get_pktlog_handle();
768 
769 	if (!pl_dev) {
770 		qdf_print("%s: invalid pl_dev handle", __func__);
771 		return -EINVAL;
772 	}
773 
774 	pl_info = pl_dev->pl_info;
775 
776 	if (!pl_info) {
777 		qdf_print("%s: invalid pl_info handle", __func__);
778 		return -EINVAL;
779 	}
780 
781 	mutex_lock(&pl_info->pktlog_mutex);
782 	err = __pktlog_enable(scn, log_state, ini_triggered,
783 				user_triggered, is_iwpriv_command);
784 	mutex_unlock(&pl_info->pktlog_mutex);
785 	return err;
786 }
787 
/* Pktlog ring buffer size limits enforced by __pktlog_setsize() */
#define ONE_MEGABYTE (1024 * 1024)
#define MAX_ALLOWED_PKTLOG_SIZE (64 * ONE_MEGABYTE)
790 
/**
 * __pktlog_setsize() - core buffer-resize routine (unlocked)
 * @scn: HIF opaque context
 * @size: requested buffer size in bytes
 *        [ONE_MEGABYTE, MAX_ALLOWED_PKTLOG_SIZE]
 *
 * Caller must hold pl_info->pktlog_mutex (see pktlog_setsize()).
 * Logging must be disabled first; an existing buffer is unsubscribed
 * from the WDI and released before the new size is recorded. The buffer
 * itself is re-allocated lazily at the next enable.
 *
 * Return: 0 on success; -EINVAL/-EBUSY/-EFAULT on failure
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *pdev;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pdev = get_txrx_context();

	if (!pdev) {
		qdf_print("%s: invalid pdev handle", __func__);
		return -EINVAL;
	}

	/* states below NOT_IN_PROGRESS mean another operation is live */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* enforce [1 MB, 64 MB] bounds on the requested size */
	if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) {
		qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB.",
			  __func__, size, (ONE_MEGABYTE / ONE_MEGABYTE),
			  (MAX_ALLOWED_PKTLOG_SIZE / ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid requested buff size", __func__);
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}

	/* drop the old buffer (and its WDI subscription) under the lock */
	qdf_spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf) {
		if (pl_dev->is_pktlog_cb_subscribed &&
			wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_spin_unlock_bh(&pl_info->log_lock);
			qdf_print("Cannot unsubscribe pktlog from the WDI");
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	qdf_spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
871 
872 int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
873 {
874 	struct pktlog_dev_t *pl_dev;
875 	struct ath_pktlog_info *pl_info;
876 	int status;
877 
878 	pl_dev = get_pktlog_handle();
879 
880 	if (!pl_dev) {
881 		qdf_print("%s: invalid pl_dev handle", __func__);
882 		return -EINVAL;
883 	}
884 
885 	pl_info = pl_dev->pl_info;
886 
887 	if (!pl_info) {
888 		qdf_print("%s: invalid pl_dev handle", __func__);
889 		return -EINVAL;
890 	}
891 
892 	mutex_lock(&pl_info->pktlog_mutex);
893 	status = __pktlog_setsize(scn, size);
894 	mutex_unlock(&pl_info->pktlog_mutex);
895 
896 	return status;
897 }
898 
/**
 * pktlog_clearbuff() - zero the pktlog ring buffer contents
 * @scn: HIF opaque context (unused directly)
 * @clear_buff: must be true; false is rejected with -EINVAL
 *
 * Only valid when logging is disabled and a read has completed.
 *
 * Return: 0 on success; -EINVAL/-EBUSY/-EFAULT on failure
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	if (!clear_buff)
		return -EINVAL;

	/* clearing is only allowed once a read has fully completed */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	/* remember previous state to pick the right completion state */
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing "
			  "pktlog buffer.", __func__);
		return -EINVAL;
	}

	if (pl_info->buf) {
		if (pl_info->buf_size > 0) {
			qdf_debug("pktlog buffer is cleared");
			/* wipe the whole buffer, then mark it unread */
			memset(pl_info->buf, 0, pl_info->buf_size);
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. "
				  "Existing Buf size %d", __func__,
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}

	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
965 
966 /**
967  * pktlog_process_fw_msg() - process packetlog message
968  * @buff: buffer
969  *
970  * Return: None
971  */
972 void pktlog_process_fw_msg(uint32_t *buff, uint32_t len)
973 {
974 	uint32_t *pl_hdr;
975 	uint32_t log_type;
976 	struct cdp_pdev *pdev = get_txrx_context();
977 	struct ol_fw_data pl_fw_data;
978 
979 	if (!pdev) {
980 		qdf_print("%s: txrx_pdev is NULL", __func__);
981 		return;
982 	}
983 	pl_hdr = buff;
984 	pl_fw_data.data = pl_hdr;
985 	pl_fw_data.len = len;
986 
987 	log_type =
988 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
989 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
990 
991 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
992 		|| (log_type == PKTLOG_TYPE_TX_STAT)
993 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
994 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
995 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
996 		wdi_event_handler(WDI_EVENT_TX_STATUS,
997 				  pdev, &pl_fw_data);
998 	else if (log_type == PKTLOG_TYPE_RC_FIND)
999 		wdi_event_handler(WDI_EVENT_RATE_FIND,
1000 				  pdev, &pl_fw_data);
1001 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
1002 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
1003 				  pdev, &pl_fw_data);
1004 	else if (log_type == PKTLOG_TYPE_RX_STAT)
1005 		wdi_event_handler(WDI_EVENT_RX_DESC,
1006 				  pdev, &pl_fw_data);
1007 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
1008 		wdi_event_handler(WDI_EVENT_SW_EVENT,
1009 				  pdev, &pl_fw_data);
1010 }
1011 
1012 #if defined(QCA_WIFI_3_0_ADRASTEA)
1013 static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
1014 {
1015 	int rc = 0; /* sane */
1016 
1017 	if ((!nbuf) ||
1018 	    (nbuf->data < nbuf->head) ||
1019 	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
1020 		rc = -EINVAL;
1021 
1022 	return rc;
1023 }
1024 /**
1025  * pktlog_t2h_msg_handler() - Target to host message handler
1026  * @context: pdev context
1027  * @pkt: HTC packet
1028  *
1029  * Return: None
1030  */
1031 static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
1032 {
1033 	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
1034 	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
1035 	uint32_t *msg_word;
1036 	uint32_t msg_len;
1037 
1038 	/* check for sanity of the packet, have seen corrupted pkts */
1039 	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
1040 		qdf_print("%s: packet 0x%pK corrupted? Leaking...",
1041 			  __func__, pktlog_t2h_msg);
1042 		/* do not free; may crash! */
1043 		QDF_ASSERT(0);
1044 		return;
1045 	}
1046 
1047 	/* check for successful message reception */
1048 	if (pkt->Status != QDF_STATUS_SUCCESS) {
1049 		if (pkt->Status != QDF_STATUS_E_CANCELED)
1050 			pdev->htc_err_cnt++;
1051 		qdf_nbuf_free(pktlog_t2h_msg);
1052 		return;
1053 	}
1054 
1055 	/* confirm alignment */
1056 	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
1057 
1058 	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
1059 	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
1060 	pktlog_process_fw_msg(msg_word, msg_len);
1061 
1062 	qdf_nbuf_free(pktlog_t2h_msg);
1063 }
1064 
/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * Pktlog does not send over this endpoint, so HTC should never ask
 * to resume its TX queue; loudly flag the unexpected call.
 */
static void pktlog_tx_resume_handler(void *context)
{
	(void)context;

	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
1076 
1077 /**
1078  * pktlog_h2t_send_complete() - send complete indication
1079  * @context: pdev context
1080  * @htc_pkt: HTC packet
1081  *
1082  * Return: None
1083  */
1084 static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
1085 {
1086 	qdf_print("%s: Not expected", __func__);
1087 	qdf_assert(0);
1088 }
1089 
1090 /**
1091  * pktlog_h2t_full() - queue full indication
1092  * @context: pdev context
1093  * @pkt: HTC packet
1094  *
1095  * Return: HTC action
1096  */
1097 static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
1098 {
1099 	return HTC_SEND_FULL_KEEP;
1100 }
1101 
1102 /**
1103  * pktlog_htc_connect_service() - create new endpoint for packetlog
1104  * @pdev - pktlog pdev
1105  *
1106  * Return: 0 for success/failure
1107  */
1108 static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
1109 {
1110 	struct htc_service_connect_req connect;
1111 	struct htc_service_connect_resp response;
1112 	QDF_STATUS status;
1113 
1114 	qdf_mem_zero(&connect, sizeof(connect));
1115 	qdf_mem_zero(&response, sizeof(response));
1116 
1117 	connect.pMetaData = NULL;
1118 	connect.MetaDataLength = 0;
1119 	connect.EpCallbacks.pContext = pdev;
1120 	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
1121 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
1122 	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
1123 	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
1124 
1125 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
1126 	connect.EpCallbacks.EpRecvRefill = NULL;
1127 	connect.EpCallbacks.RecvRefillWaterMark = 1;
1128 	/* N/A, fill is done by HIF */
1129 
1130 	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
1131 	/*
1132 	 * Specify how deep to let a queue get before htc_send_pkt will
1133 	 * call the EpSendFull function due to excessive send queue depth.
1134 	 */
1135 	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
1136 
1137 	/* disable flow control for HTT data message service */
1138 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
1139 
1140 	/* connect to control service */
1141 	connect.service_id = PACKET_LOG_SVC;
1142 
1143 	status = htc_connect_service(pdev->htc_pdev, &connect, &response);
1144 
1145 	if (status != QDF_STATUS_SUCCESS) {
1146 		pdev->mt_pktlog_enabled = false;
1147 		return -EIO;       /* failure */
1148 	}
1149 
1150 	pdev->htc_endpoint = response.Endpoint;
1151 	pdev->mt_pktlog_enabled = true;
1152 
1153 	return 0;               /* success */
1154 }
1155 
1156 /**
1157  * pktlog_htc_attach() - attach pktlog HTC service
1158  *
1159  * Return: 0 for success/failure
1160  */
1161 int pktlog_htc_attach(void)
1162 {
1163 	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1164 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1165 
1166 	if ((!pl_pdev) || (!htc_pdev)) {
1167 		qdf_print("Invalid pl_dev or htc_pdev handle");
1168 		return -EINVAL;
1169 	}
1170 
1171 	pl_pdev->htc_pdev = htc_pdev;
1172 	return pktlog_htc_connect_service(pl_pdev);
1173 }
1174 #else
1175 int pktlog_htc_attach(void)
1176 {
1177 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
1178 
1179 	if (!pl_dev) {
1180 		qdf_print("Invalid pl_dev handle");
1181 		return -EINVAL;
1182 	}
1183 
1184 	pl_dev->mt_pktlog_enabled = false;
1185 	return 0;
1186 }
1187 #endif
1188 #endif /* REMOVE_PKT_LOG */
1189