xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  *
21  * Permission to use, copy, modify, and/or distribute this software for any
22  * purpose with or without fee is hereby granted, provided that the above
23  * copyright notice and this permission notice appear in all copies.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
26  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
28  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
29  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
30  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
31  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
32  */
33 
34 #ifndef REMOVE_PKT_LOG
35 
36 #include "qdf_mem.h"
37 #include "athdefs.h"
38 #include "pktlog_ac_i.h"
39 #include "cds_api.h"
40 #include "wma_types.h"
41 #include "htc.h"
42 #include <cdp_txrx_cmn_struct.h>
43 #include <cdp_txrx_ctrl.h>
44 
/* WDI event subscribers; their callbacks are bound in pktlog_init()
 * according to the registered callback type (full vs. lite pktlog).
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;

/* Architecture-dependent pktlog entry points exposed to the rest of
 * the driver.
 */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

/* Global pktlog device instance handed out by pktlog_sethandle() */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
64 
/**
 * pktlog_sethandle() - bind the global pktlog device to a HIF context
 * @pl_handle: out parameter; receives a pointer to the global pl_dev
 * @scn: opaque HIF softc stored in pl_dev for later use
 */
void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
		     struct hif_opaque_softc *scn)
{
	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
	*pl_handle = &pl_dev;
}
71 
72 void pktlog_set_callback_regtype(
73 		enum pktlog_callback_regtype callback_type)
74 {
75 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
76 
77 	if (!pl_dev) {
78 		qdf_print("Invalid pl_dev");
79 		return;
80 	}
81 
82 	pl_dev->callback_type = callback_type;
83 }
84 
85 #ifdef CONFIG_MCL
86 struct pktlog_dev_t *get_pktlog_handle(void)
87 {
88 	struct cdp_pdev *pdev_txrx_handle =
89 				cds_get_context(QDF_MODULE_ID_TXRX);
90 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
91 
92 	return cdp_get_pldev(soc, pdev_txrx_handle);
93 }
94 
95 /*
96  * Get current txrx context
97  */
98 void *get_txrx_context(void)
99 {
100 	return cds_get_context(QDF_MODULE_ID_TXRX);
101 }
102 
103 #else
104 /* TODO: Need to use WIN implementation to return pktlog_dev handle */
105 static inline struct pktlog_dev_t *get_pktlog_handle(void)
106 {
107 	return NULL;
108 }
109 static struct pktlog_dev_t *get_txrx_context(void) { }
110 #endif
111 
112 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
113 				    WMI_CMD_ID cmd_id, bool ini_triggered,
114 				    uint8_t user_triggered)
115 {
116 	struct scheduler_msg msg = { 0 };
117 	QDF_STATUS status;
118 	struct ath_pktlog_wmi_params *param;
119 
120 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
121 
122 	if (!param)
123 		return A_NO_MEMORY;
124 
125 	param->cmd_id = cmd_id;
126 	param->pktlog_event = event_types;
127 	param->ini_triggered = ini_triggered;
128 	param->user_triggered = user_triggered;
129 
130 	msg.type = WMA_PKTLOG_ENABLE_REQ;
131 	msg.bodyptr = param;
132 	msg.bodyval = 0;
133 
134 	status = scheduler_post_msg(QDF_MODULE_ID_WMA, &msg);
135 
136 	if (status != QDF_STATUS_SUCCESS) {
137 		qdf_mem_free(param);
138 		return A_ERROR;
139 	}
140 
141 	return A_OK;
142 }
143 
144 static inline A_STATUS
145 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
146 		 bool ini_triggered, uint8_t user_triggered)
147 {
148 	uint32_t types = 0;
149 
150 	if (log_state & ATH_PKTLOG_TX)
151 		types |= WMI_PKTLOG_EVENT_TX;
152 
153 	if (log_state & ATH_PKTLOG_RX)
154 		types |= WMI_PKTLOG_EVENT_RX;
155 
156 	if (log_state & ATH_PKTLOG_RCFIND)
157 		types |= WMI_PKTLOG_EVENT_RCF;
158 
159 	if (log_state & ATH_PKTLOG_RCUPDATE)
160 		types |= WMI_PKTLOG_EVENT_RCU;
161 
162 	if (log_state & ATH_PKTLOG_SW_EVENT)
163 		types |= WMI_PKTLOG_EVENT_SW;
164 
165 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
166 				   ini_triggered, user_triggered);
167 }
168 
/**
 * wdi_pktlog_subscribe() - attach pktlog subscribers to WDI events
 * @cdp_pdev: txrx pdev handle to subscribe on
 * @log_state: ATH_PKTLOG_* bitmap selecting which events to hook
 *
 * Subscriber callbacks must already be bound (see pktlog_init()).
 *
 * Return: A_OK on success, A_ERROR on the first failed subscription
 */
static inline A_STATUS
wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
{
#ifdef CONFIG_MCL
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
#else
	/*TODO: WIN implementation to get soc */
#endif

	if (!cdp_pdev) {
		qdf_print("Invalid pdev in %s\n", __func__);
		return A_ERROR;
	}

	if (log_state & ATH_PKTLOG_TX) {
		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_TX_SUBSCRIBER,
				WDI_EVENT_TX_STATUS)) {
			return A_ERROR;
		}
	}
	/* RX logging hooks both the local and remote descriptor events */
	if (log_state & ATH_PKTLOG_RX) {
		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER,
				WDI_EVENT_RX_DESC)) {
			return A_ERROR;
		}
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				&PKTLOG_RX_REMOTE_SUBSCRIBER,
				WDI_EVENT_RX_DESC_REMOTE)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RCFIND) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				  &PKTLOG_RCFIND_SUBSCRIBER,
				  WDI_EVENT_RATE_FIND)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RCUPDATE) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				  &PKTLOG_RCUPDATE_SUBSCRIBER,
				  WDI_EVENT_RATE_UPDATE)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_SW_EVENT) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				  &PKTLOG_SW_EVENT_SUBSCRIBER,
				  WDI_EVENT_SW_EVENT)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_LITE_T2H) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				  &PKTLOG_LITE_T2H_SUBSCRIBER,
				  WDI_EVENT_LITE_T2H)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_LITE_RX) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				&PKTLOG_LITE_RX_SUBSCRIBER,
				WDI_EVENT_LITE_RX)) {
			return A_ERROR;
		}
	}

	return A_OK;
}
238 
239 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
240 		u_int16_t peer_id, uint32_t status)
241 {
242 	switch (event) {
243 	case WDI_EVENT_TX_STATUS:
244 	{
245 		/*
246 		 * process TX message
247 		 */
248 		if (process_tx_info(pdev, log_data)) {
249 			qdf_print("Unable to process TX info\n");
250 			return;
251 		}
252 		break;
253 	}
254 	case WDI_EVENT_RX_DESC:
255 	{
256 		/*
257 		 * process RX message for local frames
258 		 */
259 		if (process_rx_info(pdev, log_data)) {
260 			qdf_print("Unable to process RX info\n");
261 			return;
262 		}
263 		break;
264 	}
265 	case WDI_EVENT_RX_DESC_REMOTE:
266 	{
267 		/*
268 		 * process RX message for remote frames
269 		 */
270 		if (process_rx_info_remote(pdev, log_data)) {
271 			qdf_print("Unable to process RX info\n");
272 			return;
273 		}
274 		break;
275 	}
276 	case WDI_EVENT_RATE_FIND:
277 	{
278 		/*
279 		 * process RATE_FIND message
280 		 */
281 		if (process_rate_find(pdev, log_data)) {
282 			qdf_print("Unable to process RC_FIND info\n");
283 			return;
284 		}
285 		break;
286 	}
287 	case WDI_EVENT_RATE_UPDATE:
288 	{
289 		/*
290 		 * process RATE_UPDATE message
291 		 */
292 		if (process_rate_update(pdev, log_data)) {
293 			qdf_print("Unable to process RC_UPDATE\n");
294 			return;
295 		}
296 		break;
297 	}
298 	case WDI_EVENT_SW_EVENT:
299 	{
300 		/*
301 		 * process SW EVENT message
302 		 */
303 		if (process_sw_event(pdev, log_data)) {
304 			qdf_print("Unable to process SW_EVENT\n");
305 			return;
306 		}
307 		break;
308 	}
309 	default:
310 		break;
311 	}
312 }
313 
314 void
315 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
316 			u_int16_t peer_id, uint32_t status)
317 {
318 	switch (event) {
319 	case WDI_EVENT_RX_DESC:
320 	{
321 		if (process_rx_desc_remote(context, log_data)) {
322 			qdf_print("Unable to process RX info\n");
323 			return;
324 		}
325 		break;
326 	}
327 	case WDI_EVENT_LITE_T2H:
328 	{
329 		if (process_pktlog_lite(context, log_data,
330 					PKTLOG_TYPE_LITE_T2H)) {
331 			qdf_print("Unable to process lite_t2h\n");
332 			return;
333 		}
334 		break;
335 	}
336 	case WDI_EVENT_LITE_RX:
337 	{
338 		if (process_pktlog_lite(context, log_data,
339 					PKTLOG_TYPE_LITE_RX)) {
340 			qdf_print("Unable to process lite_rx\n");
341 			return;
342 		}
343 		break;
344 	}
345 	default:
346 		break;
347 	}
348 }
349 
350 A_STATUS
351 wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
352 {
353 #ifdef CONFIG_MCL
354 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
355 #else
356 	/* TODO: WIN implementation to get soc */
357 #endif
358 
359 	if (log_state & ATH_PKTLOG_TX) {
360 		if (cdp_wdi_event_unsub(soc, pdev,
361 				    &PKTLOG_TX_SUBSCRIBER,
362 				    WDI_EVENT_TX_STATUS)) {
363 			return A_ERROR;
364 		}
365 	}
366 	if (log_state & ATH_PKTLOG_RX) {
367 		if (cdp_wdi_event_unsub(soc, pdev,
368 				    &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
369 			return A_ERROR;
370 		}
371 		if (cdp_wdi_event_unsub(soc, pdev,
372 				    &PKTLOG_RX_REMOTE_SUBSCRIBER,
373 				    WDI_EVENT_RX_DESC_REMOTE)) {
374 			return A_ERROR;
375 		}
376 	}
377 	if (log_state & ATH_PKTLOG_RCFIND) {
378 		if (cdp_wdi_event_unsub(soc, pdev,
379 				    &PKTLOG_RCFIND_SUBSCRIBER,
380 				    WDI_EVENT_RATE_FIND)) {
381 			return A_ERROR;
382 		}
383 	}
384 	if (log_state & ATH_PKTLOG_RCUPDATE) {
385 		if (cdp_wdi_event_unsub(soc, pdev,
386 				    &PKTLOG_RCUPDATE_SUBSCRIBER,
387 				    WDI_EVENT_RATE_UPDATE)) {
388 			return A_ERROR;
389 		}
390 	}
391 	if (log_state & ATH_PKTLOG_RCUPDATE) {
392 		if (cdp_wdi_event_unsub(soc, pdev,
393 				    &PKTLOG_SW_EVENT_SUBSCRIBER,
394 				    WDI_EVENT_SW_EVENT)) {
395 			return A_ERROR;
396 		}
397 	}
398 	if (log_state & ATH_PKTLOG_LITE_T2H) {
399 		if (cdp_wdi_event_unsub(soc, pdev,
400 				  &PKTLOG_LITE_T2H_SUBSCRIBER,
401 				  WDI_EVENT_LITE_T2H)) {
402 			return A_ERROR;
403 		}
404 	}
405 	if (log_state & ATH_PKTLOG_LITE_RX) {
406 		if (cdp_wdi_event_unsub(soc, pdev,
407 				&PKTLOG_LITE_RX_SUBSCRIBER,
408 				WDI_EVENT_LITE_RX)) {
409 			return A_ERROR;
410 		}
411 	}
412 
413 	return A_OK;
414 }
415 
/**
 * pktlog_disable() - stop firmware pktlog and detach WDI subscribers
 * @scn: HIF context (unused; handles are fetched via the cds context
 *	 store)
 *
 * Return: 0 on success, -EINVAL on failure, -EBUSY while a pktlog
 *	   read/clear operation is in progress
 */
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	struct cdp_pdev *txrx_pdev = get_txrx_context();

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}

	if (!txrx_pdev) {
		qdf_print("Invalid cdp_pdev");
		return -EINVAL;
	}

	/* refuse while any buffer read/clear operation is mid-flight */
	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* ask the firmware to stop logging first */
	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target\n");
		return -EINVAL;
	}

	/* then detach the host-side WDI subscribers */
	if (pl_dev->is_pktlog_cb_subscribed &&
		wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI\n");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;
	/* preserve an in-progress read's state-machine transition */
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
473 
/**
 * pktlog_init() - reset pktlog state and bind WDI subscriber callbacks
 * @scn: HIF context (unused; handles are fetched via the cds context
 *	 store)
 *
 * Zeroes pl_info, initializes its lock and mutex, applies default
 * buffer size and trigger thresholds, and wires either the full or the
 * lite callback set depending on the registered callback type.
 */
void pktlog_init(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;

	if (pl_dev == NULL || pl_dev->pl_info == NULL) {
		qdf_print("pl_dev or pl_info is invalid\n");
		return;
	}

	pl_info = pl_dev->pl_info;

	OS_MEMZERO(pl_info, sizeof(*pl_info));
	PKTLOG_LOCK_INIT(pl_info);
	mutex_init(&pl_info->pktlog_mutex);

	/* defaults; the buffer itself is allocated on first enable */
	pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
	pl_info->buf = NULL;
	pl_info->log_state = 0;
	pl_info->init_saved_state = 0;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
	pl_info->pktlen = 0;
	pl_info->start_time_thruput = 0;
	pl_info->start_time_per = 0;
	pl_dev->vendor_cmd_send = false;

	/* bind subscriber callbacks for the selected registration type */
	if (pl_dev->callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
	} else if (pl_dev->callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
	}
}
518 
/**
 * __pktlog_enable() - enable/disable pktlog; caller holds pktlog_mutex
 * @scn: HIF context used for buffer allocation
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: true when triggered by INI configuration
 * @user_triggered: non-zero when triggered by a user command
 * @is_iwpriv_command: 0 for the vendor-command path, non-zero for
 *		       iwpriv
 *
 * Allocates the log buffer on first enable, subscribes to WDI events
 * and asks firmware to start logging; when log_state is 0 it disables
 * logging via the pl_funcs dispatch table.
 *
 * Return: 0 on success, negative errno on failure
 */
static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		 bool ini_triggered, uint8_t user_triggered,
		 uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *cdp_pdev;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	cdp_pdev = get_txrx_context();
	if (!cdp_pdev) {
		qdf_print("%s: Invalid txrx context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	/* refuse while a read/clear operation owns the state machine */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress\n", __func__);
		return 0;
	}

	/* first enable: allocate and initialize the ring buffer */
	if (!pl_dev->tgt_pktlog_alloced) {
		if (pl_info->buf == NULL) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed\n",
					__func__);
				return -ENOMEM;
			}

			/* defensive: alloc reported success but the
			 * buffer pointer is still NULL
			 */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed\n",
				       __func__);
				ASSERT(0);
				return -ENOMEM;
			}

		}

		spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(cdp_pdev, log_state);
			if (error) {
				pl_info->curr_pkt_state =
						PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s\n",
					__func__);
				return -EINVAL;
			}
		} else {
			/* already subscribed: re-enable is rejected */
			qdf_print("Unable to subscribe %d to the WDI %s\n",
				  log_state, __func__);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s\n", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: disable via the dispatch table */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
648 
649 int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
650 		 bool ini_triggered, uint8_t user_triggered,
651 		 uint32_t is_iwpriv_command)
652 {
653 	struct pktlog_dev_t *pl_dev;
654 	struct ath_pktlog_info *pl_info;
655 	int err;
656 
657 	pl_dev = get_pktlog_handle();
658 
659 	if (!pl_dev) {
660 		qdf_print("%s: invalid pl_dev handle", __func__);
661 		return -EINVAL;
662 	}
663 
664 	pl_info = pl_dev->pl_info;
665 
666 	if (!pl_info) {
667 		qdf_print("%s: invalid pl_info handle", __func__);
668 		return -EINVAL;
669 	}
670 
671 	mutex_lock(&pl_info->pktlog_mutex);
672 	err = __pktlog_enable(scn, log_state, ini_triggered,
673 				user_triggered, is_iwpriv_command);
674 	mutex_unlock(&pl_info->pktlog_mutex);
675 	return err;
676 }
677 
/* Requested pktlog buffer sizes must fall within [1 MB, 16 MB] */
#define ONE_MEGABYTE (1024 * 1024)
#define MAX_ALLOWED_PKTLOG_SIZE (16 * ONE_MEGABYTE)

/**
 * __pktlog_setsize() - record a new pktlog buffer size; caller holds
 * pktlog_mutex
 * @scn: HIF context used to release the old buffer
 * @size: requested size in bytes
 *
 * Frees any existing buffer (unsubscribing from WDI first) and stores
 * the new size; the buffer itself is re-allocated on the next enable.
 * Logging must be disabled before the size can change.
 *
 * Return: 0 on success, negative errno on failure
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *pdev;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		/* NOTE(review): message says pl_dev but the check is on
		 * pl_info — consider aligning with pktlog_enable()
		 */
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pdev = get_txrx_context();

	if (!pdev) {
		qdf_print("%s: invalid pdev handle", __func__);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) {
		qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes."
			"Min required is %d MB and Max allowed is %d MB.\n",
			__func__, size, (ONE_MEGABYTE/ONE_MEGABYTE),
			(MAX_ALLOWED_PKTLOG_SIZE/ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid requested buff size", __func__);
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}

	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		/* drop the WDI subscriptions that write into the buffer
		 * before releasing it
		 */
		if (pl_dev->is_pktlog_cb_subscribed &&
			wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Cannot unsubscribe pktlog from the WDI\n");
			spin_unlock_bh(&pl_info->log_lock);
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d\n", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
762 
763 int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
764 {
765 	struct pktlog_dev_t *pl_dev;
766 	struct ath_pktlog_info *pl_info;
767 	int status;
768 
769 	pl_dev = get_pktlog_handle();
770 
771 	if (!pl_dev) {
772 		qdf_print("%s: invalid pl_dev handle", __func__);
773 		return -EINVAL;
774 	}
775 
776 	pl_info = pl_dev->pl_info;
777 
778 	if (!pl_info) {
779 		qdf_print("%s: invalid pl_dev handle", __func__);
780 		return -EINVAL;
781 	}
782 
783 	mutex_lock(&pl_info->pktlog_mutex);
784 	status = __pktlog_setsize(scn, size);
785 	mutex_unlock(&pl_info->pktlog_mutex);
786 
787 	return status;
788 }
789 
790 int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
791 {
792 	struct pktlog_dev_t *pl_dev;
793 	struct ath_pktlog_info *pl_info;
794 	uint8_t save_pktlog_state;
795 
796 	pl_dev = get_pktlog_handle();
797 
798 	if (!pl_dev) {
799 		qdf_print("%s: invalid pl_dev handle", __func__);
800 		return -EINVAL;
801 	}
802 
803 	pl_info = pl_dev->pl_info;
804 
805 	if (!pl_info) {
806 		qdf_print("%s: invalid pl_dev handle", __func__);
807 		return -EINVAL;
808 	}
809 
810 	if (!clear_buff)
811 		return -EINVAL;
812 
813 	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
814 	    pl_info->curr_pkt_state ==
815 				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
816 		return -EBUSY;
817 
818 	save_pktlog_state = pl_info->curr_pkt_state;
819 	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
820 
821 	if (pl_info->log_state) {
822 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
823 		qdf_print("%s: Logging should be disabled before clearing "
824 			  "pktlog buffer.", __func__);
825 		return -EINVAL;
826 	}
827 
828 	if (pl_info->buf != NULL) {
829 		if (pl_info->buf_size > 0) {
830 			qdf_print("%s: pktlog buffer is cleared.", __func__);
831 			memset(pl_info->buf, 0, pl_info->buf_size);
832 			pl_dev->is_pktlog_cb_subscribed = false;
833 			pl_dev->tgt_pktlog_alloced = false;
834 			pl_info->buf->rd_offset = -1;
835 		} else {
836 			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
837 			qdf_print("%s: pktlog buffer size is not proper. "
838 				  "Existing Buf size %d", __func__,
839 				  pl_info->buf_size);
840 			return -EFAULT;
841 		}
842 	} else {
843 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
844 		qdf_print("%s: pktlog buff is NULL", __func__);
845 		return -EFAULT;
846 	}
847 
848 	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
849 		pl_info->curr_pkt_state =
850 			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
851 	else
852 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
853 
854 	return 0;
855 }
856 
857 /**
858  * pktlog_process_fw_msg() - process packetlog message
859  * @buff: buffer
860  *
861  * Return: None
862  */
863 void pktlog_process_fw_msg(uint32_t *buff, uint32_t len)
864 {
865 	uint32_t *pl_hdr;
866 	uint32_t log_type;
867 	struct cdp_pdev *pdev = get_txrx_context();
868 	struct ol_fw_data pl_fw_data;
869 
870 	if (!pdev) {
871 		qdf_print("%s: txrx_pdev is NULL", __func__);
872 		return;
873 	}
874 	pl_hdr = buff;
875 	pl_fw_data.data = pl_hdr;
876 	pl_fw_data.len = len;
877 
878 	log_type =
879 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
880 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
881 
882 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
883 		|| (log_type == PKTLOG_TYPE_TX_STAT)
884 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
885 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
886 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
887 		wdi_event_handler(WDI_EVENT_TX_STATUS,
888 				  pdev, &pl_fw_data);
889 	else if (log_type == PKTLOG_TYPE_RC_FIND)
890 		wdi_event_handler(WDI_EVENT_RATE_FIND,
891 				  pdev, &pl_fw_data);
892 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
893 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
894 				  pdev, &pl_fw_data);
895 	else if (log_type == PKTLOG_TYPE_RX_STAT)
896 		wdi_event_handler(WDI_EVENT_RX_DESC,
897 				  pdev, &pl_fw_data);
898 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
899 		wdi_event_handler(WDI_EVENT_SW_EVENT,
900 				  pdev, &pl_fw_data);
901 }
902 
903 #if defined(QCA_WIFI_3_0_ADRASTEA)
904 static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
905 {
906 	int rc = 0; /* sane */
907 
908 	if ((!nbuf) ||
909 	    (nbuf->data < nbuf->head) ||
910 	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
911 		rc = -EINVAL;
912 
913 	return rc;
914 }
/**
 * pktlog_t2h_msg_handler() - Target to host message handler
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Validates the received nbuf, forwards its payload to
 * pktlog_process_fw_msg(), and frees the nbuf. A corrupted nbuf is
 * deliberately leaked instead of freed (freeing it has crashed).
 *
 * Return: None
 */
static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	uint32_t *msg_word;
	uint32_t msg_len;

	/* check for sanity of the packet, have seen corrupted pkts */
	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
		qdf_print("%s: packet 0x%pK corrupted? Leaking...",
			  __func__, pktlog_t2h_msg);
		/* do not free; may crash! */
		QDF_ASSERT(0);
		return;
	}

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation during teardown is not an error */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			pdev->htc_err_cnt++;
		qdf_nbuf_free(pktlog_t2h_msg);
		return;
	}

	/* confirm alignment */
	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);

	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
	pktlog_process_fw_msg(msg_word, msg_len);

	qdf_nbuf_free(pktlog_t2h_msg);
}
955 
/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * Not expected on the pktlog endpoint (credit flow control is
 * disabled at connect time); asserts if invoked.
 *
 * Return: None
 */
static void pktlog_tx_resume_handler(void *context)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
967 
/**
 * pktlog_h2t_send_complete() - send complete indication
 * @context: pdev context
 * @htc_pkt: HTC packet
 *
 * Not expected: pktlog never sends host-to-target packets; asserts if
 * invoked.
 *
 * Return: None
 */
static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
980 
/**
 * pktlog_h2t_full() - queue full indication
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Return: HTC_SEND_FULL_KEEP, i.e. keep the packet queued despite the
 *	   backlog
 */
static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
992 
/**
 * pktlog_htc_connect_service() - create new endpoint for packetlog
 * @pdev: pktlog pdev
 *
 * Connects the PACKET_LOG_SVC HTC service with credit flow control
 * disabled and records the resulting endpoint in @pdev.
 *
 * Return: 0 on success, -EIO on failure
 */
static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_set(&connect, sizeof(connect), 0);
	qdf_mem_set(&response, sizeof(response), 0);

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = pdev;
	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */

	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = PACKET_LOG_SVC;

	status = htc_connect_service(pdev->htc_pdev, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		pdev->mt_pktlog_enabled = false;
		return -EIO;       /* failure */
	}

	pdev->htc_endpoint = response.Endpoint;
	pdev->mt_pktlog_enabled = true;

	return 0;               /* success */
}
1046 
1047 /**
1048  * pktlog_htc_attach() - attach pktlog HTC service
1049  *
1050  * Return: 0 for success/failure
1051  */
1052 int pktlog_htc_attach(void)
1053 {
1054 	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1055 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1056 
1057 	if ((!pl_pdev) || (!htc_pdev)) {
1058 		qdf_print("Invalid pl_dev or htc_pdev handle");
1059 		return -EINVAL;
1060 	}
1061 
1062 	pl_pdev->htc_pdev = htc_pdev;
1063 	return pktlog_htc_connect_service(pl_pdev);
1064 }
1065 #else
1066 int pktlog_htc_attach(void)
1067 {
1068 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
1069 
1070 	if (!pl_dev) {
1071 		qdf_print("Invalid pl_dev handle");
1072 		return -EINVAL;
1073 	}
1074 
1075 	pl_dev->mt_pktlog_enabled = false;
1076 	return 0;
1077 }
1078 #endif
1079 #endif /* REMOVE_PKT_LOG */
1080