xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /*
29  *
30  * Permission to use, copy, modify, and/or distribute this software for any
31  * purpose with or without fee is hereby granted, provided that the above
32  * copyright notice and this permission notice appear in all copies.
33  *
34  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
35  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
36  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
37  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
38  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
39  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
40  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
41  */
42 
43 #ifndef REMOVE_PKT_LOG
44 
45 #include "qdf_mem.h"
46 #include "athdefs.h"
47 #include "pktlog_ac_i.h"
48 #include "cds_api.h"
49 #include "wma_types.h"
50 #include "htc.h"
51 #include <cdp_txrx_cmn_struct.h>
52 #include <cdp_txrx_ctrl.h>
53 
/*
 * Global WDI event subscriber objects.  Their callback members are filled
 * in by pktlog_init() according to pl_dev->callback_type (full callbacks
 * vs. the "lite" variants) before being registered via cdp_wdi_event_sub().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
62 
/* Dispatch table of architecture-dependent pktlog operations. */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

/* Module-wide pktlog device instance, published via pktlog_sethandle(). */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
73 
74 void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
75 		     struct hif_opaque_softc *scn)
76 {
77 	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
78 	*pl_handle = &pl_dev;
79 }
80 
81 void pktlog_set_callback_regtype(
82 		enum pktlog_callback_regtype callback_type)
83 {
84 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
85 
86 	if (!pl_dev) {
87 		qdf_print("Invalid pl_dev");
88 		return;
89 	}
90 
91 	pl_dev->callback_type = callback_type;
92 }
93 
94 #ifdef CONFIG_MCL
95 struct pktlog_dev_t *get_pktlog_handle(void)
96 {
97 	struct cdp_pdev *pdev_txrx_handle =
98 				cds_get_context(QDF_MODULE_ID_TXRX);
99 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
100 
101 	return cdp_get_pldev(soc, pdev_txrx_handle);
102 }
103 
104 /*
105  * Get current txrx context
106  */
107 void *get_txrx_context(void)
108 {
109 	return cds_get_context(QDF_MODULE_ID_TXRX);
110 }
111 
112 #else
113 /* TODO: Need to use WIN implementation to return pktlog_dev handle */
114 static inline struct pktlog_dev_t *get_pktlog_handle(void)
115 {
116 	return NULL;
117 }
118 static struct pktlog_dev_t *get_txrx_context(void) { }
119 #endif
120 
121 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
122 				    WMI_CMD_ID cmd_id, bool ini_triggered,
123 				    uint8_t user_triggered)
124 {
125 	struct scheduler_msg msg = { 0 };
126 	QDF_STATUS status;
127 	struct ath_pktlog_wmi_params *param;
128 
129 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
130 
131 	if (!param)
132 		return A_NO_MEMORY;
133 
134 	param->cmd_id = cmd_id;
135 	param->pktlog_event = event_types;
136 	param->ini_triggered = ini_triggered;
137 	param->user_triggered = user_triggered;
138 
139 	msg.type = WMA_PKTLOG_ENABLE_REQ;
140 	msg.bodyptr = param;
141 	msg.bodyval = 0;
142 
143 	status = scheduler_post_msg(QDF_MODULE_ID_WMA, &msg);
144 
145 	if (status != QDF_STATUS_SUCCESS) {
146 		qdf_mem_free(param);
147 		return A_ERROR;
148 	}
149 
150 	return A_OK;
151 }
152 
153 static inline A_STATUS
154 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
155 		 bool ini_triggered, uint8_t user_triggered)
156 {
157 	uint32_t types = 0;
158 
159 	if (log_state & ATH_PKTLOG_TX)
160 		types |= WMI_PKTLOG_EVENT_TX;
161 
162 	if (log_state & ATH_PKTLOG_RX)
163 		types |= WMI_PKTLOG_EVENT_RX;
164 
165 	if (log_state & ATH_PKTLOG_RCFIND)
166 		types |= WMI_PKTLOG_EVENT_RCF;
167 
168 	if (log_state & ATH_PKTLOG_RCUPDATE)
169 		types |= WMI_PKTLOG_EVENT_RCU;
170 
171 	if (log_state & ATH_PKTLOG_SW_EVENT)
172 		types |= WMI_PKTLOG_EVENT_SW;
173 
174 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
175 				   ini_triggered, user_triggered);
176 }
177 
178 static inline A_STATUS
179 wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
180 {
181 #ifdef CONFIG_MCL
182 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
183 #else
184 	/*TODO: WIN implementation to get soc */
185 #endif
186 
187 	if (!cdp_pdev) {
188 		qdf_print("Invalid pdev in %s\n", __func__);
189 		return A_ERROR;
190 	}
191 
192 	if (log_state & ATH_PKTLOG_TX) {
193 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_TX_SUBSCRIBER,
194 				WDI_EVENT_TX_STATUS)) {
195 			return A_ERROR;
196 		}
197 	}
198 	if (log_state & ATH_PKTLOG_RX) {
199 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER,
200 				WDI_EVENT_RX_DESC)) {
201 			return A_ERROR;
202 		}
203 		if (cdp_wdi_event_sub(soc, cdp_pdev,
204 				&PKTLOG_RX_REMOTE_SUBSCRIBER,
205 				WDI_EVENT_RX_DESC_REMOTE)) {
206 			return A_ERROR;
207 		}
208 	}
209 	if (log_state & ATH_PKTLOG_RCFIND) {
210 		if (cdp_wdi_event_sub(soc, cdp_pdev,
211 				  &PKTLOG_RCFIND_SUBSCRIBER,
212 				  WDI_EVENT_RATE_FIND)) {
213 			return A_ERROR;
214 		}
215 	}
216 	if (log_state & ATH_PKTLOG_RCUPDATE) {
217 		if (cdp_wdi_event_sub(soc, cdp_pdev,
218 				  &PKTLOG_RCUPDATE_SUBSCRIBER,
219 				  WDI_EVENT_RATE_UPDATE)) {
220 			return A_ERROR;
221 		}
222 	}
223 	if (log_state & ATH_PKTLOG_SW_EVENT) {
224 		if (cdp_wdi_event_sub(soc, cdp_pdev,
225 				  &PKTLOG_SW_EVENT_SUBSCRIBER,
226 				  WDI_EVENT_SW_EVENT)) {
227 			return A_ERROR;
228 		}
229 	}
230 	if (log_state & ATH_PKTLOG_LITE_T2H) {
231 		if (cdp_wdi_event_sub(soc, cdp_pdev,
232 				  &PKTLOG_LITE_T2H_SUBSCRIBER,
233 				  WDI_EVENT_LITE_T2H)) {
234 			return A_ERROR;
235 		}
236 	}
237 	if (log_state & ATH_PKTLOG_LITE_RX) {
238 		if (cdp_wdi_event_sub(soc, cdp_pdev,
239 				&PKTLOG_LITE_RX_SUBSCRIBER,
240 				WDI_EVENT_LITE_RX)) {
241 			return A_ERROR;
242 		}
243 	}
244 
245 	return A_OK;
246 }
247 
248 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
249 		u_int16_t peer_id, uint32_t status)
250 {
251 	switch (event) {
252 	case WDI_EVENT_TX_STATUS:
253 	{
254 		/*
255 		 * process TX message
256 		 */
257 		if (process_tx_info(pdev, log_data)) {
258 			qdf_print("Unable to process TX info\n");
259 			return;
260 		}
261 		break;
262 	}
263 	case WDI_EVENT_RX_DESC:
264 	{
265 		/*
266 		 * process RX message for local frames
267 		 */
268 		if (process_rx_info(pdev, log_data)) {
269 			qdf_print("Unable to process RX info\n");
270 			return;
271 		}
272 		break;
273 	}
274 	case WDI_EVENT_RX_DESC_REMOTE:
275 	{
276 		/*
277 		 * process RX message for remote frames
278 		 */
279 		if (process_rx_info_remote(pdev, log_data)) {
280 			qdf_print("Unable to process RX info\n");
281 			return;
282 		}
283 		break;
284 	}
285 	case WDI_EVENT_RATE_FIND:
286 	{
287 		/*
288 		 * process RATE_FIND message
289 		 */
290 		if (process_rate_find(pdev, log_data)) {
291 			qdf_print("Unable to process RC_FIND info\n");
292 			return;
293 		}
294 		break;
295 	}
296 	case WDI_EVENT_RATE_UPDATE:
297 	{
298 		/*
299 		 * process RATE_UPDATE message
300 		 */
301 		if (process_rate_update(pdev, log_data)) {
302 			qdf_print("Unable to process RC_UPDATE\n");
303 			return;
304 		}
305 		break;
306 	}
307 	case WDI_EVENT_SW_EVENT:
308 	{
309 		/*
310 		 * process SW EVENT message
311 		 */
312 		if (process_sw_event(pdev, log_data)) {
313 			qdf_print("Unable to process SW_EVENT\n");
314 			return;
315 		}
316 		break;
317 	}
318 	default:
319 		break;
320 	}
321 }
322 
323 void
324 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
325 			u_int16_t peer_id, uint32_t status)
326 {
327 	switch (event) {
328 	case WDI_EVENT_RX_DESC:
329 	{
330 		if (process_rx_desc_remote(context, log_data)) {
331 			qdf_print("Unable to process RX info\n");
332 			return;
333 		}
334 		break;
335 	}
336 	case WDI_EVENT_LITE_T2H:
337 	{
338 		if (process_pktlog_lite(context, log_data,
339 					PKTLOG_TYPE_LITE_T2H)) {
340 			qdf_print("Unable to process lite_t2h\n");
341 			return;
342 		}
343 		break;
344 	}
345 	case WDI_EVENT_LITE_RX:
346 	{
347 		if (process_pktlog_lite(context, log_data,
348 					PKTLOG_TYPE_LITE_RX)) {
349 			qdf_print("Unable to process lite_rx\n");
350 			return;
351 		}
352 		break;
353 	}
354 	default:
355 		break;
356 	}
357 }
358 
359 A_STATUS
360 wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
361 {
362 #ifdef CONFIG_MCL
363 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
364 #else
365 	/* TODO: WIN implementation to get soc */
366 #endif
367 
368 	if (log_state & ATH_PKTLOG_TX) {
369 		if (cdp_wdi_event_unsub(soc, pdev,
370 				    &PKTLOG_TX_SUBSCRIBER,
371 				    WDI_EVENT_TX_STATUS)) {
372 			return A_ERROR;
373 		}
374 	}
375 	if (log_state & ATH_PKTLOG_RX) {
376 		if (cdp_wdi_event_unsub(soc, pdev,
377 				    &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
378 			return A_ERROR;
379 		}
380 		if (cdp_wdi_event_unsub(soc, pdev,
381 				    &PKTLOG_RX_REMOTE_SUBSCRIBER,
382 				    WDI_EVENT_RX_DESC_REMOTE)) {
383 			return A_ERROR;
384 		}
385 	}
386 	if (log_state & ATH_PKTLOG_RCFIND) {
387 		if (cdp_wdi_event_unsub(soc, pdev,
388 				    &PKTLOG_RCFIND_SUBSCRIBER,
389 				    WDI_EVENT_RATE_FIND)) {
390 			return A_ERROR;
391 		}
392 	}
393 	if (log_state & ATH_PKTLOG_RCUPDATE) {
394 		if (cdp_wdi_event_unsub(soc, pdev,
395 				    &PKTLOG_RCUPDATE_SUBSCRIBER,
396 				    WDI_EVENT_RATE_UPDATE)) {
397 			return A_ERROR;
398 		}
399 	}
400 	if (log_state & ATH_PKTLOG_RCUPDATE) {
401 		if (cdp_wdi_event_unsub(soc, pdev,
402 				    &PKTLOG_SW_EVENT_SUBSCRIBER,
403 				    WDI_EVENT_SW_EVENT)) {
404 			return A_ERROR;
405 		}
406 	}
407 	if (log_state & ATH_PKTLOG_LITE_T2H) {
408 		if (cdp_wdi_event_unsub(soc, pdev,
409 				  &PKTLOG_LITE_T2H_SUBSCRIBER,
410 				  WDI_EVENT_LITE_T2H)) {
411 			return A_ERROR;
412 		}
413 	}
414 	if (log_state & ATH_PKTLOG_LITE_RX) {
415 		if (cdp_wdi_event_unsub(soc, pdev,
416 				&PKTLOG_LITE_RX_SUBSCRIBER,
417 				WDI_EVENT_LITE_RX)) {
418 			return A_ERROR;
419 		}
420 	}
421 
422 	return A_OK;
423 }
424 
/**
 * pktlog_disable() - stop packet logging in firmware and host
 * @scn: HIF opaque context
 *
 * Posts WMI_PDEV_PKTLOG_DISABLE_CMDID to the target via WMA and, if WDI
 * callbacks were subscribed, unsubscribes them.  Refuses to run while a
 * read or clear-buffer operation is in progress.
 *
 * Return: 0 on success; -EINVAL on invalid handles or command failure;
 * -EBUSY when another pktlog operation is in progress
 */
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	struct cdp_pdev *txrx_pdev = get_txrx_context();

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}

	if (!txrx_pdev) {
		qdf_print("Invalid cdp_pdev");
		return -EINVAL;
	}

	/* reject while any other pktlog operation is mid-flight */
	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	/* remember prior state so an in-progress read can be flagged below */
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		/* restore state on every error path before returning */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target\n");
		return -EINVAL;
	}

	if (pl_dev->is_pktlog_cb_subscribed &&
		wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI\n");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;
	/* if a read had started, mark that pktlog got disabled mid-read */
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
482 
483 void pktlog_init(struct hif_opaque_softc *scn)
484 {
485 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
486 	struct ath_pktlog_info *pl_info;
487 
488 	if (pl_dev == NULL || pl_dev->pl_info == NULL) {
489 		qdf_print("pl_dev or pl_info is invalid\n");
490 		return;
491 	}
492 
493 	pl_info = pl_dev->pl_info;
494 
495 	OS_MEMZERO(pl_info, sizeof(*pl_info));
496 	PKTLOG_LOCK_INIT(pl_info);
497 	mutex_init(&pl_info->pktlog_mutex);
498 
499 	pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
500 	pl_info->buf = NULL;
501 	pl_info->log_state = 0;
502 	pl_info->init_saved_state = 0;
503 	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
504 	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
505 	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
506 	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
507 	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
508 	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
509 	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
510 	pl_info->pktlen = 0;
511 	pl_info->start_time_thruput = 0;
512 	pl_info->start_time_per = 0;
513 	pl_dev->vendor_cmd_send = false;
514 
515 	if (pl_dev->callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
516 		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
517 		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
518 		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
519 		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
520 		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
521 		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
522 	} else if (pl_dev->callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
523 		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
524 		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
525 	}
526 }
527 
/**
 * __pktlog_enable() - core enable/disable worker (caller holds pktlog_mutex)
 * @scn: HIF opaque context
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: true when the request originates from an INI setting
 * @user_triggered: non-zero when the request originates from user action
 * @is_iwpriv_command: non-zero for iwpriv-origin requests; 0 for vendor
 *                     command origin (tracked via pl_dev->vendor_cmd_send)
 *
 * Allocates the host-side log buffer on first use, subscribes the WDI
 * callbacks, and asks the firmware to start (or stop) logging.
 *
 * Return: 0 on success; -EINVAL/-ENOMEM on failure; -EBUSY when another
 * pktlog operation is in progress
 */
static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		 bool ini_triggered, uint8_t user_triggered,
		 uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *cdp_pdev;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	cdp_pdev = get_txrx_context();
	if (!cdp_pdev) {
		qdf_print("%s: Invalid txrx context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	/* states below CLEARBUFF_COMPLETE indicate an operation in flight */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress\n", __func__);
		return 0;
	}

	/* first enable since init (or since a resize): set up the buffer */
	if (!pl_dev->tgt_pktlog_alloced) {
		if (pl_info->buf == NULL) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed\n",
					__func__);
				return -ENOMEM;
			}

			/* alloc reported success but buf is still unset */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed\n",
				       __func__);
				ASSERT(0);
				return -ENOMEM;
			}

		}

		/* initialize the ring-buffer header under the log lock */
		spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(cdp_pdev, log_state);
			if (error) {
				pl_info->curr_pkt_state =
						PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s\n",
					__func__);
				return -EINVAL;
			}
		}
		pl_dev->is_pktlog_cb_subscribed = true;
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s\n", __func__);
			return -EINVAL;
		}

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* disable: delegate to the arch-dependent disable hook */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
653 
654 int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
655 		 bool ini_triggered, uint8_t user_triggered,
656 		 uint32_t is_iwpriv_command)
657 {
658 	struct pktlog_dev_t *pl_dev;
659 	struct ath_pktlog_info *pl_info;
660 	int err;
661 
662 	pl_dev = get_pktlog_handle();
663 
664 	if (!pl_dev) {
665 		qdf_print("%s: invalid pl_dev handle", __func__);
666 		return -EINVAL;
667 	}
668 
669 	pl_info = pl_dev->pl_info;
670 
671 	if (!pl_info) {
672 		qdf_print("%s: invalid pl_info handle", __func__);
673 		return -EINVAL;
674 	}
675 
676 	mutex_lock(&pl_info->pktlog_mutex);
677 	err = __pktlog_enable(scn, log_state, ini_triggered,
678 				user_triggered, is_iwpriv_command);
679 	mutex_unlock(&pl_info->pktlog_mutex);
680 	return err;
681 }
682 
683 #define ONE_MEGABYTE (1024 * 1024)
684 #define MAX_ALLOWED_PKTLOG_SIZE (16 * ONE_MEGABYTE)
685 
/**
 * __pktlog_setsize() - core buffer-resize worker (caller holds pktlog_mutex)
 * @scn: HIF opaque context
 * @size: requested buffer size in bytes (1 MB .. 16 MB)
 *
 * Validates the requested size, releases the existing buffer (after
 * unsubscribing WDI callbacks if needed), and records the new size.  The
 * buffer itself is re-allocated lazily on the next enable.
 *
 * Return: 0 on success or when the size is unchanged; -EINVAL/-EFAULT on
 * failure; -EBUSY when pktlog is not configured or busy
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *pdev;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		/* NOTE(review): message says pl_dev but this checks pl_info */
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pdev = get_txrx_context();

	if (!pdev) {
		qdf_print("%s: invalid pdev handle", __func__);
		return -EINVAL;
	}

	/* states below NOT_IN_PROGRESS mean pktlog is not yet configured */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* enforce the 1 MB .. 16 MB bounds */
	if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) {
		qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes."
			"Min required is %d MB and Max allowed is %d MB.\n",
			__func__, size, (ONE_MEGABYTE/ONE_MEGABYTE),
			(MAX_ALLOWED_PKTLOG_SIZE/ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid requested buff size", __func__);
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}

	/* release the old buffer (and callbacks) under the log lock */
	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		if (pl_dev->is_pktlog_cb_subscribed &&
			wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Cannot unsubscribe pktlog from the WDI\n");
			spin_unlock_bh(&pl_info->log_lock);
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d\n", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
767 
768 int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
769 {
770 	struct pktlog_dev_t *pl_dev;
771 	struct ath_pktlog_info *pl_info;
772 	int status;
773 
774 	pl_dev = get_pktlog_handle();
775 
776 	if (!pl_dev) {
777 		qdf_print("%s: invalid pl_dev handle", __func__);
778 		return -EINVAL;
779 	}
780 
781 	pl_info = pl_dev->pl_info;
782 
783 	if (!pl_info) {
784 		qdf_print("%s: invalid pl_dev handle", __func__);
785 		return -EINVAL;
786 	}
787 
788 	mutex_lock(&pl_info->pktlog_mutex);
789 	status = __pktlog_setsize(scn, size);
790 	mutex_unlock(&pl_info->pktlog_mutex);
791 
792 	return status;
793 }
794 
/**
 * pktlog_clearbuff() - zero the host-side pktlog buffer
 * @scn: HIF opaque context
 * @clear_buff: must be true; false is rejected
 *
 * Only valid after a read completed and while logging is disabled.
 * Clears the buffer contents and resets the read offset.
 *
 * Return: 0 on success; -EINVAL on bad arguments/state; -EFAULT on a
 * missing or mis-sized buffer; -EBUSY when another operation is running
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		/* NOTE(review): message says pl_dev but this checks pl_info */
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	if (!clear_buff)
		return -EINVAL;

	/* only allowed once a read has completed and no clear is pending */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	/* remember the state so READ_COMPLETE can advance to CLEARBUFF */
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing "
			  "pktlog buffer.", __func__);
		return -EINVAL;
	}

	if (pl_info->buf != NULL) {
		if (pl_info->buf_size > 0) {
			qdf_print("%s: pktlog buffer is cleared.", __func__);
			memset(pl_info->buf, 0, pl_info->buf_size);
			/* force re-subscribe and re-init on next enable */
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. "
				  "Existing Buf size %d", __func__,
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}

	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
861 
862 /**
863  * pktlog_process_fw_msg() - process packetlog message
864  * @buff: buffer
865  *
866  * Return: None
867  */
868 void pktlog_process_fw_msg(uint32_t *buff)
869 {
870 	uint32_t *pl_hdr;
871 	uint32_t log_type;
872 	struct cdp_pdev *pdev = get_txrx_context();
873 
874 	if (!pdev) {
875 		qdf_print("%s: txrx_pdev is NULL", __func__);
876 		return;
877 	}
878 
879 	pl_hdr = buff;
880 	log_type =
881 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
882 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
883 
884 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
885 		|| (log_type == PKTLOG_TYPE_TX_STAT)
886 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
887 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
888 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
889 		wdi_event_handler(WDI_EVENT_TX_STATUS,
890 				  pdev, pl_hdr);
891 	else if (log_type == PKTLOG_TYPE_RC_FIND)
892 		wdi_event_handler(WDI_EVENT_RATE_FIND,
893 				  pdev, pl_hdr);
894 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
895 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
896 				  pdev, pl_hdr);
897 	else if (log_type == PKTLOG_TYPE_RX_STAT)
898 		wdi_event_handler(WDI_EVENT_RX_DESC,
899 				  pdev, pl_hdr);
900 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
901 		wdi_event_handler(WDI_EVENT_SW_EVENT,
902 				  pdev, pl_hdr);
903 }
904 
905 #if defined(QCA_WIFI_3_0_ADRASTEA)
906 static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
907 {
908 	int rc = 0; /* sane */
909 
910 	if ((!nbuf) ||
911 	    (nbuf->data < nbuf->head) ||
912 	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
913 		rc = -EINVAL;
914 
915 	return rc;
916 }
917 /**
918  * pktlog_t2h_msg_handler() - Target to host message handler
919  * @context: pdev context
920  * @pkt: HTC packet
921  *
922  * Return: None
923  */
924 static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
925 {
926 	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
927 	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
928 	uint32_t *msg_word;
929 
930 	/* check for sanity of the packet, have seen corrupted pkts */
931 	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
932 		qdf_print("%s: packet 0x%pK corrupted? Leaking...",
933 			  __func__, pktlog_t2h_msg);
934 		/* do not free; may crash! */
935 		QDF_ASSERT(0);
936 		return;
937 	}
938 
939 	/* check for successful message reception */
940 	if (pkt->Status != QDF_STATUS_SUCCESS) {
941 		if (pkt->Status != QDF_STATUS_E_CANCELED)
942 			pdev->htc_err_cnt++;
943 		qdf_nbuf_free(pktlog_t2h_msg);
944 		return;
945 	}
946 
947 	/* confirm alignment */
948 	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
949 
950 	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
951 	pktlog_process_fw_msg(msg_word);
952 
953 	qdf_nbuf_free(pktlog_t2h_msg);
954 }
955 
/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * Never expected to fire for the pktlog endpoint, so any invocation is
 * flagged as a logic error.
 *
 * Return: None
 */
static void pktlog_tx_resume_handler(void *context)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
967 
/**
 * pktlog_h2t_send_complete() - send complete indication
 * @context: pdev context
 * @htc_pkt: HTC packet
 *
 * Pktlog never sends host-to-target packets, so this completion is
 * flagged as a logic error.
 *
 * Return: None
 */
static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
980 
/**
 * pktlog_h2t_full() - queue full indication
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Return: HTC_SEND_FULL_KEEP — keep the packet queued rather than drop it
 */
static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
992 
993 /**
994  * pktlog_htc_connect_service() - create new endpoint for packetlog
995  * @pdev - pktlog pdev
996  *
997  * Return: 0 for success/failure
998  */
999 static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
1000 {
1001 	struct htc_service_connect_req connect;
1002 	struct htc_service_connect_resp response;
1003 	QDF_STATUS status;
1004 
1005 	qdf_mem_set(&connect, sizeof(connect), 0);
1006 	qdf_mem_set(&response, sizeof(response), 0);
1007 
1008 	connect.pMetaData = NULL;
1009 	connect.MetaDataLength = 0;
1010 	connect.EpCallbacks.pContext = pdev;
1011 	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
1012 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
1013 	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
1014 	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
1015 
1016 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
1017 	connect.EpCallbacks.EpRecvRefill = NULL;
1018 	connect.EpCallbacks.RecvRefillWaterMark = 1;
1019 	/* N/A, fill is done by HIF */
1020 
1021 	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
1022 	/*
1023 	 * Specify how deep to let a queue get before htc_send_pkt will
1024 	 * call the EpSendFull function due to excessive send queue depth.
1025 	 */
1026 	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
1027 
1028 	/* disable flow control for HTT data message service */
1029 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
1030 
1031 	/* connect to control service */
1032 	connect.service_id = PACKET_LOG_SVC;
1033 
1034 	status = htc_connect_service(pdev->htc_pdev, &connect, &response);
1035 
1036 	if (status != QDF_STATUS_SUCCESS) {
1037 		pdev->mt_pktlog_enabled = false;
1038 		return -EIO;       /* failure */
1039 	}
1040 
1041 	pdev->htc_endpoint = response.Endpoint;
1042 	pdev->mt_pktlog_enabled = true;
1043 
1044 	return 0;               /* success */
1045 }
1046 
1047 /**
1048  * pktlog_htc_attach() - attach pktlog HTC service
1049  *
1050  * Return: 0 for success/failure
1051  */
1052 int pktlog_htc_attach(void)
1053 {
1054 	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1055 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1056 
1057 	if ((!pl_pdev) || (!htc_pdev)) {
1058 		qdf_print("Invalid pl_dev or htc_pdev handle");
1059 		return -EINVAL;
1060 	}
1061 
1062 	pl_pdev->htc_pdev = htc_pdev;
1063 	return pktlog_htc_connect_service(pl_pdev);
1064 }
1065 #else
1066 int pktlog_htc_attach(void)
1067 {
1068 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
1069 
1070 	if (!pl_dev) {
1071 		qdf_print("Invalid pl_dev handle");
1072 		return -EINVAL;
1073 	}
1074 
1075 	pl_dev->mt_pktlog_enabled = false;
1076 	return 0;
1077 }
1078 #endif
1079 #endif /* REMOVE_PKT_LOG */
1080