xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  *
21  * Permission to use, copy, modify, and/or distribute this software for any
22  * purpose with or without fee is hereby granted, provided that the above
23  * copyright notice and this permission notice appear in all copies.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
26  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
28  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
29  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
30  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
31  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
32  */
33 
34 #ifndef REMOVE_PKT_LOG
35 
36 #include "qdf_mem.h"
37 #include "athdefs.h"
38 #include "pktlog_ac_i.h"
39 #include "cds_api.h"
40 #include "wma_types.h"
41 #include "htc.h"
42 #include <cdp_txrx_cmn_struct.h>
43 #include <cdp_txrx_ctrl.h>
44 
/*
 * WDI event subscriber objects, one per pktlog event stream.  Their
 * .callback members are bound in pktlog_init() according to the
 * registered callback type (default/full vs. lite registration), and
 * the objects are handed to cdp_wdi_event_sub()/unsub() as pktlog is
 * enabled or disabled.
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;

/* Architecture-dependent pktlog entry points exported through pl_funcs. */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

/* Global pktlog device instance; .scn is attached by pktlog_sethandle(). */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
64 
65 void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
66 		     struct hif_opaque_softc *scn)
67 {
68 	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
69 	*pl_handle = &pl_dev;
70 }
71 
72 void pktlog_set_callback_regtype(
73 		enum pktlog_callback_regtype callback_type)
74 {
75 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
76 
77 	if (!pl_dev) {
78 		qdf_print("Invalid pl_dev");
79 		return;
80 	}
81 
82 	pl_dev->callback_type = callback_type;
83 }
84 
85 #ifdef CONFIG_MCL
/**
 * get_pktlog_handle() - fetch the pktlog device handle
 *
 * On MCL builds the pktlog device is stored inside the txrx pdev, so it
 * is looked up through the CDS contexts for SOC and TXRX.
 *
 * Return: pktlog device handle, or NULL if the contexts are unavailable
 */
struct pktlog_dev_t *get_pktlog_handle(void)
{
	struct cdp_pdev *pdev_txrx_handle =
				cds_get_context(QDF_MODULE_ID_TXRX);
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	return cdp_get_pldev(soc, pdev_txrx_handle);
}

/*
 * Get current txrx context
 */
void *get_txrx_context(void)
{
	return cds_get_context(QDF_MODULE_ID_TXRX);
}
102 
103 #else
/* TODO: Need to use WIN implementation to return pktlog_dev handle */
static inline struct pktlog_dev_t *get_pktlog_handle(void)
{
	/* No WIN implementation yet; every caller must tolerate NULL. */
	return NULL;
}
/*
 * TODO: Need to use WIN implementation to return the txrx context.
 *
 * Fix: the original stub was a non-void function with an empty body, so
 * control fell off the end — undefined behavior whenever the caller used
 * the result.  Return NULL explicitly, and use void * so the stub's type
 * matches the CONFIG_MCL variant and the struct cdp_pdev * callers.
 */
static inline void *get_txrx_context(void) { return NULL; }
110 #endif
111 
112 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
113 				    WMI_CMD_ID cmd_id, bool ini_triggered,
114 				    uint8_t user_triggered)
115 {
116 	struct scheduler_msg msg = { 0 };
117 	QDF_STATUS status;
118 	struct ath_pktlog_wmi_params *param;
119 
120 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
121 
122 	if (!param)
123 		return A_NO_MEMORY;
124 
125 	param->cmd_id = cmd_id;
126 	param->pktlog_event = event_types;
127 	param->ini_triggered = ini_triggered;
128 	param->user_triggered = user_triggered;
129 
130 	msg.type = WMA_PKTLOG_ENABLE_REQ;
131 	msg.bodyptr = param;
132 	msg.bodyval = 0;
133 
134 	status = scheduler_post_msg(QDF_MODULE_ID_WMA, &msg);
135 
136 	if (status != QDF_STATUS_SUCCESS) {
137 		qdf_mem_free(param);
138 		return A_ERROR;
139 	}
140 
141 	return A_OK;
142 }
143 
144 static inline A_STATUS
145 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
146 		 bool ini_triggered, uint8_t user_triggered)
147 {
148 	uint32_t types = 0;
149 
150 	if (log_state & ATH_PKTLOG_TX)
151 		types |= WMI_PKTLOG_EVENT_TX;
152 
153 	if (log_state & ATH_PKTLOG_RX)
154 		types |= WMI_PKTLOG_EVENT_RX;
155 
156 	if (log_state & ATH_PKTLOG_RCFIND)
157 		types |= WMI_PKTLOG_EVENT_RCF;
158 
159 	if (log_state & ATH_PKTLOG_RCUPDATE)
160 		types |= WMI_PKTLOG_EVENT_RCU;
161 
162 	if (log_state & ATH_PKTLOG_SW_EVENT)
163 		types |= WMI_PKTLOG_EVENT_SW;
164 
165 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
166 				   ini_triggered, user_triggered);
167 }
168 
169 static inline A_STATUS
170 wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
171 {
172 #ifdef CONFIG_MCL
173 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
174 #else
175 	/*TODO: WIN implementation to get soc */
176 #endif
177 
178 	if (!cdp_pdev) {
179 		qdf_print("Invalid pdev in %s\n", __func__);
180 		return A_ERROR;
181 	}
182 
183 	if (log_state & ATH_PKTLOG_TX) {
184 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_TX_SUBSCRIBER,
185 				WDI_EVENT_TX_STATUS)) {
186 			return A_ERROR;
187 		}
188 	}
189 	if (log_state & ATH_PKTLOG_RX) {
190 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER,
191 				WDI_EVENT_RX_DESC)) {
192 			return A_ERROR;
193 		}
194 		if (cdp_wdi_event_sub(soc, cdp_pdev,
195 				&PKTLOG_RX_REMOTE_SUBSCRIBER,
196 				WDI_EVENT_RX_DESC_REMOTE)) {
197 			return A_ERROR;
198 		}
199 	}
200 	if (log_state & ATH_PKTLOG_RCFIND) {
201 		if (cdp_wdi_event_sub(soc, cdp_pdev,
202 				  &PKTLOG_RCFIND_SUBSCRIBER,
203 				  WDI_EVENT_RATE_FIND)) {
204 			return A_ERROR;
205 		}
206 	}
207 	if (log_state & ATH_PKTLOG_RCUPDATE) {
208 		if (cdp_wdi_event_sub(soc, cdp_pdev,
209 				  &PKTLOG_RCUPDATE_SUBSCRIBER,
210 				  WDI_EVENT_RATE_UPDATE)) {
211 			return A_ERROR;
212 		}
213 	}
214 	if (log_state & ATH_PKTLOG_SW_EVENT) {
215 		if (cdp_wdi_event_sub(soc, cdp_pdev,
216 				  &PKTLOG_SW_EVENT_SUBSCRIBER,
217 				  WDI_EVENT_SW_EVENT)) {
218 			return A_ERROR;
219 		}
220 	}
221 	if (log_state & ATH_PKTLOG_LITE_T2H) {
222 		if (cdp_wdi_event_sub(soc, cdp_pdev,
223 				  &PKTLOG_LITE_T2H_SUBSCRIBER,
224 				  WDI_EVENT_LITE_T2H)) {
225 			return A_ERROR;
226 		}
227 	}
228 	if (log_state & ATH_PKTLOG_LITE_RX) {
229 		if (cdp_wdi_event_sub(soc, cdp_pdev,
230 				&PKTLOG_LITE_RX_SUBSCRIBER,
231 				WDI_EVENT_LITE_RX)) {
232 			return A_ERROR;
233 		}
234 	}
235 
236 	return A_OK;
237 }
238 
239 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
240 		u_int16_t peer_id, uint32_t status)
241 {
242 	switch (event) {
243 	case WDI_EVENT_TX_STATUS:
244 	{
245 		/*
246 		 * process TX message
247 		 */
248 		if (process_tx_info(pdev, log_data)) {
249 			qdf_print("Unable to process TX info\n");
250 			return;
251 		}
252 		break;
253 	}
254 	case WDI_EVENT_RX_DESC:
255 	{
256 		/*
257 		 * process RX message for local frames
258 		 */
259 		if (process_rx_info(pdev, log_data)) {
260 			qdf_print("Unable to process RX info\n");
261 			return;
262 		}
263 		break;
264 	}
265 	case WDI_EVENT_RX_DESC_REMOTE:
266 	{
267 		/*
268 		 * process RX message for remote frames
269 		 */
270 		if (process_rx_info_remote(pdev, log_data)) {
271 			qdf_print("Unable to process RX info\n");
272 			return;
273 		}
274 		break;
275 	}
276 	case WDI_EVENT_RATE_FIND:
277 	{
278 		/*
279 		 * process RATE_FIND message
280 		 */
281 		if (process_rate_find(pdev, log_data)) {
282 			qdf_print("Unable to process RC_FIND info\n");
283 			return;
284 		}
285 		break;
286 	}
287 	case WDI_EVENT_RATE_UPDATE:
288 	{
289 		/*
290 		 * process RATE_UPDATE message
291 		 */
292 		if (process_rate_update(pdev, log_data)) {
293 			qdf_print("Unable to process RC_UPDATE\n");
294 			return;
295 		}
296 		break;
297 	}
298 	case WDI_EVENT_SW_EVENT:
299 	{
300 		/*
301 		 * process SW EVENT message
302 		 */
303 		if (process_sw_event(pdev, log_data)) {
304 			qdf_print("Unable to process SW_EVENT\n");
305 			return;
306 		}
307 		break;
308 	}
309 	default:
310 		break;
311 	}
312 }
313 
314 void
315 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
316 			u_int16_t peer_id, uint32_t status)
317 {
318 	switch (event) {
319 	case WDI_EVENT_RX_DESC:
320 	{
321 		if (process_rx_desc_remote(context, log_data)) {
322 			qdf_print("Unable to process RX info\n");
323 			return;
324 		}
325 		break;
326 	}
327 	case WDI_EVENT_LITE_T2H:
328 	{
329 		if (process_pktlog_lite(context, log_data,
330 					PKTLOG_TYPE_LITE_T2H)) {
331 			qdf_print("Unable to process lite_t2h\n");
332 			return;
333 		}
334 		break;
335 	}
336 	case WDI_EVENT_LITE_RX:
337 	{
338 		if (process_pktlog_lite(context, log_data,
339 					PKTLOG_TYPE_LITE_RX)) {
340 			qdf_print("Unable to process lite_rx\n");
341 			return;
342 		}
343 		break;
344 	}
345 	default:
346 		break;
347 	}
348 }
349 
350 A_STATUS
351 wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
352 {
353 #ifdef CONFIG_MCL
354 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
355 #else
356 	/* TODO: WIN implementation to get soc */
357 #endif
358 
359 	if (log_state & ATH_PKTLOG_TX) {
360 		if (cdp_wdi_event_unsub(soc, pdev,
361 				    &PKTLOG_TX_SUBSCRIBER,
362 				    WDI_EVENT_TX_STATUS)) {
363 			return A_ERROR;
364 		}
365 	}
366 	if (log_state & ATH_PKTLOG_RX) {
367 		if (cdp_wdi_event_unsub(soc, pdev,
368 				    &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
369 			return A_ERROR;
370 		}
371 		if (cdp_wdi_event_unsub(soc, pdev,
372 				    &PKTLOG_RX_REMOTE_SUBSCRIBER,
373 				    WDI_EVENT_RX_DESC_REMOTE)) {
374 			return A_ERROR;
375 		}
376 	}
377 	if (log_state & ATH_PKTLOG_RCFIND) {
378 		if (cdp_wdi_event_unsub(soc, pdev,
379 				    &PKTLOG_RCFIND_SUBSCRIBER,
380 				    WDI_EVENT_RATE_FIND)) {
381 			return A_ERROR;
382 		}
383 	}
384 	if (log_state & ATH_PKTLOG_RCUPDATE) {
385 		if (cdp_wdi_event_unsub(soc, pdev,
386 				    &PKTLOG_RCUPDATE_SUBSCRIBER,
387 				    WDI_EVENT_RATE_UPDATE)) {
388 			return A_ERROR;
389 		}
390 	}
391 	if (log_state & ATH_PKTLOG_RCUPDATE) {
392 		if (cdp_wdi_event_unsub(soc, pdev,
393 				    &PKTLOG_SW_EVENT_SUBSCRIBER,
394 				    WDI_EVENT_SW_EVENT)) {
395 			return A_ERROR;
396 		}
397 	}
398 	if (log_state & ATH_PKTLOG_LITE_T2H) {
399 		if (cdp_wdi_event_unsub(soc, pdev,
400 				  &PKTLOG_LITE_T2H_SUBSCRIBER,
401 				  WDI_EVENT_LITE_T2H)) {
402 			return A_ERROR;
403 		}
404 	}
405 	if (log_state & ATH_PKTLOG_LITE_RX) {
406 		if (cdp_wdi_event_unsub(soc, pdev,
407 				&PKTLOG_LITE_RX_SUBSCRIBER,
408 				WDI_EVENT_LITE_RX)) {
409 			return A_ERROR;
410 		}
411 	}
412 
413 	return A_OK;
414 }
415 
/**
 * pktlog_disable() - stop pktlog collection
 * @scn: HIF opaque handle
 *
 * Sends the firmware pktlog-disable command via WMA and unsubscribes the
 * pktlog WDI callbacks.  Rejected with -EBUSY while another pktlog
 * operation (read/clear) is in flight; curr_pkt_state acts as a small
 * state machine guarding those overlapping operations.
 *
 * Return: 0 on success; -EINVAL on bad context or command failure;
 *         -EBUSY when another pktlog operation is in progress
 */
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	struct cdp_pdev *txrx_pdev = get_txrx_context();

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}

	if (!txrx_pdev) {
		qdf_print("Invalid cdp_pdev");
		return -EINVAL;
	}

	/* Refuse to disable while a read or clear operation is mid-flight. */
	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	/* Remember the state so an in-progress read can be restored below. */
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* Tell the firmware to stop generating pktlog events first. */
	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target\n");
		return -EINVAL;
	}

	/* Then detach the host-side WDI subscribers, if attached. */
	if (pl_dev->is_pktlog_cb_subscribed &&
		wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI\n");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;
	/* A read that began before the disable continues in a special state. */
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
473 
474 void pktlog_init(struct hif_opaque_softc *scn)
475 {
476 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
477 	struct ath_pktlog_info *pl_info;
478 
479 	if (pl_dev == NULL || pl_dev->pl_info == NULL) {
480 		qdf_print("pl_dev or pl_info is invalid\n");
481 		return;
482 	}
483 
484 	pl_info = pl_dev->pl_info;
485 
486 	OS_MEMZERO(pl_info, sizeof(*pl_info));
487 	PKTLOG_LOCK_INIT(pl_info);
488 	mutex_init(&pl_info->pktlog_mutex);
489 
490 	pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
491 	pl_info->buf = NULL;
492 	pl_info->log_state = 0;
493 	pl_info->init_saved_state = 0;
494 	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
495 	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
496 	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
497 	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
498 	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
499 	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
500 	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
501 	pl_info->pktlen = 0;
502 	pl_info->start_time_thruput = 0;
503 	pl_info->start_time_per = 0;
504 	pl_dev->vendor_cmd_send = false;
505 
506 	if (pl_dev->callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
507 		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
508 		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
509 		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
510 		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
511 		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
512 		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
513 	} else if (pl_dev->callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
514 		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
515 		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
516 	}
517 }
518 
/**
 * __pktlog_enable() - core enable/disable path (caller holds pktlog_mutex)
 * @scn: HIF opaque handle
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: true if driven by an INI setting
 * @user_triggered: non-zero if driven by userspace
 * @is_iwpriv_command: 0 when the request came via the vendor command path
 *
 * Lazily allocates and initializes the pktlog ring buffer, subscribes
 * the WDI callbacks, and asks the firmware to start (or stop) emitting
 * pktlog events.  curr_pkt_state guards against concurrent read/clear
 * operations.
 *
 * Return: 0 on success; -EINVAL/-EBUSY/-ENOMEM on failure
 */
static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		 bool ini_triggered, uint8_t user_triggered,
		 uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *cdp_pdev;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	cdp_pdev = get_txrx_context();
	if (!cdp_pdev) {
		qdf_print("%s: Invalid txrx context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	/* States below CLEARBUFF_COMPLETE mean another operation is active. */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress\n", __func__);
		return 0;
	}

	/* First enable: allocate the ring buffer and stamp its header. */
	if (!pl_dev->tgt_pktlog_alloced) {
		if (pl_info->buf == NULL) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed\n",
					__func__);
				return -ENOMEM;
			}

			/* Defensive: alloc reported success but buf unset. */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed\n",
				       __func__);
				ASSERT(0);
				return -ENOMEM;
			}

		}

		spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(cdp_pdev, log_state);
			if (error) {
				pl_info->curr_pkt_state =
						PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s\n",
					__func__);
				return -EINVAL;
			}
		}
		pl_dev->is_pktlog_cb_subscribed = true;
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s\n", __func__);
			return -EINVAL;
		}

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: tear down via the registered disable hook. */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
644 
645 int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
646 		 bool ini_triggered, uint8_t user_triggered,
647 		 uint32_t is_iwpriv_command)
648 {
649 	struct pktlog_dev_t *pl_dev;
650 	struct ath_pktlog_info *pl_info;
651 	int err;
652 
653 	pl_dev = get_pktlog_handle();
654 
655 	if (!pl_dev) {
656 		qdf_print("%s: invalid pl_dev handle", __func__);
657 		return -EINVAL;
658 	}
659 
660 	pl_info = pl_dev->pl_info;
661 
662 	if (!pl_info) {
663 		qdf_print("%s: invalid pl_info handle", __func__);
664 		return -EINVAL;
665 	}
666 
667 	mutex_lock(&pl_info->pktlog_mutex);
668 	err = __pktlog_enable(scn, log_state, ini_triggered,
669 				user_triggered, is_iwpriv_command);
670 	mutex_unlock(&pl_info->pktlog_mutex);
671 	return err;
672 }
673 
#define ONE_MEGABYTE (1024 * 1024)
/* Largest pktlog ring buffer a user may request */
#define MAX_ALLOWED_PKTLOG_SIZE (16 * ONE_MEGABYTE)

/**
 * __pktlog_setsize() - resize the pktlog buffer (caller holds pktlog_mutex)
 * @scn: HIF opaque handle
 * @size: requested buffer size in bytes [1 MB .. 16 MB]
 *
 * Logging must be disabled first.  Any existing buffer is unsubscribed
 * from WDI and released; the new size is only recorded here — the
 * buffer itself is reallocated lazily on the next enable.
 *
 * Return: 0 on success; -EINVAL/-EBUSY/-EFAULT on failure
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *pdev;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pdev = get_txrx_context();

	if (!pdev) {
		qdf_print("%s: invalid pdev handle", __func__);
		return -EINVAL;
	}

	/* States below NOT_IN_PROGRESS mean pktlog is not yet configured. */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) {
		qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes."
			"Min required is %d MB and Max allowed is %d MB.\n",
			__func__, size, (ONE_MEGABYTE/ONE_MEGABYTE),
			(MAX_ALLOWED_PKTLOG_SIZE/ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid requested buff size", __func__);
		return -EINVAL;
	}

	/* Same size: nothing to do. */
	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}

	/* Release the old buffer (and its WDI subscription) under the lock. */
	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		if (pl_dev->is_pktlog_cb_subscribed &&
			wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Cannot unsubscribe pktlog from the WDI\n");
			spin_unlock_bh(&pl_info->log_lock);
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d\n", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
758 
759 int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
760 {
761 	struct pktlog_dev_t *pl_dev;
762 	struct ath_pktlog_info *pl_info;
763 	int status;
764 
765 	pl_dev = get_pktlog_handle();
766 
767 	if (!pl_dev) {
768 		qdf_print("%s: invalid pl_dev handle", __func__);
769 		return -EINVAL;
770 	}
771 
772 	pl_info = pl_dev->pl_info;
773 
774 	if (!pl_info) {
775 		qdf_print("%s: invalid pl_dev handle", __func__);
776 		return -EINVAL;
777 	}
778 
779 	mutex_lock(&pl_info->pktlog_mutex);
780 	status = __pktlog_setsize(scn, size);
781 	mutex_unlock(&pl_info->pktlog_mutex);
782 
783 	return status;
784 }
785 
786 int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
787 {
788 	struct pktlog_dev_t *pl_dev;
789 	struct ath_pktlog_info *pl_info;
790 	uint8_t save_pktlog_state;
791 
792 	pl_dev = get_pktlog_handle();
793 
794 	if (!pl_dev) {
795 		qdf_print("%s: invalid pl_dev handle", __func__);
796 		return -EINVAL;
797 	}
798 
799 	pl_info = pl_dev->pl_info;
800 
801 	if (!pl_info) {
802 		qdf_print("%s: invalid pl_dev handle", __func__);
803 		return -EINVAL;
804 	}
805 
806 	if (!clear_buff)
807 		return -EINVAL;
808 
809 	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
810 	    pl_info->curr_pkt_state ==
811 				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
812 		return -EBUSY;
813 
814 	save_pktlog_state = pl_info->curr_pkt_state;
815 	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
816 
817 	if (pl_info->log_state) {
818 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
819 		qdf_print("%s: Logging should be disabled before clearing "
820 			  "pktlog buffer.", __func__);
821 		return -EINVAL;
822 	}
823 
824 	if (pl_info->buf != NULL) {
825 		if (pl_info->buf_size > 0) {
826 			qdf_print("%s: pktlog buffer is cleared.", __func__);
827 			memset(pl_info->buf, 0, pl_info->buf_size);
828 			pl_dev->is_pktlog_cb_subscribed = false;
829 			pl_dev->tgt_pktlog_alloced = false;
830 			pl_info->buf->rd_offset = -1;
831 		} else {
832 			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
833 			qdf_print("%s: pktlog buffer size is not proper. "
834 				  "Existing Buf size %d", __func__,
835 				  pl_info->buf_size);
836 			return -EFAULT;
837 		}
838 	} else {
839 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
840 		qdf_print("%s: pktlog buff is NULL", __func__);
841 		return -EFAULT;
842 	}
843 
844 	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
845 		pl_info->curr_pkt_state =
846 			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
847 	else
848 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
849 
850 	return 0;
851 }
852 
853 /**
854  * pktlog_process_fw_msg() - process packetlog message
855  * @buff: buffer
856  *
857  * Return: None
858  */
859 void pktlog_process_fw_msg(uint32_t *buff, uint32_t len)
860 {
861 	uint32_t *pl_hdr;
862 	uint32_t log_type;
863 	struct cdp_pdev *pdev = get_txrx_context();
864 	struct ol_fw_data pl_fw_data;
865 
866 	if (!pdev) {
867 		qdf_print("%s: txrx_pdev is NULL", __func__);
868 		return;
869 	}
870 	pl_hdr = buff;
871 	pl_fw_data.data = pl_hdr;
872 	pl_fw_data.len = len;
873 
874 	log_type =
875 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
876 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
877 
878 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
879 		|| (log_type == PKTLOG_TYPE_TX_STAT)
880 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
881 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
882 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
883 		wdi_event_handler(WDI_EVENT_TX_STATUS,
884 				  pdev, &pl_fw_data);
885 	else if (log_type == PKTLOG_TYPE_RC_FIND)
886 		wdi_event_handler(WDI_EVENT_RATE_FIND,
887 				  pdev, &pl_fw_data);
888 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
889 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
890 				  pdev, &pl_fw_data);
891 	else if (log_type == PKTLOG_TYPE_RX_STAT)
892 		wdi_event_handler(WDI_EVENT_RX_DESC,
893 				  pdev, &pl_fw_data);
894 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
895 		wdi_event_handler(WDI_EVENT_SW_EVENT,
896 				  pdev, &pl_fw_data);
897 }
898 
899 #if defined(QCA_WIFI_3_0_ADRASTEA)
900 static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
901 {
902 	int rc = 0; /* sane */
903 
904 	if ((!nbuf) ||
905 	    (nbuf->data < nbuf->head) ||
906 	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
907 		rc = -EINVAL;
908 
909 	return rc;
910 }
/**
 * pktlog_t2h_msg_handler() - Target to host message handler
 * @context: pdev context
 * @pkt: HTC packet; its pPktContext carries the received nbuf
 *
 * Validates the received buffer, then hands the message words to
 * pktlog_process_fw_msg().  The nbuf is freed here on every path except
 * the corruption path, where freeing has been observed to crash.
 *
 * Return: None
 */
static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	uint32_t *msg_word;
	uint32_t msg_len;

	/* check for sanity of the packet, have seen corrupted pkts */
	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
		qdf_print("%s: packet 0x%pK corrupted? Leaking...",
			  __func__, pktlog_t2h_msg);
		/* do not free; may crash! */
		QDF_ASSERT(0);
		return;
	}

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation is expected during teardown; don't count it */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			pdev->htc_err_cnt++;
		qdf_nbuf_free(pktlog_t2h_msg);
		return;
	}

	/* confirm alignment: fw message words must be 4-byte aligned */
	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);

	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
	pktlog_process_fw_msg(msg_word, msg_len);

	qdf_nbuf_free(pktlog_t2h_msg);
}
951 
/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * The pktlog endpoint never queues host-to-target traffic, so HTC should
 * never ask it to resume; assert if it does.
 *
 * Return: None
 */
static void pktlog_tx_resume_handler(void *context)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
963 
/**
 * pktlog_h2t_send_complete() - send complete indication
 * @context: pdev context
 * @htc_pkt: HTC packet
 *
 * The pktlog endpoint never sends host-to-target packets, so a send
 * completion should be impossible; assert if one arrives.
 *
 * Return: None
 */
static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
976 
/**
 * pktlog_h2t_full() - queue full indication
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Nothing is ever sent on this endpoint, so simply keep the packet
 * queued if HTC ever reports the queue as full.
 *
 * Return: HTC_SEND_FULL_KEEP (never drop)
 */
static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
989 /**
990  * pktlog_htc_connect_service() - create new endpoint for packetlog
991  * @pdev - pktlog pdev
992  *
993  * Return: 0 for success/failure
994  */
995 static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
996 {
997 	struct htc_service_connect_req connect;
998 	struct htc_service_connect_resp response;
999 	QDF_STATUS status;
1000 
1001 	qdf_mem_set(&connect, sizeof(connect), 0);
1002 	qdf_mem_set(&response, sizeof(response), 0);
1003 
1004 	connect.pMetaData = NULL;
1005 	connect.MetaDataLength = 0;
1006 	connect.EpCallbacks.pContext = pdev;
1007 	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
1008 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
1009 	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
1010 	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
1011 
1012 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
1013 	connect.EpCallbacks.EpRecvRefill = NULL;
1014 	connect.EpCallbacks.RecvRefillWaterMark = 1;
1015 	/* N/A, fill is done by HIF */
1016 
1017 	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
1018 	/*
1019 	 * Specify how deep to let a queue get before htc_send_pkt will
1020 	 * call the EpSendFull function due to excessive send queue depth.
1021 	 */
1022 	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
1023 
1024 	/* disable flow control for HTT data message service */
1025 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
1026 
1027 	/* connect to control service */
1028 	connect.service_id = PACKET_LOG_SVC;
1029 
1030 	status = htc_connect_service(pdev->htc_pdev, &connect, &response);
1031 
1032 	if (status != QDF_STATUS_SUCCESS) {
1033 		pdev->mt_pktlog_enabled = false;
1034 		return -EIO;       /* failure */
1035 	}
1036 
1037 	pdev->htc_endpoint = response.Endpoint;
1038 	pdev->mt_pktlog_enabled = true;
1039 
1040 	return 0;               /* success */
1041 }
1042 
1043 /**
1044  * pktlog_htc_attach() - attach pktlog HTC service
1045  *
1046  * Return: 0 for success/failure
1047  */
1048 int pktlog_htc_attach(void)
1049 {
1050 	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1051 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1052 
1053 	if ((!pl_pdev) || (!htc_pdev)) {
1054 		qdf_print("Invalid pl_dev or htc_pdev handle");
1055 		return -EINVAL;
1056 	}
1057 
1058 	pl_pdev->htc_pdev = htc_pdev;
1059 	return pktlog_htc_connect_service(pl_pdev);
1060 }
1061 #else
1062 int pktlog_htc_attach(void)
1063 {
1064 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
1065 
1066 	if (!pl_dev) {
1067 		qdf_print("Invalid pl_dev handle");
1068 		return -EINVAL;
1069 	}
1070 
1071 	pl_dev->mt_pktlog_enabled = false;
1072 	return 0;
1073 }
1074 #endif
1075 #endif /* REMOVE_PKT_LOG */
1076