xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision dae10a5fbc53d54c53c4ba24fa018ad8b1e7c008)
1 /*
2  * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  *
21  * Permission to use, copy, modify, and/or distribute this software for any
22  * purpose with or without fee is hereby granted, provided that the above
23  * copyright notice and this permission notice appear in all copies.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
26  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
28  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
29  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
30  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
31  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
32  */
33 
34 #ifndef REMOVE_PKT_LOG
35 
36 #include "qdf_mem.h"
37 #include "athdefs.h"
38 #include "pktlog_ac_i.h"
39 #include "cds_api.h"
40 #include "wma_types.h"
41 #include "htc.h"
42 #include <cdp_txrx_cmn_struct.h>
43 #include <cdp_txrx_ctrl.h>
44 
/*
 * WDI event subscriber objects, one per pktlog event stream.  Their
 * .callback members are bound in pktlog_init() according to the selected
 * callback registration type, and they are attached to / detached from
 * the datapath via cdp_wdi_event_sub()/cdp_wdi_event_unsub().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
53 
/*
 * Architecture-dependent pktlog entry points exported for this target.
 * These are invoked through pl_dev->pl_funcs by common pktlog code.
 */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

/* Singleton pktlog device instance; published via pktlog_sethandle() */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
64 
65 void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
66 		     struct hif_opaque_softc *scn)
67 {
68 	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
69 	*pl_handle = &pl_dev;
70 }
71 
72 void pktlog_set_callback_regtype(
73 		enum pktlog_callback_regtype callback_type)
74 {
75 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
76 
77 	if (!pl_dev) {
78 		qdf_print("Invalid pl_dev");
79 		return;
80 	}
81 
82 	pl_dev->callback_type = callback_type;
83 }
84 
85 #ifdef CONFIG_MCL
86 struct pktlog_dev_t *get_pktlog_handle(void)
87 {
88 	struct cdp_pdev *pdev_txrx_handle =
89 				cds_get_context(QDF_MODULE_ID_TXRX);
90 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
91 
92 	return cdp_get_pldev(soc, pdev_txrx_handle);
93 }
94 
95 /*
96  * Get current txrx context
97  */
98 void *get_txrx_context(void)
99 {
100 	return cds_get_context(QDF_MODULE_ID_TXRX);
101 }
102 
103 #else
104 /* TODO: Need to use WIN implementation to return pktlog_dev handle */
105 static inline struct pktlog_dev_t *get_pktlog_handle(void)
106 {
107 	return NULL;
108 }
109 static struct pktlog_dev_t *get_txrx_context(void) { }
110 #endif
111 
112 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
113 				    WMI_CMD_ID cmd_id, bool ini_triggered,
114 				    uint8_t user_triggered)
115 {
116 	struct scheduler_msg msg = { 0 };
117 	QDF_STATUS status;
118 	struct ath_pktlog_wmi_params *param;
119 
120 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
121 
122 	if (!param)
123 		return A_NO_MEMORY;
124 
125 	param->cmd_id = cmd_id;
126 	param->pktlog_event = event_types;
127 	param->ini_triggered = ini_triggered;
128 	param->user_triggered = user_triggered;
129 
130 	msg.type = WMA_PKTLOG_ENABLE_REQ;
131 	msg.bodyptr = param;
132 	msg.bodyval = 0;
133 
134 	status = scheduler_post_message(QDF_MODULE_ID_WMA,
135 					QDF_MODULE_ID_WMA,
136 					QDF_MODULE_ID_WMA, &msg);
137 
138 	if (status != QDF_STATUS_SUCCESS) {
139 		qdf_mem_free(param);
140 		return A_ERROR;
141 	}
142 
143 	return A_OK;
144 }
145 
146 static inline A_STATUS
147 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
148 		 bool ini_triggered, uint8_t user_triggered)
149 {
150 	uint32_t types = 0;
151 
152 	if (log_state & ATH_PKTLOG_TX)
153 		types |= WMI_PKTLOG_EVENT_TX;
154 
155 	if (log_state & ATH_PKTLOG_RX)
156 		types |= WMI_PKTLOG_EVENT_RX;
157 
158 	if (log_state & ATH_PKTLOG_RCFIND)
159 		types |= WMI_PKTLOG_EVENT_RCF;
160 
161 	if (log_state & ATH_PKTLOG_RCUPDATE)
162 		types |= WMI_PKTLOG_EVENT_RCU;
163 
164 	if (log_state & ATH_PKTLOG_SW_EVENT)
165 		types |= WMI_PKTLOG_EVENT_SW;
166 
167 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
168 				   ini_triggered, user_triggered);
169 }
170 
171 static inline A_STATUS
172 wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
173 {
174 #ifdef CONFIG_MCL
175 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
176 #else
177 	/*TODO: WIN implementation to get soc */
178 #endif
179 
180 	if (!cdp_pdev) {
181 		qdf_print("Invalid pdev in %s", __func__);
182 		return A_ERROR;
183 	}
184 
185 	if (log_state & ATH_PKTLOG_TX) {
186 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_TX_SUBSCRIBER,
187 				WDI_EVENT_TX_STATUS)) {
188 			return A_ERROR;
189 		}
190 	}
191 	if (log_state & ATH_PKTLOG_RX) {
192 		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER,
193 				WDI_EVENT_RX_DESC)) {
194 			return A_ERROR;
195 		}
196 		if (cdp_wdi_event_sub(soc, cdp_pdev,
197 				&PKTLOG_RX_REMOTE_SUBSCRIBER,
198 				WDI_EVENT_RX_DESC_REMOTE)) {
199 			return A_ERROR;
200 		}
201 	}
202 	if (log_state & ATH_PKTLOG_RCFIND) {
203 		if (cdp_wdi_event_sub(soc, cdp_pdev,
204 				  &PKTLOG_RCFIND_SUBSCRIBER,
205 				  WDI_EVENT_RATE_FIND)) {
206 			return A_ERROR;
207 		}
208 	}
209 	if (log_state & ATH_PKTLOG_RCUPDATE) {
210 		if (cdp_wdi_event_sub(soc, cdp_pdev,
211 				  &PKTLOG_RCUPDATE_SUBSCRIBER,
212 				  WDI_EVENT_RATE_UPDATE)) {
213 			return A_ERROR;
214 		}
215 	}
216 	if (log_state & ATH_PKTLOG_SW_EVENT) {
217 		if (cdp_wdi_event_sub(soc, cdp_pdev,
218 				  &PKTLOG_SW_EVENT_SUBSCRIBER,
219 				  WDI_EVENT_SW_EVENT)) {
220 			return A_ERROR;
221 		}
222 	}
223 	if (log_state & ATH_PKTLOG_LITE_T2H) {
224 		if (cdp_wdi_event_sub(soc, cdp_pdev,
225 				  &PKTLOG_LITE_T2H_SUBSCRIBER,
226 				  WDI_EVENT_LITE_T2H)) {
227 			return A_ERROR;
228 		}
229 	}
230 	if (log_state & ATH_PKTLOG_LITE_RX) {
231 		if (cdp_wdi_event_sub(soc, cdp_pdev,
232 				&PKTLOG_LITE_RX_SUBSCRIBER,
233 				WDI_EVENT_LITE_RX)) {
234 			return A_ERROR;
235 		}
236 	}
237 
238 	return A_OK;
239 }
240 
241 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
242 		u_int16_t peer_id, uint32_t status)
243 {
244 	switch (event) {
245 	case WDI_EVENT_TX_STATUS:
246 	{
247 		/*
248 		 * process TX message
249 		 */
250 		if (process_tx_info(pdev, log_data)) {
251 			qdf_print("Unable to process TX info");
252 			return;
253 		}
254 		break;
255 	}
256 	case WDI_EVENT_RX_DESC:
257 	{
258 		/*
259 		 * process RX message for local frames
260 		 */
261 		if (process_rx_info(pdev, log_data)) {
262 			qdf_print("Unable to process RX info");
263 			return;
264 		}
265 		break;
266 	}
267 	case WDI_EVENT_RX_DESC_REMOTE:
268 	{
269 		/*
270 		 * process RX message for remote frames
271 		 */
272 		if (process_rx_info_remote(pdev, log_data)) {
273 			qdf_print("Unable to process RX info");
274 			return;
275 		}
276 		break;
277 	}
278 	case WDI_EVENT_RATE_FIND:
279 	{
280 		/*
281 		 * process RATE_FIND message
282 		 */
283 		if (process_rate_find(pdev, log_data)) {
284 			qdf_print("Unable to process RC_FIND info");
285 			return;
286 		}
287 		break;
288 	}
289 	case WDI_EVENT_RATE_UPDATE:
290 	{
291 		/*
292 		 * process RATE_UPDATE message
293 		 */
294 		if (process_rate_update(pdev, log_data)) {
295 			qdf_print("Unable to process RC_UPDATE");
296 			return;
297 		}
298 		break;
299 	}
300 	case WDI_EVENT_SW_EVENT:
301 	{
302 		/*
303 		 * process SW EVENT message
304 		 */
305 		if (process_sw_event(pdev, log_data)) {
306 			qdf_print("Unable to process SW_EVENT");
307 			return;
308 		}
309 		break;
310 	}
311 	default:
312 		break;
313 	}
314 }
315 
316 void
317 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
318 			u_int16_t peer_id, uint32_t status)
319 {
320 	switch (event) {
321 	case WDI_EVENT_RX_DESC:
322 	{
323 		if (process_rx_desc_remote(context, log_data)) {
324 			qdf_print("Unable to process RX info");
325 			return;
326 		}
327 		break;
328 	}
329 	case WDI_EVENT_LITE_T2H:
330 	{
331 		if (process_pktlog_lite(context, log_data,
332 					PKTLOG_TYPE_LITE_T2H)) {
333 			qdf_print("Unable to process lite_t2h");
334 			return;
335 		}
336 		break;
337 	}
338 	case WDI_EVENT_LITE_RX:
339 	{
340 		if (process_pktlog_lite(context, log_data,
341 					PKTLOG_TYPE_LITE_RX)) {
342 			qdf_print("Unable to process lite_rx");
343 			return;
344 		}
345 		break;
346 	}
347 	default:
348 		break;
349 	}
350 }
351 
352 A_STATUS
353 wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
354 {
355 #ifdef CONFIG_MCL
356 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
357 #else
358 	/* TODO: WIN implementation to get soc */
359 #endif
360 
361 	if (log_state & ATH_PKTLOG_TX) {
362 		if (cdp_wdi_event_unsub(soc, pdev,
363 				    &PKTLOG_TX_SUBSCRIBER,
364 				    WDI_EVENT_TX_STATUS)) {
365 			return A_ERROR;
366 		}
367 	}
368 	if (log_state & ATH_PKTLOG_RX) {
369 		if (cdp_wdi_event_unsub(soc, pdev,
370 				    &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
371 			return A_ERROR;
372 		}
373 		if (cdp_wdi_event_unsub(soc, pdev,
374 				    &PKTLOG_RX_REMOTE_SUBSCRIBER,
375 				    WDI_EVENT_RX_DESC_REMOTE)) {
376 			return A_ERROR;
377 		}
378 	}
379 	if (log_state & ATH_PKTLOG_RCFIND) {
380 		if (cdp_wdi_event_unsub(soc, pdev,
381 				    &PKTLOG_RCFIND_SUBSCRIBER,
382 				    WDI_EVENT_RATE_FIND)) {
383 			return A_ERROR;
384 		}
385 	}
386 	if (log_state & ATH_PKTLOG_RCUPDATE) {
387 		if (cdp_wdi_event_unsub(soc, pdev,
388 				    &PKTLOG_RCUPDATE_SUBSCRIBER,
389 				    WDI_EVENT_RATE_UPDATE)) {
390 			return A_ERROR;
391 		}
392 	}
393 	if (log_state & ATH_PKTLOG_RCUPDATE) {
394 		if (cdp_wdi_event_unsub(soc, pdev,
395 				    &PKTLOG_SW_EVENT_SUBSCRIBER,
396 				    WDI_EVENT_SW_EVENT)) {
397 			return A_ERROR;
398 		}
399 	}
400 	if (log_state & ATH_PKTLOG_LITE_T2H) {
401 		if (cdp_wdi_event_unsub(soc, pdev,
402 				  &PKTLOG_LITE_T2H_SUBSCRIBER,
403 				  WDI_EVENT_LITE_T2H)) {
404 			return A_ERROR;
405 		}
406 	}
407 	if (log_state & ATH_PKTLOG_LITE_RX) {
408 		if (cdp_wdi_event_unsub(soc, pdev,
409 				&PKTLOG_LITE_RX_SUBSCRIBER,
410 				WDI_EVENT_LITE_RX)) {
411 			return A_ERROR;
412 		}
413 	}
414 
415 	return A_OK;
416 }
417 
/**
 * pktlog_disable() - stop packet logging in target and detach WDI callbacks
 * @scn: HIF opaque context (handles are actually fetched via globals)
 *
 * Sends WMI_PDEV_PKTLOG_DISABLE_CMDID to the firmware and, when callbacks
 * were subscribed, unsubscribes the pktlog WDI subscribers.  Refuses to
 * run while another pktlog operation is in flight.
 *
 * Return: 0 on success; -EINVAL on invalid handles or messaging failure;
 *         -EBUSY while another pktlog operation is in progress
 */
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	struct cdp_pdev *txrx_pdev = get_txrx_context();

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}

	/* NOTE(review): pl_info is read before the NULL check below, which
	 * tests pl_dev->pl_info; harmless, since pl_info is not dereferenced
	 * until after the check.
	 */
	pl_info = pl_dev->pl_info;

	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}

	if (!txrx_pdev) {
		qdf_print("Invalid cdp_pdev");
		return -EINVAL;
	}

	/* Reject while a read/clear operation owns the buffer */
	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* Ask firmware to stop generating pktlog events */
	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target");
		return -EINVAL;
	}

	/* Detach WDI callbacks only if they were attached */
	if (pl_dev->is_pktlog_cb_subscribed &&
		wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;
	/* A disable that arrives mid-read moves to a dedicated state;
	 * presumably so the reader can finish against the frozen buffer —
	 * confirm against the read path.
	 */
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
475 
476 void pktlog_init(struct hif_opaque_softc *scn)
477 {
478 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
479 	struct ath_pktlog_info *pl_info;
480 
481 	if (pl_dev == NULL || pl_dev->pl_info == NULL) {
482 		qdf_print("pl_dev or pl_info is invalid");
483 		return;
484 	}
485 
486 	pl_info = pl_dev->pl_info;
487 
488 	OS_MEMZERO(pl_info, sizeof(*pl_info));
489 	PKTLOG_LOCK_INIT(pl_info);
490 	mutex_init(&pl_info->pktlog_mutex);
491 
492 	pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
493 	pl_info->buf = NULL;
494 	pl_info->log_state = 0;
495 	pl_info->init_saved_state = 0;
496 	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
497 	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
498 	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
499 	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
500 	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
501 	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
502 	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
503 	pl_info->pktlen = 0;
504 	pl_info->start_time_thruput = 0;
505 	pl_info->start_time_per = 0;
506 	pl_dev->vendor_cmd_send = false;
507 
508 	if (pl_dev->callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
509 		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
510 		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
511 		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
512 		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
513 		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
514 		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
515 	} else if (pl_dev->callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
516 		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
517 		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
518 	}
519 }
520 
/**
 * __pktlog_enable() - core pktlog enable/disable (caller holds pktlog_mutex)
 * @scn: HIF opaque context
 * @log_state: ATH_PKTLOG_* bitmap; 0 requests disable
 * @ini_triggered: true when triggered by an INI setting
 * @user_triggered: non-zero when triggered from user space
 * @is_iwpriv_command: 1 for iwpriv-issued commands, 0 for vendor commands
 *
 * On first enable this allocates and initializes the host log buffer,
 * then subscribes the WDI callbacks and posts the WMI enable command to
 * firmware.  A log_state of 0 routes to the disable path instead.
 *
 * Return: 0 on success; -EBUSY while another operation is in flight;
 *         -ENOMEM on buffer allocation failure; -EINVAL otherwise
 */
static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		 bool ini_triggered, uint8_t user_triggered,
		 uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *cdp_pdev;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	cdp_pdev = get_txrx_context();
	if (!cdp_pdev) {
		qdf_print("%s: Invalid txrx context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	/* States below CLEARBUFF_COMPLETE mean an operation is in flight */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress", __func__);
		return 0;
	}

	/* Lazily allocate and initialize the host-side log buffer */
	if (!pl_dev->tgt_pktlog_alloced) {
		if (pl_info->buf == NULL) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed",
					  __func__);
				return -ENOMEM;
			}

			/* Defensive re-check: alloc reported success but
			 * left buf NULL
			 */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed",
					  __func__);
				ASSERT(0);
				return -ENOMEM;
			}

		}

		spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(cdp_pdev, log_state);
			if (error) {
				pl_info->curr_pkt_state =
						PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s",
					  __func__);
				return -EINVAL;
			}
		} else {
			/* NOTE(review): an already-subscribed state is
			 * treated as an error rather than a no-op here;
			 * confirm this is the intended handling of repeated
			 * enable requests.
			 */
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Unable to subscribe %d to the WDI %s",
				  log_state, __func__);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: route through the disable entry point */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
651 
652 int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
653 		 bool ini_triggered, uint8_t user_triggered,
654 		 uint32_t is_iwpriv_command)
655 {
656 	struct pktlog_dev_t *pl_dev;
657 	struct ath_pktlog_info *pl_info;
658 	int err;
659 
660 	pl_dev = get_pktlog_handle();
661 
662 	if (!pl_dev) {
663 		qdf_print("%s: invalid pl_dev handle", __func__);
664 		return -EINVAL;
665 	}
666 
667 	pl_info = pl_dev->pl_info;
668 
669 	if (!pl_info) {
670 		qdf_print("%s: invalid pl_info handle", __func__);
671 		return -EINVAL;
672 	}
673 
674 	mutex_lock(&pl_info->pktlog_mutex);
675 	err = __pktlog_enable(scn, log_state, ini_triggered,
676 				user_triggered, is_iwpriv_command);
677 	mutex_unlock(&pl_info->pktlog_mutex);
678 	return err;
679 }
680 
/* Pktlog ring buffer sizing limits: minimum 1 MB, maximum 16 MB */
#define ONE_MEGABYTE (1024 * 1024)
#define MAX_ALLOWED_PKTLOG_SIZE (16 * ONE_MEGABYTE)
683 
/**
 * __pktlog_setsize() - core buffer-resize logic (caller holds pktlog_mutex)
 * @scn: HIF opaque context
 * @size: requested buffer size in bytes [ONE_MEGABYTE,
 *        MAX_ALLOWED_PKTLOG_SIZE]
 *
 * Validates the requested size, frees any existing buffer (unsubscribing
 * WDI callbacks first if needed), and records the new size; the buffer
 * itself is re-allocated lazily on the next enable.
 *
 * Return: 0 on success or when the size is unchanged; -EINVAL on bad
 *         size or while logging is active; -EBUSY/-EFAULT otherwise
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *pdev;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		/* NOTE(review): message says pl_dev but the missing handle
		 * is pl_info
		 */
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pdev = get_txrx_context();

	if (!pdev) {
		qdf_print("%s: invalid pdev handle", __func__);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) {
		/* NOTE(review): two messages are printed for the same
		 * rejection; the second looks redundant
		 */
		qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB.",
			  __func__, size, (ONE_MEGABYTE / ONE_MEGABYTE),
			  (MAX_ALLOWED_PKTLOG_SIZE / ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid requested buff size", __func__);
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}

	/* Free the old buffer under log_lock so readers cannot race */
	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		if (pl_dev->is_pktlog_cb_subscribed &&
			wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Cannot unsubscribe pktlog from the WDI");
			spin_unlock_bh(&pl_info->log_lock);
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
764 
765 int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
766 {
767 	struct pktlog_dev_t *pl_dev;
768 	struct ath_pktlog_info *pl_info;
769 	int status;
770 
771 	pl_dev = get_pktlog_handle();
772 
773 	if (!pl_dev) {
774 		qdf_print("%s: invalid pl_dev handle", __func__);
775 		return -EINVAL;
776 	}
777 
778 	pl_info = pl_dev->pl_info;
779 
780 	if (!pl_info) {
781 		qdf_print("%s: invalid pl_dev handle", __func__);
782 		return -EINVAL;
783 	}
784 
785 	mutex_lock(&pl_info->pktlog_mutex);
786 	status = __pktlog_setsize(scn, size);
787 	mutex_unlock(&pl_info->pktlog_mutex);
788 
789 	return status;
790 }
791 
/**
 * pktlog_clearbuff() - zero the pktlog buffer contents
 * @scn: HIF opaque context
 * @clear_buff: must be true; false is rejected
 *
 * Zeroes the existing log buffer in place (logging must already be
 * disabled) and resets the subscription/allocation flags so the next
 * enable re-initializes everything.
 *
 * Return: 0 on success; -EINVAL on bad arguments or active logging;
 *         -EBUSY while another operation is in flight; -EFAULT when the
 *         buffer is missing or has a bad size
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info) {
		/* NOTE(review): message says pl_dev but the missing handle
		 * is pl_info
		 */
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	if (!clear_buff)
		return -EINVAL;

	/* Clearing is only legal after a read has completed */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing "
			  "pktlog buffer.", __func__);
		return -EINVAL;
	}

	if (pl_info->buf != NULL) {
		if (pl_info->buf_size > 0) {
			qdf_print("%s: pktlog buffer is cleared.", __func__);
			memset(pl_info->buf, 0, pl_info->buf_size);
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			/* rd_offset of -1 marks the ring as empty */
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. "
				  "Existing Buf size %d", __func__,
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}

	/* Record that the post-read clear finished so a new read may start */
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
858 
859 /**
860  * pktlog_process_fw_msg() - process packetlog message
861  * @buff: buffer
862  *
863  * Return: None
864  */
865 void pktlog_process_fw_msg(uint32_t *buff, uint32_t len)
866 {
867 	uint32_t *pl_hdr;
868 	uint32_t log_type;
869 	struct cdp_pdev *pdev = get_txrx_context();
870 	struct ol_fw_data pl_fw_data;
871 
872 	if (!pdev) {
873 		qdf_print("%s: txrx_pdev is NULL", __func__);
874 		return;
875 	}
876 	pl_hdr = buff;
877 	pl_fw_data.data = pl_hdr;
878 	pl_fw_data.len = len;
879 
880 	log_type =
881 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
882 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
883 
884 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
885 		|| (log_type == PKTLOG_TYPE_TX_STAT)
886 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
887 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
888 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
889 		wdi_event_handler(WDI_EVENT_TX_STATUS,
890 				  pdev, &pl_fw_data);
891 	else if (log_type == PKTLOG_TYPE_RC_FIND)
892 		wdi_event_handler(WDI_EVENT_RATE_FIND,
893 				  pdev, &pl_fw_data);
894 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
895 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
896 				  pdev, &pl_fw_data);
897 	else if (log_type == PKTLOG_TYPE_RX_STAT)
898 		wdi_event_handler(WDI_EVENT_RX_DESC,
899 				  pdev, &pl_fw_data);
900 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
901 		wdi_event_handler(WDI_EVENT_SW_EVENT,
902 				  pdev, &pl_fw_data);
903 }
904 
905 #if defined(QCA_WIFI_3_0_ADRASTEA)
906 static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
907 {
908 	int rc = 0; /* sane */
909 
910 	if ((!nbuf) ||
911 	    (nbuf->data < nbuf->head) ||
912 	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
913 		rc = -EINVAL;
914 
915 	return rc;
916 }
/**
 * pktlog_t2h_msg_handler() - target-to-host message handler
 * @context: pdev context (struct pktlog_dev_t *)
 * @pkt: HTC packet whose pPktContext carries the received nbuf
 *
 * Validates the received buffer, then hands its contents to
 * pktlog_process_fw_msg() and frees the nbuf.  The nbuf is deliberately
 * leaked when it fails the sanity check, since freeing a corrupted
 * buffer has been seen to crash.
 *
 * Return: None
 */
static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	uint32_t *msg_word;
	uint32_t msg_len;

	/* check for sanity of the packet, have seen corrupted pkts */
	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
		qdf_print("%s: packet 0x%pK corrupted? Leaking...",
			  __func__, pktlog_t2h_msg);
		/* do not free; may crash! */
		QDF_ASSERT(0);
		return;
	}

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation is normal during teardown; anything else
		 * counts as an HTC error
		 */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			pdev->htc_err_cnt++;
		qdf_nbuf_free(pktlog_t2h_msg);
		return;
	}

	/* confirm alignment */
	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);

	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
	pktlog_process_fw_msg(msg_word, msg_len);

	qdf_nbuf_free(pktlog_t2h_msg);
}
957 
/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * Pktlog never queues host-to-target traffic on this endpoint, so a TX
 * resume indication is unexpected; log and assert if it ever fires.
 *
 * Return: None
 */
static void pktlog_tx_resume_handler(void *context)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
969 
/**
 * pktlog_h2t_send_complete() - send complete indication
 * @context: pdev context
 * @htc_pkt: HTC packet
 *
 * Pktlog never sends host-to-target messages on this endpoint, so this
 * completion callback is unexpected; log and assert if it ever fires.
 *
 * Return: None
 */
static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
982 
/**
 * pktlog_h2t_full() - queue full indication
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Keep the packet queued rather than dropping it when the send queue
 * fills.
 *
 * Return: HTC_SEND_FULL_KEEP
 */
static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
994 
/**
 * pktlog_htc_connect_service() - create the HTC endpoint for packetlog
 * @pdev: pktlog device whose htc_pdev is already set
 *
 * Connects to the PACKET_LOG_SVC HTC service with credit flow control
 * disabled, records the endpoint, and marks multi-target pktlog enabled
 * on success (disabled on failure).
 *
 * Return: 0 on success, -EIO when the HTC connect fails
 */
static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_set(&connect, sizeof(connect), 0);
	qdf_mem_set(&response, sizeof(response), 0);

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = pdev;
	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */

	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = PACKET_LOG_SVC;

	status = htc_connect_service(pdev->htc_pdev, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		pdev->mt_pktlog_enabled = false;
		return -EIO;       /* failure */
	}

	pdev->htc_endpoint = response.Endpoint;
	pdev->mt_pktlog_enabled = true;

	return 0;               /* success */
}
1048 
1049 /**
1050  * pktlog_htc_attach() - attach pktlog HTC service
1051  *
1052  * Return: 0 for success/failure
1053  */
1054 int pktlog_htc_attach(void)
1055 {
1056 	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1057 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1058 
1059 	if ((!pl_pdev) || (!htc_pdev)) {
1060 		qdf_print("Invalid pl_dev or htc_pdev handle");
1061 		return -EINVAL;
1062 	}
1063 
1064 	pl_pdev->htc_pdev = htc_pdev;
1065 	return pktlog_htc_connect_service(pl_pdev);
1066 }
1067 #else
/**
 * pktlog_htc_attach() - stub for targets without the pktlog HTC service
 *
 * Marks multi-target pktlog as disabled; no endpoint is created.
 *
 * Return: 0 on success, -EINVAL when the pktlog device is missing
 */
int pktlog_htc_attach(void)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	pl_dev->mt_pktlog_enabled = false;
	return 0;
}
1080 #endif
1081 #endif /* REMOVE_PKT_LOG */
1082