xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c (revision e1d3d092f61a07549ab97f6f1f0c86554e0c642f)
1 /*
2  * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /*
29  *
30  * Permission to use, copy, modify, and/or distribute this software for any
31  * purpose with or without fee is hereby granted, provided that the above
32  * copyright notice and this permission notice appear in all copies.
33  *
34  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
35  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
36  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
37  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
38  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
39  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
40  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
41  */
42 
43 #ifndef REMOVE_PKT_LOG
44 #include "qdf_mem.h"
45 #include "athdefs.h"
46 #include "pktlog_ac_i.h"
47 #include "cds_api.h"
48 #include "wma_types.h"
49 #include "htc.h"
50 
/*
 * WDI event subscribers, one per pktlog event class.  Their callbacks are
 * all pointed at pktlog_callback() in pktlog_init(), and they are
 * (un)registered with the txrx pdev in wdi_pktlog_subscribe() /
 * wdi_pktlog_unsubscribe().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
57 
/* Architecture-dependent pktlog entry points, exported via ol_pl_dev */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};
64 
/* Singleton pktlog device; handed out to callers by ol_pl_sethandle() */
struct ol_pktlog_dev_t ol_pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
68 
/**
 * ol_pl_sethandle() - record the HIF context and return the pktlog device
 * @pl_handle: out parameter; receives a pointer to the global ol_pl_dev
 * @scn: opaque HIF context, stored (cast) into ol_pl_dev.scn
 */
void ol_pl_sethandle(ol_pktlog_dev_handle *pl_handle,
		     struct hif_opaque_softc *scn)
{
	ol_pl_dev.scn = (ol_ath_generic_softc_handle) scn;
	*pl_handle = &ol_pl_dev;
}
75 
76 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
77 				    WMI_CMD_ID cmd_id, bool ini_triggered,
78 				    uint8_t user_triggered)
79 {
80 	struct scheduler_msg msg = { 0 };
81 	QDF_STATUS status;
82 	struct ath_pktlog_wmi_params *param;
83 
84 	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
85 
86 	if (!param)
87 		return A_NO_MEMORY;
88 
89 	param->cmd_id = cmd_id;
90 	param->pktlog_event = event_types;
91 	param->ini_triggered = ini_triggered;
92 	param->user_triggered = user_triggered;
93 
94 	msg.type = WMA_PKTLOG_ENABLE_REQ;
95 	msg.bodyptr = param;
96 	msg.bodyval = 0;
97 
98 	status = scheduler_post_msg(QDF_MODULE_ID_WMA, &msg);
99 
100 	if (status != QDF_STATUS_SUCCESS) {
101 		qdf_mem_free(param);
102 		return A_ERROR;
103 	}
104 
105 	return A_OK;
106 }
107 
108 static inline A_STATUS
109 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
110 		 bool ini_triggered, uint8_t user_triggered)
111 {
112 	uint32_t types = 0;
113 
114 	if (log_state & ATH_PKTLOG_TX)
115 		types |= WMI_PKTLOG_EVENT_TX;
116 
117 	if (log_state & ATH_PKTLOG_RX)
118 		types |= WMI_PKTLOG_EVENT_RX;
119 
120 	if (log_state & ATH_PKTLOG_RCFIND)
121 		types |= WMI_PKTLOG_EVENT_RCF;
122 
123 	if (log_state & ATH_PKTLOG_RCUPDATE)
124 		types |= WMI_PKTLOG_EVENT_RCU;
125 
126 	if (log_state & ATH_PKTLOG_SW_EVENT)
127 		types |= WMI_PKTLOG_EVENT_SW;
128 
129 	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
130 				   ini_triggered, user_triggered);
131 }
132 
133 static inline A_STATUS
134 wdi_pktlog_subscribe(struct ol_txrx_pdev_t *txrx_pdev, int32_t log_state)
135 {
136 	if (!txrx_pdev) {
137 		printk("Invalid pdev in %s\n", __func__);
138 		return A_ERROR;
139 	}
140 	if (log_state & ATH_PKTLOG_TX) {
141 		if (wdi_event_sub(txrx_pdev,
142 				  &PKTLOG_TX_SUBSCRIBER, WDI_EVENT_TX_STATUS)) {
143 			return A_ERROR;
144 		}
145 	}
146 	if (log_state & ATH_PKTLOG_RX) {
147 		if (wdi_event_sub(txrx_pdev,
148 				  &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
149 			return A_ERROR;
150 		}
151 		if (wdi_event_sub(txrx_pdev,
152 				  &PKTLOG_RX_REMOTE_SUBSCRIBER,
153 				  WDI_EVENT_RX_DESC_REMOTE)) {
154 			return A_ERROR;
155 		}
156 	}
157 	if (log_state & ATH_PKTLOG_RCFIND) {
158 		if (wdi_event_sub(txrx_pdev,
159 				  &PKTLOG_RCFIND_SUBSCRIBER,
160 				  WDI_EVENT_RATE_FIND)) {
161 			return A_ERROR;
162 		}
163 	}
164 	if (log_state & ATH_PKTLOG_RCUPDATE) {
165 		if (wdi_event_sub(txrx_pdev,
166 				  &PKTLOG_RCUPDATE_SUBSCRIBER,
167 				  WDI_EVENT_RATE_UPDATE)) {
168 			return A_ERROR;
169 		}
170 	}
171 	if (log_state & ATH_PKTLOG_SW_EVENT) {
172 		if (wdi_event_sub(txrx_pdev,
173 				  &PKTLOG_SW_EVENT_SUBSCRIBER,
174 				  WDI_EVENT_SW_EVENT)) {
175 			return A_ERROR;
176 		}
177 	}
178 
179 	return A_OK;
180 }
181 
182 void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data)
183 {
184 	switch (event) {
185 	case WDI_EVENT_TX_STATUS:
186 	{
187 		/*
188 		 * process TX message
189 		 */
190 		if (process_tx_info(pdev, log_data)) {
191 			printk("Unable to process TX info\n");
192 			return;
193 		}
194 		break;
195 	}
196 	case WDI_EVENT_RX_DESC:
197 	{
198 		/*
199 		 * process RX message for local frames
200 		 */
201 		if (process_rx_info(pdev, log_data)) {
202 			printk("Unable to process RX info\n");
203 			return;
204 		}
205 		break;
206 	}
207 	case WDI_EVENT_RX_DESC_REMOTE:
208 	{
209 		/*
210 		 * process RX message for remote frames
211 		 */
212 		if (process_rx_info_remote(pdev, log_data)) {
213 			printk("Unable to process RX info\n");
214 			return;
215 		}
216 		break;
217 	}
218 	case WDI_EVENT_RATE_FIND:
219 	{
220 		/*
221 		 * process RATE_FIND message
222 		 */
223 		if (process_rate_find(pdev, log_data)) {
224 			printk("Unable to process RC_FIND info\n");
225 			return;
226 		}
227 		break;
228 	}
229 	case WDI_EVENT_RATE_UPDATE:
230 	{
231 		/*
232 		 * process RATE_UPDATE message
233 		 */
234 		if (process_rate_update(pdev, log_data)) {
235 			printk("Unable to process RC_UPDATE\n");
236 			return;
237 		}
238 		break;
239 	}
240 	case WDI_EVENT_SW_EVENT:
241 	{
242 		/*
243 		 * process SW EVENT message
244 		 */
245 		if (process_sw_event(pdev, log_data)) {
246 			printk("Unable to process SW_EVENT\n");
247 			return;
248 		}
249 		break;
250 	}
251 	default:
252 		break;
253 	}
254 }
255 
256 A_STATUS
257 wdi_pktlog_unsubscribe(struct ol_txrx_pdev_t *txrx_pdev, uint32_t log_state)
258 {
259 	if (log_state & ATH_PKTLOG_TX) {
260 		if (wdi_event_unsub(txrx_pdev,
261 				    &PKTLOG_TX_SUBSCRIBER,
262 				    WDI_EVENT_TX_STATUS)) {
263 			return A_ERROR;
264 		}
265 	}
266 	if (log_state & ATH_PKTLOG_RX) {
267 		if (wdi_event_unsub(txrx_pdev,
268 				    &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
269 			return A_ERROR;
270 		}
271 		if (wdi_event_unsub(txrx_pdev,
272 				    &PKTLOG_RX_REMOTE_SUBSCRIBER,
273 				    WDI_EVENT_RX_DESC_REMOTE)) {
274 			return A_ERROR;
275 		}
276 	}
277 	if (log_state & ATH_PKTLOG_RCFIND) {
278 		if (wdi_event_unsub(txrx_pdev,
279 				    &PKTLOG_RCFIND_SUBSCRIBER,
280 				    WDI_EVENT_RATE_FIND)) {
281 			return A_ERROR;
282 		}
283 	}
284 	if (log_state & ATH_PKTLOG_RCUPDATE) {
285 		if (wdi_event_unsub(txrx_pdev,
286 				    &PKTLOG_RCUPDATE_SUBSCRIBER,
287 				    WDI_EVENT_RATE_UPDATE)) {
288 			return A_ERROR;
289 		}
290 	}
291 	if (log_state & ATH_PKTLOG_RCUPDATE) {
292 		if (wdi_event_unsub(txrx_pdev,
293 				    &PKTLOG_SW_EVENT_SUBSCRIBER,
294 				    WDI_EVENT_SW_EVENT)) {
295 			return A_ERROR;
296 		}
297 	}
298 	return A_OK;
299 }
300 
301 int pktlog_disable(struct hif_opaque_softc *scn)
302 {
303 	struct ol_txrx_pdev_t *txrx_pdev =
304 		cds_get_context(QDF_MODULE_ID_TXRX);
305 	struct ol_pktlog_dev_t *pl_dev;
306 	struct ath_pktlog_info *pl_info;
307 	uint8_t save_pktlog_state;
308 
309 	if (txrx_pdev == NULL ||
310 			txrx_pdev->pl_dev == NULL ||
311 			txrx_pdev->pl_dev->pl_info == NULL)
312 		return -EFAULT;
313 
314 	pl_dev = txrx_pdev->pl_dev;
315 	pl_info = pl_dev->pl_info;
316 
317 	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
318 	    pl_info->curr_pkt_state ==
319 			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
320 	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
321 	    pl_info->curr_pkt_state ==
322 			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
323 		return -EBUSY;
324 
325 	save_pktlog_state = pl_info->curr_pkt_state;
326 	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
327 
328 	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
329 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
330 		printk("Failed to disable pktlog in target\n");
331 		return -EINVAL;
332 	}
333 
334 	if (pl_dev->is_pktlog_cb_subscribed &&
335 		wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
336 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
337 		printk("Cannot unsubscribe pktlog from the WDI\n");
338 		return -EINVAL;
339 	}
340 	pl_dev->is_pktlog_cb_subscribed = false;
341 	pl_dev->is_pktlog_cb_subscribed = false;
342 	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
343 		pl_info->curr_pkt_state =
344 			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
345 	else
346 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
347 	return 0;
348 }
349 
/**
 * pktlog_init() - reset pktlog state and register the WDI callbacks
 * @scn: HIF context (unused; the pdev is fetched from CDS instead)
 *
 * Zeroes the per-pdev ath_pktlog_info, initializes its lock, programs
 * the default thresholds/sizes, and points every WDI subscriber at
 * pktlog_callback().  Invoked through the ol_pl_funcs dispatch table.
 */
void pktlog_init(struct hif_opaque_softc *scn)
{
	struct ath_pktlog_info *pl_info;
	ol_txrx_pdev_handle pdev_txrx_handle;
	pdev_txrx_handle = cds_get_context(QDF_MODULE_ID_TXRX);

	if (pdev_txrx_handle == NULL ||
			pdev_txrx_handle->pl_dev == NULL ||
			pdev_txrx_handle->pl_dev->pl_info == NULL)
		return;

	pl_info = pdev_txrx_handle->pl_dev->pl_info;

	OS_MEMZERO(pl_info, sizeof(*pl_info));
	PKTLOG_LOCK_INIT(pl_info);

	/* Defaults only; the log buffer itself is allocated lazily by
	 * pktlog_enable() (buf stays NULL until then).
	 */
	pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
	pl_info->buf = NULL;
	pl_info->log_state = 0;
	pl_info->init_saved_state = 0;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
	pl_info->pktlen = 0;
	pl_info->start_time_thruput = 0;
	pl_info->start_time_per = 0;
	pdev_txrx_handle->pl_dev->vendor_cmd_send = false;

	/* Every event class funnels through the same callback */
	PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
	PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
	PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
	PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
	PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
	PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
}
389 
/**
 * pktlog_enable() - enable (or, with log_state == 0, disable) pktlog
 * @scn: HIF context, forwarded to buffer allocation and target enable
 * @log_state: ATH_PKTLOG_* bitmap; 0 means "disable"
 * @ini_triggered: true when driven by an INI setting
 * @user_triggered: non-zero when driven from user space
 * @is_iwpriv_command: 0 when issued via the vendor command path
 *
 * Lazily allocates the host log buffer on first enable, subscribes the
 * WDI callbacks, then asks the firmware to start logging.  The
 * curr_pkt_state field acts as a busy flag around the whole sequence.
 *
 * Return: 0 on success; -EINVAL, -EBUSY or -ENOMEM on failure
 */
int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		 bool ini_triggered, uint8_t user_triggered,
		 uint32_t is_iwpriv_command)
{
	struct ol_pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct ol_txrx_pdev_t *txrx_pdev;
	int error;

	if (!scn) {
		printk("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!txrx_pdev) {
		printk("%s: Invalid txrx_pdev context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = txrx_pdev->pl_dev;
	if (!pl_dev) {
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	if (!pl_info)
		return 0;

	/* NOTE(review): assumes every "busy" state is ordered below
	 * PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE in the enum — confirm.
	 */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		return 0;
	}

	/* First enable: allocate the host log buffer and stamp its header */
	if (!pl_dev->tgt_pktlog_alloced) {
		if (pl_info->buf == NULL) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				return error;
			}

			/* Defensive: pktlog_alloc_buf() returned success
			 * but left buf NULL.
			 */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				printk("%s: pktlog buf alloc failed\n",
				       __func__);
				ASSERT(0);
				return -ENOMEM;
			}

		}

		spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}

	if (log_state != 0) {
		/* WDI subscribe */
		if ((!pl_dev->is_pktlog_cb_subscribed) &&
			wdi_pktlog_subscribe(txrx_pdev, log_state)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			printk("Unable to subscribe to the WDI %s\n", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			printk("Device cannot be enabled, %s\n", __func__);
			return -EINVAL;
		}

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: disable via the dispatch-table entry */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
507 
/**
 * pktlog_setsize() - change the host pktlog buffer size
 * @scn: HIF context (unused; the pdev is fetched from CDS instead)
 * @size: requested buffer size in bytes; must be >= 0 and logging must
 *        be disabled first
 *
 * If a buffer already exists it is unsubscribed from WDI and released;
 * the new size takes effect on the next pktlog_enable() (which
 * reallocates lazily).
 *
 * Return: 0 on success; -EFAULT, -EBUSY or -EINVAL on failure
 */
int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	ol_txrx_pdev_handle pdev_txrx_handle =
		cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;

	if (pdev_txrx_handle == NULL ||
			pdev_txrx_handle->pl_dev == NULL ||
			pdev_txrx_handle->pl_dev->pl_info == NULL)
		return -EFAULT;

	pl_dev = pdev_txrx_handle->pl_dev;
	pl_info = pl_dev->pl_info;

	/* NOTE(review): '<' can only be true if states ordered below
	 * PKTLOG_OPR_NOT_IN_PROGRESS exist in the enum — verify ordering.
	 */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (size < 0) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}

	/* NOTE(review): the error path below logs via printk while still
	 * holding log_lock (a bh spinlock) — confirm that is acceptable here.
	 */
	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		if (pl_dev->is_pktlog_cb_subscribed &&
			wdi_pktlog_unsubscribe(pdev_txrx_handle,
					 pl_info->log_state)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			printk("Cannot unsubscribe pktlog from the WDI\n");
			spin_unlock_bh(&pl_info->log_lock);
			return -EFAULT;
		}
		pktlog_release_buf(pdev_txrx_handle);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	/* size == 0 releases the buffer without recording a new size */
	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d\n", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
570 
/**
 * pktlog_clearbuff() - zero the host pktlog buffer contents
 * @scn: HIF context (unused; the pdev is fetched from CDS instead)
 * @clear_buff: must be true; false is rejected with -EINVAL
 *
 * Requires logging to be disabled and a prior read to have completed
 * (tracked via curr_pkt_state).  Clears the buffer in place and resets
 * the subscription/allocation flags so the next enable re-initializes.
 *
 * Return: 0 on success; -EFAULT, -EBUSY or -EINVAL on failure
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	ol_txrx_pdev_handle pdev_txrx_handle =
		cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	if (pdev_txrx_handle == NULL ||
			pdev_txrx_handle->pl_dev == NULL ||
			pdev_txrx_handle->pl_dev->pl_info == NULL)
		return -EFAULT;

	pl_dev = pdev_txrx_handle->pl_dev;
	pl_info = pl_dev->pl_info;

	if (!clear_buff)
		return -EINVAL;

	/* Only allowed right after a completed read, and not twice in a row */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing "
			  "pktlog buffer.", __func__);
		return -EINVAL;
	}

	if (pl_info->buf != NULL) {
		if (pl_info->buf_size > 0) {
			qdf_print("%s: pktlog buffer is cleared.", __func__);
			/* NOTE(review): buffer is zeroed and flags reset
			 * without a WDI unsubscribe — confirm callers
			 * guarantee logging is already torn down here.
			 */
			memset(pl_info->buf, 0, pl_info->buf_size);
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. "
				  "Existing Buf size %d", __func__,
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}

	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
633 
634 /**
635  * pktlog_process_fw_msg() - process packetlog message
636  * @buff: buffer
637  *
638  * Return: None
639  */
640 void pktlog_process_fw_msg(uint32_t *buff)
641 {
642 	uint32_t *pl_hdr;
643 	uint32_t log_type;
644 	struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
645 
646 	if (!txrx_pdev) {
647 		qdf_print("%s: txrx_pdev is NULL", __func__);
648 		return;
649 	}
650 
651 	pl_hdr = buff;
652 	log_type =
653 		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
654 		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
655 	if ((log_type == PKTLOG_TYPE_TX_CTRL)
656 		|| (log_type == PKTLOG_TYPE_TX_STAT)
657 		|| (log_type == PKTLOG_TYPE_TX_MSDU_ID)
658 		|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
659 		|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
660 		wdi_event_handler(WDI_EVENT_TX_STATUS,
661 				  txrx_pdev, pl_hdr);
662 	else if (log_type == PKTLOG_TYPE_RC_FIND)
663 		wdi_event_handler(WDI_EVENT_RATE_FIND,
664 				  txrx_pdev, pl_hdr);
665 	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
666 		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
667 				  txrx_pdev, pl_hdr);
668 	else if (log_type == PKTLOG_TYPE_RX_STAT)
669 		wdi_event_handler(WDI_EVENT_RX_DESC,
670 				  txrx_pdev, pl_hdr);
671 	else if (log_type == PKTLOG_TYPE_SW_EVENT)
672 		wdi_event_handler(WDI_EVENT_SW_EVENT,
673 				  txrx_pdev, pl_hdr);
674 
675 }
676 
677 #if defined(QCA_WIFI_3_0_ADRASTEA)
678 /**
679  * pktlog_t2h_msg_handler() - Target to host message handler
680  * @context: pdev context
681  * @pkt: HTC packet
682  *
683  * Return: None
684  */
685 static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
686 {
687 	struct ol_pktlog_dev_t *pdev = (struct ol_pktlog_dev_t *)context;
688 	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
689 	uint32_t *msg_word;
690 
691 	/* check for successful message reception */
692 	if (pkt->Status != A_OK) {
693 		if (pkt->Status != A_ECANCELED)
694 			pdev->htc_err_cnt++;
695 		qdf_nbuf_free(pktlog_t2h_msg);
696 		return;
697 	}
698 
699 	/* confirm alignment */
700 	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
701 
702 	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
703 	pktlog_process_fw_msg(msg_word);
704 
705 	qdf_nbuf_free(pktlog_t2h_msg);
706 }
707 
/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * Not expected for the pktlog service endpoint; logs and asserts if
 * ever invoked.
 *
 * Return: None
 */
static void pktlog_tx_resume_handler(void *context)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
719 
/**
 * pktlog_h2t_send_complete() - send complete indication
 * @context: pdev context
 * @htc_pkt: HTC packet
 *
 * Not expected for the pktlog service endpoint (nothing is sent on it
 * from this file); logs and asserts if ever invoked.
 *
 * Return: None
 */
static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}
732 
/**
 * pktlog_h2t_full() - queue full indication
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Always keeps the packet queued rather than dropping it.
 *
 * Return: HTC_SEND_FULL_KEEP
 */
static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
744 
745 /**
746  * pktlog_htc_connect_service() - create new endpoint for packetlog
747  * @pdev - pktlog pdev
748  *
749  * Return: 0 for success/failure
750  */
751 static int pktlog_htc_connect_service(struct ol_pktlog_dev_t *pdev)
752 {
753 	struct htc_service_connect_req connect;
754 	struct htc_service_connect_resp response;
755 	A_STATUS status;
756 
757 	qdf_mem_set(&connect, sizeof(connect), 0);
758 	qdf_mem_set(&response, sizeof(response), 0);
759 
760 	connect.pMetaData = NULL;
761 	connect.MetaDataLength = 0;
762 	connect.EpCallbacks.pContext = pdev;
763 	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
764 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
765 	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
766 	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
767 
768 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
769 	connect.EpCallbacks.EpRecvRefill = NULL;
770 	connect.EpCallbacks.RecvRefillWaterMark = 1;
771 	/* N/A, fill is done by HIF */
772 
773 	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
774 	/*
775 	 * Specify how deep to let a queue get before htc_send_pkt will
776 	 * call the EpSendFull function due to excessive send queue depth.
777 	 */
778 	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
779 
780 	/* disable flow control for HTT data message service */
781 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
782 
783 	/* connect to control service */
784 	connect.service_id = PACKET_LOG_SVC;
785 
786 	status = htc_connect_service(pdev->htc_pdev, &connect, &response);
787 
788 	if (status != A_OK) {
789 		pdev->mt_pktlog_enabled = false;
790 		return -EIO;       /* failure */
791 	}
792 
793 	pdev->htc_endpoint = response.Endpoint;
794 	pdev->mt_pktlog_enabled = true;
795 
796 	return 0;               /* success */
797 }
798 
799 /**
800  * pktlog_htc_attach() - attach pktlog HTC service
801  *
802  * Return: 0 for success/failure
803  */
804 int pktlog_htc_attach(void)
805 {
806 	struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
807 	struct ol_pktlog_dev_t *pdev = NULL;
808 	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
809 
810 	if ((!txrx_pdev) || (!txrx_pdev->pl_dev) || (!htc_pdev))
811 		return -EINVAL;
812 
813 	pdev = txrx_pdev->pl_dev;
814 	pdev->htc_pdev = htc_pdev;
815 	return pktlog_htc_connect_service(pdev);
816 }
817 #else
818 int pktlog_htc_attach(void)
819 {
820 	struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
821 	struct ol_pktlog_dev_t *pdev = NULL;
822 
823 	if (!txrx_pdev)
824 		return -EINVAL;
825 	pdev = txrx_pdev->pl_dev;
826 	pdev->mt_pktlog_enabled = false;
827 	return 0;
828 }
829 #endif
830 #endif /* REMOVE_PKT_LOG */
831