/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef REMOVE_PKT_LOG
#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif
#ifndef __KERNEL__
#define __KERNEL__
#endif
/*
 * Linux-specific implementation of pktlog for 802.11ac
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <pktlog_ac_i.h>
#include <pktlog_ac_fmt.h>
#include "i_host_diag_core_log.h"
#include "host_diag_core_log.h"
#include "ani_global.h"

#define PKTLOG_TAG              "ATH_PKTLOG"
#define PKTLOG_DEVNAME_SIZE     32
#define MAX_WLANDEV             1

#ifdef MULTI_IF_NAME
#define PKTLOG_PROC_DIR         "ath_pktlog" MULTI_IF_NAME
#else
#define PKTLOG_PROC_DIR         "ath_pktlog"
#endif

/* Permissions for creating proc entries */
#define PKTLOG_PROC_PERM        0444
#define PKTLOG_PROCSYS_DIR_PERM 0555
#define PKTLOG_PROCSYS_PERM     0644

#ifndef __MOD_INC_USE_COUNT
#define PKTLOG_MOD_INC_USE_COUNT	do {			\
	if (!try_module_get(THIS_MODULE)) {			\
		printk(KERN_WARNING "try_module_get failed\n");	\
	} } while (0)

#define PKTLOG_MOD_DEC_USE_COUNT        module_put(THIS_MODULE)
#else
#define PKTLOG_MOD_INC_USE_COUNT        MOD_INC_USE_COUNT
#define PKTLOG_MOD_DEC_USE_COUNT        MOD_DEC_USE_COUNT
#endif

static struct ath_pktlog_info *g_pktlog_info;

static struct proc_dir_entry *g_pktlog_pde;

static DEFINE_MUTEX(proc_mutex);

static int pktlog_attach(struct hif_opaque_softc *scn);
static void pktlog_detach(struct hif_opaque_softc *scn);
static int pktlog_open(struct inode *i, struct file *f);
static int pktlog_release(struct inode *i, struct file *f);
static ssize_t pktlog_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos);

static struct file_operations pktlog_fops = {
	.open    = pktlog_open,
	.release = pktlog_release,
	.read    = pktlog_read,
};

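/**
 * pktlog_disable_adapter_logging() - turn off pktlog for the adapter
 * @scn: HIF opaque handle (unused; the pktlog device is looked up through
 *       get_pktlog_handle())
 *
 * Clears log_state so that no further packet log entries are recorded.
 */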
void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (pl_dev)
		pl_dev->pl_info->log_state = 0;
}

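/**
 * pktlog_alloc_buf() - allocate the packet log ring buffer
 * @scn: HIF opaque handle
 *
 * vmalloc()s enough pages to hold the buffer header plus buf_size bytes,
 * rounds the start address up to a page boundary and marks every backing
 * page reserved with SetPageReserved() (presumably so the pages can later
 * be remapped to user space; the mmap path is not part of this file).
 *
 * Return: 0 on success, -EINVAL if the handle is missing or a buffer is
 * already in use, -ENOMEM on allocation failure.
 */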
int pktlog_alloc_buf(struct hif_opaque_softc *scn)
{
	uint32_t page_cnt;
	unsigned long vaddr;
	struct page *vpg;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct ath_pktlog_buf *buffer;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		printk(PKTLOG_TAG
		       "%s: Unable to allocate buffer: pktlog handle (pl_dev) is NULL\n",
		       __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	page_cnt = (sizeof(*(pl_info->buf)) + pl_info->buf_size) / PAGE_SIZE;

	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		printk(PKTLOG_TAG "Buffer is already in use\n");
		spin_unlock_bh(&pl_info->log_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&pl_info->log_lock);

	buffer = vmalloc((page_cnt + 2) * PAGE_SIZE);
	if (buffer == NULL) {
		printk(PKTLOG_TAG
		       "%s: Unable to allocate buffer "
		       "(%d pages)\n", __func__, page_cnt);
		return -ENOMEM;
	}

	buffer = (struct ath_pktlog_buf *)
		       (((unsigned long)(buffer) + PAGE_SIZE - 1)
			& PAGE_MASK);

	for (vaddr = (unsigned long)(buffer);
	     vaddr < ((unsigned long)(buffer) + (page_cnt * PAGE_SIZE));
	     vaddr += PAGE_SIZE) {
		vpg = vmalloc_to_page((const void *)vaddr);
		SetPageReserved(vpg);
	}

	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL)
		pktlog_release_buf(scn);

	pl_info->buf = buffer;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}

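/**
 * pktlog_release_buf() - free the packet log ring buffer
 * @scn: HIF opaque handle
 *
 * Clears the reserved flag on each backing page and vfree()s the buffer
 * allocated by pktlog_alloc_buf(). Callers serialize against the logging
 * path with pl_info->log_lock.
 */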
void pktlog_release_buf(struct hif_opaque_softc *scn)
{
	unsigned long page_cnt;
	unsigned long vaddr;
	struct page *vpg;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return;
	}

	if (!pl_dev->pl_info) {
		qdf_print("%s: invalid pl_info handle", __func__);
		return;
	}

	pl_info = pl_dev->pl_info;

	page_cnt = ((sizeof(*(pl_info->buf)) + pl_info->buf_size) /
		    PAGE_SIZE) + 1;

	for (vaddr = (unsigned long)(pl_info->buf);
	     vaddr < (unsigned long)(pl_info->buf) + (page_cnt * PAGE_SIZE);
	     vaddr += PAGE_SIZE) {
		vpg = vmalloc_to_page((const void *)vaddr);
		ClearPageReserved(vpg);
	}

	vfree(pl_info->buf);
	pl_info->buf = NULL;
}

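/**
 * pktlog_cleanup() - tear down per-adapter pktlog state
 * @pl_info: packet log info structure
 *
 * Disables logging and destroys the spinlock and mutex created at attach.
 */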
static void pktlog_cleanup(struct ath_pktlog_info *pl_info)
{
	pl_info->log_state = 0;
	PKTLOG_LOCK_DESTROY(pl_info);
	mutex_destroy(&pl_info->pktlog_mutex);
}

/* sysctl procfs handler to enable pktlog */
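/*
 * A write parsed by QDF_SYSCTL_PROC_DOINTVEC is forwarded to
 * pl_dev->pl_funcs->pktlog_enable(); a read returns the current value.
 * Illustrative use from the shell (the top-level directory is
 * PKTLOG_PROC_DIR, i.e. "ath_pktlog" optionally suffixed by MULTI_IF_NAME,
 * and <adapter> is pl_dev->name):
 *
 *   echo 1 > /proc/sys/ath_pktlog/<adapter>/enable
 */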
static int
qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos)
{
	int ret, enable;
	ol_ath_generic_softc_handle scn;
	struct pktlog_dev_t *pl_dev;

	mutex_lock(&proc_mutex);
	scn = (ol_ath_generic_softc_handle) ctl->extra1;

	if (!scn) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	ctl->data = &enable;
	ctl->maxlen = sizeof(enable);

	if (write) {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret == 0) {
			ret = pl_dev->pl_funcs->pktlog_enable(
					(struct hif_opaque_softc *)scn, enable,
					cds_is_packet_log_enabled(), 0, 1);
		} else {
			QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
				  "Line:%d %s:proc_dointvec failed reason %d",
				  __LINE__, __func__, ret);
		}
	} else {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret)
			QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
				  "Line:%d %s:proc_dointvec failed reason %d",
				  __LINE__, __func__, ret);
	}

	ctl->data = NULL;
	ctl->maxlen = 0;
	mutex_unlock(&proc_mutex);

	return ret;
}

static int get_pktlog_bufsize(struct pktlog_dev_t *pl_dev)
{
	return pl_dev->pl_info->buf_size;
}

/* sysctl procfs handler to set/get pktlog size */
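/*
 * On write, the parsed value is handed to pl_funcs->pktlog_setsize();
 * on read, the current pl_info->buf_size is reported, e.g. (illustrative
 * path, same layout as the "enable" entry above):
 *
 *   cat /proc/sys/ath_pktlog/<adapter>/size
 */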
static int
qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos)
{
	int ret, size;
	ol_ath_generic_softc_handle scn;
	struct pktlog_dev_t *pl_dev;

	mutex_lock(&proc_mutex);
	scn = (ol_ath_generic_softc_handle) ctl->extra1;

	if (!scn) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	ctl->data = &size;
	ctl->maxlen = sizeof(size);

	if (write) {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret == 0)
			ret = pl_dev->pl_funcs->pktlog_setsize(
					(struct hif_opaque_softc *)scn, size);
	} else {
		size = get_pktlog_bufsize(pl_dev);
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
	}

	ctl->data = NULL;
	ctl->maxlen = 0;
	mutex_unlock(&proc_mutex);

	return ret;
}

/* Register sysctl table */
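/*
 * The sysctls[] array is laid out as two nested directories followed by
 * the leaf entries: [0] is the PKTLOG_PROC_DIR directory, [2] the
 * per-adapter directory, and [4]..[12] the "enable", "size", "options",
 * "sack_thr", "tail_length", "thruput_thresh", "phyerr_thresh",
 * "per_thresh" and "trigger_interval" entries. Indices [1], [3] and [13]
 * stay zeroed as the NULL terminators register_sysctl_table() expects.
 */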
static int pktlog_sysctl_register(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info_lnx *pl_info_lnx;
	char *proc_name;

	if (pl_dev) {
		pl_info_lnx = PL_INFO_LNX(pl_dev->pl_info);
		proc_name = pl_dev->name;
	} else {
		pl_info_lnx = PL_INFO_LNX(g_pktlog_info);
		proc_name = PKTLOG_PROC_SYSTEM;
	}

	/*
	 * Setup the sysctl table for creating the following sysctl entries:
	 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/enable for enabling/disabling
	 * pktlog
	 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/size for changing the buffer size
	 */
	memset(pl_info_lnx->sysctls, 0, sizeof(pl_info_lnx->sysctls));
	pl_info_lnx->sysctls[0].procname = PKTLOG_PROC_DIR;
	pl_info_lnx->sysctls[0].mode = PKTLOG_PROCSYS_DIR_PERM;
	pl_info_lnx->sysctls[0].child = &pl_info_lnx->sysctls[2];

	/* [1] is NULL terminator */
	pl_info_lnx->sysctls[2].procname = proc_name;
	pl_info_lnx->sysctls[2].mode = PKTLOG_PROCSYS_DIR_PERM;
	pl_info_lnx->sysctls[2].child = &pl_info_lnx->sysctls[4];

	/* [3] is NULL terminator */
	pl_info_lnx->sysctls[4].procname = "enable";
	pl_info_lnx->sysctls[4].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[4].proc_handler = ath_sysctl_pktlog_enable;
	pl_info_lnx->sysctls[4].extra1 = scn;

	pl_info_lnx->sysctls[5].procname = "size";
	pl_info_lnx->sysctls[5].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[5].proc_handler = ath_sysctl_pktlog_size;
	pl_info_lnx->sysctls[5].extra1 = scn;

	pl_info_lnx->sysctls[6].procname = "options";
	pl_info_lnx->sysctls[6].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[6].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[6].data = &pl_info_lnx->info.options;
	pl_info_lnx->sysctls[6].maxlen = sizeof(pl_info_lnx->info.options);

	pl_info_lnx->sysctls[7].procname = "sack_thr";
	pl_info_lnx->sysctls[7].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[7].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[7].data = &pl_info_lnx->info.sack_thr;
	pl_info_lnx->sysctls[7].maxlen = sizeof(pl_info_lnx->info.sack_thr);

	pl_info_lnx->sysctls[8].procname = "tail_length";
	pl_info_lnx->sysctls[8].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[8].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[8].data = &pl_info_lnx->info.tail_length;
	pl_info_lnx->sysctls[8].maxlen = sizeof(pl_info_lnx->info.tail_length);

	pl_info_lnx->sysctls[9].procname = "thruput_thresh";
	pl_info_lnx->sysctls[9].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[9].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[9].data = &pl_info_lnx->info.thruput_thresh;
	pl_info_lnx->sysctls[9].maxlen =
		sizeof(pl_info_lnx->info.thruput_thresh);

	pl_info_lnx->sysctls[10].procname = "phyerr_thresh";
	pl_info_lnx->sysctls[10].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[10].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[10].data = &pl_info_lnx->info.phyerr_thresh;
	pl_info_lnx->sysctls[10].maxlen =
		sizeof(pl_info_lnx->info.phyerr_thresh);

	pl_info_lnx->sysctls[11].procname = "per_thresh";
	pl_info_lnx->sysctls[11].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[11].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[11].data = &pl_info_lnx->info.per_thresh;
	pl_info_lnx->sysctls[11].maxlen = sizeof(pl_info_lnx->info.per_thresh);

	pl_info_lnx->sysctls[12].procname = "trigger_interval";
	pl_info_lnx->sysctls[12].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[12].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[12].data = &pl_info_lnx->info.trigger_interval;
	pl_info_lnx->sysctls[12].maxlen =
		sizeof(pl_info_lnx->info.trigger_interval);
	/* [13] is NULL terminator */

	/* and register everything */
	/* register_sysctl_table changed from 2.6.21 onwards */
	pl_info_lnx->sysctl_header =
		register_sysctl_table(pl_info_lnx->sysctls);

	if (!pl_info_lnx->sysctl_header) {
		printk("%s: failed to register sysctls!\n", proc_name);
		return -EINVAL;
	}

	return 0;
}

/*
 * Initialize logging for the system or an adapter.
 * The scn parameter should be NULL for system-wide logging.
 */
static int pktlog_attach(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info_lnx *pl_info_lnx;
	char *proc_name;
	struct proc_dir_entry *proc_entry;

	/* Get the pktlog device handle; per-adapter info is allocated below */
	pl_dev = get_pktlog_handle();

	if (pl_dev != NULL) {
		pl_info_lnx = kmalloc(sizeof(*pl_info_lnx), GFP_KERNEL);
		if (pl_info_lnx == NULL) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				 "%s: Allocation failed for pl_info\n",
				 __func__);
			goto attach_fail1;
		}

		pl_dev->pl_info = &pl_info_lnx->info;
		pl_dev->name = WLANDEV_BASENAME;
		proc_name = pl_dev->name;

		if (!pl_dev->pl_funcs)
			pl_dev->pl_funcs = &ol_pl_funcs;

		/*
		 * Valid for both direct attach and offload architecture
		 */
		pl_dev->pl_funcs->pktlog_init(scn);
	} else {
		return -EINVAL;
	}

	/*
	 * initialize log info
	 * might be good to move to pktlog_init
	 */
	/* pl_dev->tgt_pktlog_alloced = false; */
	pl_info_lnx->proc_entry = NULL;
	pl_info_lnx->sysctl_header = NULL;

	proc_entry = proc_create_data(proc_name, PKTLOG_PROC_PERM,
			g_pktlog_pde, &pktlog_fops,
			&pl_info_lnx->info);

	if (proc_entry == NULL) {
		printk(PKTLOG_TAG "%s: create_proc_entry failed for %s\n",
				__func__, proc_name);
		goto attach_fail1;
	}

	pl_info_lnx->proc_entry = proc_entry;

	if (pktlog_sysctl_register(scn)) {
		printk(PKTLOG_TAG "%s: sysctl register failed for %s\n",
				__func__, proc_name);
		goto attach_fail2;
	}

	return 0;

attach_fail2:
	remove_proc_entry(proc_name, g_pktlog_pde);

attach_fail1:
	if (pl_dev)
		kfree(pl_dev->pl_info);

	return -EINVAL;
}

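/**
 * pktlog_sysctl_unregister() - remove the sysctl entries added at attach
 * @pl_dev: pktlog device handle
 */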
static void pktlog_sysctl_unregister(struct pktlog_dev_t *pl_dev)
{
	struct ath_pktlog_info_lnx *pl_info_lnx;

	if (!pl_dev) {
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return;
	}

	pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) :
		      PL_INFO_LNX(g_pktlog_info);

	if (pl_info_lnx->sysctl_header) {
		unregister_sysctl_table(pl_info_lnx->sysctl_header);
		pl_info_lnx->sysctl_header = NULL;
	}
}

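/**
 * pktlog_detach() - undo pktlog_attach() for the adapter
 * @scn: HIF opaque handle
 *
 * Removes the proc entry and sysctl table, releases the log buffer if one
 * was allocated, and frees the per-adapter info structure.
 */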
static void pktlog_detach(struct hif_opaque_softc *scn)
{
	struct ath_pktlog_info *pl_info;
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return;
	}

	pl_info = pl_dev->pl_info;
	remove_proc_entry(WLANDEV_BASENAME, g_pktlog_pde);
	pktlog_sysctl_unregister(pl_dev);

	spin_lock_bh(&pl_info->log_lock);

	if (pl_info->buf) {
		pktlog_release_buf(scn);
		pl_dev->tgt_pktlog_alloced = false;
	}
	spin_unlock_bh(&pl_info->log_lock);
	pktlog_cleanup(pl_info);

	if (pl_dev) {
		kfree(pl_info);
		pl_dev->pl_info = NULL;
	}
}

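/**
 * __pktlog_open() - prepare the proc entry for a read pass
 * @i: inode of the proc entry
 * @f: file pointer
 *
 * Takes a module reference, saves the current log_state and internally
 * disables logging so the buffer contents stay stable while user space
 * reads them; the saved state is restored in __pktlog_release().
 */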
static int __pktlog_open(struct inode *i, struct file *f)
{
	struct hif_opaque_softc *scn;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	int ret = 0;

	PKTLOG_MOD_INC_USE_COUNT;
	pl_info = (struct ath_pktlog_info *)
			PDE_DATA(f->f_path.dentry->d_inode);

	if (!pl_info) {
		pr_err("%s: pl_info NULL", __func__);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state != PKTLOG_OPR_NOT_IN_PROGRESS) {
		pr_info("%s: plinfo state (%d) != PKTLOG_OPR_NOT_IN_PROGRESS",
			__func__, pl_info->curr_pkt_state);
		return -EBUSY;
	}

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_START;
	scn = cds_get_context(QDF_MODULE_ID_HIF);
	if (!scn) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	pl_info->init_saved_state = pl_info->log_state;
	if (!pl_info->log_state) {
		/* Pktlog is already disabled.
		 * Proceed to read directly.
		 */
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
		return ret;
	}
	/* Disable the pktlog internally. */
	ret = pl_dev->pl_funcs->pktlog_disable(scn);
	pl_info->log_state = 0;
	pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	return ret;
}

static int pktlog_open(struct inode *i, struct file *f)
{
	int ret;

	cds_ssr_protect(__func__);
	ret = __pktlog_open(i, f);
	cds_ssr_unprotect(__func__);

	return ret;
}

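/**
 * __pktlog_release() - finish a read pass on the proc entry
 * @i: inode of the proc entry
 * @f: file pointer
 *
 * Drops the module reference, clears the log buffer, restores the
 * log_state saved in __pktlog_open() and re-enables pktlog with it.
 */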
static int __pktlog_release(struct inode *i, struct file *f)
{
	struct hif_opaque_softc *scn;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	int ret = 0;

	PKTLOG_MOD_DEC_USE_COUNT;

	pl_info = (struct ath_pktlog_info *)
			PDE_DATA(f->f_path.dentry->d_inode);

	if (!pl_info)
		return -EINVAL;

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	scn = cds_get_context(QDF_MODULE_ID_HIF);
	if (!scn) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE;
	/* Clear the pktlog buffer. */
	pktlog_clearbuff(scn, true);
	pl_info->log_state = pl_info->init_saved_state;
	pl_info->init_saved_state = 0;

	/* Enable pktlog again. */
	ret = pl_dev->pl_funcs->pktlog_enable(
			(struct hif_opaque_softc *)scn, pl_info->log_state,
			cds_is_packet_log_enabled(), 0, 1);

	if (ret != 0)
		pr_warn("%s: pktlog cannot be enabled. ret value %d\n",
			__func__, ret);

	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return ret;
}

static int pktlog_release(struct inode *i, struct file *f)
{
	int ret;

	cds_ssr_protect(__func__);
	ret = __pktlog_release(i, f);
	cds_ssr_unprotect(__func__);

	return ret;
}

#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

/**
 * pktlog_read_proc_entry() - read data from the proc entry into the
 * reader's buffer
 * @buf:     Reader's buffer
 * @nbytes:  Number of bytes to read
 * @ppos:    Offset within the driver's buffer
 * @pl_info: Packet log information pointer
 * @read_complete: Boolean indicating whether the read is complete
 *
 * This function reads data from the proc entry into the reader's buffer.
 * Its functionality is similar to 'pktlog_read', which instead copies the
 * data to a user-space buffer with copy_to_user().
 *
 * Return: Number of bytes read from the buffer
 */
ssize_t
pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos,
		struct ath_pktlog_info *pl_info, bool *read_complete)
{
	size_t bufhdr_size;
	size_t count = 0, ret_val = 0;
	int rem_len;
	int start_offset, end_offset;
	int fold_offset, ppos_data, cur_rd_offset, cur_wr_offset;
	struct ath_pktlog_buf *log_buf;

	spin_lock_bh(&pl_info->log_lock);
	log_buf = pl_info->buf;

	*read_complete = false;

	if (log_buf == NULL) {
		*read_complete = true;
		spin_unlock_bh(&pl_info->log_lock);
		return 0;
	}

	if (*ppos == 0 && pl_info->log_state) {
		pl_info->saved_state = pl_info->log_state;
		pl_info->log_state = 0;
	}

	bufhdr_size = sizeof(log_buf->bufhdr);

	/* copy valid log entries from the circular buffer into the reader's buffer */
	rem_len = nbytes;
	count = 0;

	if (*ppos < bufhdr_size) {
		count = MIN((bufhdr_size - *ppos), rem_len);
		qdf_mem_copy(buf, ((char *)&log_buf->bufhdr) + *ppos,
				count);
		rem_len -= count;
		ret_val += count;
	}

	start_offset = log_buf->rd_offset;
	cur_wr_offset = log_buf->wr_offset;

	if ((rem_len == 0) || (start_offset < 0))
		goto rd_done;

	fold_offset = -1;
	cur_rd_offset = start_offset;

	/* Find the last offset and fold-offset if the buffer is folded */
	do {
		struct ath_pktlog_hdr *log_hdr;
		int log_data_offset;

		log_hdr = (struct ath_pktlog_hdr *) (log_buf->log_data +
				cur_rd_offset);

		log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);

		if ((fold_offset == -1)
				&& ((pl_info->buf_size - log_data_offset)
					<= log_hdr->size))
			fold_offset = log_data_offset - 1;

		PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);

		if ((fold_offset == -1) && (cur_rd_offset == 0)
				&& (cur_rd_offset != cur_wr_offset))
			fold_offset = log_data_offset + log_hdr->size - 1;

		end_offset = log_data_offset + log_hdr->size - 1;
	} while (cur_rd_offset != cur_wr_offset);

	ppos_data = *ppos + ret_val - bufhdr_size + start_offset;

	if (fold_offset == -1) {
		if (ppos_data > end_offset)
			goto rd_done;

		count = MIN(rem_len, (end_offset - ppos_data + 1));
		qdf_mem_copy(buf + ret_val,
				log_buf->log_data + ppos_data,
				count);
		ret_val += count;
		rem_len -= count;
	} else {
		if (ppos_data <= fold_offset) {
			count = MIN(rem_len, (fold_offset - ppos_data + 1));
			qdf_mem_copy(buf + ret_val,
					log_buf->log_data + ppos_data,
					count);
			ret_val += count;
			rem_len -= count;
		}

		if (rem_len == 0)
			goto rd_done;

		ppos_data =
			*ppos + ret_val - (bufhdr_size +
					(fold_offset - start_offset + 1));

		if (ppos_data <= end_offset) {
			count = MIN(rem_len, (end_offset - ppos_data + 1));
			qdf_mem_copy(buf + ret_val,
					log_buf->log_data + ppos_data,
					count);
			ret_val += count;
			rem_len -= count;
		}
	}

rd_done:
	if ((ret_val < nbytes) && pl_info->saved_state) {
		pl_info->log_state = pl_info->saved_state;
		pl_info->saved_state = 0;
	}
	*ppos += ret_val;

	if (ret_val == 0) {
		/* The write pointer might have been updated during the read.
		 * If new data was written, do not reset the pointers;
		 * reading can continue from the current offset.
		 */
		if (cur_wr_offset != log_buf->wr_offset) {
			*read_complete = false;
		} else {
			pl_info->buf->rd_offset = -1;
			pl_info->buf->wr_offset = 0;
			pl_info->buf->bytes_written = 0;
			pl_info->buf->offset = PKTLOG_READ_OFFSET;
			*read_complete = true;
		}
	}
	spin_unlock_bh(&pl_info->log_lock);
	return ret_val;
}

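/**
 * __pktlog_read() - copy the pktlog buffer header and log data to user space
 * @file:   proc file pointer
 * @buf:    user-space buffer
 * @nbytes: number of bytes to read
 * @ppos:   read offset
 *
 * Same traversal as pktlog_read_proc_entry(), but the destination is a
 * user-space buffer, so log_lock is dropped around each copy_to_user()
 * call. Reading is refused (-EINVAL) while logging is still enabled.
 */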
static ssize_t
__pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
	size_t bufhdr_size;
	size_t count = 0, ret_val = 0;
	int rem_len;
	int start_offset, end_offset;
	int fold_offset, ppos_data, cur_rd_offset;
	struct ath_pktlog_info *pl_info;
	struct ath_pktlog_buf *log_buf;

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	pl_info = (struct ath_pktlog_info *)
					PDE_DATA(file->f_path.dentry->d_inode);
	if (!pl_info)
		return 0;

	spin_lock_bh(&pl_info->log_lock);
	log_buf = pl_info->buf;

	if (log_buf == NULL) {
		spin_unlock_bh(&pl_info->log_lock);
		return 0;
	}

	if (pl_info->log_state) {
		/* Reading is not allowed while logging is in progress.
		 * When using cat on this entry, send the pktlog disable
		 * command first.
		 */
		spin_unlock_bh(&pl_info->log_lock);
		return -EINVAL;
	}

	if (*ppos == 0 && pl_info->log_state) {
		pl_info->saved_state = pl_info->log_state;
		pl_info->log_state = 0;
	}

	bufhdr_size = sizeof(log_buf->bufhdr);

	/* copy valid log entries from circular buffer into user space */

	rem_len = nbytes;
	count = 0;

	if (*ppos < bufhdr_size) {
		count = QDF_MIN((bufhdr_size - *ppos), rem_len);
		spin_unlock_bh(&pl_info->log_lock);
		if (copy_to_user(buf, ((char *)&log_buf->bufhdr) + *ppos,
				 count)) {
			return -EFAULT;
		}
		rem_len -= count;
		ret_val += count;
		spin_lock_bh(&pl_info->log_lock);
	}

	start_offset = log_buf->rd_offset;

	if ((rem_len == 0) || (start_offset < 0))
		goto rd_done;

	fold_offset = -1;
	cur_rd_offset = start_offset;

	/* Find the last offset and fold-offset if the buffer is folded */
	do {
		struct ath_pktlog_hdr *log_hdr;
		int log_data_offset;

		log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data +
						    cur_rd_offset);

		log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);

		if ((fold_offset == -1)
		    && ((pl_info->buf_size - log_data_offset)
			<= log_hdr->size))
			fold_offset = log_data_offset - 1;

		PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);

		if ((fold_offset == -1) && (cur_rd_offset == 0)
		    && (cur_rd_offset != log_buf->wr_offset))
			fold_offset = log_data_offset + log_hdr->size - 1;

		end_offset = log_data_offset + log_hdr->size - 1;
	} while (cur_rd_offset != log_buf->wr_offset);

	ppos_data = *ppos + ret_val - bufhdr_size + start_offset;

	if (fold_offset == -1) {
		if (ppos_data > end_offset)
			goto rd_done;

		count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
		spin_unlock_bh(&pl_info->log_lock);

		if (copy_to_user(buf + ret_val,
				 log_buf->log_data + ppos_data, count)) {
			return -EFAULT;
		}

		ret_val += count;
		rem_len -= count;
		spin_lock_bh(&pl_info->log_lock);
	} else {
		if (ppos_data <= fold_offset) {
			count = QDF_MIN(rem_len, (fold_offset - ppos_data + 1));
			spin_unlock_bh(&pl_info->log_lock);
			if (copy_to_user(buf + ret_val,
					 log_buf->log_data + ppos_data,
					 count)) {
				return -EFAULT;
			}
			ret_val += count;
			rem_len -= count;
			spin_lock_bh(&pl_info->log_lock);
		}

		if (rem_len == 0)
			goto rd_done;

		ppos_data =
			*ppos + ret_val - (bufhdr_size +
					   (fold_offset - start_offset + 1));

		if (ppos_data <= end_offset) {
			count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
			spin_unlock_bh(&pl_info->log_lock);
			if (copy_to_user(buf + ret_val,
					 log_buf->log_data + ppos_data,
					 count)) {
				return -EFAULT;
			}
			ret_val += count;
			rem_len -= count;
			spin_lock_bh(&pl_info->log_lock);
		}
	}

rd_done:
	if ((ret_val < nbytes) && pl_info->saved_state) {
		pl_info->log_state = pl_info->saved_state;
		pl_info->saved_state = 0;
	}
	*ppos += ret_val;

	spin_unlock_bh(&pl_info->log_lock);
	return ret_val;
}

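/**
 * pktlog_read() - proc read entry point
 *
 * Wrapper around __pktlog_read() that serializes readers with
 * pl_info->pktlog_mutex and brackets the call with SSR protection.
 */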
static ssize_t
pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
	ssize_t ret;
	struct ath_pktlog_info *pl_info;

	pl_info = (struct ath_pktlog_info *)
			PDE_DATA(file->f_path.dentry->d_inode);
	if (!pl_info)
		return 0;

	cds_ssr_protect(__func__);
	mutex_lock(&pl_info->pktlog_mutex);
	ret = __pktlog_read(file, buf, nbytes, ppos);
	mutex_unlock(&pl_info->pktlog_mutex);
	cds_ssr_unprotect(__func__);
	return ret;
}

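/**
 * pktlogmod_init() - module-level pktlog initialization
 * @context: HIF opaque handle
 *
 * Creates the PKTLOG_PROC_DIR proc directory and attaches the per-adapter
 * pktlog entries beneath it.
 *
 * Return: 0 on success; a negative value on failure, in which case the
 * proc directory is removed again.
 */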
int pktlogmod_init(void *context)
{
	int ret;

	/* create the proc directory entry */
	g_pktlog_pde = proc_mkdir(PKTLOG_PROC_DIR, NULL);

	if (g_pktlog_pde == NULL) {
		printk(PKTLOG_TAG "%s: proc_mkdir failed\n", __func__);
		return -EPERM;
	}

	/* Attach packet log */
	ret = pktlog_attach((struct hif_opaque_softc *)context);

	/* If packet log init failed */
	if (ret)
		goto attach_fail;

	return ret;

attach_fail:
	remove_proc_entry(PKTLOG_PROC_DIR, NULL);
	g_pktlog_pde = NULL;

	return ret;
}

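/**
 * pktlogmod_exit() - module-level pktlog teardown
 * @context: HIF opaque handle
 *
 * Detaches the adapter and removes the PKTLOG_PROC_DIR proc directory
 * created by pktlogmod_init().
 */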
void pktlogmod_exit(void *context)
{
	if (g_pktlog_pde == NULL)
		return;

	pktlog_detach((struct hif_opaque_softc *)context);

	/*
	 *  pdev kill needs to be implemented
	 */
	remove_proc_entry(PKTLOG_PROC_DIR, NULL);
}
#endif /* REMOVE_PKT_LOG */