xref: /wlan-dirver/qca-wifi-host-cmn/utils/pktlog/linux_ac.c (revision 6d768494e5ce14eb1603a695c86739d12ecc6ec2)
1 /*
2  * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef REMOVE_PKT_LOG
20 #ifndef EXPORT_SYMTAB
21 #define EXPORT_SYMTAB
22 #endif
23 #ifndef __KERNEL__
24 #define __KERNEL__
25 #endif
26 /*
27  * Linux-specific implementation of pktlog for 802.11ac
28  */
29 #include <linux/kernel.h>
30 #include <linux/init.h>
31 #include <linux/module.h>
32 #include <linux/vmalloc.h>
33 #include <linux/proc_fs.h>
34 #include <pktlog_ac_i.h>
35 #include <pktlog_ac_fmt.h>
36 #include "i_host_diag_core_log.h"
37 #include "host_diag_core_log.h"
38 #include "ani_global.h"
39 
40 #define PKTLOG_DEVNAME_SIZE     32
41 #define MAX_WLANDEV             1
42 
43 #ifdef MULTI_IF_NAME
44 #define PKTLOG_PROC_DIR         "ath_pktlog" MULTI_IF_NAME
45 #else
46 #define PKTLOG_PROC_DIR         "ath_pktlog"
47 #endif
48 
49 /* Permissions for creating proc entries */
50 #define PKTLOG_PROC_PERM        0444
51 #define PKTLOG_PROCSYS_DIR_PERM 0555
52 #define PKTLOG_PROCSYS_PERM     0644
53 
54 #ifndef __MOD_INC_USE_COUNT
55 #define PKTLOG_MOD_INC_USE_COUNT	do {			\
56 	if (!try_module_get(THIS_MODULE)) {			\
57 		qdf_nofl_info("try_module_get failed");	\
58 	} } while (0)
59 
60 #define PKTLOG_MOD_DEC_USE_COUNT        module_put(THIS_MODULE)
61 #else
62 #define PKTLOG_MOD_INC_USE_COUNT        MOD_INC_USE_COUNT
63 #define PKTLOG_MOD_DEC_USE_COUNT        MOD_DEC_USE_COUNT
64 #endif
65 
66 static struct ath_pktlog_info *g_pktlog_info;
67 
68 static struct proc_dir_entry *g_pktlog_pde;
69 
70 static DEFINE_MUTEX(proc_mutex);
71 
72 static int pktlog_attach(struct hif_opaque_softc *scn);
73 static void pktlog_detach(struct hif_opaque_softc *scn);
74 static int pktlog_open(struct inode *i, struct file *f);
75 static int pktlog_release(struct inode *i, struct file *f);
76 static ssize_t pktlog_read(struct file *file, char *buf, size_t nbytes,
77 			   loff_t *ppos);
78 
79 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
80 static const struct proc_ops pktlog_fops = {
81 	.proc_open = pktlog_open,
82 	.proc_release = pktlog_release,
83 	.proc_read = pktlog_read,
84 };
85 #else
86 static struct file_operations pktlog_fops = {
87 	.open = pktlog_open,
88 	.release = pktlog_release,
89 	.read = pktlog_read,
90 };
91 #endif
92 
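/**
 * pktlog_disable_adapter_logging() - stop logging on the adapter
 * @scn: opaque HIF context (unused here; the handle is looked up globally)
 *
 * Clears pl_info->log_state so that no further packet log events are
 * recorded for the adapter.
 */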
93 void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn)
94 {
95 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
96 	if (pl_dev)
97 		pl_dev->pl_info->log_state = 0;
98 }
99 
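/**
 * pktlog_alloc_buf() - allocate the packet log ring buffer
 * @scn: opaque HIF context
 *
 * Allocates a page-aligned buffer large enough for the ath_pktlog_buf
 * header plus pl_info->buf_size bytes of log data, marks each backing
 * page reserved, and installs it as pl_info->buf under log_lock.
 *
 * Return: 0 on success, -EINVAL if the buffer is already in use or the
 * pktlog handle is invalid, -ENOMEM on allocation failure.
 */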
100 int pktlog_alloc_buf(struct hif_opaque_softc *scn)
101 {
102 	uint32_t page_cnt;
103 	unsigned long vaddr;
104 	struct page *vpg;
105 	struct pktlog_dev_t *pl_dev;
106 	struct ath_pktlog_info *pl_info;
107 	struct ath_pktlog_buf *buffer;
108 
109 	pl_dev = get_pktlog_handle();
110 
111 	if (!pl_dev) {
112 		qdf_nofl_info(PKTLOG_TAG
113 			      "%s: pdev_txrx_handle->pl_dev is null", __func__);
114 		return -EINVAL;
115 	}
116 
117 	pl_info = pl_dev->pl_info;
118 
119 	page_cnt = (sizeof(*(pl_info->buf)) + pl_info->buf_size) / PAGE_SIZE;
120 
121 	qdf_spin_lock_bh(&pl_info->log_lock);
122 	if (pl_info->buf) {
123 		qdf_spin_unlock_bh(&pl_info->log_lock);
124 		qdf_nofl_info(PKTLOG_TAG "Buffer is already in use");
125 		return -EINVAL;
126 	}
127 	qdf_spin_unlock_bh(&pl_info->log_lock);
128 
129 	buffer = vmalloc((page_cnt + 2) * PAGE_SIZE);
130 	if (!buffer) {
131 		return -ENOMEM;
132 	}
133 
134 	buffer = (struct ath_pktlog_buf *)
135 		       (((unsigned long)(buffer) + PAGE_SIZE - 1)
136 			& PAGE_MASK);
137 
138 	for (vaddr = (unsigned long)(buffer);
139 	     vaddr < ((unsigned long)(buffer) + (page_cnt * PAGE_SIZE));
140 	     vaddr += PAGE_SIZE) {
141 		vpg = vmalloc_to_page((const void *)vaddr);
142 		SetPageReserved(vpg);
143 	}
144 
145 	qdf_spin_lock_bh(&pl_info->log_lock);
146 	if (pl_info->buf)
147 		pktlog_release_buf(scn);
148 
149 	pl_info->buf =  buffer;
150 	qdf_spin_unlock_bh(&pl_info->log_lock);
151 	return 0;
152 }
153 
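/**
 * pktlog_release_buf() - free the packet log ring buffer
 * @scn: opaque HIF context (unused here; the handle is looked up globally)
 *
 * Clears the reserved flag on every page backing pl_info->buf, frees the
 * buffer with vfree() and resets pl_info->buf to NULL. Callers in this
 * file hold pl_info->log_lock around this call.
 */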
154 void pktlog_release_buf(struct hif_opaque_softc *scn)
155 {
156 	unsigned long page_cnt;
157 	unsigned long vaddr;
158 	struct page *vpg;
159 	struct pktlog_dev_t *pl_dev;
160 	struct ath_pktlog_info *pl_info;
161 
162 	pl_dev = get_pktlog_handle();
163 
164 	if (!pl_dev) {
165 		qdf_print("%s: invalid pl_dev handle", __func__);
166 		return;
167 	}
168 
169 	if (!pl_dev->pl_info) {
170 		qdf_print("%s: invalid pl_info handle", __func__);
171 		return;
172 	}
173 
174 	pl_info = pl_dev->pl_info;
175 
176 	page_cnt = ((sizeof(*(pl_info->buf)) + pl_info->buf_size) /
177 		    PAGE_SIZE) + 1;
178 
179 	for (vaddr = (unsigned long)(pl_info->buf);
180 	     vaddr < (unsigned long)(pl_info->buf) + (page_cnt * PAGE_SIZE);
181 	     vaddr += PAGE_SIZE) {
182 		vpg = vmalloc_to_page((const void *)vaddr);
183 		ClearPageReserved(vpg);
184 	}
185 
186 	vfree(pl_info->buf);
187 	pl_info->buf = NULL;
188 }
189 
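/**
 * pktlog_cleanup() - tear down per-adapter pktlog state
 * @pl_info: packet log information pointer
 *
 * Disables logging and destroys the spinlock and mutex that protect the
 * packet log info structure.
 */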
190 static void pktlog_cleanup(struct ath_pktlog_info *pl_info)
191 {
192 	pl_info->log_state = 0;
193 	PKTLOG_LOCK_DESTROY(pl_info);
194 	mutex_destroy(&pl_info->pktlog_mutex);
195 }
196 
197 /* sysctl procfs handler to enable pktlog */
198 static int
199 qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos)
200 {
201 	int ret, enable;
202 	ol_ath_generic_softc_handle scn;
203 	struct pktlog_dev_t *pl_dev;
204 
205 	mutex_lock(&proc_mutex);
206 	scn = (ol_ath_generic_softc_handle) ctl->extra1;
207 
208 	if (!scn) {
209 		mutex_unlock(&proc_mutex);
210 		qdf_nofl_info("%s: Invalid scn context", __func__);
211 		ASSERT(0);
212 		return -EINVAL;
213 	}
214 
215 	pl_dev = get_pktlog_handle();
216 
217 	if (!pl_dev) {
218 		mutex_unlock(&proc_mutex);
219 		qdf_nofl_info("%s: Invalid pktlog context", __func__);
220 		ASSERT(0);
221 		return -ENODEV;
222 	}
223 
224 	ctl->data = &enable;
225 	ctl->maxlen = sizeof(enable);
226 
227 	if (write) {
228 		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
229 					       lenp, ppos);
230 		if (ret == 0) {
231 			ret = pl_dev->pl_funcs->pktlog_enable(
232 					(struct hif_opaque_softc *)scn, enable,
233 					cds_is_packet_log_enabled(), 0, 1);
234 		}
235 		else
236 			QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
237 				  "Line:%d %s:proc_dointvec failed reason %d",
238 				   __LINE__, __func__, ret);
239 	} else {
240 		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
241 					       lenp, ppos);
242 		if (ret)
243 			QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
244 				  "Line:%d %s:proc_dointvec failed reason %d",
245 				   __LINE__, __func__, ret);
246 	}
247 
248 	ctl->data = NULL;
249 	ctl->maxlen = 0;
250 	mutex_unlock(&proc_mutex);
251 
252 	return ret;
253 }
254 
255 static int get_pktlog_bufsize(struct pktlog_dev_t *pl_dev)
256 {
257 	return pl_dev->pl_info->buf_size;
258 }
259 
260 /* sysctl procfs handler to set/get pktlog size */
261 static int
262 qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos)
263 {
264 	int ret, size;
265 	ol_ath_generic_softc_handle scn;
266 	struct pktlog_dev_t *pl_dev;
267 
268 	mutex_lock(&proc_mutex);
269 	scn = (ol_ath_generic_softc_handle) ctl->extra1;
270 
271 	if (!scn) {
272 		mutex_unlock(&proc_mutex);
273 		qdf_nofl_info("%s: Invalid scn context", __func__);
274 		ASSERT(0);
275 		return -EINVAL;
276 	}
277 
278 	pl_dev = get_pktlog_handle();
279 
280 	if (!pl_dev) {
281 		mutex_unlock(&proc_mutex);
282 		qdf_nofl_info("%s: Invalid pktlog handle", __func__);
283 		ASSERT(0);
284 		return -ENODEV;
285 	}
286 
287 	ctl->data = &size;
288 	ctl->maxlen = sizeof(size);
289 
290 	if (write) {
291 		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
292 					       lenp, ppos);
293 		if (ret == 0)
294 			ret = pl_dev->pl_funcs->pktlog_setsize(
295 					(struct hif_opaque_softc *)scn, size);
296 	} else {
297 		size = get_pktlog_bufsize(pl_dev);
298 		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
299 					       lenp, ppos);
300 	}
301 
302 	ctl->data = NULL;
303 	ctl->maxlen = 0;
304 	mutex_unlock(&proc_mutex);
305 
306 	return ret;
307 }
308 
309 /* Register sysctl table */
310 static int pktlog_sysctl_register(struct hif_opaque_softc *scn)
311 {
312 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
313 	struct ath_pktlog_info_lnx *pl_info_lnx;
314 	char *proc_name;
315 
316 	if (pl_dev) {
317 		pl_info_lnx = PL_INFO_LNX(pl_dev->pl_info);
318 		proc_name = pl_dev->name;
319 	} else {
320 		pl_info_lnx = PL_INFO_LNX(g_pktlog_info);
321 		proc_name = PKTLOG_PROC_SYSTEM;
322 	}
323 
324 	/*
325 	 * Set up the sysctl table to create the following sysctl entries:
326 	 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/enable for enabling/disabling
327 	 * pktlog
328 	 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/size for changing the buffer size
329 	 */
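	/*
	 * Illustrative usage only; PKTLOG_PROC_DIR is "ath_pktlog" (with
	 * MULTI_IF_NAME appended when defined) and <adapter> is proc_name:
	 *   echo 1 > /proc/sys/ath_pktlog/<adapter>/enable
	 *   cat /proc/sys/ath_pktlog/<adapter>/size
	 */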
330 	memset(pl_info_lnx->sysctls, 0, sizeof(pl_info_lnx->sysctls));
331 	pl_info_lnx->sysctls[0].procname = PKTLOG_PROC_DIR;
332 	pl_info_lnx->sysctls[0].mode = PKTLOG_PROCSYS_DIR_PERM;
333 	pl_info_lnx->sysctls[0].child = &pl_info_lnx->sysctls[2];
334 
335 	/* [1] is NULL terminator */
336 	pl_info_lnx->sysctls[2].procname = proc_name;
337 	pl_info_lnx->sysctls[2].mode = PKTLOG_PROCSYS_DIR_PERM;
338 	pl_info_lnx->sysctls[2].child = &pl_info_lnx->sysctls[4];
339 
340 	/* [3] is NULL terminator */
341 	pl_info_lnx->sysctls[4].procname = "enable";
342 	pl_info_lnx->sysctls[4].mode = PKTLOG_PROCSYS_PERM;
343 	pl_info_lnx->sysctls[4].proc_handler = ath_sysctl_pktlog_enable;
344 	pl_info_lnx->sysctls[4].extra1 = scn;
345 
346 	pl_info_lnx->sysctls[5].procname = "size";
347 	pl_info_lnx->sysctls[5].mode = PKTLOG_PROCSYS_PERM;
348 	pl_info_lnx->sysctls[5].proc_handler = ath_sysctl_pktlog_size;
349 	pl_info_lnx->sysctls[5].extra1 = scn;
350 
351 	pl_info_lnx->sysctls[6].procname = "options";
352 	pl_info_lnx->sysctls[6].mode = PKTLOG_PROCSYS_PERM;
353 	pl_info_lnx->sysctls[6].proc_handler = proc_dointvec;
354 	pl_info_lnx->sysctls[6].data = &pl_info_lnx->info.options;
355 	pl_info_lnx->sysctls[6].maxlen = sizeof(pl_info_lnx->info.options);
356 
357 	pl_info_lnx->sysctls[7].procname = "sack_thr";
358 	pl_info_lnx->sysctls[7].mode = PKTLOG_PROCSYS_PERM;
359 	pl_info_lnx->sysctls[7].proc_handler = proc_dointvec;
360 	pl_info_lnx->sysctls[7].data = &pl_info_lnx->info.sack_thr;
361 	pl_info_lnx->sysctls[7].maxlen = sizeof(pl_info_lnx->info.sack_thr);
362 
363 	pl_info_lnx->sysctls[8].procname = "tail_length";
364 	pl_info_lnx->sysctls[8].mode = PKTLOG_PROCSYS_PERM;
365 	pl_info_lnx->sysctls[8].proc_handler = proc_dointvec;
366 	pl_info_lnx->sysctls[8].data = &pl_info_lnx->info.tail_length;
367 	pl_info_lnx->sysctls[8].maxlen = sizeof(pl_info_lnx->info.tail_length);
368 
369 	pl_info_lnx->sysctls[9].procname = "thruput_thresh";
370 	pl_info_lnx->sysctls[9].mode = PKTLOG_PROCSYS_PERM;
371 	pl_info_lnx->sysctls[9].proc_handler = proc_dointvec;
372 	pl_info_lnx->sysctls[9].data = &pl_info_lnx->info.thruput_thresh;
373 	pl_info_lnx->sysctls[9].maxlen =
374 		sizeof(pl_info_lnx->info.thruput_thresh);
375 
376 	pl_info_lnx->sysctls[10].procname = "phyerr_thresh";
377 	pl_info_lnx->sysctls[10].mode = PKTLOG_PROCSYS_PERM;
378 	pl_info_lnx->sysctls[10].proc_handler = proc_dointvec;
379 	pl_info_lnx->sysctls[10].data = &pl_info_lnx->info.phyerr_thresh;
380 	pl_info_lnx->sysctls[10].maxlen =
381 		sizeof(pl_info_lnx->info.phyerr_thresh);
382 
383 	pl_info_lnx->sysctls[11].procname = "per_thresh";
384 	pl_info_lnx->sysctls[11].mode = PKTLOG_PROCSYS_PERM;
385 	pl_info_lnx->sysctls[11].proc_handler = proc_dointvec;
386 	pl_info_lnx->sysctls[11].data = &pl_info_lnx->info.per_thresh;
387 	pl_info_lnx->sysctls[11].maxlen = sizeof(pl_info_lnx->info.per_thresh);
388 
389 	pl_info_lnx->sysctls[12].procname = "trigger_interval";
390 	pl_info_lnx->sysctls[12].mode = PKTLOG_PROCSYS_PERM;
391 	pl_info_lnx->sysctls[12].proc_handler = proc_dointvec;
392 	pl_info_lnx->sysctls[12].data = &pl_info_lnx->info.trigger_interval;
393 	pl_info_lnx->sysctls[12].maxlen =
394 		sizeof(pl_info_lnx->info.trigger_interval);
395 	/* [13] is NULL terminator */
396 
397 	/* and register everything */
398 	/* register_sysctl_table changed from 2.6.21 onwards */
399 	pl_info_lnx->sysctl_header =
400 		register_sysctl_table(pl_info_lnx->sysctls);
401 
402 	if (!pl_info_lnx->sysctl_header) {
403 		qdf_nofl_info("%s: failed to register sysctls!", proc_name);
404 		return -EINVAL;
405 	}
406 
407 	return 0;
408 }
409 
410 /*
411  * Initialize logging for the system or an adapter.
412  * The scn parameter should be NULL for system-wide logging.
413  */
414 static int pktlog_attach(struct hif_opaque_softc *scn)
415 {
416 	struct pktlog_dev_t *pl_dev;
417 	struct ath_pktlog_info_lnx *pl_info_lnx;
418 	char *proc_name;
419 	struct proc_dir_entry *proc_entry;
420 
421 	/* Allocate pktlog dev for later use */
422 	pl_dev = get_pktlog_handle();
423 
424 	if (pl_dev) {
425 		pl_info_lnx = kmalloc(sizeof(*pl_info_lnx), GFP_KERNEL);
426 		if (!pl_info_lnx) {
427 			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
428 				 "%s: Allocation failed for pl_info",
429 				 __func__);
430 			goto attach_fail1;
431 		}
432 
433 		pl_dev->pl_info = &pl_info_lnx->info;
434 		pl_dev->name = WLANDEV_BASENAME;
435 		proc_name = pl_dev->name;
436 
437 		if (!pl_dev->pl_funcs)
438 			pl_dev->pl_funcs = &ol_pl_funcs;
439 
440 		/*
441 		 * Valid for both direct attach and offload architecture
442 		 */
443 		pl_dev->pl_funcs->pktlog_init(scn);
444 	} else {
445 		return -EINVAL;
446 	}
447 
448 	/*
449 	 * initialize log info
450 	 * might be good to move to pktlog_init
451 	 */
452 	/* pl_dev->tgt_pktlog_alloced = false; */
453 	pl_info_lnx->proc_entry = NULL;
454 	pl_info_lnx->sysctl_header = NULL;
455 
456 	proc_entry = proc_create_data(proc_name, PKTLOG_PROC_PERM,
457 			g_pktlog_pde, &pktlog_fops,
458 			&pl_info_lnx->info);
459 
460 	if (!proc_entry) {
461 		qdf_nofl_info(PKTLOG_TAG "%s: proc_create_data failed for %s",
462 			      __func__, proc_name);
463 		goto attach_fail1;
464 	}
465 
466 	pl_info_lnx->proc_entry = proc_entry;
467 
468 	if (pktlog_sysctl_register(scn)) {
469 		qdf_nofl_info(PKTLOG_TAG "%s: sysctl register failed for %s",
470 			      __func__, proc_name);
471 		goto attach_fail2;
472 	}
473 
474 	return 0;
475 
476 attach_fail2:
477 	remove_proc_entry(proc_name, g_pktlog_pde);
478 
479 attach_fail1:
480 	if (pl_dev)
481 		kfree(pl_dev->pl_info);
482 
483 	return -EINVAL;
484 }
485 
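/**
 * pktlog_sysctl_unregister() - remove the pktlog sysctl entries
 * @pl_dev: packet log device handle
 *
 * Unregisters the sysctl table installed by pktlog_sysctl_register()
 * and clears the cached sysctl header.
 */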
486 static void pktlog_sysctl_unregister(struct pktlog_dev_t *pl_dev)
487 {
488 	struct ath_pktlog_info_lnx *pl_info_lnx;
489 
490 	if (!pl_dev) {
491 		qdf_nofl_info("%s: Invalid pktlog context", __func__);
492 		ASSERT(0);
493 		return;
494 	}
495 
496 	pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) :
497 		      PL_INFO_LNX(g_pktlog_info);
498 
499 	if (pl_info_lnx->sysctl_header) {
500 		unregister_sysctl_table(pl_info_lnx->sysctl_header);
501 		pl_info_lnx->sysctl_header = NULL;
502 	}
503 }
504 
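/**
 * pktlog_detach() - detach packet logging from the adapter
 * @scn: opaque HIF context
 *
 * Removes the proc entry and sysctl entries, releases the log buffer if
 * one was allocated, destroys the pktlog locks and frees the per-adapter
 * pktlog info structure.
 */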
505 static void pktlog_detach(struct hif_opaque_softc *scn)
506 {
507 	struct ath_pktlog_info *pl_info;
508 	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
509 
510 	if (!pl_dev) {
511 		qdf_nofl_info("%s: Invalid pktlog context", __func__);
512 		ASSERT(0);
513 		return;
514 	}
515 
516 	pl_info = pl_dev->pl_info;
517 	if (!pl_info) {
518 		qdf_print("%s: Invalid pktlog handle", __func__);
519 		ASSERT(0);
520 		return;
521 	}
522 	mutex_lock(&pl_info->pktlog_mutex);
523 	remove_proc_entry(WLANDEV_BASENAME, g_pktlog_pde);
524 	pktlog_sysctl_unregister(pl_dev);
525 
526 	qdf_spin_lock_bh(&pl_info->log_lock);
527 
528 	if (pl_info->buf) {
529 		pktlog_release_buf(scn);
530 		pl_dev->tgt_pktlog_alloced = false;
531 	}
532 	qdf_spin_unlock_bh(&pl_info->log_lock);
533 	mutex_unlock(&pl_info->pktlog_mutex);
534 	pktlog_cleanup(pl_info);
535 
536 	if (pl_dev) {
537 		kfree(pl_info);
538 		pl_dev->pl_info = NULL;
539 	}
540 }
541 
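/**
 * __pktlog_open() - open handler for the pktlog proc entry
 * @i: inode of the proc entry
 * @f: file handle
 *
 * Marks a read operation as in progress, saves the current log state and
 * disables logging so the buffer contents stay stable while being read.
 *
 * Return: 0 on success, -EBUSY if another pktlog operation is in
 * progress, or a negative error code on other failures.
 */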
542 static int __pktlog_open(struct inode *i, struct file *f)
543 {
544 	struct hif_opaque_softc *scn;
545 	struct pktlog_dev_t *pl_dev;
546 	struct ath_pktlog_info *pl_info;
547 	struct ath_pktlog_info_lnx *pl_info_lnx;
548 	int ret = 0;
549 
550 	PKTLOG_MOD_INC_USE_COUNT;
551 	scn = cds_get_context(QDF_MODULE_ID_HIF);
552 	if (!scn) {
553 		qdf_print("%s: Invalid scn context", __func__);
554 		ASSERT(0);
555 		return -EINVAL;
556 	}
557 
558 	pl_dev = get_pktlog_handle();
559 
560 	if (!pl_dev) {
561 		qdf_print("%s: Invalid pktlog handle", __func__);
562 		ASSERT(0);
563 		return -ENODEV;
564 	}
565 
566 	pl_info = pl_dev->pl_info;
567 
568 	if (!pl_info) {
569 		qdf_nofl_err("%s: pl_info NULL", __func__);
570 		return -EINVAL;
571 	}
572 
573 	mutex_lock(&pl_info->pktlog_mutex);
574 	pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) :
575 		PL_INFO_LNX(g_pktlog_info);
576 
577 	if (!pl_info_lnx->sysctl_header) {
578 		mutex_unlock(&pl_info->pktlog_mutex);
579 		qdf_print("%s: pktlog sysctl is unregistered.", __func__);
580 		ASSERT(0);
581 		return -EINVAL;
582 	}
583 
584 	if (pl_info->curr_pkt_state != PKTLOG_OPR_NOT_IN_PROGRESS) {
585 		mutex_unlock(&pl_info->pktlog_mutex);
586 		qdf_print("%s: plinfo state (%d) != PKTLOG_OPR_NOT_IN_PROGRESS",
587 			  __func__, pl_info->curr_pkt_state);
588 		return -EBUSY;
589 	}
590 
591 	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_START;
592 
593 	pl_info->init_saved_state = pl_info->log_state;
594 	if (!pl_info->log_state) {
595 		/* Pktlog is already disabled.
596 		 * Proceed to read directly.
597 		 */
598 		pl_info->curr_pkt_state =
599 			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
600 		mutex_unlock(&pl_info->pktlog_mutex);
601 		return ret;
602 	}
603 	/* Disable pktlog internally. */
604 	ret = pl_dev->pl_funcs->pktlog_disable(scn);
605 	pl_info->log_state = 0;
606 	pl_info->curr_pkt_state =
607 			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
608 	mutex_unlock(&pl_info->pktlog_mutex);
609 	return ret;
610 }
611 
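/**
 * pktlog_open() - op-protected wrapper around __pktlog_open()
 * @i: inode of the proc entry
 * @f: file handle
 *
 * Return: 0 on success or a negative error code.
 */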
612 static int pktlog_open(struct inode *i, struct file *f)
613 {
614 	struct qdf_op_sync *op_sync;
615 	int errno;
616 
617 	errno = qdf_op_protect(&op_sync);
618 	if (errno)
619 		return errno;
620 
621 	errno = __pktlog_open(i, f);
622 
623 	qdf_op_unprotect(op_sync);
624 
625 	return errno;
626 }
627 
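/**
 * __pktlog_release() - release handler for the pktlog proc entry
 * @i: inode of the proc entry
 * @f: file handle
 *
 * Clears the log buffer, restores the log state saved in __pktlog_open()
 * and re-enables packet logging, then marks the read operation complete.
 *
 * Return: 0 on success or a negative error code.
 */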
628 static int __pktlog_release(struct inode *i, struct file *f)
629 {
630 	struct hif_opaque_softc *scn;
631 	struct pktlog_dev_t *pl_dev;
632 	struct ath_pktlog_info *pl_info;
633 	struct ath_pktlog_info_lnx *pl_info_lnx;
634 	int ret = 0;
635 
636 	PKTLOG_MOD_DEC_USE_COUNT;
637 	scn = cds_get_context(QDF_MODULE_ID_HIF);
638 	if (!scn) {
639 		qdf_print("%s: Invalid scn context", __func__);
640 		ASSERT(0);
641 		return -EINVAL;
642 	}
643 
644 	pl_dev = get_pktlog_handle();
645 
646 	if (!pl_dev) {
647 		qdf_print("%s: Invalid pktlog handle", __func__);
648 		ASSERT(0);
649 		return -ENODEV;
650 	}
651 
652 	pl_info = pl_dev->pl_info;
653 
654 	if (!pl_info) {
655 		qdf_print("%s: Invalid pktlog info", __func__);
656 		ASSERT(0);
657 		return -EINVAL;
658 	}
659 
660 	mutex_lock(&pl_info->pktlog_mutex);
661 	pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) :
662 		PL_INFO_LNX(g_pktlog_info);
663 
664 	if (!pl_info_lnx->sysctl_header) {
665 		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
666 		mutex_unlock(&pl_info->pktlog_mutex);
667 		qdf_print("%s: pktlog sysctl is unregistered.", __func__);
668 		ASSERT(0);
669 		return -EINVAL;
670 	}
671 	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE;
672 	/* Clear the pktlog buffer. */
673 	pktlog_clearbuff(scn, true);
674 	pl_info->log_state = pl_info->init_saved_state;
675 	pl_info->init_saved_state = 0;
676 
677 	/* Enable pktlog again */
678 	ret = __pktlog_enable(
679 			(struct hif_opaque_softc *)scn, pl_info->log_state,
680 			cds_is_packet_log_enabled(), 0, 1);
681 
682 	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
683 	mutex_unlock(&pl_info->pktlog_mutex);
684 	if (ret != 0)
685 		qdf_print("%s: pktlog cannot be enabled. ret value %d",
686 			  __func__, ret);
687 
688 	return ret;
689 }
690 
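/**
 * pktlog_release() - op-protected wrapper around __pktlog_release()
 * @i: inode of the proc entry
 * @f: file handle
 *
 * Return: 0 on success or a negative error code.
 */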
691 static int pktlog_release(struct inode *i, struct file *f)
692 {
693 	struct qdf_op_sync *op_sync;
694 	int errno;
695 
696 	errno = qdf_op_protect(&op_sync);
697 	if (errno)
698 		return errno;
699 
700 	errno = __pktlog_release(i, f);
701 
702 	qdf_op_unprotect(op_sync);
703 
704 	return errno;
705 }
706 
707 #ifndef MIN
708 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
709 #endif
710 
711 /**
712  * pktlog_read_proc_entry() - read data from the proc entry into the
713  * reader's buffer
714  * @buf:     Reader's buffer
715  * @nbytes:  Number of bytes to read
716  * @ppos:    Offset within the driver's buffer
717  * @pl_info: Packet log information pointer
718  * @read_complete: Boolean indicating whether the read is complete
719  *
720  * This function reads data from the proc entry into the reader's buffer.
721  * It is similar to 'pktlog_read', which instead copies the data to a
722  * user-space buffer with copy_to_user().
723  *
724  * Return: Number of bytes read from the buffer
725  *
726  */
727 ssize_t
728 pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos,
729 		       struct ath_pktlog_info *pl_info, bool *read_complete)
730 {
731 	size_t bufhdr_size;
732 	size_t count = 0, ret_val = 0;
733 	int rem_len;
734 	int start_offset, end_offset;
735 	int fold_offset, ppos_data, cur_rd_offset, cur_wr_offset;
736 	struct ath_pktlog_buf *log_buf;
737 
738 	qdf_spin_lock_bh(&pl_info->log_lock);
739 	log_buf = pl_info->buf;
740 
741 	*read_complete = false;
742 
743 	if (!log_buf) {
744 		*read_complete = true;
745 		qdf_spin_unlock_bh(&pl_info->log_lock);
746 		return 0;
747 	}
748 
749 	if (*ppos == 0 && pl_info->log_state) {
750 		pl_info->saved_state = pl_info->log_state;
751 		pl_info->log_state = 0;
752 	}
753 
754 	bufhdr_size = sizeof(log_buf->bufhdr);
755 	/* copy valid log entries from the log buffer to the reader's buffer */
756 	/* copy valid log entries from circular buffer into user space */
757 	rem_len = nbytes;
758 	count = 0;
759 
760 	if (*ppos < bufhdr_size) {
761 		count = MIN((bufhdr_size - *ppos), rem_len);
762 		qdf_mem_copy(buf, ((char *)&log_buf->bufhdr) + *ppos,
763 				count);
764 		rem_len -= count;
765 		ret_val += count;
766 	}
767 
768 	start_offset = log_buf->rd_offset;
769 	cur_wr_offset = log_buf->wr_offset;
770 
771 	if ((rem_len == 0) || (start_offset < 0))
772 		goto rd_done;
773 
774 	fold_offset = -1;
775 	cur_rd_offset = start_offset;
776 
777 	/* Find the last offset and fold-offset if the buffer is folded */
778 	do {
779 		struct ath_pktlog_hdr *log_hdr;
780 		int log_data_offset;
781 
782 		log_hdr = (struct ath_pktlog_hdr *) (log_buf->log_data +
783 				cur_rd_offset);
784 
785 		log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);
786 
787 		if ((fold_offset == -1)
788 				&& ((pl_info->buf_size - log_data_offset)
789 					<= log_hdr->size))
790 			fold_offset = log_data_offset - 1;
791 
792 		PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);
793 
794 		if ((fold_offset == -1) && (cur_rd_offset == 0)
795 				&& (cur_rd_offset != cur_wr_offset))
796 			fold_offset = log_data_offset + log_hdr->size - 1;
797 
798 		end_offset = log_data_offset + log_hdr->size - 1;
799 	} while (cur_rd_offset != cur_wr_offset);
800 
801 	ppos_data = *ppos + ret_val - bufhdr_size + start_offset;
802 
803 	if (fold_offset == -1) {
804 		if (ppos_data > end_offset)
805 			goto rd_done;
806 
807 		count = MIN(rem_len, (end_offset - ppos_data + 1));
808 		qdf_mem_copy(buf + ret_val,
809 				log_buf->log_data + ppos_data,
810 				count);
811 		ret_val += count;
812 		rem_len -= count;
813 	} else {
814 		if (ppos_data <= fold_offset) {
815 			count = MIN(rem_len, (fold_offset - ppos_data + 1));
816 			qdf_mem_copy(buf + ret_val,
817 					log_buf->log_data + ppos_data,
818 					count);
819 			ret_val += count;
820 			rem_len -= count;
821 		}
822 
823 		if (rem_len == 0)
824 			goto rd_done;
825 
826 		ppos_data =
827 			*ppos + ret_val - (bufhdr_size +
828 					(fold_offset - start_offset + 1));
829 
830 		if (ppos_data <= end_offset) {
831 			count = MIN(rem_len, (end_offset - ppos_data + 1));
832 			qdf_mem_copy(buf + ret_val,
833 					log_buf->log_data + ppos_data,
834 					count);
835 			ret_val += count;
836 			rem_len -= count;
837 		}
838 	}
839 
840 rd_done:
841 	if ((ret_val < nbytes) && pl_info->saved_state) {
842 		pl_info->log_state = pl_info->saved_state;
843 		pl_info->saved_state = 0;
844 	}
845 	*ppos += ret_val;
846 
847 	if (ret_val == 0) {
848 		/* Write pointer might have been updated during the read.
849 		/* The write pointer might have been updated during the read.
850 		 * If some data was written in the meantime, do not reset the
851 		 * pointers; we can continue reading from the current offset.
852 		 */
853 			*read_complete = false;
854 		} else {
855 			pl_info->buf->rd_offset = -1;
856 			pl_info->buf->wr_offset = 0;
857 			pl_info->buf->bytes_written = 0;
858 			pl_info->buf->offset = PKTLOG_READ_OFFSET;
859 			*read_complete = true;
860 		}
861 	}
862 	qdf_spin_unlock_bh(&pl_info->log_lock);
863 	return ret_val;
864 }
865 
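/**
 * __pktlog_read() - copy packet log data to a user-space buffer
 * @file:   file handle of the proc entry
 * @buf:    user-space buffer
 * @nbytes: number of bytes to read
 * @ppos:   offset within the driver's buffer
 *
 * Copies the buffer header followed by the valid log entries from the
 * circular buffer to user space with copy_to_user(), handling the case
 * where the buffer has wrapped. Reading is rejected while logging is
 * still enabled.
 *
 * Return: number of bytes copied, 0 at end of data, or a negative errno.
 */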
866 static ssize_t
867 __pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
868 {
869 	size_t bufhdr_size;
870 	size_t count = 0, ret_val = 0;
871 	int rem_len;
872 	int start_offset, end_offset;
873 	int fold_offset, ppos_data, cur_rd_offset;
874 	struct ath_pktlog_info *pl_info;
875 	struct ath_pktlog_buf *log_buf;
876 
877 	pl_info = PDE_DATA(file->f_path.dentry->d_inode);
878 	if (!pl_info)
879 		return 0;
880 
881 	qdf_spin_lock_bh(&pl_info->log_lock);
882 	log_buf = pl_info->buf;
883 
884 	if (!log_buf) {
885 		qdf_spin_unlock_bh(&pl_info->log_lock);
886 		return 0;
887 	}
888 
889 	if (pl_info->log_state) {
890 		/* Reads are not allowed while logging is in progress.
891 		 * Before issuing a cat command, send the pktlog disable
892 		 * command first.
893 		 */
894 		qdf_spin_unlock_bh(&pl_info->log_lock);
895 		return -EINVAL;
896 	}
897 
898 	if (*ppos == 0 && pl_info->log_state) {
899 		pl_info->saved_state = pl_info->log_state;
900 		pl_info->log_state = 0;
901 	}
902 
903 	bufhdr_size = sizeof(log_buf->bufhdr);
904 
905 	/* copy valid log entries from circular buffer into user space */
906 
907 	rem_len = nbytes;
908 	count = 0;
909 
910 	if (*ppos < bufhdr_size) {
911 		count = QDF_MIN((bufhdr_size - *ppos), rem_len);
912 		qdf_spin_unlock_bh(&pl_info->log_lock);
913 		if (copy_to_user(buf, ((char *)&log_buf->bufhdr) + *ppos,
914 				 count)) {
915 			return -EFAULT;
916 		}
917 		rem_len -= count;
918 		ret_val += count;
919 		qdf_spin_lock_bh(&pl_info->log_lock);
920 	}
921 
922 	start_offset = log_buf->rd_offset;
923 
924 	if ((rem_len == 0) || (start_offset < 0))
925 		goto rd_done;
926 
927 	fold_offset = -1;
928 	cur_rd_offset = start_offset;
929 
930 	/* Find the last offset and fold-offset if the buffer is folded */
931 	do {
932 		struct ath_pktlog_hdr *log_hdr;
933 		int log_data_offset;
934 
935 		log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data +
936 						    cur_rd_offset);
937 
938 		log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);
939 
940 		if ((fold_offset == -1)
941 		    && ((pl_info->buf_size - log_data_offset)
942 			<= log_hdr->size))
943 			fold_offset = log_data_offset - 1;
944 
945 		PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);
946 
947 		if ((fold_offset == -1) && (cur_rd_offset == 0)
948 		    && (cur_rd_offset != log_buf->wr_offset))
949 			fold_offset = log_data_offset + log_hdr->size - 1;
950 
951 		end_offset = log_data_offset + log_hdr->size - 1;
952 	} while (cur_rd_offset != log_buf->wr_offset);
953 
954 	ppos_data = *ppos + ret_val - bufhdr_size + start_offset;
955 
956 	if (fold_offset == -1) {
957 		if (ppos_data > end_offset)
958 			goto rd_done;
959 
960 		count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
961 		qdf_spin_unlock_bh(&pl_info->log_lock);
962 
963 		if (copy_to_user(buf + ret_val,
964 				 log_buf->log_data + ppos_data, count)) {
965 			return -EFAULT;
966 		}
967 
968 		ret_val += count;
969 		rem_len -= count;
970 		qdf_spin_lock_bh(&pl_info->log_lock);
971 	} else {
972 		if (ppos_data <= fold_offset) {
973 			count = QDF_MIN(rem_len, (fold_offset - ppos_data + 1));
974 			qdf_spin_unlock_bh(&pl_info->log_lock);
975 			if (copy_to_user(buf + ret_val,
976 					 log_buf->log_data + ppos_data,
977 					 count)) {
978 				return -EFAULT;
979 			}
980 			ret_val += count;
981 			rem_len -= count;
982 			qdf_spin_lock_bh(&pl_info->log_lock);
983 		}
984 
985 		if (rem_len == 0)
986 			goto rd_done;
987 
988 		ppos_data =
989 			*ppos + ret_val - (bufhdr_size +
990 					   (fold_offset - start_offset + 1));
991 
992 		if (ppos_data <= end_offset) {
993 			count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
994 			qdf_spin_unlock_bh(&pl_info->log_lock);
995 			if (copy_to_user(buf + ret_val,
996 					 log_buf->log_data + ppos_data,
997 					 count)) {
998 				return -EFAULT;
999 			}
1000 			ret_val += count;
1001 			rem_len -= count;
1002 			qdf_spin_lock_bh(&pl_info->log_lock);
1003 		}
1004 	}
1005 
1006 rd_done:
1007 	if ((ret_val < nbytes) && pl_info->saved_state) {
1008 		pl_info->log_state = pl_info->saved_state;
1009 		pl_info->saved_state = 0;
1010 	}
1011 	*ppos += ret_val;
1012 
1013 	qdf_spin_unlock_bh(&pl_info->log_lock);
1014 	return ret_val;
1015 }
1016 
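/**
 * pktlog_read() - proc read handler for the pktlog entry
 * @file:   file handle of the proc entry
 * @buf:    user-space buffer
 * @nbytes: number of bytes to read
 * @ppos:   offset within the driver's buffer
 *
 * Serializes against other pktlog operations with pktlog_mutex and op
 * protection before calling __pktlog_read().
 *
 * Return: number of bytes copied or a negative errno.
 */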
1017 static ssize_t
1018 pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
1019 {
1020 	struct ath_pktlog_info *info = PDE_DATA(file->f_path.dentry->d_inode);
1021 	struct qdf_op_sync *op_sync;
1022 	ssize_t err_size;
1023 
1024 	if (!info)
1025 		return 0;
1026 
1027 	err_size = qdf_op_protect(&op_sync);
1028 	if (err_size)
1029 		return err_size;
1030 
1031 	mutex_lock(&info->pktlog_mutex);
1032 	err_size = __pktlog_read(file, buf, nbytes, ppos);
1033 	mutex_unlock(&info->pktlog_mutex);
1034 
1035 	qdf_op_unprotect(op_sync);
1036 
1037 	return err_size;
1038 }
1039 
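/**
 * pktlogmod_init() - create the pktlog proc directory and attach pktlog
 * @context: opaque HIF context
 *
 * Creates the /proc/<PKTLOG_PROC_DIR> directory and attaches packet
 * logging for the given context. The directory is removed again if the
 * attach fails.
 *
 * Return: 0 on success or a negative error code.
 */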
1040 int pktlogmod_init(void *context)
1041 {
1042 	int ret;
1043 
1044 	/* create the proc directory entry */
1045 	g_pktlog_pde = proc_mkdir(PKTLOG_PROC_DIR, NULL);
1046 
1047 	if (!g_pktlog_pde) {
1048 		qdf_nofl_info(PKTLOG_TAG "%s: proc_mkdir failed", __func__);
1049 		return -EPERM;
1050 	}
1051 
1052 	/* Attach packet log */
1053 	ret = pktlog_attach((struct hif_opaque_softc *)context);
1054 
1055 	/* If packet log init failed */
1056 	if (ret)
1057 		goto attach_fail;
1058 
1059 	return ret;
1060 
1061 attach_fail:
1062 	remove_proc_entry(PKTLOG_PROC_DIR, NULL);
1063 	g_pktlog_pde = NULL;
1064 
1065 	return ret;
1066 }
1067 
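/**
 * pktlogmod_exit() - detach pktlog and remove the proc directory
 * @context: opaque HIF context
 *
 * Undoes pktlogmod_init(): detaches packet logging and removes the
 * /proc/<PKTLOG_PROC_DIR> directory, if it was created.
 */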
1068 void pktlogmod_exit(void *context)
1069 {
1070 	if (!g_pktlog_pde)
1071 		return;
1072 
1073 	pktlog_detach((struct hif_opaque_softc *)context);
1074 
1075 	/*
1076 	 *  pdev kill needs to be implemented
1077 	 */
1078 	remove_proc_entry(PKTLOG_PROC_DIR, NULL);
1079 }
1080 #endif
1081