/*
 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef REMOVE_PKT_LOG
#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif
#ifndef __KERNEL__
#define __KERNEL__
#endif
/*
 * Linux specific implementation of Pktlogs for 802.11ac
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <pktlog_ac_i.h>
#include <pktlog_ac_fmt.h>
#include "i_host_diag_core_log.h"
#include "host_diag_core_log.h"
#include "ani_global.h"

#define PKTLOG_TAG "ATH_PKTLOG"
#define PKTLOG_DEVNAME_SIZE 32
#define MAX_WLANDEV 1

#ifdef MULTI_IF_NAME
#define PKTLOG_PROC_DIR "ath_pktlog" MULTI_IF_NAME
#else
#define PKTLOG_PROC_DIR "ath_pktlog"
#endif

/* Permissions for creating proc entries */
#define PKTLOG_PROC_PERM 0444
#define PKTLOG_PROCSYS_DIR_PERM 0555
#define PKTLOG_PROCSYS_PERM 0644

/*
 * Pin/unpin this module while the pktlog proc file is held open, so it
 * cannot be unloaded while a reader is active.
 */
#ifndef __MOD_INC_USE_COUNT
#define PKTLOG_MOD_INC_USE_COUNT do { \
        if (!try_module_get(THIS_MODULE)) { \
                printk(KERN_WARNING "try_module_get failed\n"); \
        } } while (0)

#define PKTLOG_MOD_DEC_USE_COUNT module_put(THIS_MODULE)
#else
#define PKTLOG_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
#define PKTLOG_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
#endif

/* System-wide pktlog info, used when no per-adapter pktlog device exists */
static struct ath_pktlog_info *g_pktlog_info;

/* /proc/<PKTLOG_PROC_DIR> directory entry created in pktlogmod_init() */
static struct proc_dir_entry *g_pktlog_pde;

/* Serializes the sysctl read/write handlers below */
static DEFINE_MUTEX(proc_mutex);

static int pktlog_attach(struct hif_opaque_softc *scn);
static void pktlog_detach(struct hif_opaque_softc *scn);
static int pktlog_open(struct inode *i, struct file *f);
static int pktlog_release(struct inode *i, struct file *f);
static ssize_t pktlog_read(struct file *file, char *buf, size_t nbytes,
                           loff_t *ppos);

/* proc file hooks (old GCC-style designated initializers) */
static struct file_operations pktlog_fops = {
        open: pktlog_open,
        release:pktlog_release,
        read : pktlog_read,
};

/* Stop logging on the adapter; no-op when the pktlog device is absent. */
void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn)
{
        struct pktlog_dev_t *pl_dev = get_pktlog_handle();
        if (pl_dev)
                pl_dev->pl_info->log_state = 0;
}

/*
 * Allocate the pktlog ring buffer (struct ath_pktlog_buf header plus
 * pl_info->buf_size payload bytes), mark its pages reserved, and publish
 * it in pl_info->buf under log_lock.
 *
 * Return: 0 on success, -EINVAL if there is no pktlog device or a buffer
 * is already in use, -ENOMEM when vmalloc fails.
 */
int pktlog_alloc_buf(struct hif_opaque_softc *scn)
{
        uint32_t page_cnt;
        unsigned long vaddr;
        struct page *vpg;
        struct pktlog_dev_t *pl_dev;
        struct ath_pktlog_info *pl_info;
        struct ath_pktlog_buf *buffer;

        pl_dev = get_pktlog_handle();

        if (!pl_dev) {
                printk(PKTLOG_TAG
                       "%s: Unable to allocate buffer pdev_txrx_handle or pdev_txrx_handle->pl_dev is null\n",
                       __func__);
                return -EINVAL;
        }

        pl_info = pl_dev->pl_info;

        page_cnt = (sizeof(*(pl_info->buf)) + pl_info->buf_size) / PAGE_SIZE;

        qdf_spin_lock_bh(&pl_info->log_lock);
        if (pl_info->buf) {
                qdf_spin_unlock_bh(&pl_info->log_lock);
                printk(PKTLOG_TAG "Buffer is already in use\n");
                return -EINVAL;
        }
        qdf_spin_unlock_bh(&pl_info->log_lock);

        /* Over-allocate by two pages so the usable region can start
         * on a page boundary.
         */
        buffer = vmalloc((page_cnt + 2) * PAGE_SIZE);
        if (!buffer) {
                printk(PKTLOG_TAG
                       "%s: Unable to allocate buffer "
                       "(%d pages)\n", __func__, page_cnt);
                return -ENOMEM;
        }

        /* Round the start address up to the next page boundary */
        buffer = (struct ath_pktlog_buf *)
                 (((unsigned long)(buffer) + PAGE_SIZE - 1)
                  & PAGE_MASK);

        /* Mark every page backing the log area as reserved */
        for (vaddr = (unsigned long)(buffer);
             vaddr < ((unsigned long)(buffer) + (page_cnt * PAGE_SIZE));
             vaddr += PAGE_SIZE) {
                vpg = vmalloc_to_page((const void *)vaddr);
                SetPageReserved(vpg);
        }

        /* A concurrent allocation may have published a buffer meanwhile;
         * release it before installing ours.
         */
        qdf_spin_lock_bh(&pl_info->log_lock);
        if (pl_info->buf)
                pktlog_release_buf(scn);

        pl_info->buf = buffer;
        qdf_spin_unlock_bh(&pl_info->log_lock);
        return 0;
}

/*
 * Undo pktlog_alloc_buf(): clear the reserved bit on each page of the
 * log buffer, vfree() it and reset pl_info->buf.  Both call sites in
 * this file invoke it with pl_info->log_lock held.
 *
 * NOTE(review): vfree() is generally not safe from the atomic context
 * imposed by a bh spinlock - confirm this is tolerated here.
 */
void pktlog_release_buf(struct hif_opaque_softc *scn)
{
        unsigned long page_cnt;
        unsigned long vaddr;
        struct page *vpg;
        struct pktlog_dev_t *pl_dev;
        struct ath_pktlog_info *pl_info;

        pl_dev = get_pktlog_handle();

        if (!pl_dev) {
                qdf_print("%s: invalid pl_dev handle", __func__);
                return;
        }

        if (!pl_dev->pl_info) {
                qdf_print("%s: invalid pl_dev handle", __func__);
                return;
        }

        pl_info = pl_dev->pl_info;

        page_cnt = ((sizeof(*(pl_info->buf)) + pl_info->buf_size) /
                    PAGE_SIZE) + 1;

        for (vaddr = (unsigned long)(pl_info->buf);
             vaddr < (unsigned long)(pl_info->buf) + (page_cnt * PAGE_SIZE);
             vaddr += PAGE_SIZE) {
                vpg = vmalloc_to_page((const void *)vaddr);
                ClearPageReserved(vpg);
        }

        vfree(pl_info->buf);
        pl_info->buf = NULL;
}

/* Disable logging and destroy the locks owned by pl_info. */
static void pktlog_cleanup(struct ath_pktlog_info *pl_info)
{
        pl_info->log_state = 0;
        PKTLOG_LOCK_DESTROY(pl_info);
        mutex_destroy(&pl_info->pktlog_mutex);
}

/* sysctl procfs handler to enable pktlog */
static int
qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos)
{
        int ret, enable;
        ol_ath_generic_softc_handle scn;
        struct pktlog_dev_t *pl_dev;

        mutex_lock(&proc_mutex);
        scn = (ol_ath_generic_softc_handle) ctl->extra1;

        if (!scn) {
                mutex_unlock(&proc_mutex);
                printk("%s: Invalid scn context\n", __func__);
                ASSERT(0);
                return -EINVAL;
        }
        pl_dev = get_pktlog_handle();

        if (!pl_dev) {
                mutex_unlock(&proc_mutex);
                printk("%s: Invalid pktlog context\n", __func__);
                ASSERT(0);
                return -ENODEV;
        }

        /* Point the ctl_table at a stack local so proc_dointvec parses
         * into / formats from it; proc_mutex keeps this race-free.
         */
        ctl->data = &enable;
        ctl->maxlen = sizeof(enable);

        if (write) {
                ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
                                               lenp, ppos);
                if (ret == 0) {
                        /* Apply the parsed value to the target */
                        ret = pl_dev->pl_funcs->pktlog_enable(
                                        (struct hif_opaque_softc *)scn, enable,
                                        cds_is_packet_log_enabled(), 0, 1);
                }
                else
                        QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
                                  "Line:%d %s:proc_dointvec failed reason %d",
                                  __LINE__, __func__, ret);
        } else {
                ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
                                               lenp, ppos);
                if (ret)
                        QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
                                  "Line:%d %s:proc_dointvec failed reason %d",
                                  __LINE__, __func__, ret);
        }

        /* Detach the transient buffer again */
        ctl->data = NULL;
        ctl->maxlen = 0;
        mutex_unlock(&proc_mutex);

        return ret;
}

/* Current log buffer size in bytes */
static int get_pktlog_bufsize(struct pktlog_dev_t *pl_dev)
{
        return pl_dev->pl_info->buf_size;
}

/* sysctl procfs handler to set/get pktlog size */
static int
qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos)
{
        int ret, size;
        ol_ath_generic_softc_handle scn;
        struct pktlog_dev_t *pl_dev;

        mutex_lock(&proc_mutex);
        scn = (ol_ath_generic_softc_handle) ctl->extra1;

        if (!scn) {
                mutex_unlock(&proc_mutex);
                printk("%s: Invalid scn context\n", __func__);
                ASSERT(0);
                return -EINVAL;
        }

        pl_dev = get_pktlog_handle();

        if (!pl_dev) {
                mutex_unlock(&proc_mutex);
                printk("%s: Invalid pktlog handle\n", __func__);
                ASSERT(0);
                return -ENODEV;
        }

        /* Parse/format through a stack local, as in the enable handler */
        ctl->data = &size;
        ctl->maxlen = sizeof(size);

        if (write) {
                ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
                                               lenp, ppos);
                if (ret == 0)
                        ret = pl_dev->pl_funcs->pktlog_setsize(
                                        (struct hif_opaque_softc *)scn, size);
        } else {
                /* Read path: report the current buffer size */
                size = get_pktlog_bufsize(pl_dev);
                ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
                                               lenp, ppos);
        }

        ctl->data = NULL;
        ctl->maxlen = 0;
        mutex_unlock(&proc_mutex);

        return ret;
}

/*
 * Register sysctl table.
 *
 * Builds /proc/sys/<PKTLOG_PROC_DIR>/<adapter>/{enable,size,options,...}
 * from the fixed sysctls[] array embedded in pl_info_lnx.  The skipped
 * indices ([1], [3], [13]) are the NULL terminators of each nested
 * ctl_table level, zeroed by the memset below.
 */
static int pktlog_sysctl_register(struct hif_opaque_softc *scn)
{
        struct pktlog_dev_t *pl_dev = get_pktlog_handle();
        struct ath_pktlog_info_lnx *pl_info_lnx;
        char *proc_name;

        if (pl_dev) {
                pl_info_lnx = PL_INFO_LNX(pl_dev->pl_info);
                proc_name = pl_dev->name;
        } else {
                /* No adapter device: fall back to the system-wide context */
                pl_info_lnx = PL_INFO_LNX(g_pktlog_info);
                proc_name = PKTLOG_PROC_SYSTEM;
        }

        /*
         * Setup the sysctl table for creating the following sysctl entries:
         * /proc/sys/PKTLOG_PROC_DIR/<adapter>/enable for enabling/disabling
         * pktlog
         * /proc/sys/PKTLOG_PROC_DIR/<adapter>/size for changing the buffer size
         */
        memset(pl_info_lnx->sysctls, 0, sizeof(pl_info_lnx->sysctls));
        pl_info_lnx->sysctls[0].procname = PKTLOG_PROC_DIR;
        pl_info_lnx->sysctls[0].mode = PKTLOG_PROCSYS_DIR_PERM;
        pl_info_lnx->sysctls[0].child = &pl_info_lnx->sysctls[2];

        /* [1] is NULL terminator */
        pl_info_lnx->sysctls[2].procname = proc_name;
        pl_info_lnx->sysctls[2].mode = PKTLOG_PROCSYS_DIR_PERM;
        pl_info_lnx->sysctls[2].child = &pl_info_lnx->sysctls[4];

        /* [3] is NULL terminator */
        pl_info_lnx->sysctls[4].procname = "enable";
        pl_info_lnx->sysctls[4].mode = PKTLOG_PROCSYS_PERM;
        pl_info_lnx->sysctls[4].proc_handler = ath_sysctl_pktlog_enable;
        pl_info_lnx->sysctls[4].extra1 = scn;

        pl_info_lnx->sysctls[5].procname = "size";
        pl_info_lnx->sysctls[5].mode = PKTLOG_PROCSYS_PERM;
        pl_info_lnx->sysctls[5].proc_handler = ath_sysctl_pktlog_size;
        pl_info_lnx->sysctls[5].extra1 = scn;

        pl_info_lnx->sysctls[6].procname = "options";
        pl_info_lnx->sysctls[6].mode = PKTLOG_PROCSYS_PERM;
        /* The remaining knobs are plain integers handled by proc_dointvec,
         * reading/writing the corresponding ath_pktlog_info fields directly.
         */
        pl_info_lnx->sysctls[6].proc_handler = proc_dointvec;
        pl_info_lnx->sysctls[6].data = &pl_info_lnx->info.options;
        pl_info_lnx->sysctls[6].maxlen = sizeof(pl_info_lnx->info.options);

        pl_info_lnx->sysctls[7].procname = "sack_thr";
        pl_info_lnx->sysctls[7].mode = PKTLOG_PROCSYS_PERM;
        pl_info_lnx->sysctls[7].proc_handler = proc_dointvec;
        pl_info_lnx->sysctls[7].data = &pl_info_lnx->info.sack_thr;
        pl_info_lnx->sysctls[7].maxlen = sizeof(pl_info_lnx->info.sack_thr);

        pl_info_lnx->sysctls[8].procname = "tail_length";
        pl_info_lnx->sysctls[8].mode = PKTLOG_PROCSYS_PERM;
        pl_info_lnx->sysctls[8].proc_handler = proc_dointvec;
        pl_info_lnx->sysctls[8].data = &pl_info_lnx->info.tail_length;
        pl_info_lnx->sysctls[8].maxlen = sizeof(pl_info_lnx->info.tail_length);

        pl_info_lnx->sysctls[9].procname = "thruput_thresh";
        pl_info_lnx->sysctls[9].mode = PKTLOG_PROCSYS_PERM;
        pl_info_lnx->sysctls[9].proc_handler = proc_dointvec;
        pl_info_lnx->sysctls[9].data = &pl_info_lnx->info.thruput_thresh;
        pl_info_lnx->sysctls[9].maxlen =
                sizeof(pl_info_lnx->info.thruput_thresh);

        pl_info_lnx->sysctls[10].procname = "phyerr_thresh";
        pl_info_lnx->sysctls[10].mode = PKTLOG_PROCSYS_PERM;
        pl_info_lnx->sysctls[10].proc_handler = proc_dointvec;
        pl_info_lnx->sysctls[10].data = &pl_info_lnx->info.phyerr_thresh;
        pl_info_lnx->sysctls[10].maxlen =
                sizeof(pl_info_lnx->info.phyerr_thresh);

        pl_info_lnx->sysctls[11].procname = "per_thresh";
        pl_info_lnx->sysctls[11].mode = PKTLOG_PROCSYS_PERM;
        pl_info_lnx->sysctls[11].proc_handler = proc_dointvec;
        pl_info_lnx->sysctls[11].data = &pl_info_lnx->info.per_thresh;
        pl_info_lnx->sysctls[11].maxlen = sizeof(pl_info_lnx->info.per_thresh);

        pl_info_lnx->sysctls[12].procname = "trigger_interval";
        pl_info_lnx->sysctls[12].mode = PKTLOG_PROCSYS_PERM;
        pl_info_lnx->sysctls[12].proc_handler = proc_dointvec;
        pl_info_lnx->sysctls[12].data
                = &pl_info_lnx->info.trigger_interval;
        pl_info_lnx->sysctls[12].maxlen =
                sizeof(pl_info_lnx->info.trigger_interval);
        /* [13] is NULL terminator */

        /* and register everything */
        /* register_sysctl_table changed from 2.6.21 onwards */
        pl_info_lnx->sysctl_header =
                register_sysctl_table(pl_info_lnx->sysctls);

        if (!pl_info_lnx->sysctl_header) {
                printk("%s: failed to register sysctls!\n", proc_name);
                return -EINVAL;
        }

        return 0;
}

/*
 * Initialize logging for system or adapter.
 * Parameter scn should be NULL for system wide logging.
 * Allocates the per-adapter pl_info, creates the /proc read entry and
 * registers the sysctl table; returns 0 on success, -EINVAL on failure.
 */
static int pktlog_attach(struct hif_opaque_softc *scn)
{
        struct pktlog_dev_t *pl_dev;
        struct ath_pktlog_info_lnx *pl_info_lnx;
        char *proc_name;
        struct proc_dir_entry *proc_entry;

        /* Allocate pktlog dev for later use */
        pl_dev = get_pktlog_handle();

        if (pl_dev) {
                pl_info_lnx = kmalloc(sizeof(*pl_info_lnx), GFP_KERNEL);
                if (!pl_info_lnx) {
                        QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                                  "%s: Allocation failed for pl_info",
                                  __func__);
                        goto attach_fail1;
                }

                pl_dev->pl_info = &pl_info_lnx->info;
                pl_dev->name = WLANDEV_BASENAME;
                proc_name = pl_dev->name;

                if (!pl_dev->pl_funcs)
                        pl_dev->pl_funcs = &ol_pl_funcs;

                /*
                 * Valid for both direct attach and offload architecture
                 */
                pl_dev->pl_funcs->pktlog_init(scn);
        } else {
                return -EINVAL;
        }

        /*
         * initialize log info
         * might be good to move to pktlog_init
         */
        /* pl_dev->tgt_pktlog_alloced = false; */
        pl_info_lnx->proc_entry = NULL;
        pl_info_lnx->sysctl_header = NULL;

        /* /proc/<dir>/<wlan> read interface for draining the log buffer */
        proc_entry = proc_create_data(proc_name, PKTLOG_PROC_PERM,
                                      g_pktlog_pde, &pktlog_fops,
                                      &pl_info_lnx->info);

        if (!proc_entry) {
                printk(PKTLOG_TAG "%s: create_proc_entry failed for %s\n",
                       __func__, proc_name);
                goto attach_fail1;
        }

        pl_info_lnx->proc_entry =
proc_entry; 464 465 if (pktlog_sysctl_register(scn)) { 466 printk(PKTLOG_TAG "%s: sysctl register failed for %s\n", 467 __func__, proc_name); 468 goto attach_fail2; 469 } 470 471 return 0; 472 473 attach_fail2: 474 remove_proc_entry(proc_name, g_pktlog_pde); 475 476 attach_fail1: 477 if (pl_dev) 478 kfree(pl_dev->pl_info); 479 480 return -EINVAL; 481 } 482 483 static void pktlog_sysctl_unregister(struct pktlog_dev_t *pl_dev) 484 { 485 struct ath_pktlog_info_lnx *pl_info_lnx; 486 487 if (!pl_dev) { 488 printk("%s: Invalid pktlog context\n", __func__); 489 ASSERT(0); 490 return; 491 } 492 493 pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) : 494 PL_INFO_LNX(g_pktlog_info); 495 496 if (pl_info_lnx->sysctl_header) { 497 unregister_sysctl_table(pl_info_lnx->sysctl_header); 498 pl_info_lnx->sysctl_header = NULL; 499 } 500 } 501 502 static void pktlog_detach(struct hif_opaque_softc *scn) 503 { 504 struct ath_pktlog_info *pl_info; 505 struct pktlog_dev_t *pl_dev = get_pktlog_handle(); 506 507 if (!pl_dev) { 508 printk("%s: Invalid pktlog context\n", __func__); 509 ASSERT(0); 510 return; 511 } 512 513 pl_info = pl_dev->pl_info; 514 remove_proc_entry(WLANDEV_BASENAME, g_pktlog_pde); 515 pktlog_sysctl_unregister(pl_dev); 516 517 qdf_spin_lock_bh(&pl_info->log_lock); 518 519 if (pl_info->buf) { 520 pktlog_release_buf(scn); 521 pl_dev->tgt_pktlog_alloced = false; 522 } 523 qdf_spin_unlock_bh(&pl_info->log_lock); 524 pktlog_cleanup(pl_info); 525 526 if (pl_dev) { 527 kfree(pl_info); 528 pl_dev->pl_info = NULL; 529 } 530 } 531 532 static int __pktlog_open(struct inode *i, struct file *f) 533 { 534 struct hif_opaque_softc *scn; 535 struct pktlog_dev_t *pl_dev; 536 struct ath_pktlog_info *pl_info; 537 int ret = 0; 538 539 PKTLOG_MOD_INC_USE_COUNT; 540 pl_info = PDE_DATA(f->f_path.dentry->d_inode); 541 if (!pl_info) { 542 pr_err("%s: pl_info NULL", __func__); 543 return -EINVAL; 544 } 545 546 if (pl_info->curr_pkt_state != PKTLOG_OPR_NOT_IN_PROGRESS) { 547 pr_info("%s: plinfo 
state (%d) != PKTLOG_OPR_NOT_IN_PROGRESS", 548 __func__, pl_info->curr_pkt_state); 549 return -EBUSY; 550 } 551 552 pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_START; 553 scn = cds_get_context(QDF_MODULE_ID_HIF); 554 if (!scn) { 555 pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; 556 qdf_print("%s: Invalid scn context", __func__); 557 ASSERT(0); 558 return -EINVAL; 559 } 560 561 pl_dev = get_pktlog_handle(); 562 563 if (!pl_dev) { 564 pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; 565 qdf_print("%s: Invalid pktlog handle", __func__); 566 ASSERT(0); 567 return -ENODEV; 568 } 569 570 pl_info->init_saved_state = pl_info->log_state; 571 if (!pl_info->log_state) { 572 /* Pktlog is already disabled. 573 * Proceed to read directly. 574 */ 575 pl_info->curr_pkt_state = 576 PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; 577 return ret; 578 } 579 /* Disbable the pktlog internally. */ 580 ret = pl_dev->pl_funcs->pktlog_disable(scn); 581 pl_info->log_state = 0; 582 pl_info->curr_pkt_state = 583 PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; 584 return ret; 585 } 586 587 static int pktlog_open(struct inode *i, struct file *f) 588 { 589 struct qdf_op_sync *op_sync; 590 int errno; 591 592 errno = qdf_op_protect(&op_sync); 593 if (errno) 594 return errno; 595 596 errno = __pktlog_open(i, f); 597 598 qdf_op_unprotect(op_sync); 599 600 return errno; 601 } 602 603 static int __pktlog_release(struct inode *i, struct file *f) 604 { 605 struct hif_opaque_softc *scn; 606 struct pktlog_dev_t *pl_dev; 607 struct ath_pktlog_info *pl_info; 608 int ret = 0; 609 610 PKTLOG_MOD_DEC_USE_COUNT; 611 612 pl_info = PDE_DATA(f->f_path.dentry->d_inode); 613 if (!pl_info) 614 return -EINVAL; 615 616 scn = cds_get_context(QDF_MODULE_ID_HIF); 617 if (!scn) { 618 pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; 619 qdf_print("%s: Invalid scn context", __func__); 620 ASSERT(0); 621 return -EINVAL; 622 } 623 624 pl_dev = get_pktlog_handle(); 625 626 if (!pl_dev) { 627 
                pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
                qdf_print("%s: Invalid pktlog handle", __func__);
                ASSERT(0);
                return -ENODEV;
        }

        pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE;
        /*clear pktlog buffer.*/
        pktlog_clearbuff(scn, true);
        /* Restore the logging state that was active before the open */
        pl_info->log_state = pl_info->init_saved_state;
        pl_info->init_saved_state = 0;

        /*Enable pktlog again*/
        ret = pl_dev->pl_funcs->pktlog_enable(
                        (struct hif_opaque_softc *)scn, pl_info->log_state,
                        cds_is_packet_log_enabled(), 0, 1);

        if (ret != 0)
                pr_warn("%s: pktlog cannot be enabled. ret value %d\n",
                        __func__, ret);

        pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
        return ret;
}

/* proc release hook: run __pktlog_release under driver-op protection. */
static int pktlog_release(struct inode *i, struct file *f)
{
        struct qdf_op_sync *op_sync;
        int errno;

        errno = qdf_op_protect(&op_sync);
        if (errno)
                return errno;

        errno = __pktlog_release(i, f);

        qdf_op_unprotect(op_sync);

        return errno;
}

#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

/**
 * pktlog_read_proc_entry() - This function is used to read data from the
 * proc entry into the readers buffer
 * @buf: Readers buffer
 * @nbytes: Number of bytes to read
 * @ppos: Offset within the drivers buffer
 * @pl_info: Packet log information pointer
 * @read_complete: Boolean value indication whether read is complete
 *
 * This function is used to read data from the proc entry into the readers
 * buffer.
Its functionality is similar to 'pktlog_read' which does
 * copy to user to the user space buffer
 *
 * Return: Number of bytes read from the buffer
 *
 */
ssize_t
pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos,
                       struct ath_pktlog_info *pl_info, bool *read_complete)
{
        size_t bufhdr_size;
        size_t count = 0, ret_val = 0;
        int rem_len;
        int start_offset, end_offset;
        int fold_offset, ppos_data, cur_rd_offset, cur_wr_offset;
        struct ath_pktlog_buf *log_buf;

        qdf_spin_lock_bh(&pl_info->log_lock);
        log_buf = pl_info->buf;

        *read_complete = false;

        if (!log_buf) {
                *read_complete = true;
                qdf_spin_unlock_bh(&pl_info->log_lock);
                return 0;
        }

        /* First read: pause logging while the buffer is drained; the
         * state is restored in rd_done once the reader catches up.
         */
        if (*ppos == 0 && pl_info->log_state) {
                pl_info->saved_state = pl_info->log_state;
                pl_info->log_state = 0;
        }

        bufhdr_size = sizeof(log_buf->bufhdr);

        /* copy valid log entries from the circular buffer into the
         * caller-supplied (kernel) buffer
         */
        rem_len = nbytes;
        count = 0;

        /* The buffer header occupies file offsets [0, bufhdr_size) */
        if (*ppos < bufhdr_size) {
                count = MIN((bufhdr_size - *ppos), rem_len);
                qdf_mem_copy(buf, ((char *)&log_buf->bufhdr) + *ppos,
                             count);
                rem_len -= count;
                ret_val += count;
        }

        start_offset = log_buf->rd_offset;
        cur_wr_offset = log_buf->wr_offset;

        if ((rem_len == 0) || (start_offset < 0))
                goto rd_done;

        fold_offset = -1;
        cur_rd_offset = start_offset;

        /* Find the last offset and fold-offset if the buffer is folded */
        do {
                struct ath_pktlog_hdr *log_hdr;
                int log_data_offset;

                log_hdr = (struct ath_pktlog_hdr *) (log_buf->log_data +
                                                     cur_rd_offset);

                log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);

                if ((fold_offset == -1)
                    && ((pl_info->buf_size - log_data_offset)
                        <= log_hdr->size))
                        fold_offset = log_data_offset - 1;

                PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);

                if ((fold_offset == -1) &&
                    (cur_rd_offset == 0)
                    && (cur_rd_offset != cur_wr_offset))
                        fold_offset = log_data_offset + log_hdr->size - 1;

                end_offset = log_data_offset + log_hdr->size - 1;
        } while (cur_rd_offset != cur_wr_offset);

        ppos_data = *ppos + ret_val - bufhdr_size + start_offset;

        if (fold_offset == -1) {
                /* Unfolded buffer: one contiguous region to copy */
                if (ppos_data > end_offset)
                        goto rd_done;

                count = MIN(rem_len, (end_offset - ppos_data + 1));
                qdf_mem_copy(buf + ret_val,
                             log_buf->log_data + ppos_data,
                             count);
                ret_val += count;
                rem_len -= count;
        } else {
                /* Folded buffer: copy the region up to the fold point... */
                if (ppos_data <= fold_offset) {
                        count = MIN(rem_len, (fold_offset - ppos_data + 1));
                        qdf_mem_copy(buf + ret_val,
                                     log_buf->log_data + ppos_data,
                                     count);
                        ret_val += count;
                        rem_len -= count;
                }

                if (rem_len == 0)
                        goto rd_done;

                ppos_data =
                        *ppos + ret_val - (bufhdr_size +
                                           (fold_offset - start_offset + 1));

                /* ...then the wrapped-around region at the buffer start */
                if (ppos_data <= end_offset) {
                        count = MIN(rem_len, (end_offset - ppos_data + 1));
                        qdf_mem_copy(buf + ret_val,
                                     log_buf->log_data + ppos_data,
                                     count);
                        ret_val += count;
                        rem_len -= count;
                }
        }

rd_done:
        /* Resume logging once the reader cannot drain any further */
        if ((ret_val < nbytes) && pl_info->saved_state) {
                pl_info->log_state = pl_info->saved_state;
                pl_info->saved_state = 0;
        }
        *ppos += ret_val;

        if (ret_val == 0) {
                /* Write pointer might have been updated during the read.
                 * So, if some data is written into, lets not reset the pointers
                 * We can continue to read from the offset position
                 */
                if (cur_wr_offset != log_buf->wr_offset) {
                        *read_complete = false;
                } else {
                        /* Fully drained: reset the ring to its empty state */
                        pl_info->buf->rd_offset = -1;
                        pl_info->buf->wr_offset = 0;
                        pl_info->buf->bytes_written = 0;
                        pl_info->buf->offset = PKTLOG_READ_OFFSET;
                        *read_complete = true;
                }
        }
        qdf_spin_unlock_bh(&pl_info->log_lock);
        return ret_val;
}

/*
 * Core read implementation for the pktlog proc file.  Mirrors
 * pktlog_read_proc_entry(), but copies to user space and therefore
 * drops log_lock around each copy_to_user() call.
 */
static ssize_t
__pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
        size_t bufhdr_size;
        size_t count = 0, ret_val = 0;
        int rem_len;
        int start_offset, end_offset;
        int fold_offset, ppos_data, cur_rd_offset;
        struct ath_pktlog_info *pl_info;
        struct ath_pktlog_buf *log_buf;

        pl_info = PDE_DATA(file->f_path.dentry->d_inode);
        if (!pl_info)
                return 0;

        qdf_spin_lock_bh(&pl_info->log_lock);
        log_buf = pl_info->buf;

        if (!log_buf) {
                qdf_spin_unlock_bh(&pl_info->log_lock);
                return 0;
        }

        if (pl_info->log_state) {
                /* Read is not allowed when write is going on
                 * When issuing cat command, ensure to send
                 * pktlog disable command first.
                 */
                qdf_spin_unlock_bh(&pl_info->log_lock);
                return -EINVAL;
        }

        /* NOTE(review): dead code - log_state is always 0 here because of
         * the check above; kept for symmetry with pktlog_read_proc_entry().
         */
        if (*ppos == 0 && pl_info->log_state) {
                pl_info->saved_state = pl_info->log_state;
                pl_info->log_state = 0;
        }

        bufhdr_size = sizeof(log_buf->bufhdr);

        /* copy valid log entries from circular buffer into user space */

        rem_len = nbytes;
        count = 0;

        /* The buffer header occupies file offsets [0, bufhdr_size) */
        if (*ppos < bufhdr_size) {
                count = QDF_MIN((bufhdr_size - *ppos), rem_len);
                /* Drop the bh spinlock: copy_to_user() may fault and sleep */
                qdf_spin_unlock_bh(&pl_info->log_lock);
                if (copy_to_user(buf, ((char *)&log_buf->bufhdr) + *ppos,
                                 count)) {
                        return -EFAULT;
                }
                rem_len -= count;
                ret_val += count;
                qdf_spin_lock_bh(&pl_info->log_lock);
        }

        start_offset = log_buf->rd_offset;

        if ((rem_len == 0) || (start_offset < 0))
                goto rd_done;

        fold_offset = -1;
        cur_rd_offset = start_offset;

        /* Find the last offset and fold-offset if the buffer is folded */
        do {
                struct ath_pktlog_hdr *log_hdr;
                int log_data_offset;

                log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data +
                                                    cur_rd_offset);

                log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);

                if ((fold_offset == -1)
                    && ((pl_info->buf_size - log_data_offset)
                        <= log_hdr->size))
                        fold_offset = log_data_offset - 1;

                PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);

                if ((fold_offset == -1) && (cur_rd_offset == 0)
                    && (cur_rd_offset != log_buf->wr_offset))
                        fold_offset = log_data_offset + log_hdr->size - 1;

                end_offset = log_data_offset + log_hdr->size - 1;
        } while (cur_rd_offset != log_buf->wr_offset);

        ppos_data = *ppos + ret_val - bufhdr_size + start_offset;

        if (fold_offset == -1) {
                /* Unfolded buffer: one contiguous copy */
                if (ppos_data > end_offset)
                        goto rd_done;

                count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
                qdf_spin_unlock_bh(&pl_info->log_lock);

                if (copy_to_user(buf + ret_val,
                                 log_buf->log_data + ppos_data, count)) {
                        return
                                -EFAULT;
                }

                ret_val += count;
                rem_len -= count;
                qdf_spin_lock_bh(&pl_info->log_lock);
        } else {
                /* Folded buffer: copy up to the fold point... */
                if (ppos_data <= fold_offset) {
                        count = QDF_MIN(rem_len, (fold_offset - ppos_data + 1));
                        qdf_spin_unlock_bh(&pl_info->log_lock);
                        if (copy_to_user(buf + ret_val,
                                         log_buf->log_data + ppos_data,
                                         count)) {
                                return -EFAULT;
                        }
                        ret_val += count;
                        rem_len -= count;
                        qdf_spin_lock_bh(&pl_info->log_lock);
                }

                if (rem_len == 0)
                        goto rd_done;

                ppos_data =
                        *ppos + ret_val - (bufhdr_size +
                                           (fold_offset - start_offset + 1));

                /* ...then the wrapped-around region at the buffer start */
                if (ppos_data <= end_offset) {
                        count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
                        qdf_spin_unlock_bh(&pl_info->log_lock);
                        if (copy_to_user(buf + ret_val,
                                         log_buf->log_data + ppos_data,
                                         count)) {
                                return -EFAULT;
                        }
                        ret_val += count;
                        rem_len -= count;
                        qdf_spin_lock_bh(&pl_info->log_lock);
                }
        }

rd_done:
        /* Restore the log state that was paused at the start of the read */
        if ((ret_val < nbytes) && pl_info->saved_state) {
                pl_info->log_state = pl_info->saved_state;
                pl_info->saved_state = 0;
        }
        *ppos += ret_val;

        qdf_spin_unlock_bh(&pl_info->log_lock);
        return ret_val;
}

/* proc read hook: serialize readers and wrap __pktlog_read(). */
static ssize_t
pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
        struct ath_pktlog_info *info = PDE_DATA(file->f_path.dentry->d_inode);
        struct qdf_op_sync *op_sync;
        ssize_t err_size;

        if (!info)
                return 0;

        err_size = qdf_op_protect(&op_sync);
        if (err_size)
                return err_size;

        mutex_lock(&info->pktlog_mutex);
        err_size = __pktlog_read(file, buf, nbytes, ppos);
        mutex_unlock(&info->pktlog_mutex);

        qdf_op_unprotect(op_sync);

        return err_size;
}

/*
 * Module-level init: create the /proc/<PKTLOG_PROC_DIR> directory and
 * attach the pktlog device.  Return: 0 on success, negative errno on
 * failure (the proc directory is removed again if attach fails).
 */
int pktlogmod_init(void *context)
{
        int ret;

        /* create the proc directory entry */
        g_pktlog_pde = proc_mkdir(PKTLOG_PROC_DIR, NULL);

        if (!g_pktlog_pde) {
                printk(PKTLOG_TAG "%s: proc_mkdir failed\n", __func__);
                return -EPERM;
        }

        /* Attach packet log */
        ret = pktlog_attach((struct hif_opaque_softc *)context);

        /* If packet log init failed */
        if (ret)
                goto attach_fail;

        return ret;

attach_fail:
        remove_proc_entry(PKTLOG_PROC_DIR, NULL);
        g_pktlog_pde = NULL;

        return ret;
}

/* Module-level exit: detach and remove the proc directory, if created. */
void pktlogmod_exit(void *context)
{
        if (!g_pktlog_pde)
                return;

        pktlog_detach((struct hif_opaque_softc *)context);

        /*
         * pdev kill needs to be implemented
         */
        remove_proc_entry(PKTLOG_PROC_DIR, NULL);
}
#endif /* REMOVE_PKT_LOG */