1 /* 2 * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #ifndef REMOVE_PKT_LOG 20 #ifndef EXPORT_SYMTAB 21 #define EXPORT_SYMTAB 22 #endif 23 #ifndef __KERNEL__ 24 #define __KERNEL__ 25 #endif 26 /* 27 * Linux specific implementation of Pktlogs for 802.11ac 28 */ 29 #include <linux/kernel.h> 30 #include <linux/init.h> 31 #include <linux/module.h> 32 #include <linux/vmalloc.h> 33 #include <linux/proc_fs.h> 34 #include <pktlog_ac_i.h> 35 #include <pktlog_ac_fmt.h> 36 #include "i_host_diag_core_log.h" 37 #include "host_diag_core_log.h" 38 #include "ani_global.h" 39 40 #define PKTLOG_DEVNAME_SIZE 32 41 #define MAX_WLANDEV 1 42 43 #ifdef MULTI_IF_NAME 44 #define PKTLOG_PROC_DIR "ath_pktlog" MULTI_IF_NAME 45 #else 46 #define PKTLOG_PROC_DIR "ath_pktlog" 47 #endif 48 49 /* Permissions for creating proc entries */ 50 #define PKTLOG_PROC_PERM 0444 51 #define PKTLOG_PROCSYS_DIR_PERM 0555 52 #define PKTLOG_PROCSYS_PERM 0644 53 54 #ifndef __MOD_INC_USE_COUNT 55 #define PKTLOG_MOD_INC_USE_COUNT do { \ 56 if (!try_module_get(THIS_MODULE)) { \ 57 qdf_nofl_info("try_module_get failed"); \ 58 } } while (0) 59 60 #define PKTLOG_MOD_DEC_USE_COUNT module_put(THIS_MODULE) 61 #else 62 #define PKTLOG_MOD_INC_USE_COUNT MOD_INC_USE_COUNT 63 #define PKTLOG_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT 64 #endif 65 66 static struct ath_pktlog_info *g_pktlog_info; 67 68 static struct proc_dir_entry *g_pktlog_pde; 69 70 static DEFINE_MUTEX(proc_mutex); 71 72 static int pktlog_attach(struct hif_opaque_softc *scn); 73 static void pktlog_detach(struct hif_opaque_softc *scn); 74 static int pktlog_open(struct inode *i, struct file *f); 75 static int pktlog_release(struct inode *i, struct file *f); 76 static ssize_t pktlog_read(struct file *file, char *buf, size_t nbytes, 77 loff_t *ppos); 78 79 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) 80 static const struct proc_ops pktlog_fops = { 81 .proc_open = pktlog_open, 82 .proc_release = pktlog_release, 83 .proc_read = pktlog_read, 84 }; 85 #else 86 static struct file_operations pktlog_fops = { 87 open: pktlog_open, 88 release:pktlog_release, 89 read : pktlog_read, 90 }; 91 #endif 92 93 void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn) 94 { 95 struct pktlog_dev_t *pl_dev = get_pktlog_handle(); 96 if (pl_dev) 97 pl_dev->pl_info->log_state = 0; 98 } 99 100 int pktlog_alloc_buf(struct hif_opaque_softc *scn) 101 { 102 uint32_t page_cnt; 103 unsigned long vaddr; 104 struct page *vpg; 105 struct pktlog_dev_t *pl_dev; 106 struct ath_pktlog_info *pl_info; 107 struct ath_pktlog_buf *buffer; 108 109 pl_dev = get_pktlog_handle(); 110 111 if (!pl_dev) { 112 qdf_info(PKTLOG_TAG "pdev_txrx_handle->pl_dev is null"); 113 return -EINVAL; 114 } 115 116 pl_info = pl_dev->pl_info; 117 118 page_cnt = (sizeof(*(pl_info->buf)) + pl_info->buf_size) / PAGE_SIZE; 119 120 qdf_spin_lock_bh(&pl_info->log_lock); 121 if (pl_info->buf) { 122 qdf_spin_unlock_bh(&pl_info->log_lock); 123 qdf_nofl_info(PKTLOG_TAG "Buffer is already in use"); 124 return -EINVAL; 125 } 126 qdf_spin_unlock_bh(&pl_info->log_lock); 127 128 buffer = vmalloc((page_cnt + 2) * PAGE_SIZE); 129 if (!buffer) { 130 return -ENOMEM; 131 } 132 133 buffer = (struct ath_pktlog_buf *) 134 (((unsigned long)(buffer) + PAGE_SIZE - 1) 135 & PAGE_MASK); 136 137 for (vaddr = (unsigned long)(buffer); 138 vaddr < ((unsigned long)(buffer) + (page_cnt * PAGE_SIZE)); 139 vaddr += PAGE_SIZE) { 140 vpg = vmalloc_to_page((const void *)vaddr); 141 SetPageReserved(vpg); 142 } 143 144 qdf_spin_lock_bh(&pl_info->log_lock); 145 if (pl_info->buf) 146 pktlog_release_buf(scn); 147 148 pl_info->buf = buffer; 149 qdf_spin_unlock_bh(&pl_info->log_lock); 150 return 0; 151 } 152 153 void pktlog_release_buf(struct hif_opaque_softc *scn) 154 { 155 unsigned long page_cnt; 156 unsigned long vaddr; 157 struct page *vpg; 158 struct pktlog_dev_t *pl_dev; 159 struct ath_pktlog_info *pl_info; 160 161 pl_dev = get_pktlog_handle(); 162 163 if (!pl_dev) { 164 qdf_print("Invalid pl_dev handle"); 165 return; 166 } 167 168 if (!pl_dev->pl_info) { 169 qdf_print("Invalid pl_dev handle"); 170 return; 171 } 172 173 pl_info = pl_dev->pl_info; 174 175 page_cnt = ((sizeof(*(pl_info->buf)) + pl_info->buf_size) / 176 PAGE_SIZE) + 1; 177 178 for (vaddr = (unsigned long)(pl_info->buf); 179 vaddr < (unsigned long)(pl_info->buf) + (page_cnt * PAGE_SIZE); 180 vaddr += PAGE_SIZE) { 181 vpg = vmalloc_to_page((const void *)vaddr); 182 ClearPageReserved(vpg); 183 } 184 185 vfree(pl_info->buf); 186 pl_info->buf = NULL; 187 } 188 189 static void pktlog_cleanup(struct ath_pktlog_info *pl_info) 190 { 191 pl_info->log_state = 0; 192 PKTLOG_LOCK_DESTROY(pl_info); 193 mutex_destroy(&pl_info->pktlog_mutex); 194 } 195 196 /* sysctl procfs handler to enable pktlog */ 197 static int 198 qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos) 199 { 200 int ret, enable; 201 ol_ath_generic_softc_handle scn; 202 struct pktlog_dev_t *pl_dev; 203 204 mutex_lock(&proc_mutex); 205 scn = (ol_ath_generic_softc_handle) ctl->extra1; 206 207 if (!scn) { 208 mutex_unlock(&proc_mutex); 209 qdf_info("Invalid scn context"); 210 ASSERT(0); 211 return -EINVAL; 212 } 213 214 pl_dev = get_pktlog_handle(); 215 216 if (!pl_dev) { 217 mutex_unlock(&proc_mutex); 218 qdf_info("Invalid pktlog context"); 219 ASSERT(0); 220 return -ENODEV; 221 } 222 223 ctl->data = &enable; 224 ctl->maxlen = sizeof(enable); 225 226 if (write) { 227 ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, 228 lenp, ppos); 229 if (ret == 0) { 230 ret = pl_dev->pl_funcs->pktlog_enable( 231 (struct hif_opaque_softc *)scn, enable, 232 cds_is_packet_log_enabled(), 0, 1); 233 } 234 else 235 QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG, 236 "Line:%d %s:proc_dointvec failed reason %d", 237 __LINE__, __func__, ret); 238 } else { 239 ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, 240 lenp, ppos); 241 if (ret) 242 QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG, 243 "Line:%d %s:proc_dointvec failed reason %d", 244 __LINE__, __func__, ret); 245 } 246 247 ctl->data = NULL; 248 ctl->maxlen = 0; 249 mutex_unlock(&proc_mutex); 250 251 return ret; 252 } 253 254 static int get_pktlog_bufsize(struct pktlog_dev_t *pl_dev) 255 { 256 return pl_dev->pl_info->buf_size; 257 } 258 259 /* sysctl procfs handler to set/get pktlog size */ 260 static int 261 qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos) 262 { 263 int ret, size; 264 ol_ath_generic_softc_handle scn; 265 struct pktlog_dev_t *pl_dev; 266 267 mutex_lock(&proc_mutex); 268 scn = (ol_ath_generic_softc_handle) ctl->extra1; 269 270 if (!scn) { 271 mutex_unlock(&proc_mutex); 272 qdf_info("Invalid scn context"); 273 ASSERT(0); 274 return -EINVAL; 275 } 276 277 pl_dev = get_pktlog_handle(); 278 279 if (!pl_dev) { 280 mutex_unlock(&proc_mutex); 281 qdf_info("Invalid pktlog handle"); 282 ASSERT(0); 283 return -ENODEV; 284 } 285 286 ctl->data = &size; 287 ctl->maxlen = sizeof(size); 288 289 if (write) { 290 ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, 291 lenp, ppos); 292 if (ret == 0) 293 ret = pl_dev->pl_funcs->pktlog_setsize( 294 (struct hif_opaque_softc *)scn, size); 295 } else { 296 size = get_pktlog_bufsize(pl_dev); 297 ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, 298 lenp, ppos); 299 } 300 301 ctl->data = NULL; 302 ctl->maxlen = 0; 303 mutex_unlock(&proc_mutex); 304 305 return ret; 306 } 307 308 /* Register sysctl table */ 309 static int pktlog_sysctl_register(struct hif_opaque_softc *scn) 310 { 311 struct pktlog_dev_t *pl_dev = get_pktlog_handle(); 312 struct ath_pktlog_info_lnx *pl_info_lnx; 313 char *proc_name; 314 315 if (pl_dev) { 316 pl_info_lnx = PL_INFO_LNX(pl_dev->pl_info); 317 proc_name = pl_dev->name; 318 } else { 319 pl_info_lnx = PL_INFO_LNX(g_pktlog_info); 320 proc_name = PKTLOG_PROC_SYSTEM; 321 } 322 323 /* 324 * Setup the sysctl table for creating the following sysctl entries: 325 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/enable for enabling/disabling 326 * pktlog 327 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/size for changing the buffer size 328 */ 329 memset(pl_info_lnx->sysctls, 0, sizeof(pl_info_lnx->sysctls)); 330 pl_info_lnx->sysctls[0].procname = PKTLOG_PROC_DIR; 331 pl_info_lnx->sysctls[0].mode = PKTLOG_PROCSYS_DIR_PERM; 332 pl_info_lnx->sysctls[0].child = &pl_info_lnx->sysctls[2]; 333 334 /* [1] is NULL terminator */ 335 pl_info_lnx->sysctls[2].procname = proc_name; 336 pl_info_lnx->sysctls[2].mode = PKTLOG_PROCSYS_DIR_PERM; 337 pl_info_lnx->sysctls[2].child = &pl_info_lnx->sysctls[4]; 338 339 /* [3] is NULL terminator */ 340 pl_info_lnx->sysctls[4].procname = "enable"; 341 pl_info_lnx->sysctls[4].mode = PKTLOG_PROCSYS_PERM; 342 pl_info_lnx->sysctls[4].proc_handler = ath_sysctl_pktlog_enable; 343 pl_info_lnx->sysctls[4].extra1 = scn; 344 345 pl_info_lnx->sysctls[5].procname = "size"; 346 pl_info_lnx->sysctls[5].mode = PKTLOG_PROCSYS_PERM; 347 pl_info_lnx->sysctls[5].proc_handler = ath_sysctl_pktlog_size; 348 pl_info_lnx->sysctls[5].extra1 = scn; 349 350 pl_info_lnx->sysctls[6].procname = "options"; 351 pl_info_lnx->sysctls[6].mode = PKTLOG_PROCSYS_PERM; 352 pl_info_lnx->sysctls[6].proc_handler = proc_dointvec; 353 pl_info_lnx->sysctls[6].data = &pl_info_lnx->info.options; 354 pl_info_lnx->sysctls[6].maxlen = sizeof(pl_info_lnx->info.options); 355 356 pl_info_lnx->sysctls[7].procname = "sack_thr"; 357 pl_info_lnx->sysctls[7].mode = PKTLOG_PROCSYS_PERM; 358 pl_info_lnx->sysctls[7].proc_handler = proc_dointvec; 359 pl_info_lnx->sysctls[7].data = &pl_info_lnx->info.sack_thr; 360 pl_info_lnx->sysctls[7].maxlen = sizeof(pl_info_lnx->info.sack_thr); 361 362 pl_info_lnx->sysctls[8].procname = "tail_length"; 363 pl_info_lnx->sysctls[8].mode = PKTLOG_PROCSYS_PERM; 364 pl_info_lnx->sysctls[8].proc_handler = proc_dointvec; 365 pl_info_lnx->sysctls[8].data = &pl_info_lnx->info.tail_length; 366 pl_info_lnx->sysctls[8].maxlen = sizeof(pl_info_lnx->info.tail_length); 367 368 pl_info_lnx->sysctls[9].procname = "thruput_thresh"; 369 pl_info_lnx->sysctls[9].mode = PKTLOG_PROCSYS_PERM; 370 pl_info_lnx->sysctls[9].proc_handler = proc_dointvec; 371 pl_info_lnx->sysctls[9].data = &pl_info_lnx->info.thruput_thresh; 372 pl_info_lnx->sysctls[9].maxlen = 373 sizeof(pl_info_lnx->info.thruput_thresh); 374 375 pl_info_lnx->sysctls[10].procname = "phyerr_thresh"; 376 pl_info_lnx->sysctls[10].mode = PKTLOG_PROCSYS_PERM; 377 pl_info_lnx->sysctls[10].proc_handler = proc_dointvec; 378 pl_info_lnx->sysctls[10].data = &pl_info_lnx->info.phyerr_thresh; 379 pl_info_lnx->sysctls[10].maxlen = 380 sizeof(pl_info_lnx->info.phyerr_thresh); 381 382 pl_info_lnx->sysctls[11].procname = "per_thresh"; 383 pl_info_lnx->sysctls[11].mode = PKTLOG_PROCSYS_PERM; 384 pl_info_lnx->sysctls[11].proc_handler = proc_dointvec; 385 pl_info_lnx->sysctls[11].data = &pl_info_lnx->info.per_thresh; 386 pl_info_lnx->sysctls[11].maxlen = sizeof(pl_info_lnx->info.per_thresh); 387 388 pl_info_lnx->sysctls[12].procname = "trigger_interval"; 389 pl_info_lnx->sysctls[12].mode = PKTLOG_PROCSYS_PERM; 390 pl_info_lnx->sysctls[12].proc_handler = proc_dointvec; 391 pl_info_lnx->sysctls[12].data = &pl_info_lnx->info.trigger_interval; 392 pl_info_lnx->sysctls[12].maxlen = 393 sizeof(pl_info_lnx->info.trigger_interval); 394 /* [13] is NULL terminator */ 395 396 /* and register everything */ 397 /* register_sysctl_table changed from 2.6.21 onwards */ 398 pl_info_lnx->sysctl_header = 399 register_sysctl_table(pl_info_lnx->sysctls); 400 401 if (!pl_info_lnx->sysctl_header) { 402 qdf_nofl_info("%s: failed to register sysctls!", proc_name); 403 return -EINVAL; 404 } 405 406 return 0; 407 } 408 409 /* 410 * Initialize logging for system or adapter 411 * Parameter scn should be NULL for system wide logging 412 */ 413 static int pktlog_attach(struct hif_opaque_softc *scn) 414 { 415 struct pktlog_dev_t *pl_dev; 416 struct ath_pktlog_info_lnx *pl_info_lnx; 417 char *proc_name; 418 struct proc_dir_entry *proc_entry; 419 420 qdf_info("attach pktlog resources"); 421 /* Allocate pktlog dev for later use */ 422 pl_dev = get_pktlog_handle(); 423 424 if (pl_dev) { 425 pl_info_lnx = kmalloc(sizeof(*pl_info_lnx), GFP_KERNEL); 426 if (!pl_info_lnx) { 427 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 428 "%s: Allocation failed for pl_info", 429 __func__); 430 goto attach_fail1; 431 } 432 433 pl_dev->pl_info = &pl_info_lnx->info; 434 pl_dev->name = WLANDEV_BASENAME; 435 proc_name = pl_dev->name; 436 437 if (!pl_dev->pl_funcs) 438 pl_dev->pl_funcs = &ol_pl_funcs; 439 440 /* 441 * Valid for both direct attach and offload architecture 442 */ 443 pl_dev->pl_funcs->pktlog_init(scn); 444 } else { 445 qdf_err("pl_dev is NULL"); 446 return -EINVAL; 447 } 448 449 /* 450 * initialize log info 451 * might be good to move to pktlog_init 452 */ 453 /* pl_dev->tgt_pktlog_alloced = false; */ 454 pl_info_lnx->proc_entry = NULL; 455 pl_info_lnx->sysctl_header = NULL; 456 457 proc_entry = proc_create_data(proc_name, PKTLOG_PROC_PERM, 458 g_pktlog_pde, &pktlog_fops, 459 &pl_info_lnx->info); 460 461 if (!proc_entry) { 462 qdf_info(PKTLOG_TAG "create_proc_entry failed for %s", proc_name); 463 goto attach_fail1; 464 } 465 466 pl_info_lnx->proc_entry = proc_entry; 467 468 if (pktlog_sysctl_register(scn)) { 469 qdf_nofl_info(PKTLOG_TAG "sysctl register failed for %s", 470 proc_name); 471 goto attach_fail2; 472 } 473 474 return 0; 475 476 attach_fail2: 477 remove_proc_entry(proc_name, g_pktlog_pde); 478 479 attach_fail1: 480 if (pl_dev) 481 kfree(pl_dev->pl_info); 482 483 return -EINVAL; 484 } 485 486 static void pktlog_sysctl_unregister(struct pktlog_dev_t *pl_dev) 487 { 488 struct ath_pktlog_info_lnx *pl_info_lnx; 489 490 if (!pl_dev) { 491 qdf_info("Invalid pktlog context"); 492 ASSERT(0); 493 return; 494 } 495 496 pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) : 497 PL_INFO_LNX(g_pktlog_info); 498 499 if (pl_info_lnx->sysctl_header) { 500 unregister_sysctl_table(pl_info_lnx->sysctl_header); 501 pl_info_lnx->sysctl_header = NULL; 502 } 503 } 504 505 static void pktlog_detach(struct hif_opaque_softc *scn) 506 { 507 struct ath_pktlog_info *pl_info; 508 struct pktlog_dev_t *pl_dev = get_pktlog_handle(); 509 510 qdf_info("detach pktlog resources"); 511 if (!pl_dev) { 512 qdf_info("Invalid pktlog context"); 513 ASSERT(0); 514 return; 515 } 516 517 pl_info = pl_dev->pl_info; 518 if (!pl_info) { 519 qdf_print("Invalid pktlog handle"); 520 ASSERT(0); 521 return; 522 } 523 mutex_lock(&pl_info->pktlog_mutex); 524 remove_proc_entry(WLANDEV_BASENAME, g_pktlog_pde); 525 pktlog_sysctl_unregister(pl_dev); 526 527 qdf_spin_lock_bh(&pl_info->log_lock); 528 529 if (pl_info->buf) { 530 pktlog_release_buf(scn); 531 pl_dev->tgt_pktlog_alloced = false; 532 } 533 qdf_spin_unlock_bh(&pl_info->log_lock); 534 mutex_unlock(&pl_info->pktlog_mutex); 535 pktlog_cleanup(pl_info); 536 537 if (pl_dev) { 538 kfree(pl_info); 539 pl_dev->pl_info = NULL; 540 } 541 } 542 543 static int __pktlog_open(struct inode *i, struct file *f) 544 { 545 struct hif_opaque_softc *scn; 546 struct pktlog_dev_t *pl_dev; 547 struct ath_pktlog_info *pl_info; 548 struct ath_pktlog_info_lnx *pl_info_lnx; 549 int ret = 0; 550 551 PKTLOG_MOD_INC_USE_COUNT; 552 scn = cds_get_context(QDF_MODULE_ID_HIF); 553 if (!scn) { 554 qdf_print("Invalid scn context"); 555 ASSERT(0); 556 return -EINVAL; 557 } 558 559 pl_dev = get_pktlog_handle(); 560 561 if (!pl_dev) { 562 qdf_print("Invalid pktlog handle"); 563 ASSERT(0); 564 return -ENODEV; 565 } 566 567 pl_info = pl_dev->pl_info; 568 569 if (!pl_info) { 570 qdf_err("pl_info NULL"); 571 return -EINVAL; 572 } 573 574 mutex_lock(&pl_info->pktlog_mutex); 575 pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) : 576 PL_INFO_LNX(g_pktlog_info); 577 578 if (!pl_info_lnx->sysctl_header) { 579 mutex_unlock(&pl_info->pktlog_mutex); 580 qdf_print("pktlog sysctl is unergistered"); 581 ASSERT(0); 582 return -EINVAL; 583 } 584 585 if (pl_info->curr_pkt_state != PKTLOG_OPR_NOT_IN_PROGRESS) { 586 mutex_unlock(&pl_info->pktlog_mutex); 587 qdf_print("plinfo state (%d) != PKTLOG_OPR_NOT_IN_PROGRESS", 588 pl_info->curr_pkt_state); 589 return -EBUSY; 590 } 591 592 pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_START; 593 594 pl_info->init_saved_state = pl_info->log_state; 595 if (!pl_info->log_state) { 596 /* Pktlog is already disabled. 597 * Proceed to read directly. 598 */ 599 pl_info->curr_pkt_state = 600 PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; 601 mutex_unlock(&pl_info->pktlog_mutex); 602 return ret; 603 } 604 /* Disbable the pktlog internally. */ 605 ret = pl_dev->pl_funcs->pktlog_disable(scn); 606 pl_info->log_state = 0; 607 pl_info->curr_pkt_state = 608 PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; 609 mutex_unlock(&pl_info->pktlog_mutex); 610 return ret; 611 } 612 613 static int pktlog_open(struct inode *i, struct file *f) 614 { 615 struct qdf_op_sync *op_sync; 616 int errno; 617 618 errno = qdf_op_protect(&op_sync); 619 if (errno) 620 return errno; 621 622 errno = __pktlog_open(i, f); 623 624 qdf_op_unprotect(op_sync); 625 626 return errno; 627 } 628 629 static int __pktlog_release(struct inode *i, struct file *f) 630 { 631 struct hif_opaque_softc *scn; 632 struct pktlog_dev_t *pl_dev; 633 struct ath_pktlog_info *pl_info; 634 struct ath_pktlog_info_lnx *pl_info_lnx; 635 int ret = 0; 636 637 PKTLOG_MOD_DEC_USE_COUNT; 638 scn = cds_get_context(QDF_MODULE_ID_HIF); 639 if (!scn) { 640 qdf_print("Invalid scn context"); 641 ASSERT(0); 642 return -EINVAL; 643 } 644 645 pl_dev = get_pktlog_handle(); 646 647 if (!pl_dev) { 648 qdf_print("Invalid pktlog handle"); 649 ASSERT(0); 650 return -ENODEV; 651 } 652 653 pl_info = pl_dev->pl_info; 654 655 if (!pl_info) { 656 qdf_print("Invalid pktlog info"); 657 ASSERT(0); 658 return -EINVAL; 659 } 660 661 mutex_lock(&pl_info->pktlog_mutex); 662 pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) : 663 PL_INFO_LNX(g_pktlog_info); 664 665 if (!pl_info_lnx->sysctl_header) { 666 pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; 667 mutex_unlock(&pl_info->pktlog_mutex); 668 qdf_print("pktlog sysctl is unergistered"); 669 ASSERT(0); 670 return -EINVAL; 671 } 672 pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE; 673 /*clear pktlog buffer.*/ 674 pktlog_clearbuff(scn, true); 675 pl_info->log_state = pl_info->init_saved_state; 676 pl_info->init_saved_state = 0; 677 678 /*Enable pktlog again*/ 679 ret = __pktlog_enable( 680 (struct hif_opaque_softc *)scn, pl_info->log_state, 681 cds_is_packet_log_enabled(), 0, 1); 682 683 pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; 684 mutex_unlock(&pl_info->pktlog_mutex); 685 if (ret != 0) 686 qdf_print("pktlog cannot be enabled. ret value %d", ret); 687 688 return ret; 689 } 690 691 static int pktlog_release(struct inode *i, struct file *f) 692 { 693 struct qdf_op_sync *op_sync; 694 int errno; 695 696 errno = qdf_op_protect(&op_sync); 697 if (errno) 698 return errno; 699 700 errno = __pktlog_release(i, f); 701 702 qdf_op_unprotect(op_sync); 703 704 return errno; 705 } 706 707 #ifndef MIN 708 #define MIN(a, b) (((a) < (b)) ? (a) : (b)) 709 #endif 710 711 /** 712 * pktlog_read_proc_entry() - This function is used to read data from the 713 * proc entry into the readers buffer 714 * @buf: Readers buffer 715 * @nbytes: Number of bytes to read 716 * @ppos: Offset within the drivers buffer 717 * @pl_info: Packet log information pointer 718 * @read_complete: Boolean value indication whether read is complete 719 * 720 * This function is used to read data from the proc entry into the readers 721 * buffer. Its functionality is similar to 'pktlog_read' which does 722 * copy to user to the user space buffer 723 * 724 * Return: Number of bytes read from the buffer 725 * 726 */ 727 ssize_t 728 pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos, 729 struct ath_pktlog_info *pl_info, bool *read_complete) 730 { 731 size_t bufhdr_size; 732 size_t count = 0, ret_val = 0; 733 int rem_len; 734 int start_offset, end_offset; 735 int fold_offset, ppos_data, cur_rd_offset, cur_wr_offset; 736 struct ath_pktlog_buf *log_buf; 737 738 qdf_spin_lock_bh(&pl_info->log_lock); 739 log_buf = pl_info->buf; 740 741 *read_complete = false; 742 743 if (!log_buf) { 744 *read_complete = true; 745 qdf_spin_unlock_bh(&pl_info->log_lock); 746 return 0; 747 } 748 749 if (*ppos == 0 && pl_info->log_state) { 750 pl_info->saved_state = pl_info->log_state; 751 pl_info->log_state = 0; 752 } 753 754 bufhdr_size = sizeof(log_buf->bufhdr); 755 756 /* copy valid log entries from circular buffer into user space */ 757 rem_len = nbytes; 758 count = 0; 759 760 if (*ppos < bufhdr_size) { 761 count = MIN((bufhdr_size - *ppos), rem_len); 762 qdf_mem_copy(buf, ((char *)&log_buf->bufhdr) + *ppos, 763 count); 764 rem_len -= count; 765 ret_val += count; 766 } 767 768 start_offset = log_buf->rd_offset; 769 cur_wr_offset = log_buf->wr_offset; 770 771 if ((rem_len == 0) || (start_offset < 0)) 772 goto rd_done; 773 774 fold_offset = -1; 775 cur_rd_offset = start_offset; 776 777 /* Find the last offset and fold-offset if the buffer is folded */ 778 do { 779 struct ath_pktlog_hdr *log_hdr; 780 int log_data_offset; 781 782 log_hdr = (struct ath_pktlog_hdr *) (log_buf->log_data + 783 cur_rd_offset); 784 785 log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr); 786 787 if ((fold_offset == -1) 788 && ((pl_info->buf_size - log_data_offset) 789 <= log_hdr->size)) 790 fold_offset = log_data_offset - 1; 791 792 PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size); 793 794 if ((fold_offset == -1) && (cur_rd_offset == 0) 795 && (cur_rd_offset != cur_wr_offset)) 796 fold_offset = log_data_offset + log_hdr->size - 1; 797 798 end_offset = log_data_offset + log_hdr->size - 1; 799 } while (cur_rd_offset != cur_wr_offset); 800 801 ppos_data = *ppos + ret_val - bufhdr_size + start_offset; 802 803 if (fold_offset == -1) { 804 if (ppos_data > end_offset) 805 goto rd_done; 806 807 count = MIN(rem_len, (end_offset - ppos_data + 1)); 808 qdf_mem_copy(buf + ret_val, 809 log_buf->log_data + ppos_data, 810 count); 811 ret_val += count; 812 rem_len -= count; 813 } else { 814 if (ppos_data <= fold_offset) { 815 count = MIN(rem_len, (fold_offset - ppos_data + 1)); 816 qdf_mem_copy(buf + ret_val, 817 log_buf->log_data + ppos_data, 818 count); 819 ret_val += count; 820 rem_len -= count; 821 } 822 823 if (rem_len == 0) 824 goto rd_done; 825 826 ppos_data = 827 *ppos + ret_val - (bufhdr_size + 828 (fold_offset - start_offset + 1)); 829 830 if (ppos_data <= end_offset) { 831 count = MIN(rem_len, (end_offset - ppos_data + 1)); 832 qdf_mem_copy(buf + ret_val, 833 log_buf->log_data + ppos_data, 834 count); 835 ret_val += count; 836 rem_len -= count; 837 } 838 } 839 840 rd_done: 841 if ((ret_val < nbytes) && pl_info->saved_state) { 842 pl_info->log_state = pl_info->saved_state; 843 pl_info->saved_state = 0; 844 } 845 *ppos += ret_val; 846 847 if (ret_val == 0) { 848 /* Write pointer might have been updated during the read. 849 * So, if some data is written into, lets not reset the pointers 850 * We can continue to read from the offset position 851 */ 852 if (cur_wr_offset != log_buf->wr_offset) { 853 *read_complete = false; 854 } else { 855 pl_info->buf->rd_offset = -1; 856 pl_info->buf->wr_offset = 0; 857 pl_info->buf->bytes_written = 0; 858 pl_info->buf->offset = PKTLOG_READ_OFFSET; 859 *read_complete = true; 860 } 861 } 862 qdf_spin_unlock_bh(&pl_info->log_lock); 863 return ret_val; 864 } 865 866 static ssize_t 867 __pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos) 868 { 869 size_t bufhdr_size; 870 size_t count = 0, ret_val = 0; 871 int rem_len; 872 int start_offset, end_offset; 873 int fold_offset, ppos_data, cur_rd_offset; 874 struct ath_pktlog_info *pl_info; 875 struct ath_pktlog_buf *log_buf; 876 877 pl_info = PDE_DATA(file->f_path.dentry->d_inode); 878 if (!pl_info) 879 return 0; 880 881 qdf_spin_lock_bh(&pl_info->log_lock); 882 log_buf = pl_info->buf; 883 884 if (!log_buf) { 885 qdf_spin_unlock_bh(&pl_info->log_lock); 886 return 0; 887 } 888 889 if (pl_info->log_state) { 890 /* Read is not allowed when write is going on 891 * When issuing cat command, ensure to send 892 * pktlog disable command first. 893 */ 894 qdf_spin_unlock_bh(&pl_info->log_lock); 895 return -EINVAL; 896 } 897 898 if (*ppos == 0 && pl_info->log_state) { 899 pl_info->saved_state = pl_info->log_state; 900 pl_info->log_state = 0; 901 } 902 903 bufhdr_size = sizeof(log_buf->bufhdr); 904 905 /* copy valid log entries from circular buffer into user space */ 906 907 rem_len = nbytes; 908 count = 0; 909 910 if (*ppos < bufhdr_size) { 911 count = QDF_MIN((bufhdr_size - *ppos), rem_len); 912 qdf_spin_unlock_bh(&pl_info->log_lock); 913 if (copy_to_user(buf, ((char *)&log_buf->bufhdr) + *ppos, 914 count)) { 915 return -EFAULT; 916 } 917 rem_len -= count; 918 ret_val += count; 919 qdf_spin_lock_bh(&pl_info->log_lock); 920 } 921 922 start_offset = log_buf->rd_offset; 923 924 if ((rem_len == 0) || (start_offset < 0)) 925 goto rd_done; 926 927 fold_offset = -1; 928 cur_rd_offset = start_offset; 929 930 /* Find the last offset and fold-offset if the buffer is folded */ 931 do { 932 struct ath_pktlog_hdr *log_hdr; 933 int log_data_offset; 934 935 log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data + 936 cur_rd_offset); 937 938 log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr); 939 940 if ((fold_offset == -1) 941 && ((pl_info->buf_size - log_data_offset) 942 <= log_hdr->size)) 943 fold_offset = log_data_offset - 1; 944 945 PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size); 946 947 if ((fold_offset == -1) && (cur_rd_offset == 0) 948 && (cur_rd_offset != log_buf->wr_offset)) 949 fold_offset = log_data_offset + log_hdr->size - 1; 950 951 end_offset = log_data_offset + log_hdr->size - 1; 952 } while (cur_rd_offset != log_buf->wr_offset); 953 954 ppos_data = *ppos + ret_val - bufhdr_size + start_offset; 955 956 if (fold_offset == -1) { 957 if (ppos_data > end_offset) 958 goto rd_done; 959 960 count = QDF_MIN(rem_len, (end_offset - ppos_data + 1)); 961 qdf_spin_unlock_bh(&pl_info->log_lock); 962 963 if (copy_to_user(buf + ret_val, 964 log_buf->log_data + ppos_data, count)) { 965 return -EFAULT; 966 } 967 968 ret_val += count; 969 rem_len -= count; 970 qdf_spin_lock_bh(&pl_info->log_lock); 971 } else { 972 if (ppos_data <= fold_offset) { 973 count = QDF_MIN(rem_len, (fold_offset - ppos_data + 1)); 974 qdf_spin_unlock_bh(&pl_info->log_lock); 975 if (copy_to_user(buf + ret_val, 976 log_buf->log_data + ppos_data, 977 count)) { 978 return -EFAULT; 979 } 980 ret_val += count; 981 rem_len -= count; 982 qdf_spin_lock_bh(&pl_info->log_lock); 983 } 984 985 if (rem_len == 0) 986 goto rd_done; 987 988 ppos_data = 989 *ppos + ret_val - (bufhdr_size + 990 (fold_offset - start_offset + 1)); 991 992 if (ppos_data <= end_offset) { 993 count = QDF_MIN(rem_len, (end_offset - ppos_data + 1)); 994 qdf_spin_unlock_bh(&pl_info->log_lock); 995 if (copy_to_user(buf + ret_val, 996 log_buf->log_data + ppos_data, 997 count)) { 998 return -EFAULT; 999 } 1000 ret_val += count; 1001 rem_len -= count; 1002 qdf_spin_lock_bh(&pl_info->log_lock); 1003 } 1004 } 1005 1006 rd_done: 1007 if ((ret_val < nbytes) && pl_info->saved_state) { 1008 pl_info->log_state = pl_info->saved_state; 1009 pl_info->saved_state = 0; 1010 } 1011 *ppos += ret_val; 1012 1013 qdf_spin_unlock_bh(&pl_info->log_lock); 1014 return ret_val; 1015 } 1016 1017 static ssize_t 1018 pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos) 1019 { 1020 struct ath_pktlog_info *info = PDE_DATA(file->f_path.dentry->d_inode); 1021 struct qdf_op_sync *op_sync; 1022 ssize_t err_size; 1023 1024 if (!info) 1025 return 0; 1026 1027 err_size = qdf_op_protect(&op_sync); 1028 if (err_size) 1029 return err_size; 1030 1031 mutex_lock(&info->pktlog_mutex); 1032 err_size = __pktlog_read(file, buf, nbytes, ppos); 1033 mutex_unlock(&info->pktlog_mutex); 1034 1035 qdf_op_unprotect(op_sync); 1036 1037 return err_size; 1038 } 1039 1040 int pktlogmod_init(void *context) 1041 { 1042 int ret; 1043 1044 qdf_info("Initialize pkt_log module"); 1045 /* create the proc directory entry */ 1046 g_pktlog_pde = proc_mkdir(PKTLOG_PROC_DIR, NULL); 1047 1048 if (!g_pktlog_pde) { 1049 qdf_info(PKTLOG_TAG "proc_mkdir failed"); 1050 return -EPERM; 1051 } 1052 1053 /* Attach packet log */ 1054 ret = pktlog_attach((struct hif_opaque_softc *)context); 1055 1056 /* If packet log init failed */ 1057 if (ret) { 1058 qdf_err("pktlog_attach failed"); 1059 goto attach_fail; 1060 } 1061 1062 return ret; 1063 1064 attach_fail: 1065 remove_proc_entry(PKTLOG_PROC_DIR, NULL); 1066 g_pktlog_pde = NULL; 1067 1068 return ret; 1069 } 1070 1071 void pktlogmod_exit(void *context) 1072 { 1073 qdf_info("pkt_log module cleanup"); 1074 if (!g_pktlog_pde) { 1075 qdf_err("g_pktlog_pde is NULL"); 1076 return; 1077 } 1078 1079 pktlog_detach((struct hif_opaque_softc *)context); 1080 1081 /* 1082 * pdev kill needs to be implemented 1083 */ 1084 remove_proc_entry(PKTLOG_PROC_DIR, NULL); 1085 g_pktlog_pde = NULL; 1086 } 1087 #endif 1088