1 /* 2 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * @file cdp_txrx_cmn.h 21 * @brief Define the host data path converged API functions 22 * called by the host control SW and the OS interface module 23 */ 24 #ifndef _CDP_TXRX_CMN_H_ 25 #define _CDP_TXRX_CMN_H_ 26 27 #include "qdf_types.h" 28 #include "qdf_nbuf.h" 29 #include "cdp_txrx_ops.h" 30 #include "cdp_txrx_handle.h" 31 #include "cdp_txrx_cmn_struct.h" 32 /****************************************************************************** 33 * 34 * Common Data Path Header File 35 * 36 *****************************************************************************/ 37 #define dp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP, params) 38 #define dp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP, params) 39 #define dp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP, params) 40 #define dp_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_DP, params) 41 #define dp_debug(params...) 
QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params) 42 43 static inline int 44 cdp_soc_attach_target(ol_txrx_soc_handle soc) 45 { 46 if (!soc || !soc->ops) { 47 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 48 "%s: Invalid Instance:", __func__); 49 QDF_BUG(0); 50 return 0; 51 } 52 53 if (!soc->ops->cmn_drv_ops || 54 !soc->ops->cmn_drv_ops->txrx_soc_attach_target) 55 return 0; 56 57 return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc); 58 59 } 60 61 static inline int 62 cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc) 63 { 64 if (!soc || !soc->ops) { 65 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 66 "%s: Invalid Instance:", __func__); 67 QDF_BUG(0); 68 return 0; 69 } 70 71 if (!soc->ops->cmn_drv_ops || 72 !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg) 73 return 0; 74 75 return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc); 76 } 77 78 static inline void 79 cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config) 80 { 81 if (!soc || !soc->ops) { 82 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 83 "%s: Invalid Instance:", __func__); 84 QDF_BUG(0); 85 return; 86 } 87 88 if (!soc->ops->cmn_drv_ops || 89 !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg) 90 return; 91 92 soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config); 93 } 94 95 static inline struct cdp_vdev * 96 cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 97 uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode) 98 { 99 if (!soc || !soc->ops) { 100 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 101 "%s: Invalid Instance:", __func__); 102 QDF_BUG(0); 103 return NULL; 104 } 105 106 if (!soc->ops->cmn_drv_ops || 107 !soc->ops->cmn_drv_ops->txrx_vdev_attach) 108 return NULL; 109 110 return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev, 111 vdev_mac_addr, vdev_id, op_mode); 112 } 113 #ifndef CONFIG_WIN 114 /** 115 * cdp_flow_pool_map() - Create flow pool for vdev 116 * @soc - data path soc handle 117 * @pdev 118 * @vdev_id - vdev_id corresponding to vdev start 119 * 120 
* Create per vdev flow pool. 121 * 122 * return none 123 */ 124 static inline QDF_STATUS cdp_flow_pool_map(ol_txrx_soc_handle soc, 125 struct cdp_pdev *pdev, uint8_t vdev_id) 126 { 127 if (!soc || !soc->ops) { 128 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 129 "%s: Invalid Instance:", __func__); 130 QDF_BUG(0); 131 return QDF_STATUS_E_INVAL; 132 } 133 134 if (!soc->ops->flowctl_ops || 135 !soc->ops->flowctl_ops->flow_pool_map_handler) 136 return QDF_STATUS_E_INVAL; 137 138 return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev, vdev_id); 139 } 140 141 /** 142 * cdp_flow_pool_unmap() - Delete flow pool 143 * @soc - data path soc handle 144 * @pdev 145 * @vdev_id - vdev_id corresponding to vdev start 146 * 147 * Delete flow pool 148 * 149 * return none 150 */ 151 static inline void cdp_flow_pool_unmap(ol_txrx_soc_handle soc, 152 struct cdp_pdev *pdev, uint8_t vdev_id) 153 { 154 if (!soc || !soc->ops) { 155 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 156 "%s: Invalid Instance:", __func__); 157 QDF_BUG(0); 158 return; 159 } 160 161 if (!soc->ops->flowctl_ops || 162 !soc->ops->flowctl_ops->flow_pool_unmap_handler) 163 return; 164 165 return soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev, 166 vdev_id); 167 } 168 #endif 169 170 static inline void 171 cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 172 ol_txrx_vdev_delete_cb callback, void *cb_context) 173 { 174 if (!soc || !soc->ops) { 175 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 176 "%s: Invalid Instance:", __func__); 177 QDF_BUG(0); 178 return; 179 } 180 181 if (!soc->ops->cmn_drv_ops || 182 !soc->ops->cmn_drv_ops->txrx_vdev_detach) 183 return; 184 185 soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev, 186 callback, cb_context); 187 } 188 189 static inline int 190 cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) 191 { 192 if (!soc || !soc->ops) { 193 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 194 "%s: Invalid Instance:", __func__); 195 
QDF_BUG(0); 196 return 0; 197 } 198 199 if (!soc->ops->cmn_drv_ops || 200 !soc->ops->cmn_drv_ops->txrx_pdev_attach_target) 201 return 0; 202 203 return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev); 204 } 205 206 static inline struct cdp_pdev *cdp_pdev_attach 207 (ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 208 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id) 209 { 210 if (!soc || !soc->ops) { 211 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 212 "%s: Invalid Instance:", __func__); 213 QDF_BUG(0); 214 return NULL; 215 } 216 217 if (!soc->ops->cmn_drv_ops || 218 !soc->ops->cmn_drv_ops->txrx_pdev_attach) 219 return NULL; 220 221 return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev, 222 htc_pdev, osdev, pdev_id); 223 } 224 225 static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc, 226 struct cdp_pdev *pdev) 227 { 228 if (!soc || !soc->ops) { 229 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 230 "%s: Invalid Instance:", __func__); 231 QDF_BUG(0); 232 return 0; 233 } 234 235 if (!soc->ops->cmn_drv_ops || 236 !soc->ops->cmn_drv_ops->txrx_pdev_post_attach) 237 return 0; 238 239 return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev); 240 } 241 242 static inline void 243 cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 244 { 245 if (!soc || !soc->ops) { 246 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 247 "%s: Invalid Instance:", __func__); 248 QDF_BUG(0); 249 return; 250 } 251 252 if (!soc->ops->cmn_drv_ops || 253 !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach) 254 return; 255 256 soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force); 257 } 258 259 static inline void 260 cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 261 { 262 if (!soc || !soc->ops) { 263 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 264 "%s: Invalid Instance:", __func__); 265 QDF_BUG(0); 266 return; 267 } 268 269 if (!soc->ops->cmn_drv_ops || 270 
!soc->ops->cmn_drv_ops->txrx_pdev_detach) 271 return; 272 273 soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force); 274 } 275 276 static inline void *cdp_peer_create 277 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 278 uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer) 279 { 280 if (!soc || !soc->ops) { 281 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 282 "%s: Invalid Instance:", __func__); 283 QDF_BUG(0); 284 return NULL; 285 } 286 287 if (!soc->ops->cmn_drv_ops || 288 !soc->ops->cmn_drv_ops->txrx_peer_create) 289 return NULL; 290 291 return soc->ops->cmn_drv_ops->txrx_peer_create(vdev, 292 peer_mac_addr, ctrl_peer); 293 } 294 295 static inline void cdp_peer_setup 296 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) 297 { 298 if (!soc || !soc->ops) { 299 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 300 "%s: Invalid Instance:", __func__); 301 QDF_BUG(0); 302 return; 303 } 304 305 if (!soc->ops->cmn_drv_ops || 306 !soc->ops->cmn_drv_ops->txrx_peer_setup) 307 return; 308 309 soc->ops->cmn_drv_ops->txrx_peer_setup(vdev, 310 peer); 311 } 312 313 static inline void *cdp_peer_ast_hash_find 314 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr) 315 { 316 if (!soc || !soc->ops) { 317 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 318 "%s: Invalid Instance:", __func__); 319 QDF_BUG(0); 320 return NULL; 321 } 322 323 if (!soc->ops->cmn_drv_ops || 324 !soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find) 325 return NULL; 326 327 return soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find(soc, 328 ast_mac_addr); 329 } 330 331 static inline int cdp_peer_add_ast 332 (ol_txrx_soc_handle soc, struct cdp_peer *peer_handle, 333 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags) 334 { 335 if (!soc || !soc->ops) { 336 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 337 "%s: Invalid Instance:", __func__); 338 QDF_BUG(0); 339 return 0; 340 } 341 342 if (!soc->ops->cmn_drv_ops || 343 !soc->ops->cmn_drv_ops->txrx_peer_add_ast) 344 
return 0; 345 346 return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc, 347 peer_handle, 348 mac_addr, 349 type, 350 flags); 351 } 352 353 static inline void cdp_peer_reset_ast 354 (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, void *vdev_hdl) 355 { 356 357 if (!soc || !soc->ops) { 358 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 359 "%s: Invalid Instance:", __func__); 360 QDF_BUG(0); 361 return; 362 } 363 if (!soc->ops->cmn_drv_ops || 364 !soc->ops->cmn_drv_ops->txrx_peer_reset_ast) 365 return; 366 367 soc->ops->cmn_drv_ops->txrx_peer_reset_ast(soc, wds_macaddr, vdev_hdl); 368 } 369 370 static inline void cdp_peer_reset_ast_table 371 (ol_txrx_soc_handle soc, void *vdev_hdl) 372 { 373 if (!soc || !soc->ops) { 374 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 375 "%s: Invalid Instance:", __func__); 376 QDF_BUG(0); 377 return; 378 } 379 380 if (!soc->ops->cmn_drv_ops || 381 !soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table) 382 return; 383 384 soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table(soc, vdev_hdl); 385 } 386 387 static inline void cdp_peer_flush_ast_table 388 (ol_txrx_soc_handle soc) 389 { 390 if (!soc || !soc->ops) { 391 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 392 "%s: Invalid Instance:", __func__); 393 QDF_BUG(0); 394 return; 395 } 396 397 if (!soc->ops->cmn_drv_ops || 398 !soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table) 399 return; 400 401 soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table(soc); 402 } 403 404 static inline int cdp_peer_update_ast 405 (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, 406 struct cdp_peer *peer_handle, uint32_t flags) 407 { 408 if (!soc || !soc->ops) { 409 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 410 "%s: Invalid Instance:", __func__); 411 QDF_BUG(0); 412 return 0; 413 } 414 415 if (!soc->ops->cmn_drv_ops || 416 !soc->ops->cmn_drv_ops->txrx_peer_update_ast) 417 return 0; 418 419 420 return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc, 421 peer_handle, 422 wds_macaddr, 423 flags); 424 } 425 
426 static inline void cdp_peer_del_ast 427 (ol_txrx_soc_handle soc, void *ast_handle) 428 { 429 if (!soc || !soc->ops) { 430 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 431 "%s: Invalid Instance:", __func__); 432 QDF_BUG(0); 433 return; 434 } 435 436 if (!soc->ops->cmn_drv_ops || 437 !soc->ops->cmn_drv_ops->txrx_peer_del_ast) 438 return; 439 440 soc->ops->cmn_drv_ops->txrx_peer_del_ast(soc, ast_handle); 441 } 442 443 444 static inline uint8_t cdp_peer_ast_get_pdev_id 445 (ol_txrx_soc_handle soc, void *ast_handle) 446 { 447 if (!soc || !soc->ops) { 448 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 449 "%s: Invalid Instance:", __func__); 450 QDF_BUG(0); 451 return 0xff; 452 } 453 454 if (!soc->ops->cmn_drv_ops || 455 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id) 456 return 0xff; 457 458 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id(soc, 459 ast_handle); 460 } 461 462 static inline uint8_t cdp_peer_ast_get_next_hop 463 (ol_txrx_soc_handle soc, void *ast_handle) 464 { 465 if (!soc || !soc->ops) { 466 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 467 "%s: Invalid Instance:", __func__); 468 QDF_BUG(0); 469 return 0xff; 470 } 471 472 if (!soc->ops->cmn_drv_ops || 473 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop) 474 return 0xff; 475 476 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop(soc, 477 ast_handle); 478 } 479 480 /** 481 * cdp_peer_ast_get_type() - Return type (Static, WDS, MEC) of AST entry 482 * @soc: DP SoC handle 483 * @ast_handle: Opaque handle to AST entry 484 * 485 * Return: AST entry type (Static/WDS/MEC) 486 */ 487 static inline enum cdp_txrx_ast_entry_type cdp_peer_ast_get_type 488 (ol_txrx_soc_handle soc, void *ast_handle) 489 490 { 491 if (!soc || !soc->ops) { 492 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 493 "%s: Invalid Instance:", __func__); 494 QDF_BUG(0); 495 return 0; 496 } 497 498 if (!soc->ops->cmn_drv_ops || 499 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_type) 500 return 0; 501 502 
return soc->ops->cmn_drv_ops->txrx_peer_ast_get_type(soc, ast_handle); 503 } 504 505 static inline void cdp_peer_ast_set_type 506 (ol_txrx_soc_handle soc, void *ast_handle, 507 enum cdp_txrx_ast_entry_type type) 508 { 509 if (!soc || !soc->ops) { 510 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 511 "%s: Invalid Instance:", __func__); 512 QDF_BUG(0); 513 return; 514 } 515 516 if (!soc->ops->cmn_drv_ops || 517 !soc->ops->cmn_drv_ops->txrx_peer_ast_set_type) 518 return; 519 520 soc->ops->cmn_drv_ops->txrx_peer_ast_set_type(soc, ast_handle, type); 521 } 522 523 static inline void cdp_peer_teardown 524 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) 525 { 526 if (!soc || !soc->ops) { 527 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 528 "%s: Invalid Instance:", __func__); 529 QDF_BUG(0); 530 return; 531 } 532 533 if (!soc->ops->cmn_drv_ops || 534 !soc->ops->cmn_drv_ops->txrx_peer_teardown) 535 return; 536 537 soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer); 538 } 539 540 static inline void 541 cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap) 542 { 543 if (!soc || !soc->ops) { 544 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 545 "%s: Invalid Instance:", __func__); 546 QDF_BUG(0); 547 return; 548 } 549 550 if (!soc->ops->cmn_drv_ops || 551 !soc->ops->cmn_drv_ops->txrx_peer_delete) 552 return; 553 554 soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap); 555 } 556 557 static inline int 558 cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 559 uint8_t smart_monitor) 560 { 561 if (!soc || !soc->ops) { 562 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 563 "%s: Invalid Instance:", __func__); 564 QDF_BUG(0); 565 return 0; 566 } 567 568 if (!soc->ops->cmn_drv_ops || 569 !soc->ops->cmn_drv_ops->txrx_set_monitor_mode) 570 return 0; 571 572 return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev, 573 smart_monitor); 574 } 575 576 static inline void 577 cdp_set_curchan(ol_txrx_soc_handle soc, 578 
struct cdp_pdev *pdev, 579 uint32_t chan_mhz) 580 { 581 if (!soc || !soc->ops) { 582 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 583 "%s: Invalid Instance:", __func__); 584 QDF_BUG(0); 585 return; 586 } 587 588 if (!soc->ops->cmn_drv_ops || 589 !soc->ops->cmn_drv_ops->txrx_set_curchan) 590 return; 591 592 soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz); 593 } 594 595 static inline void 596 cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 597 void *filter, uint32_t num) 598 { 599 if (!soc || !soc->ops) { 600 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 601 "%s: Invalid Instance:", __func__); 602 QDF_BUG(0); 603 return; 604 } 605 606 if (!soc->ops->cmn_drv_ops || 607 !soc->ops->cmn_drv_ops->txrx_set_privacy_filters) 608 return; 609 610 soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev, 611 filter, num); 612 } 613 614 static inline int 615 cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 616 struct cdp_monitor_filter *filter_val) 617 { 618 if (soc->ops->mon_ops->txrx_set_advance_monitor_filter) 619 return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev, 620 filter_val); 621 return 0; 622 } 623 624 625 /****************************************************************************** 626 * Data Interface (B Interface) 627 *****************************************************************************/ 628 static inline void 629 cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 630 void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev, 631 struct ol_txrx_ops *txrx_ops) 632 { 633 if (!soc || !soc->ops) { 634 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 635 "%s: Invalid Instance:", __func__); 636 QDF_BUG(0); 637 return; 638 } 639 640 if (!soc->ops->cmn_drv_ops || 641 !soc->ops->cmn_drv_ops->txrx_vdev_register) 642 return; 643 644 soc->ops->cmn_drv_ops->txrx_vdev_register(vdev, 645 osif_vdev, ctrl_vdev, txrx_ops); 646 } 647 648 static inline int 649 
cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 650 qdf_nbuf_t tx_mgmt_frm, uint8_t type) 651 { 652 if (!soc || !soc->ops) { 653 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 654 "%s: Invalid Instance:", __func__); 655 QDF_BUG(0); 656 return 0; 657 } 658 659 if (!soc->ops->cmn_drv_ops || 660 !soc->ops->cmn_drv_ops->txrx_mgmt_send) 661 return 0; 662 663 return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev, 664 tx_mgmt_frm, type); 665 } 666 667 static inline int 668 cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 669 qdf_nbuf_t tx_mgmt_frm, uint8_t type, 670 uint8_t use_6mbps, uint16_t chanfreq) 671 { 672 if (!soc || !soc->ops) { 673 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 674 "%s: Invalid Instance:", __func__); 675 QDF_BUG(0); 676 return 0; 677 } 678 679 if (!soc->ops->cmn_drv_ops || 680 !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext) 681 return 0; 682 683 return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext 684 (vdev, tx_mgmt_frm, type, use_6mbps, chanfreq); 685 } 686 687 688 static inline void 689 cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 690 uint8_t type, ol_txrx_mgmt_tx_cb download_cb, 691 ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt) 692 { 693 if (!soc || !soc->ops) { 694 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 695 "%s: Invalid Instance:", __func__); 696 QDF_BUG(0); 697 return; 698 } 699 700 if (!soc->ops->cmn_drv_ops || 701 !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set) 702 return; 703 704 soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set 705 (pdev, type, download_cb, ota_ack_cb, ctxt); 706 } 707 708 static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc, 709 struct cdp_pdev *pdev) 710 { 711 if (!soc || !soc->ops) { 712 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 713 "%s: Invalid Instance:", __func__); 714 QDF_BUG(0); 715 return 0; 716 } 717 718 if (!soc->ops->cmn_drv_ops || 719 !soc->ops->cmn_drv_ops->txrx_get_tx_pending) 720 return 0; 721 722 723 return 
soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev); 724 } 725 726 static inline void 727 cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev, 728 ol_txrx_data_tx_cb callback, void *ctxt) 729 { 730 if (!soc || !soc->ops) { 731 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 732 "%s: Invalid Instance:", __func__); 733 QDF_BUG(0); 734 return; 735 } 736 737 if (!soc->ops->cmn_drv_ops || 738 !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set) 739 return; 740 741 soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev, 742 callback, ctxt); 743 } 744 745 /****************************************************************************** 746 * Statistics and Debugging Interface (C Interface) 747 *****************************************************************************/ 748 /** 749 * External Device physical address types 750 * 751 * Currently, both MAC and IPA uController use the same size addresses 752 * and descriptors are exchanged between these two depending on the mode. 753 * 754 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA 755 * operations. However, external device physical address sizes 756 * may be different from host-specific physical address sizes. 757 * This calls for the following definitions for target devices 758 * (MAC, IPA uc). 
759 */ 760 #if HTT_PADDR64 761 typedef uint64_t target_paddr_t; 762 #else 763 typedef uint32_t target_paddr_t; 764 #endif /*HTT_PADDR64 */ 765 766 static inline int 767 cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 768 int max_subfrms_ampdu, 769 int max_subfrms_amsdu) 770 { 771 if (!soc || !soc->ops) { 772 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 773 "%s: Invalid Instance:", __func__); 774 QDF_BUG(0); 775 return 0; 776 } 777 778 if (!soc->ops->cmn_drv_ops || 779 !soc->ops->cmn_drv_ops->txrx_aggr_cfg) 780 return 0; 781 782 return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev, 783 max_subfrms_ampdu, max_subfrms_amsdu); 784 } 785 786 static inline int 787 cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 788 struct ol_txrx_stats_req *req, bool per_vdev, 789 bool response_expected) 790 { 791 if (!soc || !soc->ops) { 792 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 793 "%s: Invalid Instance:", __func__); 794 QDF_BUG(0); 795 return 0; 796 } 797 798 if (!soc->ops->cmn_drv_ops || 799 !soc->ops->cmn_drv_ops->txrx_fw_stats_get) 800 return 0; 801 802 return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req, 803 per_vdev, response_expected); 804 } 805 806 static inline int 807 cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs) 808 { 809 if (!soc || !soc->ops) { 810 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 811 "%s: Invalid Instance:", __func__); 812 QDF_BUG(0); 813 return 0; 814 } 815 816 if (!soc->ops->cmn_drv_ops || 817 !soc->ops->cmn_drv_ops->txrx_debug) 818 return 0; 819 820 return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs); 821 } 822 823 static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc, 824 struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val) 825 { 826 if (!soc || !soc->ops) { 827 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 828 "%s: Invalid Instance:", __func__); 829 QDF_BUG(0); 830 return; 831 } 832 833 if (!soc->ops->cmn_drv_ops || 834 
!soc->ops->cmn_drv_ops->txrx_fw_stats_cfg) 835 return; 836 837 soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev, 838 cfg_stats_type, cfg_val); 839 } 840 841 static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level) 842 { 843 if (!soc || !soc->ops) { 844 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 845 "%s: Invalid Instance:", __func__); 846 QDF_BUG(0); 847 return; 848 } 849 850 if (!soc->ops->cmn_drv_ops || 851 !soc->ops->cmn_drv_ops->txrx_print_level_set) 852 return; 853 854 soc->ops->cmn_drv_ops->txrx_print_level_set(level); 855 } 856 857 static inline uint8_t * 858 cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 859 { 860 if (!soc || !soc->ops) { 861 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 862 "%s: Invalid Instance:", __func__); 863 QDF_BUG(0); 864 return NULL; 865 } 866 867 if (!soc->ops->cmn_drv_ops || 868 !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr) 869 return NULL; 870 871 return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev); 872 873 } 874 875 /** 876 * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of 877 * vdev 878 * @vdev: vdev handle 879 * 880 * Return: Handle to struct qdf_mac_addr 881 */ 882 static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr 883 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 884 { 885 if (!soc || !soc->ops) { 886 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 887 "%s: Invalid Instance:", __func__); 888 QDF_BUG(0); 889 return NULL; 890 } 891 892 if (!soc->ops->cmn_drv_ops || 893 !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr) 894 return NULL; 895 896 return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr 897 (vdev); 898 899 } 900 901 /** 902 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev 903 * @vdev: vdev handle 904 * 905 * Return: Handle to pdev 906 */ 907 static inline struct cdp_pdev *cdp_get_pdev_from_vdev 908 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 909 { 910 if (!soc || !soc->ops) { 911 
QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 912 "%s: Invalid Instance:", __func__); 913 QDF_BUG(0); 914 return NULL; 915 } 916 917 if (!soc->ops->cmn_drv_ops || 918 !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev) 919 return NULL; 920 921 return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev); 922 } 923 924 /** 925 * cdp_get_os_rx_handles_from_vdev() - Return os rx handles for a vdev 926 * @soc: ol_txrx_soc_handle handle 927 * @vdev: vdev for which os rx handles are needed 928 * @stack_fn_p: pointer to stack function pointer 929 * @osif_handle_p: pointer to ol_osif_vdev_handle 930 * 931 * Return: void 932 */ 933 static inline 934 void cdp_get_os_rx_handles_from_vdev(ol_txrx_soc_handle soc, 935 struct cdp_vdev *vdev, 936 ol_txrx_rx_fp *stack_fn_p, 937 ol_osif_vdev_handle *osif_handle_p) 938 { 939 if (!soc || !soc->ops) { 940 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 941 "%s: Invalid Instance:", __func__); 942 QDF_BUG(0); 943 return; 944 } 945 946 if (!soc->ops->cmn_drv_ops || 947 !soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev) 948 return; 949 950 soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev(vdev, 951 stack_fn_p, 952 osif_handle_p); 953 } 954 955 /** 956 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev 957 * @vdev: vdev handle 958 * 959 * Return: Handle to control pdev 960 */ 961 static inline struct cdp_cfg * 962 cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 963 { 964 if (!soc || !soc->ops) { 965 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 966 "%s: Invalid Instance:", __func__); 967 QDF_BUG(0); 968 return NULL; 969 } 970 971 if (!soc->ops->cmn_drv_ops || 972 !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev) 973 return NULL; 974 975 return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev 976 (vdev); 977 } 978 979 static inline struct cdp_vdev * 980 cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 981 uint8_t vdev_id) 982 { 983 if (!soc || 
!soc->ops) { 984 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 985 "%s: Invalid Instance:", __func__); 986 QDF_BUG(0); 987 return NULL; 988 } 989 990 if (!soc->ops->cmn_drv_ops || 991 !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id) 992 return NULL; 993 994 return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id 995 (pdev, vdev_id); 996 } 997 998 static inline void 999 cdp_soc_detach(ol_txrx_soc_handle soc) 1000 { 1001 if (!soc || !soc->ops) { 1002 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1003 "%s: Invalid Instance:", __func__); 1004 QDF_BUG(0); 1005 return; 1006 } 1007 1008 if (!soc->ops->cmn_drv_ops || 1009 !soc->ops->cmn_drv_ops->txrx_soc_detach) 1010 return; 1011 1012 soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc); 1013 } 1014 1015 /** 1016 * cdp_addba_resp_tx_completion() - Indicate addba response tx 1017 * completion to dp to change tid state. 1018 * @soc: soc handle 1019 * @peer_handle: peer handle 1020 * @tid: tid 1021 * @status: Tx completion status 1022 * 1023 * Return: success/failure of tid update 1024 */ 1025 static inline int cdp_addba_resp_tx_completion(ol_txrx_soc_handle soc, 1026 void *peer_handle, 1027 uint8_t tid, int status) 1028 { 1029 if (!soc || !soc->ops) { 1030 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1031 "%s: Invalid Instance:", __func__); 1032 QDF_BUG(0); 1033 return 0; 1034 } 1035 1036 if (!soc->ops->cmn_drv_ops || 1037 !soc->ops->cmn_drv_ops->addba_resp_tx_completion) 1038 return 0; 1039 1040 return soc->ops->cmn_drv_ops->addba_resp_tx_completion(peer_handle, tid, 1041 status); 1042 } 1043 1044 static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc, 1045 void *peer_handle, uint8_t dialogtoken, uint16_t tid, 1046 uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum) 1047 { 1048 if (!soc || !soc->ops) { 1049 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1050 "%s: Invalid Instance:", __func__); 1051 QDF_BUG(0); 1052 return 0; 1053 } 1054 1055 if (!soc->ops->cmn_drv_ops || 1056 
		!soc->ops->cmn_drv_ops->addba_requestprocess)
		return 0;

	return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle,
			dialogtoken, tid, batimeout, buffersize, startseqnum);
}

/**
 * cdp_addba_responsesetup() - dispatch ADDBA response setup to the data path
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: TID number
 * @dialogtoken: dialog token pointer handed to the op (presumably an
 *               output parameter - confirm against the dp implementation)
 * @statuscode: status code pointer handed to the op
 * @buffersize: block-ack buffer size pointer handed to the op
 * @batimeout: block-ack timeout pointer handed to the op
 *
 * Silently does nothing when the common op table or the op is absent.
 *
 * Return: void
 */
static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t tid, uint8_t *dialogtoken,
	uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout)
{
	/* NULL soc/ops is a caller bug: trace, assert, bail out. */
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_responsesetup)
		return;

	soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid,
		dialogtoken, statuscode, buffersize, batimeout);
}

/**
 * cdp_delba_process() - dispatch DELBA processing to the data path
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: TID number
 * @reasoncode: DELBA reason code
 *
 * Return: value of the underlying op, or 0 when soc or the op is absent
 */
static inline int cdp_delba_process(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t reasoncode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_process)
		return 0;

	return soc->ops->cmn_drv_ops->delba_process(peer_handle,
			tid, reasoncode);
}

/**
 * cdp_delba_tx_completion() - Handle delba tx completion
 * to update stats and retry transmission if failed.
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: Tid number
 * @status: Tx completion status
 *
 * Return: 0 on Success, 1 on failure
 */
static inline int cdp_delba_tx_completion(ol_txrx_soc_handle soc,
					  void *peer_handle,
					  uint8_t tid, int status)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_tx_completion)
		return 0;

	return soc->ops->cmn_drv_ops->delba_tx_completion(peer_handle,
							  tid, status);
}

/**
 * cdp_set_addbaresponse() - install the ADDBA response status code for a
 * peer/TID in the data path
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: TID number
 * @statuscode: status code to install
 *
 * Return: void
 */
static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t statuscode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_addba_response)
		return;

	soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode);
}

/**
 * cdp_get_peer_mac_addr_frm_id() - function to return vdev id and peer
 * mac address
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which mac_address is required
 * @mac_addr: reference to mac address
 *
 * Return: vdev_id of the vap, or CDP_INVALID_VDEV_ID when soc or the
 * op is absent
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
		uint8_t *mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return CDP_INVALID_VDEV_ID;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
		return CDP_INVALID_VDEV_ID;

	return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
			peer_id, mac_addr);
}

/**
 * cdp_set_vdev_dscp_tid_map() - function to set DSCP-tid map in the vap
 * @soc: soc handle
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_vdev *vdev, uint8_t map_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map)
		return;

	soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev,
			map_id);
}

/**
 * cdp_set_pdev_dscp_tid_map() - function to change tid values in DSCP-tid map
 * @soc: soc handle
 * @pdev: pdev handle
 * @map_id: id of the tid map
 * @tos: index value in map that needs to be changed
 * @tid: tid value passed by user
 *
 * Return: void
 */
static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map)
		return;

	soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev,
			map_id, tos, tid);
}

/**
 * cdp_flush_cache_rx_queue() - flush cache rx queue frame
 * @soc: soc handle
 *
 * Return: None
 */
static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->flush_cache_rx_queue)
		return;

	soc->ops->cmn_drv_ops->flush_cache_rx_queue();
}

/**
 * cdp_txrx_stats_request() - function to map to host and firmware statistics
 * @soc: soc handle
 * @vdev: virtual device
 * @req: stats request container
 *
 * Return: status of the underlying op, 0 when soc/ops/req is invalid or
 * the op is absent
 */
static inline
int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		struct cdp_txrx_stats_req *req)
{
	/* Unlike most wrappers here, this one also rejects a NULL request. */
	if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_ASSERT(0);
		return 0;
	}

	if (soc->ops->cmn_drv_ops->txrx_stats_request)
		return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req);

	return 0;
}

/**
 * cdp_txrx_intr_attach() - function to attach and configure interrupt
 * @soc: soc handle
 *
 * Return: status of the underlying op; 0 when soc or the op is absent
 * (NOTE(review): 0 equals QDF_STATUS_SUCCESS here - callers cannot
 * distinguish the error path; confirm intended)
 */
static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_intr_attach)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_intr_attach(soc);
}

/**
 * cdp_txrx_intr_detach() - function to detach interrupt
 * @soc: soc handle
 *
 * Return: void
 */
static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_intr_detach)
		return;

	soc->ops->cmn_drv_ops->txrx_intr_detach(soc);
}

/**
 * cdp_display_stats() - function to map to dump stats
 * @soc: soc handle
 * @value: statistics option
 * @level: verbosity level of the dump
 *
 * Return: status of the underlying op, 0 when soc or the op is absent
 */
static inline QDF_STATUS
cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value, 1319 enum qdf_stats_verbosity_level level) 1320 { 1321 if (!soc || !soc->ops) { 1322 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1323 "%s: Invalid Instance:", __func__); 1324 QDF_BUG(0); 1325 return 0; 1326 } 1327 1328 if (!soc->ops->cmn_drv_ops || 1329 !soc->ops->cmn_drv_ops->display_stats) 1330 return 0; 1331 1332 return soc->ops->cmn_drv_ops->display_stats(soc, value, level); 1333 } 1334 1335 1336 /** 1337 * cdp_set_pn_check(): function to set pn check 1338 * @soc: soc handle 1339 * @sec_type: security type 1340 * #rx_pn: receive pn 1341 */ 1342 static inline int cdp_set_pn_check(ol_txrx_soc_handle soc, 1343 struct cdp_vdev *vdev, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn) 1344 { 1345 if (!soc || !soc->ops) { 1346 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1347 "%s: Invalid Instance:", __func__); 1348 QDF_BUG(0); 1349 return 0; 1350 } 1351 1352 if (!soc->ops->cmn_drv_ops || 1353 !soc->ops->cmn_drv_ops->set_pn_check) 1354 return 0; 1355 1356 soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle, 1357 sec_type, rx_pn); 1358 return 0; 1359 } 1360 1361 static inline int cdp_set_key(ol_txrx_soc_handle soc, 1362 struct cdp_peer *peer_handle, 1363 bool is_unicast, uint32_t *key) 1364 { 1365 if (!soc || !soc->ops) { 1366 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1367 "%s: Invalid Instance:", __func__); 1368 QDF_BUG(0); 1369 return 0; 1370 } 1371 1372 if (!soc->ops->ctrl_ops || 1373 !soc->ops->ctrl_ops->set_key) 1374 return 0; 1375 1376 soc->ops->ctrl_ops->set_key(peer_handle, 1377 is_unicast, key); 1378 return 0; 1379 } 1380 1381 /** 1382 * cdp_update_config_parameters(): function to propagate configuration 1383 * parameters to datapath 1384 * @soc: opaque soc handle 1385 * @cfg: configuration handle 1386 * 1387 * Return: status: 0 - Success, non-zero: Failure 1388 */ 1389 static inline 1390 QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc, 
1391 struct cdp_config_params *cfg) 1392 { 1393 struct cdp_soc *psoc = (struct cdp_soc *)soc; 1394 1395 if (!soc || !soc->ops) { 1396 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1397 "%s: Invalid Instance:", __func__); 1398 QDF_BUG(0); 1399 return 0; 1400 } 1401 1402 if (!soc->ops->cmn_drv_ops || 1403 !soc->ops->cmn_drv_ops->update_config_parameters) 1404 return QDF_STATUS_SUCCESS; 1405 1406 return soc->ops->cmn_drv_ops->update_config_parameters(psoc, 1407 cfg); 1408 } 1409 1410 /** 1411 * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev 1412 * @soc: opaque soc handle 1413 * @pdev: data path pdev handle 1414 * 1415 * Return: opaque dp handle 1416 */ 1417 static inline void * 1418 cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev) 1419 { 1420 if (!soc || !soc->ops) { 1421 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1422 "%s: Invalid Instance:", __func__); 1423 QDF_BUG(0); 1424 return 0; 1425 } 1426 1427 if (soc->ops->cmn_drv_ops->get_dp_txrx_handle) 1428 return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev); 1429 1430 return 0; 1431 } 1432 1433 /** 1434 * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev 1435 * @soc: opaque soc handle 1436 * @pdev: data path pdev handle 1437 * @dp_hdl: opaque pointer for dp_txrx_handle 1438 * 1439 * Return: void 1440 */ 1441 static inline void 1442 cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl) 1443 { 1444 if (!soc || !soc->ops) { 1445 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1446 "%s: Invalid Instance:", __func__); 1447 QDF_BUG(0); 1448 return; 1449 } 1450 1451 if (!soc->ops->cmn_drv_ops || 1452 !soc->ops->cmn_drv_ops->set_dp_txrx_handle) 1453 return; 1454 1455 soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl); 1456 } 1457 1458 /* 1459 * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc 1460 * @soc: opaque soc handle 1461 * 1462 * Return: opaque extended dp handle 1463 */ 1464 static inline void * 1465 
cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc) 1466 { 1467 if (!soc || !soc->ops) { 1468 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1469 "%s: Invalid Instance:", __func__); 1470 QDF_BUG(0); 1471 return NULL; 1472 } 1473 1474 if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle) 1475 return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle( 1476 (struct cdp_soc *) soc); 1477 1478 return NULL; 1479 } 1480 1481 /** 1482 * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc 1483 * @soc: opaque soc handle 1484 * @dp_hdl: opaque pointer for dp_txrx_handle 1485 * 1486 * Return: void 1487 */ 1488 static inline void 1489 cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle) 1490 { 1491 if (!soc || !soc->ops) { 1492 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1493 "%s: Invalid Instance:", __func__); 1494 QDF_BUG(0); 1495 return; 1496 } 1497 1498 if (!soc->ops->cmn_drv_ops || 1499 !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle) 1500 return; 1501 1502 soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc, 1503 dp_handle); 1504 } 1505 1506 /** 1507 * cdp_tx_send() - enqueue frame for transmission 1508 * @soc: soc opaque handle 1509 * @vdev: VAP device 1510 * @nbuf: nbuf to be enqueued 1511 * 1512 * This API is used by Extended Datapath modules to enqueue frame for 1513 * transmission 1514 * 1515 * Return: void 1516 */ 1517 static inline void 1518 cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf) 1519 { 1520 if (!soc || !soc->ops) { 1521 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1522 "%s: Invalid Instance:", __func__); 1523 QDF_BUG(0); 1524 return; 1525 } 1526 1527 if (!soc->ops->cmn_drv_ops || 1528 !soc->ops->cmn_drv_ops->tx_send) 1529 return; 1530 1531 soc->ops->cmn_drv_ops->tx_send(vdev, nbuf); 1532 } 1533 1534 /* 1535 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev 1536 * @soc: opaque soc handle 1537 * @pdev: data path pdev handle 1538 * 1539 * Return: pdev_id 1540 */ 1541 static 
inline 1542 uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc, 1543 struct cdp_pdev *pdev) 1544 { 1545 if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev) 1546 return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev); 1547 return 0; 1548 } 1549 1550 /** 1551 * cdp_pdev_set_chan_noise_floor() - Set channel noise floor to DP layer 1552 * @soc: opaque soc handle 1553 * @pdev: data path pdev handle 1554 * @chan_noise_floor: Channel Noise Floor (in dbM) obtained from control path 1555 * 1556 * Return: None 1557 */ 1558 static inline 1559 void cdp_pdev_set_chan_noise_floor(ol_txrx_soc_handle soc, 1560 struct cdp_pdev *pdev, 1561 int16_t chan_noise_floor) 1562 { 1563 if (soc->ops->cmn_drv_ops->txrx_pdev_set_chan_noise_floor) 1564 return soc->ops->cmn_drv_ops->txrx_pdev_set_chan_noise_floor( 1565 pdev, chan_noise_floor); 1566 } 1567 1568 /** 1569 * cdp_set_nac() - set nac 1570 * @soc: opaque soc handle 1571 * @peer: data path peer handle 1572 * 1573 */ 1574 static inline 1575 void cdp_set_nac(ol_txrx_soc_handle soc, 1576 struct cdp_peer *peer) 1577 { 1578 if (soc->ops->cmn_drv_ops->txrx_set_nac) 1579 soc->ops->cmn_drv_ops->txrx_set_nac(peer); 1580 } 1581 1582 /** 1583 * cdp_set_pdev_tx_capture() - set pdev tx_capture 1584 * @soc: opaque soc handle 1585 * @pdev: data path pdev handle 1586 * @val: value of pdev_tx_capture 1587 * 1588 * Return: void 1589 */ 1590 static inline 1591 void cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc, 1592 struct cdp_pdev *pdev, int val) 1593 { 1594 if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture) 1595 return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev, 1596 val); 1597 1598 } 1599 1600 /** 1601 * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id 1602 * @soc: opaque soc handle 1603 * @pdev: data path pdev handle 1604 * @peer_id: data path peer id 1605 * @peer_mac: peer_mac 1606 * 1607 * Return: void 1608 */ 1609 static inline 1610 void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc, 1611 struct 
cdp_pdev *pdev_handle, 1612 uint32_t peer_id, uint8_t *peer_mac) 1613 { 1614 if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id) 1615 soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id( 1616 pdev_handle, peer_id, peer_mac); 1617 } 1618 1619 /** 1620 * cdp_vdev_tx_lock() - acquire lock 1621 * @soc: opaque soc handle 1622 * @vdev: data path vdev handle 1623 * 1624 * Return: void 1625 */ 1626 static inline 1627 void cdp_vdev_tx_lock(ol_txrx_soc_handle soc, 1628 struct cdp_vdev *vdev) 1629 { 1630 if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock) 1631 soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev); 1632 } 1633 1634 /** 1635 * cdp_vdev_tx_unlock() - release lock 1636 * @soc: opaque soc handle 1637 * @vdev: data path vdev handle 1638 * 1639 * Return: void 1640 */ 1641 static inline 1642 void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc, 1643 struct cdp_vdev *vdev) 1644 { 1645 if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock) 1646 soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev); 1647 } 1648 1649 /** 1650 * cdp_ath_getstats() - get updated athstats 1651 * @soc: opaque soc handle 1652 * @dev: dp interface handle 1653 * @stats: cdp network device stats structure 1654 * @type: device type pdev/vdev 1655 * 1656 * Return: void 1657 */ 1658 static inline void cdp_ath_getstats(ol_txrx_soc_handle soc, 1659 void *dev, struct cdp_dev_stats *stats, 1660 uint8_t type) 1661 { 1662 if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats) 1663 soc->ops->cmn_drv_ops->txrx_ath_getstats(dev, stats, type); 1664 } 1665 1666 /** 1667 * cdp_set_gid_flag() - set groupid flag 1668 * @soc: opaque soc handle 1669 * @pdev: data path pdev handle 1670 * @mem_status: member status from grp management frame 1671 * @user_position: user position from grp management frame 1672 * 1673 * Return: void 1674 */ 1675 static inline 1676 void cdp_set_gid_flag(ol_txrx_soc_handle soc, 1677 struct cdp_pdev *pdev, u_int8_t *mem_status, 1678 u_int8_t *user_position) 1679 { 1680 if 
(soc->ops->cmn_drv_ops->txrx_set_gid_flag) 1681 soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status, user_position); 1682 } 1683 1684 /** 1685 * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version 1686 * @soc: opaque soc handle 1687 * @pdev: data path pdev handle 1688 * 1689 */ 1690 static inline 1691 uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc, 1692 struct cdp_pdev *pdev) 1693 { 1694 if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version) 1695 return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(pdev); 1696 return 0; 1697 } 1698 1699 /** 1700 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev 1701 * @soc: opaque soc handle 1702 * @ni: associated node 1703 * @force: number of frame in SW queue 1704 * Return: void 1705 */ 1706 static inline 1707 void cdp_if_mgmt_drain(ol_txrx_soc_handle soc, 1708 void *ni, int force) 1709 { 1710 if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain) 1711 soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force); 1712 } 1713 1714 static inline void 1715 cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers) 1716 { 1717 if (soc && soc->ops && soc->ops->cmn_drv_ops && 1718 soc->ops->cmn_drv_ops->txrx_peer_map_attach) 1719 soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc, max_peers); 1720 } 1721 1722 /** 1723 1724 * cdp_pdev_set_ctrl_pdev() - set UMAC ctrl pdev to dp pdev 1725 * @soc: opaque soc handle 1726 * @pdev: opaque dp pdev handle 1727 * @ctrl_pdev: opaque ctrl pdev handle 1728 * 1729 * Return: void 1730 */ 1731 static inline void 1732 cdp_pdev_set_ctrl_pdev(ol_txrx_soc_handle soc, struct cdp_pdev *dp_pdev, 1733 struct cdp_ctrl_objmgr_pdev *ctrl_pdev) 1734 { 1735 if (soc && soc->ops && soc->ops->cmn_drv_ops && 1736 soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev) 1737 soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev(dp_pdev, 1738 ctrl_pdev); 1739 } 1740 1741 /* cdp_txrx_classify_and_update() - To classify the packet and update stats 1742 * @soc: opaque soc 
handle 1743 * @vdev: opaque dp vdev handle 1744 * @skb: data 1745 * @dir: rx or tx packet 1746 * @nbuf_classify: packet classification object 1747 * 1748 * Return: 1 on success else return 0 1749 */ 1750 static inline int 1751 cdp_txrx_classify_and_update(ol_txrx_soc_handle soc, 1752 struct cdp_vdev *vdev, qdf_nbuf_t skb, 1753 enum txrx_direction dir, 1754 struct ol_txrx_nbuf_classify *nbuf_class) 1755 { 1756 if (!soc || !soc->ops) { 1757 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1758 "%s: Invalid Instance", __func__); 1759 QDF_BUG(0); 1760 return 0; 1761 } 1762 1763 if (!soc->ops->cmn_drv_ops || 1764 !soc->ops->cmn_drv_ops->txrx_classify_update) 1765 return 0; 1766 1767 return soc->ops->cmn_drv_ops->txrx_classify_update(vdev, 1768 skb, 1769 dir, nbuf_class); 1770 } 1771 1772 #ifdef RECEIVE_OFFLOAD 1773 /** 1774 * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer 1775 * @soc - data path soc handle 1776 * @pdev - device instance pointer 1777 * 1778 * register rx offload flush callback function pointer 1779 * 1780 * return none 1781 */ 1782 static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc, 1783 void (rx_ol_flush_cb)(void *)) 1784 { 1785 if (!soc || !soc->ops || !soc->ops->rx_offld_ops) { 1786 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, 1787 "%s invalid instance", __func__); 1788 return; 1789 } 1790 1791 if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb) 1792 return soc->ops->rx_offld_ops->register_rx_offld_flush_cb( 1793 rx_ol_flush_cb); 1794 } 1795 1796 /** 1797 * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function 1798 * @soc - data path soc handle 1799 * 1800 * deregister rx offload flush callback function pointer 1801 * 1802 * return none 1803 */ 1804 static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc) 1805 { 1806 if (!soc || !soc->ops || !soc->ops->rx_offld_ops) { 1807 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, 1808 "%s invalid 
instance", __func__); 1809 return; 1810 } 1811 1812 if (soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb) 1813 return soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb(); 1814 } 1815 #endif /* RECEIVE_OFFLOAD */ 1816 1817 /** 1818 * @cdp_set_ba_timeout() - set ba aging timeout per AC 1819 * 1820 * @soc - pointer to the soc 1821 * @value - timeout value in millisec 1822 * @ac - Access category 1823 * 1824 * @return - void 1825 */ 1826 static inline void cdp_set_ba_timeout(ol_txrx_soc_handle soc, 1827 uint8_t ac, uint32_t value) 1828 { 1829 if (!soc || !soc->ops) { 1830 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1831 "%s: Invalid Instance", __func__); 1832 QDF_BUG(0); 1833 return; 1834 } 1835 1836 if (!soc->ops->cmn_drv_ops || 1837 !soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout) 1838 return; 1839 1840 soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout(soc, ac, value); 1841 } 1842 1843 /** 1844 * @cdp_get_ba_timeout() - return ba aging timeout per AC 1845 * 1846 * @soc - pointer to the soc 1847 * @ac - access category 1848 * @value - timeout value in millisec 1849 * 1850 * @return - void 1851 */ 1852 static inline void cdp_get_ba_timeout(ol_txrx_soc_handle soc, 1853 uint8_t ac, uint32_t *value) 1854 { 1855 if (!soc || !soc->ops) { 1856 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1857 "%s: Invalid Instance", __func__); 1858 QDF_BUG(0); 1859 return; 1860 } 1861 1862 if (!soc->ops->cmn_drv_ops || 1863 !soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout) 1864 return; 1865 1866 soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout(soc, ac, value); 1867 } 1868 #endif /* _CDP_TXRX_CMN_H_ */ 1869