1 /* 2 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * @file cdp_txrx_cmn.h 21 * @brief Define the host data path converged API functions 22 * called by the host control SW and the OS interface module 23 */ 24 #ifndef _CDP_TXRX_CMN_H_ 25 #define _CDP_TXRX_CMN_H_ 26 27 #include "qdf_types.h" 28 #include "qdf_nbuf.h" 29 #include "cdp_txrx_ops.h" 30 #include "cdp_txrx_handle.h" 31 #include "cdp_txrx_cmn_struct.h" 32 /****************************************************************************** 33 * 34 * Common Data Path Header File 35 * 36 *****************************************************************************/ 37 #define dp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP, params) 38 #define dp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP, params) 39 #define dp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP, params) 40 #define dp_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_DP, params) 41 #define dp_debug(params...) 
QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params) 42 43 static inline int 44 cdp_soc_attach_target(ol_txrx_soc_handle soc) 45 { 46 if (!soc || !soc->ops) { 47 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 48 "%s: Invalid Instance:", __func__); 49 QDF_BUG(0); 50 return 0; 51 } 52 53 if (!soc->ops->cmn_drv_ops || 54 !soc->ops->cmn_drv_ops->txrx_soc_attach_target) 55 return 0; 56 57 return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc); 58 59 } 60 61 static inline int 62 cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc) 63 { 64 if (!soc || !soc->ops) { 65 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 66 "%s: Invalid Instance:", __func__); 67 QDF_BUG(0); 68 return 0; 69 } 70 71 if (!soc->ops->cmn_drv_ops || 72 !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg) 73 return 0; 74 75 return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc); 76 } 77 78 static inline void 79 cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config) 80 { 81 if (!soc || !soc->ops) { 82 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 83 "%s: Invalid Instance:", __func__); 84 QDF_BUG(0); 85 return; 86 } 87 88 if (!soc->ops->cmn_drv_ops || 89 !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg) 90 return; 91 92 soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config); 93 } 94 95 static inline struct cdp_vdev * 96 cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 97 uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode) 98 { 99 if (!soc || !soc->ops) { 100 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 101 "%s: Invalid Instance:", __func__); 102 QDF_BUG(0); 103 return NULL; 104 } 105 106 if (!soc->ops->cmn_drv_ops || 107 !soc->ops->cmn_drv_ops->txrx_vdev_attach) 108 return NULL; 109 110 return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev, 111 vdev_mac_addr, vdev_id, op_mode); 112 } 113 #ifndef CONFIG_WIN 114 /** 115 * cdp_flow_pool_map() - Create flow pool for vdev 116 * @soc - data path soc handle 117 * @pdev 118 * @vdev_id - vdev_id corresponding to vdev start 119 * 120 
* Create per vdev flow pool. 121 * 122 * return none 123 */ 124 static inline QDF_STATUS cdp_flow_pool_map(ol_txrx_soc_handle soc, 125 struct cdp_pdev *pdev, uint8_t vdev_id) 126 { 127 if (!soc || !soc->ops) { 128 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 129 "%s: Invalid Instance:", __func__); 130 QDF_BUG(0); 131 return QDF_STATUS_E_INVAL; 132 } 133 134 if (!soc->ops->flowctl_ops || 135 !soc->ops->flowctl_ops->flow_pool_map_handler) 136 return QDF_STATUS_E_INVAL; 137 138 return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev, vdev_id); 139 } 140 141 /** 142 * cdp_flow_pool_unmap() - Delete flow pool 143 * @soc - data path soc handle 144 * @pdev 145 * @vdev_id - vdev_id corresponding to vdev start 146 * 147 * Delete flow pool 148 * 149 * return none 150 */ 151 static inline void cdp_flow_pool_unmap(ol_txrx_soc_handle soc, 152 struct cdp_pdev *pdev, uint8_t vdev_id) 153 { 154 if (!soc || !soc->ops) { 155 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 156 "%s: Invalid Instance:", __func__); 157 QDF_BUG(0); 158 return; 159 } 160 161 if (!soc->ops->flowctl_ops || 162 !soc->ops->flowctl_ops->flow_pool_unmap_handler) 163 return; 164 165 return soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev, 166 vdev_id); 167 } 168 #endif 169 170 static inline void 171 cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 172 ol_txrx_vdev_delete_cb callback, void *cb_context) 173 { 174 if (!soc || !soc->ops) { 175 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 176 "%s: Invalid Instance:", __func__); 177 QDF_BUG(0); 178 return; 179 } 180 181 if (!soc->ops->cmn_drv_ops || 182 !soc->ops->cmn_drv_ops->txrx_vdev_detach) 183 return; 184 185 soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev, 186 callback, cb_context); 187 } 188 189 static inline int 190 cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) 191 { 192 if (!soc || !soc->ops) { 193 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 194 "%s: Invalid Instance:", __func__); 195 
QDF_BUG(0); 196 return 0; 197 } 198 199 if (!soc->ops->cmn_drv_ops || 200 !soc->ops->cmn_drv_ops->txrx_pdev_attach_target) 201 return 0; 202 203 return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev); 204 } 205 206 static inline struct cdp_pdev *cdp_pdev_attach 207 (ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 208 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id) 209 { 210 if (!soc || !soc->ops) { 211 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 212 "%s: Invalid Instance:", __func__); 213 QDF_BUG(0); 214 return NULL; 215 } 216 217 if (!soc->ops->cmn_drv_ops || 218 !soc->ops->cmn_drv_ops->txrx_pdev_attach) 219 return NULL; 220 221 return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev, 222 htc_pdev, osdev, pdev_id); 223 } 224 225 static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc, 226 struct cdp_pdev *pdev) 227 { 228 if (!soc || !soc->ops) { 229 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 230 "%s: Invalid Instance:", __func__); 231 QDF_BUG(0); 232 return 0; 233 } 234 235 if (!soc->ops->cmn_drv_ops || 236 !soc->ops->cmn_drv_ops->txrx_pdev_post_attach) 237 return 0; 238 239 return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev); 240 } 241 242 static inline void 243 cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 244 { 245 if (!soc || !soc->ops) { 246 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 247 "%s: Invalid Instance:", __func__); 248 QDF_BUG(0); 249 return; 250 } 251 252 if (!soc->ops->cmn_drv_ops || 253 !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach) 254 return; 255 256 soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force); 257 } 258 259 static inline void 260 cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 261 { 262 if (!soc || !soc->ops) { 263 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 264 "%s: Invalid Instance:", __func__); 265 QDF_BUG(0); 266 return; 267 } 268 269 if (!soc->ops->cmn_drv_ops || 270 
!soc->ops->cmn_drv_ops->txrx_pdev_detach) 271 return; 272 273 soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force); 274 } 275 276 static inline void *cdp_peer_create 277 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 278 uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer) 279 { 280 if (!soc || !soc->ops) { 281 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 282 "%s: Invalid Instance:", __func__); 283 QDF_BUG(0); 284 return NULL; 285 } 286 287 if (!soc->ops->cmn_drv_ops || 288 !soc->ops->cmn_drv_ops->txrx_peer_create) 289 return NULL; 290 291 return soc->ops->cmn_drv_ops->txrx_peer_create(vdev, 292 peer_mac_addr, ctrl_peer); 293 } 294 295 static inline void cdp_peer_setup 296 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) 297 { 298 if (!soc || !soc->ops) { 299 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 300 "%s: Invalid Instance:", __func__); 301 QDF_BUG(0); 302 return; 303 } 304 305 if (!soc->ops->cmn_drv_ops || 306 !soc->ops->cmn_drv_ops->txrx_peer_setup) 307 return; 308 309 soc->ops->cmn_drv_ops->txrx_peer_setup(vdev, 310 peer); 311 } 312 313 static inline void *cdp_peer_ast_hash_find_soc 314 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr) 315 { 316 if (!soc || !soc->ops) { 317 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 318 "%s: Invalid Instance:", __func__); 319 QDF_BUG(0); 320 return NULL; 321 } 322 323 if (!soc->ops->cmn_drv_ops || 324 !soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find_soc) 325 return NULL; 326 327 return soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find_soc(soc, 328 ast_mac_addr); 329 } 330 331 static inline void *cdp_peer_ast_hash_find_by_pdevid 332 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 333 uint8_t pdev_id) 334 { 335 if (!soc || !soc->ops) { 336 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 337 "%s: Invalid Instance:", __func__); 338 QDF_BUG(0); 339 return NULL; 340 } 341 342 if (!soc->ops->cmn_drv_ops || 343 !soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find_by_pdevid) 344 return NULL; 
345 346 return soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find_by_pdevid 347 (soc, 348 ast_mac_addr, 349 pdev_id); 350 } 351 352 static inline int cdp_peer_add_ast 353 (ol_txrx_soc_handle soc, struct cdp_peer *peer_handle, 354 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags) 355 { 356 if (!soc || !soc->ops) { 357 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 358 "%s: Invalid Instance:", __func__); 359 QDF_BUG(0); 360 return 0; 361 } 362 363 if (!soc->ops->cmn_drv_ops || 364 !soc->ops->cmn_drv_ops->txrx_peer_add_ast) 365 return 0; 366 367 return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc, 368 peer_handle, 369 mac_addr, 370 type, 371 flags); 372 } 373 374 static inline void cdp_peer_reset_ast 375 (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, void *vdev_hdl) 376 { 377 378 if (!soc || !soc->ops) { 379 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 380 "%s: Invalid Instance:", __func__); 381 QDF_BUG(0); 382 return; 383 } 384 if (!soc->ops->cmn_drv_ops || 385 !soc->ops->cmn_drv_ops->txrx_peer_reset_ast) 386 return; 387 388 soc->ops->cmn_drv_ops->txrx_peer_reset_ast(soc, wds_macaddr, vdev_hdl); 389 } 390 391 static inline void cdp_peer_reset_ast_table 392 (ol_txrx_soc_handle soc, void *vdev_hdl) 393 { 394 if (!soc || !soc->ops) { 395 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 396 "%s: Invalid Instance:", __func__); 397 QDF_BUG(0); 398 return; 399 } 400 401 if (!soc->ops->cmn_drv_ops || 402 !soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table) 403 return; 404 405 soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table(soc, vdev_hdl); 406 } 407 408 static inline void cdp_peer_flush_ast_table 409 (ol_txrx_soc_handle soc) 410 { 411 if (!soc || !soc->ops) { 412 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 413 "%s: Invalid Instance:", __func__); 414 QDF_BUG(0); 415 return; 416 } 417 418 if (!soc->ops->cmn_drv_ops || 419 !soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table) 420 return; 421 422 
soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table(soc); 423 } 424 425 static inline int cdp_peer_update_ast 426 (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, 427 struct cdp_peer *peer_handle, uint32_t flags) 428 { 429 if (!soc || !soc->ops) { 430 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 431 "%s: Invalid Instance:", __func__); 432 QDF_BUG(0); 433 return 0; 434 } 435 436 if (!soc->ops->cmn_drv_ops || 437 !soc->ops->cmn_drv_ops->txrx_peer_update_ast) 438 return 0; 439 440 441 return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc, 442 peer_handle, 443 wds_macaddr, 444 flags); 445 } 446 447 static inline void cdp_peer_del_ast 448 (ol_txrx_soc_handle soc, void *ast_handle) 449 { 450 if (!soc || !soc->ops) { 451 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 452 "%s: Invalid Instance:", __func__); 453 QDF_BUG(0); 454 return; 455 } 456 457 if (!soc->ops->cmn_drv_ops || 458 !soc->ops->cmn_drv_ops->txrx_peer_del_ast) 459 return; 460 461 soc->ops->cmn_drv_ops->txrx_peer_del_ast(soc, ast_handle); 462 } 463 464 465 static inline uint8_t cdp_peer_ast_get_pdev_id 466 (ol_txrx_soc_handle soc, void *ast_handle) 467 { 468 if (!soc || !soc->ops) { 469 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 470 "%s: Invalid Instance:", __func__); 471 QDF_BUG(0); 472 return 0xff; 473 } 474 475 if (!soc->ops->cmn_drv_ops || 476 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id) 477 return 0xff; 478 479 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id(soc, 480 ast_handle); 481 } 482 483 static inline uint8_t cdp_peer_ast_get_next_hop 484 (ol_txrx_soc_handle soc, void *ast_handle) 485 { 486 if (!soc || !soc->ops) { 487 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 488 "%s: Invalid Instance:", __func__); 489 QDF_BUG(0); 490 return 0xff; 491 } 492 493 if (!soc->ops->cmn_drv_ops || 494 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop) 495 return 0xff; 496 497 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop(soc, 498 ast_handle); 499 } 500 501 /** 502 * 
cdp_peer_ast_get_type() - Return type (Static, WDS, MEC) of AST entry 503 * @soc: DP SoC handle 504 * @ast_handle: Opaque handle to AST entry 505 * 506 * Return: AST entry type (Static/WDS/MEC) 507 */ 508 static inline enum cdp_txrx_ast_entry_type cdp_peer_ast_get_type 509 (ol_txrx_soc_handle soc, void *ast_handle) 510 511 { 512 if (!soc || !soc->ops) { 513 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 514 "%s: Invalid Instance:", __func__); 515 QDF_BUG(0); 516 return 0; 517 } 518 519 if (!soc->ops->cmn_drv_ops || 520 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_type) 521 return 0; 522 523 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_type(soc, ast_handle); 524 } 525 526 static inline void cdp_peer_ast_set_type 527 (ol_txrx_soc_handle soc, void *ast_handle, 528 enum cdp_txrx_ast_entry_type type) 529 { 530 if (!soc || !soc->ops) { 531 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 532 "%s: Invalid Instance:", __func__); 533 QDF_BUG(0); 534 return; 535 } 536 537 if (!soc->ops->cmn_drv_ops || 538 !soc->ops->cmn_drv_ops->txrx_peer_ast_set_type) 539 return; 540 541 soc->ops->cmn_drv_ops->txrx_peer_ast_set_type(soc, ast_handle, type); 542 } 543 544 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND) 545 static inline void cdp_peer_ast_set_cp_ctx(struct cdp_soc_t *soc, 546 void *ast_handle, 547 void *cp_ctx) 548 { 549 if (!soc || !soc->ops) { 550 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 551 "Invalid Instance:"); 552 QDF_BUG(0); 553 return; 554 } 555 556 if (!soc->ops->cmn_drv_ops || 557 !soc->ops->cmn_drv_ops->txrx_peer_ast_set_cp_ctx) 558 return; 559 560 soc->ops->cmn_drv_ops->txrx_peer_ast_set_cp_ctx(soc, ast_handle, 561 cp_ctx); 562 } 563 564 static inline void *cdp_peer_ast_get_cp_ctx(struct cdp_soc_t *soc, 565 void *ast_handle) 566 { 567 if (!soc || !soc->ops) { 568 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 569 "Invalid Instance:"); 570 QDF_BUG(0); 571 return NULL; 572 } 573 574 if (!soc->ops->cmn_drv_ops || 575 
!soc->ops->cmn_drv_ops->txrx_peer_ast_get_cp_ctx) 576 return NULL; 577 578 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_cp_ctx(soc, ast_handle); 579 } 580 581 static inline bool cdp_peer_ast_get_wmi_sent(struct cdp_soc_t *soc, 582 void *ast_handle) 583 { 584 if (!soc || !soc->ops) { 585 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 586 "Invalid Instance:"); 587 QDF_BUG(0); 588 return false; 589 } 590 591 if (!soc->ops->cmn_drv_ops || 592 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_wmi_sent) 593 return false; 594 595 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_wmi_sent(soc, 596 ast_handle); 597 } 598 599 static inline 600 void cdp_peer_ast_free_entry(struct cdp_soc_t *soc, 601 void *ast_handle) 602 { 603 if (!soc || !soc->ops) { 604 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 605 "Invalid Instance:"); 606 QDF_BUG(0); 607 return; 608 } 609 610 if (!soc->ops->cmn_drv_ops || 611 !soc->ops->cmn_drv_ops->txrx_peer_ast_free_entry) 612 return; 613 614 soc->ops->cmn_drv_ops->txrx_peer_ast_free_entry(soc, ast_handle); 615 } 616 #endif 617 618 static inline struct cdp_peer *cdp_peer_ast_get_peer 619 (ol_txrx_soc_handle soc, void *ast_handle) 620 { 621 if (!soc || !soc->ops) { 622 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 623 "%s: Invalid Instance:", __func__); 624 QDF_BUG(0); 625 return NULL; 626 } 627 628 if (!soc->ops->cmn_drv_ops || 629 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_peer) 630 return NULL; 631 632 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_peer(soc, ast_handle); 633 } 634 635 static inline uint32_t cdp_peer_ast_get_nexthop_peer_id 636 (ol_txrx_soc_handle soc, void *ast_handle) 637 { 638 if (!soc || !soc->ops) { 639 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 640 "%s: Invalid Instance:", __func__); 641 QDF_BUG(0); 642 return CDP_INVALID_PEER; 643 } 644 645 if (!soc->ops->cmn_drv_ops || 646 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_nexthop_peer_id) 647 return CDP_INVALID_PEER; 648 649 return 
soc->ops->cmn_drv_ops->txrx_peer_ast_get_nexthop_peer_id 650 (soc, 651 ast_handle); 652 } 653 654 static inline void cdp_peer_teardown 655 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) 656 { 657 if (!soc || !soc->ops) { 658 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 659 "%s: Invalid Instance:", __func__); 660 QDF_BUG(0); 661 return; 662 } 663 664 if (!soc->ops->cmn_drv_ops || 665 !soc->ops->cmn_drv_ops->txrx_peer_teardown) 666 return; 667 668 soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer); 669 } 670 671 static inline void 672 cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap) 673 { 674 if (!soc || !soc->ops) { 675 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 676 "%s: Invalid Instance:", __func__); 677 QDF_BUG(0); 678 return; 679 } 680 681 if (!soc->ops->cmn_drv_ops || 682 !soc->ops->cmn_drv_ops->txrx_peer_delete) 683 return; 684 685 soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap); 686 } 687 688 static inline int 689 cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 690 uint8_t smart_monitor) 691 { 692 if (!soc || !soc->ops) { 693 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 694 "%s: Invalid Instance:", __func__); 695 QDF_BUG(0); 696 return 0; 697 } 698 699 if (!soc->ops->cmn_drv_ops || 700 !soc->ops->cmn_drv_ops->txrx_set_monitor_mode) 701 return 0; 702 703 return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev, 704 smart_monitor); 705 } 706 707 static inline void 708 cdp_set_curchan(ol_txrx_soc_handle soc, 709 struct cdp_pdev *pdev, 710 uint32_t chan_mhz) 711 { 712 if (!soc || !soc->ops) { 713 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 714 "%s: Invalid Instance:", __func__); 715 QDF_BUG(0); 716 return; 717 } 718 719 if (!soc->ops->cmn_drv_ops || 720 !soc->ops->cmn_drv_ops->txrx_set_curchan) 721 return; 722 723 soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz); 724 } 725 726 static inline void 727 cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev 
*vdev, 728 void *filter, uint32_t num) 729 { 730 if (!soc || !soc->ops) { 731 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 732 "%s: Invalid Instance:", __func__); 733 QDF_BUG(0); 734 return; 735 } 736 737 if (!soc->ops->cmn_drv_ops || 738 !soc->ops->cmn_drv_ops->txrx_set_privacy_filters) 739 return; 740 741 soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev, 742 filter, num); 743 } 744 745 static inline int 746 cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 747 struct cdp_monitor_filter *filter_val) 748 { 749 if (soc->ops->mon_ops->txrx_set_advance_monitor_filter) 750 return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev, 751 filter_val); 752 return 0; 753 } 754 755 756 /****************************************************************************** 757 * Data Interface (B Interface) 758 *****************************************************************************/ 759 static inline void 760 cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 761 void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev, 762 struct ol_txrx_ops *txrx_ops) 763 { 764 if (!soc || !soc->ops) { 765 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 766 "%s: Invalid Instance:", __func__); 767 QDF_BUG(0); 768 return; 769 } 770 771 if (!soc->ops->cmn_drv_ops || 772 !soc->ops->cmn_drv_ops->txrx_vdev_register) 773 return; 774 775 soc->ops->cmn_drv_ops->txrx_vdev_register(vdev, 776 osif_vdev, ctrl_vdev, txrx_ops); 777 } 778 779 static inline int 780 cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 781 qdf_nbuf_t tx_mgmt_frm, uint8_t type) 782 { 783 if (!soc || !soc->ops) { 784 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 785 "%s: Invalid Instance:", __func__); 786 QDF_BUG(0); 787 return 0; 788 } 789 790 if (!soc->ops->cmn_drv_ops || 791 !soc->ops->cmn_drv_ops->txrx_mgmt_send) 792 return 0; 793 794 return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev, 795 tx_mgmt_frm, type); 796 } 797 798 static inline int 799 
cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 800 qdf_nbuf_t tx_mgmt_frm, uint8_t type, 801 uint8_t use_6mbps, uint16_t chanfreq) 802 { 803 if (!soc || !soc->ops) { 804 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 805 "%s: Invalid Instance:", __func__); 806 QDF_BUG(0); 807 return 0; 808 } 809 810 if (!soc->ops->cmn_drv_ops || 811 !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext) 812 return 0; 813 814 return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext 815 (vdev, tx_mgmt_frm, type, use_6mbps, chanfreq); 816 } 817 818 819 static inline void 820 cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 821 uint8_t type, ol_txrx_mgmt_tx_cb download_cb, 822 ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt) 823 { 824 if (!soc || !soc->ops) { 825 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 826 "%s: Invalid Instance:", __func__); 827 QDF_BUG(0); 828 return; 829 } 830 831 if (!soc->ops->cmn_drv_ops || 832 !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set) 833 return; 834 835 soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set 836 (pdev, type, download_cb, ota_ack_cb, ctxt); 837 } 838 839 static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc, 840 struct cdp_pdev *pdev) 841 { 842 if (!soc || !soc->ops) { 843 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 844 "%s: Invalid Instance:", __func__); 845 QDF_BUG(0); 846 return 0; 847 } 848 849 if (!soc->ops->cmn_drv_ops || 850 !soc->ops->cmn_drv_ops->txrx_get_tx_pending) 851 return 0; 852 853 854 return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev); 855 } 856 857 static inline void 858 cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev, 859 ol_txrx_data_tx_cb callback, void *ctxt) 860 { 861 if (!soc || !soc->ops) { 862 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 863 "%s: Invalid Instance:", __func__); 864 QDF_BUG(0); 865 return; 866 } 867 868 if (!soc->ops->cmn_drv_ops || 869 !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set) 870 return; 871 872 
soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev, 873 callback, ctxt); 874 } 875 876 /****************************************************************************** 877 * Statistics and Debugging Interface (C Interface) 878 *****************************************************************************/ 879 /** 880 * External Device physical address types 881 * 882 * Currently, both MAC and IPA uController use the same size addresses 883 * and descriptors are exchanged between these two depending on the mode. 884 * 885 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA 886 * operations. However, external device physical address sizes 887 * may be different from host-specific physical address sizes. 888 * This calls for the following definitions for target devices 889 * (MAC, IPA uc). 890 */ 891 #if HTT_PADDR64 892 typedef uint64_t target_paddr_t; 893 #else 894 typedef uint32_t target_paddr_t; 895 #endif /*HTT_PADDR64 */ 896 897 static inline int 898 cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 899 int max_subfrms_ampdu, 900 int max_subfrms_amsdu) 901 { 902 if (!soc || !soc->ops) { 903 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 904 "%s: Invalid Instance:", __func__); 905 QDF_BUG(0); 906 return 0; 907 } 908 909 if (!soc->ops->cmn_drv_ops || 910 !soc->ops->cmn_drv_ops->txrx_aggr_cfg) 911 return 0; 912 913 return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev, 914 max_subfrms_ampdu, max_subfrms_amsdu); 915 } 916 917 static inline int 918 cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 919 struct ol_txrx_stats_req *req, bool per_vdev, 920 bool response_expected) 921 { 922 if (!soc || !soc->ops) { 923 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 924 "%s: Invalid Instance:", __func__); 925 QDF_BUG(0); 926 return 0; 927 } 928 929 if (!soc->ops->cmn_drv_ops || 930 !soc->ops->cmn_drv_ops->txrx_fw_stats_get) 931 return 0; 932 933 return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req, 934 per_vdev, 
response_expected); 935 } 936 937 static inline int 938 cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs) 939 { 940 if (!soc || !soc->ops) { 941 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 942 "%s: Invalid Instance:", __func__); 943 QDF_BUG(0); 944 return 0; 945 } 946 947 if (!soc->ops->cmn_drv_ops || 948 !soc->ops->cmn_drv_ops->txrx_debug) 949 return 0; 950 951 return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs); 952 } 953 954 static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc, 955 struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val) 956 { 957 if (!soc || !soc->ops) { 958 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 959 "%s: Invalid Instance:", __func__); 960 QDF_BUG(0); 961 return; 962 } 963 964 if (!soc->ops->cmn_drv_ops || 965 !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg) 966 return; 967 968 soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev, 969 cfg_stats_type, cfg_val); 970 } 971 972 static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level) 973 { 974 if (!soc || !soc->ops) { 975 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 976 "%s: Invalid Instance:", __func__); 977 QDF_BUG(0); 978 return; 979 } 980 981 if (!soc->ops->cmn_drv_ops || 982 !soc->ops->cmn_drv_ops->txrx_print_level_set) 983 return; 984 985 soc->ops->cmn_drv_ops->txrx_print_level_set(level); 986 } 987 988 static inline uint8_t * 989 cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 990 { 991 if (!soc || !soc->ops) { 992 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 993 "%s: Invalid Instance:", __func__); 994 QDF_BUG(0); 995 return NULL; 996 } 997 998 if (!soc->ops->cmn_drv_ops || 999 !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr) 1000 return NULL; 1001 1002 return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev); 1003 1004 } 1005 1006 /** 1007 * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of 1008 * vdev 1009 * @vdev: vdev handle 1010 * 1011 * Return: Handle 
to struct qdf_mac_addr 1012 */ 1013 static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr 1014 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 1015 { 1016 if (!soc || !soc->ops) { 1017 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1018 "%s: Invalid Instance:", __func__); 1019 QDF_BUG(0); 1020 return NULL; 1021 } 1022 1023 if (!soc->ops->cmn_drv_ops || 1024 !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr) 1025 return NULL; 1026 1027 return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr 1028 (vdev); 1029 1030 } 1031 1032 /** 1033 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev 1034 * @vdev: vdev handle 1035 * 1036 * Return: Handle to pdev 1037 */ 1038 static inline struct cdp_pdev *cdp_get_pdev_from_vdev 1039 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 1040 { 1041 if (!soc || !soc->ops) { 1042 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1043 "%s: Invalid Instance:", __func__); 1044 QDF_BUG(0); 1045 return NULL; 1046 } 1047 1048 if (!soc->ops->cmn_drv_ops || 1049 !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev) 1050 return NULL; 1051 1052 return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev); 1053 } 1054 1055 /** 1056 * cdp_get_os_rx_handles_from_vdev() - Return os rx handles for a vdev 1057 * @soc: ol_txrx_soc_handle handle 1058 * @vdev: vdev for which os rx handles are needed 1059 * @stack_fn_p: pointer to stack function pointer 1060 * @osif_handle_p: pointer to ol_osif_vdev_handle 1061 * 1062 * Return: void 1063 */ 1064 static inline 1065 void cdp_get_os_rx_handles_from_vdev(ol_txrx_soc_handle soc, 1066 struct cdp_vdev *vdev, 1067 ol_txrx_rx_fp *stack_fn_p, 1068 ol_osif_vdev_handle *osif_handle_p) 1069 { 1070 if (!soc || !soc->ops) { 1071 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1072 "%s: Invalid Instance:", __func__); 1073 QDF_BUG(0); 1074 return; 1075 } 1076 1077 if (!soc->ops->cmn_drv_ops || 1078 !soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev) 1079 return; 1080 1081 
soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev(vdev, 1082 stack_fn_p, 1083 osif_handle_p); 1084 } 1085 1086 /** 1087 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev 1088 * @vdev: vdev handle 1089 * 1090 * Return: Handle to control pdev 1091 */ 1092 static inline struct cdp_cfg * 1093 cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 1094 { 1095 if (!soc || !soc->ops) { 1096 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1097 "%s: Invalid Instance:", __func__); 1098 QDF_BUG(0); 1099 return NULL; 1100 } 1101 1102 if (!soc->ops->cmn_drv_ops || 1103 !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev) 1104 return NULL; 1105 1106 return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev 1107 (vdev); 1108 } 1109 1110 static inline struct cdp_vdev * 1111 cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 1112 uint8_t vdev_id) 1113 { 1114 if (!soc || !soc->ops) { 1115 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1116 "%s: Invalid Instance:", __func__); 1117 QDF_BUG(0); 1118 return NULL; 1119 } 1120 1121 if (!soc->ops->cmn_drv_ops || 1122 !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id) 1123 return NULL; 1124 1125 return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id 1126 (pdev, vdev_id); 1127 } 1128 1129 static inline void 1130 cdp_soc_detach(ol_txrx_soc_handle soc) 1131 { 1132 if (!soc || !soc->ops) { 1133 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1134 "%s: Invalid Instance:", __func__); 1135 QDF_BUG(0); 1136 return; 1137 } 1138 1139 if (!soc->ops->cmn_drv_ops || 1140 !soc->ops->cmn_drv_ops->txrx_soc_detach) 1141 return; 1142 1143 soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc); 1144 } 1145 1146 /** 1147 * cdp_addba_resp_tx_completion() - Indicate addba response tx 1148 * completion to dp to change tid state. 
1149 * @soc: soc handle 1150 * @peer_handle: peer handle 1151 * @tid: tid 1152 * @status: Tx completion status 1153 * 1154 * Return: success/failure of tid update 1155 */ 1156 static inline int cdp_addba_resp_tx_completion(ol_txrx_soc_handle soc, 1157 void *peer_handle, 1158 uint8_t tid, int status) 1159 { 1160 if (!soc || !soc->ops) { 1161 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1162 "%s: Invalid Instance:", __func__); 1163 QDF_BUG(0); 1164 return 0; 1165 } 1166 1167 if (!soc->ops->cmn_drv_ops || 1168 !soc->ops->cmn_drv_ops->addba_resp_tx_completion) 1169 return 0; 1170 1171 return soc->ops->cmn_drv_ops->addba_resp_tx_completion(peer_handle, tid, 1172 status); 1173 } 1174 1175 static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc, 1176 void *peer_handle, uint8_t dialogtoken, uint16_t tid, 1177 uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum) 1178 { 1179 if (!soc || !soc->ops) { 1180 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1181 "%s: Invalid Instance:", __func__); 1182 QDF_BUG(0); 1183 return 0; 1184 } 1185 1186 if (!soc->ops->cmn_drv_ops || 1187 !soc->ops->cmn_drv_ops->addba_requestprocess) 1188 return 0; 1189 1190 return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle, 1191 dialogtoken, tid, batimeout, buffersize, startseqnum); 1192 } 1193 1194 static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc, 1195 void *peer_handle, uint8_t tid, uint8_t *dialogtoken, 1196 uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout) 1197 { 1198 if (!soc || !soc->ops) { 1199 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1200 "%s: Invalid Instance:", __func__); 1201 QDF_BUG(0); 1202 return; 1203 } 1204 1205 if (!soc->ops->cmn_drv_ops || 1206 !soc->ops->cmn_drv_ops->addba_responsesetup) 1207 return; 1208 1209 soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid, 1210 dialogtoken, statuscode, buffersize, batimeout); 1211 } 1212 1213 static inline int cdp_delba_process(ol_txrx_soc_handle soc, 
	void *peer_handle, int tid, uint16_t reasoncode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_process)
		return 0;

	return soc->ops->cmn_drv_ops->delba_process(peer_handle,
			tid, reasoncode);
}

/**
 * cdp_delba_tx_completion() - Handle delba tx completion
 * to update stats and retry transmission if failed.
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: Tid number
 * @status: Tx completion status
 *
 * Return: 0 on Success, 1 on failure
 */

static inline int cdp_delba_tx_completion(ol_txrx_soc_handle soc,
					  void *peer_handle,
					  uint8_t tid, int status)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_tx_completion)
		return 0;

	return soc->ops->cmn_drv_ops->delba_tx_completion(peer_handle,
							  tid, status);
}

/**
 * cdp_set_addbaresponse() - Set the status code dp should advertise in
 * the ADDBA response for @tid
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: traffic identifier
 * @statuscode: status code to advertise
 *
 * Return: None
 */
static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t statuscode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_addba_response)
		return;

	soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode);
}

/**
 * cdp_get_peer_mac_addr_frm_id: function to return vdev id and peer
 * mac address
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which mac_address is required
 * @mac_addr: reference to mac address
 *
 *
Return: vdev_id of the vap
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
		uint8_t *mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return CDP_INVALID_VDEV_ID;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
		return CDP_INVALID_VDEV_ID;

	return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
				peer_id, mac_addr);
}

/**
 * cdp_set_vdev_dscp_tid_map(): function to set DSCP-tid map in the vap
 * @soc: soc handle
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_vdev *vdev, uint8_t map_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map)
		return;

	soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev,
			map_id);
}

/**
 * cdp_ath_get_total_per(): function to get hw retries
 * @soc : soc handle
 * @pdev: pdev handle
 *
 * Return: get hw retries
 */
static inline
int cdp_ath_get_total_per(ol_txrx_soc_handle soc,
			  struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_total_per)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_get_total_per(pdev);
}

/**
 * cdp_set_pdev_dscp_tid_map(): function to change tid values in DSCP-tid map
 * @pdev: pdev handle
 *
@map_id: id of the tid map
 * @tos: index value in map that needs to be changed
 * @tid: tid value passed by user
 *
 * Return: void
 */
static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map)
		return;

	soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev,
			map_id, tos, tid);
}

/**
 * cdp_flush_cache_rx_queue() - flush cache rx queue frame
 * @soc: soc handle
 *
 * Return: None
 */
static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->flush_cache_rx_queue)
		return;
	soc->ops->cmn_drv_ops->flush_cache_rx_queue();
}

/**
 * cdp_txrx_stats_request(): function to map to host and firmware statistics
 * @soc: soc handle
 * @vdev: virtual device
 * @req: stats request container
 *
 * return: status
 */
static inline
int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		struct cdp_txrx_stats_req *req)
{
	/* NOTE(review): unlike the other wrappers here, this one also
	 * validates @req and uses QDF_ASSERT instead of QDF_BUG.
	 */
	if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_ASSERT(0);
		return 0;
	}

	if (soc->ops->cmn_drv_ops->txrx_stats_request)
		return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req);

	return 0;
}

/**
 * cdp_txrx_intr_attach(): function to attach and configure interrupt
1430 * @soc: soc handle 1431 */ 1432 static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc) 1433 { 1434 if (!soc || !soc->ops) { 1435 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1436 "%s: Invalid Instance:", __func__); 1437 QDF_BUG(0); 1438 return 0; 1439 } 1440 1441 if (!soc->ops->cmn_drv_ops || 1442 !soc->ops->cmn_drv_ops->txrx_intr_attach) 1443 return 0; 1444 1445 return soc->ops->cmn_drv_ops->txrx_intr_attach(soc); 1446 } 1447 1448 /** 1449 * cdp_txrx_intr_detach(): function to detach interrupt 1450 * @soc: soc handle 1451 */ 1452 static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc) 1453 { 1454 if (!soc || !soc->ops) { 1455 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1456 "%s: Invalid Instance:", __func__); 1457 QDF_BUG(0); 1458 return; 1459 } 1460 1461 if (!soc->ops->cmn_drv_ops || 1462 !soc->ops->cmn_drv_ops->txrx_intr_detach) 1463 return; 1464 1465 soc->ops->cmn_drv_ops->txrx_intr_detach(soc); 1466 } 1467 1468 /** 1469 * cdp_display_stats(): function to map to dump stats 1470 * @soc: soc handle 1471 * @value: statistics option 1472 */ 1473 static inline QDF_STATUS 1474 cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value, 1475 enum qdf_stats_verbosity_level level) 1476 { 1477 if (!soc || !soc->ops) { 1478 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1479 "%s: Invalid Instance:", __func__); 1480 QDF_BUG(0); 1481 return 0; 1482 } 1483 1484 if (!soc->ops->cmn_drv_ops || 1485 !soc->ops->cmn_drv_ops->display_stats) 1486 return 0; 1487 1488 return soc->ops->cmn_drv_ops->display_stats(soc, value, level); 1489 } 1490 1491 1492 /** 1493 * cdp_set_pn_check(): function to set pn check 1494 * @soc: soc handle 1495 * @sec_type: security type 1496 * #rx_pn: receive pn 1497 */ 1498 static inline int cdp_set_pn_check(ol_txrx_soc_handle soc, 1499 struct cdp_vdev *vdev, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn) 1500 { 1501 if (!soc || !soc->ops) { 1502 QDF_TRACE(QDF_MODULE_ID_CDP, 
QDF_TRACE_LEVEL_DEBUG, 1503 "%s: Invalid Instance:", __func__); 1504 QDF_BUG(0); 1505 return 0; 1506 } 1507 1508 if (!soc->ops->cmn_drv_ops || 1509 !soc->ops->cmn_drv_ops->set_pn_check) 1510 return 0; 1511 1512 soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle, 1513 sec_type, rx_pn); 1514 return 0; 1515 } 1516 1517 static inline int cdp_set_key(ol_txrx_soc_handle soc, 1518 struct cdp_peer *peer_handle, 1519 bool is_unicast, uint32_t *key) 1520 { 1521 if (!soc || !soc->ops) { 1522 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1523 "%s: Invalid Instance:", __func__); 1524 QDF_BUG(0); 1525 return 0; 1526 } 1527 1528 if (!soc->ops->ctrl_ops || 1529 !soc->ops->ctrl_ops->set_key) 1530 return 0; 1531 1532 soc->ops->ctrl_ops->set_key(peer_handle, 1533 is_unicast, key); 1534 return 0; 1535 } 1536 1537 /** 1538 * cdp_update_config_parameters(): function to propagate configuration 1539 * parameters to datapath 1540 * @soc: opaque soc handle 1541 * @cfg: configuration handle 1542 * 1543 * Return: status: 0 - Success, non-zero: Failure 1544 */ 1545 static inline 1546 QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc, 1547 struct cdp_config_params *cfg) 1548 { 1549 struct cdp_soc *psoc = (struct cdp_soc *)soc; 1550 1551 if (!soc || !soc->ops) { 1552 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1553 "%s: Invalid Instance:", __func__); 1554 QDF_BUG(0); 1555 return 0; 1556 } 1557 1558 if (!soc->ops->cmn_drv_ops || 1559 !soc->ops->cmn_drv_ops->update_config_parameters) 1560 return QDF_STATUS_SUCCESS; 1561 1562 return soc->ops->cmn_drv_ops->update_config_parameters(psoc, 1563 cfg); 1564 } 1565 1566 /** 1567 * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev 1568 * @soc: opaque soc handle 1569 * @pdev: data path pdev handle 1570 * 1571 * Return: opaque dp handle 1572 */ 1573 static inline void * 1574 cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev) 1575 { 1576 if (!soc || !soc->ops) { 1577 QDF_TRACE(QDF_MODULE_ID_CDP, 
QDF_TRACE_LEVEL_DEBUG, 1578 "%s: Invalid Instance:", __func__); 1579 QDF_BUG(0); 1580 return 0; 1581 } 1582 1583 if (soc->ops->cmn_drv_ops->get_dp_txrx_handle) 1584 return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev); 1585 1586 return 0; 1587 } 1588 1589 /** 1590 * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev 1591 * @soc: opaque soc handle 1592 * @pdev: data path pdev handle 1593 * @dp_hdl: opaque pointer for dp_txrx_handle 1594 * 1595 * Return: void 1596 */ 1597 static inline void 1598 cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl) 1599 { 1600 if (!soc || !soc->ops) { 1601 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1602 "%s: Invalid Instance:", __func__); 1603 QDF_BUG(0); 1604 return; 1605 } 1606 1607 if (!soc->ops->cmn_drv_ops || 1608 !soc->ops->cmn_drv_ops->set_dp_txrx_handle) 1609 return; 1610 1611 soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl); 1612 } 1613 1614 /* 1615 * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc 1616 * @soc: opaque soc handle 1617 * 1618 * Return: opaque extended dp handle 1619 */ 1620 static inline void * 1621 cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc) 1622 { 1623 if (!soc || !soc->ops) { 1624 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1625 "%s: Invalid Instance:", __func__); 1626 QDF_BUG(0); 1627 return NULL; 1628 } 1629 1630 if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle) 1631 return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle( 1632 (struct cdp_soc *) soc); 1633 1634 return NULL; 1635 } 1636 1637 /** 1638 * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc 1639 * @soc: opaque soc handle 1640 * @dp_hdl: opaque pointer for dp_txrx_handle 1641 * 1642 * Return: void 1643 */ 1644 static inline void 1645 cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle) 1646 { 1647 if (!soc || !soc->ops) { 1648 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1649 "%s: Invalid Instance:", __func__); 1650 QDF_BUG(0); 1651 
return; 1652 } 1653 1654 if (!soc->ops->cmn_drv_ops || 1655 !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle) 1656 return; 1657 1658 soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc, 1659 dp_handle); 1660 } 1661 1662 /** 1663 * cdp_tx_send() - enqueue frame for transmission 1664 * @soc: soc opaque handle 1665 * @vdev: VAP device 1666 * @nbuf: nbuf to be enqueued 1667 * 1668 * This API is used by Extended Datapath modules to enqueue frame for 1669 * transmission 1670 * 1671 * Return: void 1672 */ 1673 static inline void 1674 cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf) 1675 { 1676 if (!soc || !soc->ops) { 1677 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1678 "%s: Invalid Instance:", __func__); 1679 QDF_BUG(0); 1680 return; 1681 } 1682 1683 if (!soc->ops->cmn_drv_ops || 1684 !soc->ops->cmn_drv_ops->tx_send) 1685 return; 1686 1687 soc->ops->cmn_drv_ops->tx_send(vdev, nbuf); 1688 } 1689 1690 /* 1691 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev 1692 * @soc: opaque soc handle 1693 * @pdev: data path pdev handle 1694 * 1695 * Return: pdev_id 1696 */ 1697 static inline 1698 uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc, 1699 struct cdp_pdev *pdev) 1700 { 1701 if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev) 1702 return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev); 1703 return 0; 1704 } 1705 1706 /** 1707 * cdp_pdev_set_chan_noise_floor() - Set channel noise floor to DP layer 1708 * @soc: opaque soc handle 1709 * @pdev: data path pdev handle 1710 * @chan_noise_floor: Channel Noise Floor (in dbM) obtained from control path 1711 * 1712 * Return: None 1713 */ 1714 static inline 1715 void cdp_pdev_set_chan_noise_floor(ol_txrx_soc_handle soc, 1716 struct cdp_pdev *pdev, 1717 int16_t chan_noise_floor) 1718 { 1719 if (soc->ops->cmn_drv_ops->txrx_pdev_set_chan_noise_floor) 1720 return soc->ops->cmn_drv_ops->txrx_pdev_set_chan_noise_floor( 1721 pdev, chan_noise_floor); 1722 } 1723 1724 /** 
1725 * cdp_set_nac() - set nac 1726 * @soc: opaque soc handle 1727 * @peer: data path peer handle 1728 * 1729 */ 1730 static inline 1731 void cdp_set_nac(ol_txrx_soc_handle soc, 1732 struct cdp_peer *peer) 1733 { 1734 if (soc->ops->cmn_drv_ops->txrx_set_nac) 1735 soc->ops->cmn_drv_ops->txrx_set_nac(peer); 1736 } 1737 1738 /** 1739 * cdp_set_pdev_tx_capture() - set pdev tx_capture 1740 * @soc: opaque soc handle 1741 * @pdev: data path pdev handle 1742 * @val: value of pdev_tx_capture 1743 * 1744 * Return: void 1745 */ 1746 static inline 1747 void cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc, 1748 struct cdp_pdev *pdev, int val) 1749 { 1750 if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture) 1751 return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev, 1752 val); 1753 1754 } 1755 1756 /** 1757 * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id 1758 * @soc: opaque soc handle 1759 * @pdev: data path pdev handle 1760 * @peer_id: data path peer id 1761 * @peer_mac: peer_mac 1762 * 1763 * Return: void 1764 */ 1765 static inline 1766 void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc, 1767 struct cdp_pdev *pdev_handle, 1768 uint32_t peer_id, uint8_t *peer_mac) 1769 { 1770 if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id) 1771 soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id( 1772 pdev_handle, peer_id, peer_mac); 1773 } 1774 1775 /** 1776 * cdp_vdev_tx_lock() - acquire lock 1777 * @soc: opaque soc handle 1778 * @vdev: data path vdev handle 1779 * 1780 * Return: void 1781 */ 1782 static inline 1783 void cdp_vdev_tx_lock(ol_txrx_soc_handle soc, 1784 struct cdp_vdev *vdev) 1785 { 1786 if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock) 1787 soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev); 1788 } 1789 1790 /** 1791 * cdp_vdev_tx_unlock() - release lock 1792 * @soc: opaque soc handle 1793 * @vdev: data path vdev handle 1794 * 1795 * Return: void 1796 */ 1797 static inline 1798 void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc, 1799 struct 
cdp_vdev *vdev) 1800 { 1801 if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock) 1802 soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev); 1803 } 1804 1805 /** 1806 * cdp_ath_getstats() - get updated athstats 1807 * @soc: opaque soc handle 1808 * @dev: dp interface handle 1809 * @stats: cdp network device stats structure 1810 * @type: device type pdev/vdev 1811 * 1812 * Return: void 1813 */ 1814 static inline void cdp_ath_getstats(ol_txrx_soc_handle soc, 1815 void *dev, struct cdp_dev_stats *stats, 1816 uint8_t type) 1817 { 1818 if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats) 1819 soc->ops->cmn_drv_ops->txrx_ath_getstats(dev, stats, type); 1820 } 1821 1822 /** 1823 * cdp_set_gid_flag() - set groupid flag 1824 * @soc: opaque soc handle 1825 * @pdev: data path pdev handle 1826 * @mem_status: member status from grp management frame 1827 * @user_position: user position from grp management frame 1828 * 1829 * Return: void 1830 */ 1831 static inline 1832 void cdp_set_gid_flag(ol_txrx_soc_handle soc, 1833 struct cdp_pdev *pdev, u_int8_t *mem_status, 1834 u_int8_t *user_position) 1835 { 1836 if (soc->ops->cmn_drv_ops->txrx_set_gid_flag) 1837 soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status, user_position); 1838 } 1839 1840 /** 1841 * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version 1842 * @soc: opaque soc handle 1843 * @pdev: data path pdev handle 1844 * 1845 */ 1846 static inline 1847 uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc, 1848 struct cdp_pdev *pdev) 1849 { 1850 if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version) 1851 return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(pdev); 1852 return 0; 1853 } 1854 1855 /** 1856 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev 1857 * @soc: opaque soc handle 1858 * @ni: associated node 1859 * @force: number of frame in SW queue 1860 * Return: void 1861 */ 1862 static inline 1863 void cdp_if_mgmt_drain(ol_txrx_soc_handle soc, 1864 
void *ni, int force) 1865 { 1866 if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain) 1867 soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force); 1868 } 1869 1870 /* cdp_peer_map_attach() - CDP API to allocate PEER map memory 1871 * @soc: opaque soc handle 1872 * @max_peers: number of peers created in FW 1873 * @peer_map_unmap_v2: flag indicates HTT peer map v2 is enabled in FW 1874 * 1875 * 1876 * Return: void 1877 */ 1878 static inline void 1879 cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers, 1880 bool peer_map_unmap_v2) 1881 { 1882 if (soc && soc->ops && soc->ops->cmn_drv_ops && 1883 soc->ops->cmn_drv_ops->txrx_peer_map_attach) 1884 soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc, max_peers, 1885 peer_map_unmap_v2); 1886 } 1887 1888 /** 1889 1890 * cdp_pdev_set_ctrl_pdev() - set UMAC ctrl pdev to dp pdev 1891 * @soc: opaque soc handle 1892 * @pdev: opaque dp pdev handle 1893 * @ctrl_pdev: opaque ctrl pdev handle 1894 * 1895 * Return: void 1896 */ 1897 static inline void 1898 cdp_pdev_set_ctrl_pdev(ol_txrx_soc_handle soc, struct cdp_pdev *dp_pdev, 1899 struct cdp_ctrl_objmgr_pdev *ctrl_pdev) 1900 { 1901 if (soc && soc->ops && soc->ops->cmn_drv_ops && 1902 soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev) 1903 soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev(dp_pdev, 1904 ctrl_pdev); 1905 } 1906 1907 /* cdp_txrx_classify_and_update() - To classify the packet and update stats 1908 * @soc: opaque soc handle 1909 * @vdev: opaque dp vdev handle 1910 * @skb: data 1911 * @dir: rx or tx packet 1912 * @nbuf_classify: packet classification object 1913 * 1914 * Return: 1 on success else return 0 1915 */ 1916 static inline int 1917 cdp_txrx_classify_and_update(ol_txrx_soc_handle soc, 1918 struct cdp_vdev *vdev, qdf_nbuf_t skb, 1919 enum txrx_direction dir, 1920 struct ol_txrx_nbuf_classify *nbuf_class) 1921 { 1922 if (!soc || !soc->ops) { 1923 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1924 "%s: Invalid Instance", __func__); 1925 QDF_BUG(0); 1926 return 0; 1927 
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_classify_update)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_classify_update(vdev,
			skb,
			dir, nbuf_class);
}

/**
 * cdp_get_dp_capabilities() - get DP capabilities
 * @soc: opaque soc handle
 * @dp_caps: enum of DP capabilities
 *
 * Return: bool
 */
static inline bool
cdp_get_dp_capabilities(struct cdp_soc_t *soc, enum cdp_capabilities dp_caps)
{
	if (soc && soc->ops && soc->ops->cmn_drv_ops &&
	    soc->ops->cmn_drv_ops->get_dp_capabilities)
		return soc->ops->cmn_drv_ops->get_dp_capabilities(soc, dp_caps);
	return false;
}

#ifdef RECEIVE_OFFLOAD
/**
 * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer
 * @soc - data path soc handle
 * @rx_ol_flush_cb - callback invoked to flush pending rx offload frames
 *
 * register rx offload flush callback function pointer
 *
 * return none
 */
static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc,
						  void (rx_ol_flush_cb)(void *))
{
	if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb)
		return soc->ops->rx_offld_ops->register_rx_offld_flush_cb(
				rx_ol_flush_cb);
}

/**
 * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function
 * @soc - data path soc handle
 *
 * deregister rx offload flush callback function pointer
 *
 * return none
 */
static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if
	(soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb)
		return soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb();
}
#endif /* RECEIVE_OFFLOAD */

/**
 * cdp_set_ba_timeout() - set ba aging timeout per AC
 * @soc: pointer to the soc
 * @ac: Access category
 * @value: timeout value in millisec
 *
 * Return: void
 */
static inline void cdp_set_ba_timeout(ol_txrx_soc_handle soc,
				      uint8_t ac, uint32_t value)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout)
		return;

	soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout(soc, ac, value);
}

/**
 * cdp_get_ba_timeout() - return ba aging timeout per AC
 * @soc: pointer to the soc
 * @ac: access category
 * @value: [out] timeout value in millisec
 *
 * Return: void
 */
static inline void cdp_get_ba_timeout(ol_txrx_soc_handle soc,
				      uint8_t ac, uint32_t *value)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout)
		return;

	soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout(soc, ac, value);
}

/**
 * cdp_cfg_get() - get cfg for dp enum
 * @soc: pointer to the soc
 * @cfg: cfg enum
 *
 * Return: cfg value
 */
static inline uint32_t cdp_cfg_get(ol_txrx_soc_handle soc, enum cdp_dp_cfg cfg)
{
	/* NOTE(review): unlike most wrappers here, this one does not
	 * QDF_BUG on an invalid instance; it only logs and returns 0.
	 */
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_cfg)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_get_cfg(soc, cfg);
}
#endif /* _CDP_TXRX_CMN_H_ */