1 /* 2 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * @file cdp_txrx_cmn.h 21 * @brief Define the host data path converged API functions 22 * called by the host control SW and the OS interface module 23 */ 24 #ifndef _CDP_TXRX_CMN_H_ 25 #define _CDP_TXRX_CMN_H_ 26 27 #include "qdf_types.h" 28 #include "qdf_nbuf.h" 29 #include "cdp_txrx_ops.h" 30 #include "cdp_txrx_handle.h" 31 #include "cdp_txrx_cmn_struct.h" 32 /****************************************************************************** 33 * 34 * Common Data Path Header File 35 * 36 *****************************************************************************/ 37 #define dp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP, params) 38 #define dp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP, params) 39 #define dp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP, params) 40 #define dp_info(params...) \ 41 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP, ## params) 42 #define dp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params) 43 44 #define dp_alert_rl(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_DP, params) 45 #define dp_err_rl(params...) 
QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, params) 46 #define dp_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_DP, params) 47 #define dp_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_DP, params) 48 #define dp_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, params) 49 50 static inline QDF_STATUS 51 cdp_soc_attach_target(ol_txrx_soc_handle soc) 52 { 53 if (!soc || !soc->ops) { 54 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 55 "%s: Invalid Instance:", __func__); 56 QDF_BUG(0); 57 return QDF_STATUS_E_INVAL; 58 } 59 60 if (!soc->ops->cmn_drv_ops || 61 !soc->ops->cmn_drv_ops->txrx_soc_attach_target) 62 return QDF_STATUS_SUCCESS; 63 64 return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc); 65 66 } 67 68 static inline int 69 cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc) 70 { 71 if (!soc || !soc->ops) { 72 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 73 "%s: Invalid Instance:", __func__); 74 QDF_BUG(0); 75 return 0; 76 } 77 78 if (!soc->ops->cmn_drv_ops || 79 !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg) 80 return 0; 81 82 return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc); 83 } 84 85 static inline void 86 cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config) 87 { 88 if (!soc || !soc->ops) { 89 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 90 "%s: Invalid Instance:", __func__); 91 QDF_BUG(0); 92 return; 93 } 94 95 if (!soc->ops->cmn_drv_ops || 96 !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg) 97 return; 98 99 soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config); 100 } 101 102 static inline struct cdp_vdev * 103 cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 104 uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode) 105 { 106 if (!soc || !soc->ops) { 107 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 108 "%s: Invalid Instance:", __func__); 109 QDF_BUG(0); 110 return NULL; 111 } 112 113 if (!soc->ops->cmn_drv_ops || 114 !soc->ops->cmn_drv_ops->txrx_vdev_attach) 115 return NULL; 116 117 return 
soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev, 118 vdev_mac_addr, vdev_id, op_mode); 119 } 120 #ifndef CONFIG_WIN 121 /** 122 * cdp_flow_pool_map() - Create flow pool for vdev 123 * @soc - data path soc handle 124 * @pdev 125 * @vdev_id - vdev_id corresponding to vdev start 126 * 127 * Create per vdev flow pool. 128 * 129 * return none 130 */ 131 static inline QDF_STATUS cdp_flow_pool_map(ol_txrx_soc_handle soc, 132 struct cdp_pdev *pdev, uint8_t vdev_id) 133 { 134 if (!soc || !soc->ops) { 135 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 136 "%s: Invalid Instance:", __func__); 137 QDF_BUG(0); 138 return QDF_STATUS_E_INVAL; 139 } 140 141 if (!soc->ops->flowctl_ops || 142 !soc->ops->flowctl_ops->flow_pool_map_handler) 143 return QDF_STATUS_E_INVAL; 144 145 return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev, vdev_id); 146 } 147 148 /** 149 * cdp_flow_pool_unmap() - Delete flow pool 150 * @soc - data path soc handle 151 * @pdev 152 * @vdev_id - vdev_id corresponding to vdev start 153 * 154 * Delete flow pool 155 * 156 * return none 157 */ 158 static inline void cdp_flow_pool_unmap(ol_txrx_soc_handle soc, 159 struct cdp_pdev *pdev, uint8_t vdev_id) 160 { 161 if (!soc || !soc->ops) { 162 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 163 "%s: Invalid Instance:", __func__); 164 QDF_BUG(0); 165 return; 166 } 167 168 if (!soc->ops->flowctl_ops || 169 !soc->ops->flowctl_ops->flow_pool_unmap_handler) 170 return; 171 172 return soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev, 173 vdev_id); 174 } 175 #endif 176 177 static inline void 178 cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 179 ol_txrx_vdev_delete_cb callback, void *cb_context) 180 { 181 if (!soc || !soc->ops) { 182 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 183 "%s: Invalid Instance:", __func__); 184 QDF_BUG(0); 185 return; 186 } 187 188 if (!soc->ops->cmn_drv_ops || 189 !soc->ops->cmn_drv_ops->txrx_vdev_detach) 190 return; 191 192 
soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev, 193 callback, cb_context); 194 } 195 196 static inline int 197 cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) 198 { 199 if (!soc || !soc->ops) { 200 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 201 "%s: Invalid Instance:", __func__); 202 QDF_BUG(0); 203 return 0; 204 } 205 206 if (!soc->ops->cmn_drv_ops || 207 !soc->ops->cmn_drv_ops->txrx_pdev_attach_target) 208 return 0; 209 210 return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev); 211 } 212 213 static inline struct cdp_pdev *cdp_pdev_attach 214 (ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 215 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id) 216 { 217 if (!soc || !soc->ops) { 218 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 219 "%s: Invalid Instance:", __func__); 220 QDF_BUG(0); 221 return NULL; 222 } 223 224 if (!soc->ops->cmn_drv_ops || 225 !soc->ops->cmn_drv_ops->txrx_pdev_attach) 226 return NULL; 227 228 return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev, 229 htc_pdev, osdev, pdev_id); 230 } 231 232 static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc, 233 struct cdp_pdev *pdev) 234 { 235 if (!soc || !soc->ops) { 236 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 237 "%s: Invalid Instance:", __func__); 238 QDF_BUG(0); 239 return 0; 240 } 241 242 if (!soc->ops->cmn_drv_ops || 243 !soc->ops->cmn_drv_ops->txrx_pdev_post_attach) 244 return 0; 245 246 return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev); 247 } 248 249 static inline void 250 cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 251 { 252 if (!soc || !soc->ops) { 253 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 254 "%s: Invalid Instance:", __func__); 255 QDF_BUG(0); 256 return; 257 } 258 259 if (!soc->ops->cmn_drv_ops || 260 !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach) 261 return; 262 263 soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force); 264 } 265 266 static inline 
void 267 cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 268 { 269 if (!soc || !soc->ops) { 270 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 271 "%s: Invalid Instance:", __func__); 272 QDF_BUG(0); 273 return; 274 } 275 276 if (!soc->ops->cmn_drv_ops || 277 !soc->ops->cmn_drv_ops->txrx_pdev_detach) 278 return; 279 280 soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force); 281 } 282 283 static inline void 284 cdp_pdev_deinit(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 285 { 286 if (!soc || !soc->ops) { 287 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 288 "%s: Invalid Instance:", __func__); 289 QDF_BUG(0); 290 return; 291 } 292 293 if (!soc->ops->cmn_drv_ops || 294 !soc->ops->cmn_drv_ops->txrx_pdev_deinit) 295 return; 296 297 soc->ops->cmn_drv_ops->txrx_pdev_deinit(pdev, force); 298 } 299 300 static inline void *cdp_peer_create 301 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 302 uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer) 303 { 304 if (!soc || !soc->ops) { 305 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 306 "%s: Invalid Instance:", __func__); 307 QDF_BUG(0); 308 return NULL; 309 } 310 311 if (!soc->ops->cmn_drv_ops || 312 !soc->ops->cmn_drv_ops->txrx_peer_create) 313 return NULL; 314 315 return soc->ops->cmn_drv_ops->txrx_peer_create(vdev, 316 peer_mac_addr, ctrl_peer); 317 } 318 319 static inline void cdp_peer_setup 320 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) 321 { 322 if (!soc || !soc->ops) { 323 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 324 "%s: Invalid Instance:", __func__); 325 QDF_BUG(0); 326 return; 327 } 328 329 if (!soc->ops->cmn_drv_ops || 330 !soc->ops->cmn_drv_ops->txrx_peer_setup) 331 return; 332 333 soc->ops->cmn_drv_ops->txrx_peer_setup(vdev, 334 peer); 335 } 336 337 /** 338 * cdp_peer_get_ast_info_by_soc() - search the soc AST hash table 339 * and return ast entry information 340 * of first ast entry found in the 341 * table with given mac 
address 342 * 343 * @soc - data path soc handle 344 * @ast_mac_addr - AST entry mac address 345 * @ast_entry_info - ast entry information 346 * 347 * return - true if ast entry found with ast_mac_addr 348 * false if ast entry not found 349 */ 350 static inline bool cdp_peer_get_ast_info_by_soc 351 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 352 struct cdp_ast_entry_info *ast_entry_info) 353 { 354 if (!soc || !soc->ops) { 355 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 356 "%s: Invalid Instance:", __func__); 357 QDF_BUG(0); 358 return false; 359 } 360 361 if (!soc->ops->cmn_drv_ops || 362 !soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_soc) 363 return false; 364 365 return soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_soc 366 (soc, ast_mac_addr, 367 ast_entry_info); 368 } 369 370 /** 371 * cdp_peer_get_ast_info_by_pdev() - search the soc AST hash table 372 * and return ast entry information 373 * if mac address and pdev_id matches 374 * 375 * @soc - data path soc handle 376 * @ast_mac_addr - AST entry mac address 377 * @pdev_id - pdev_id 378 * @ast_entry_info - ast entry information 379 * 380 * return - true if ast entry found with ast_mac_addr 381 * false if ast entry not found 382 */ 383 static inline bool cdp_peer_get_ast_info_by_pdev 384 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 385 uint8_t pdev_id, 386 struct cdp_ast_entry_info *ast_entry_info) 387 { 388 if (!soc || !soc->ops) { 389 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 390 "%s: Invalid Instance:", __func__); 391 QDF_BUG(0); 392 return false; 393 } 394 395 if (!soc->ops->cmn_drv_ops || 396 !soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_pdev) 397 return false; 398 399 return soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_pdev 400 (soc, 401 ast_mac_addr, 402 pdev_id, 403 ast_entry_info); 404 } 405 406 /** 407 * cdp_peer_ast_delete_by_soc() - delete the ast entry from soc AST hash table 408 * with given mac address 409 * 410 * @soc - data path soc handle 411 * 
@ast_mac_addr - AST entry mac address 412 * @callback - callback function to called on ast delete response from FW 413 * @cookie - argument to be passed to callback 414 * 415 * return - QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete 416 * is sent 417 * QDF_STATUS_E_INVAL false if ast entry not found 418 */ 419 static inline QDF_STATUS cdp_peer_ast_delete_by_soc 420 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 421 txrx_ast_free_cb callback, 422 void *cookie) 423 { 424 if (!soc || !soc->ops) { 425 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 426 "%s: Invalid Instance:", __func__); 427 QDF_BUG(0); 428 return QDF_STATUS_E_INVAL; 429 } 430 431 if (!soc->ops->cmn_drv_ops || 432 !soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_soc) 433 return QDF_STATUS_E_INVAL; 434 435 return soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_soc 436 (soc, 437 ast_mac_addr, 438 callback, 439 cookie); 440 } 441 442 /** 443 * cdp_peer_ast_delete_by_pdev() - delete the ast entry from soc AST hash table 444 * if mac address and pdev_id matches 445 * 446 * @soc - data path soc handle 447 * @ast_mac_addr - AST entry mac address 448 * @pdev_id - pdev id 449 * @callback - callback function to called on ast delete response from FW 450 * @cookie - argument to be passed to callback 451 * 452 * return - QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete 453 * is sent 454 * QDF_STATUS_E_INVAL false if ast entry not found 455 */ 456 static inline QDF_STATUS cdp_peer_ast_delete_by_pdev 457 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 458 uint8_t pdev_id, txrx_ast_free_cb callback, 459 void *cookie) 460 { 461 if (!soc || !soc->ops) { 462 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 463 "%s: Invalid Instance:", __func__); 464 QDF_BUG(0); 465 return QDF_STATUS_E_INVAL; 466 } 467 468 if (!soc->ops->cmn_drv_ops || 469 !soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_pdev) 470 return QDF_STATUS_E_INVAL; 471 472 return 
soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_pdev 473 (soc, 474 ast_mac_addr, 475 pdev_id, 476 callback, 477 cookie); 478 } 479 480 static inline int cdp_peer_add_ast 481 (ol_txrx_soc_handle soc, struct cdp_peer *peer_handle, 482 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags) 483 { 484 if (!soc || !soc->ops) { 485 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 486 "%s: Invalid Instance:", __func__); 487 QDF_BUG(0); 488 return 0; 489 } 490 491 if (!soc->ops->cmn_drv_ops || 492 !soc->ops->cmn_drv_ops->txrx_peer_add_ast) 493 return 0; 494 495 return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc, 496 peer_handle, 497 mac_addr, 498 type, 499 flags); 500 } 501 502 static inline void cdp_peer_reset_ast 503 (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, void *vdev_hdl) 504 { 505 506 if (!soc || !soc->ops) { 507 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 508 "%s: Invalid Instance:", __func__); 509 QDF_BUG(0); 510 return; 511 } 512 if (!soc->ops->cmn_drv_ops || 513 !soc->ops->cmn_drv_ops->txrx_peer_reset_ast) 514 return; 515 516 soc->ops->cmn_drv_ops->txrx_peer_reset_ast(soc, wds_macaddr, vdev_hdl); 517 } 518 519 static inline void cdp_peer_reset_ast_table 520 (ol_txrx_soc_handle soc, void *vdev_hdl) 521 { 522 if (!soc || !soc->ops) { 523 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 524 "%s: Invalid Instance:", __func__); 525 QDF_BUG(0); 526 return; 527 } 528 529 if (!soc->ops->cmn_drv_ops || 530 !soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table) 531 return; 532 533 soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table(soc, vdev_hdl); 534 } 535 536 static inline void cdp_peer_flush_ast_table 537 (ol_txrx_soc_handle soc) 538 { 539 if (!soc || !soc->ops) { 540 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 541 "%s: Invalid Instance:", __func__); 542 QDF_BUG(0); 543 return; 544 } 545 546 if (!soc->ops->cmn_drv_ops || 547 !soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table) 548 return; 549 550 
soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table(soc); 551 } 552 553 static inline int cdp_peer_update_ast 554 (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, 555 struct cdp_peer *peer_handle, uint32_t flags) 556 { 557 if (!soc || !soc->ops) { 558 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 559 "%s: Invalid Instance:", __func__); 560 QDF_BUG(0); 561 return 0; 562 } 563 564 if (!soc->ops->cmn_drv_ops || 565 !soc->ops->cmn_drv_ops->txrx_peer_update_ast) 566 return 0; 567 568 569 return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc, 570 peer_handle, 571 wds_macaddr, 572 flags); 573 } 574 575 static inline void cdp_peer_teardown 576 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) 577 { 578 if (!soc || !soc->ops) { 579 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 580 "%s: Invalid Instance:", __func__); 581 QDF_BUG(0); 582 return; 583 } 584 585 if (!soc->ops->cmn_drv_ops || 586 !soc->ops->cmn_drv_ops->txrx_peer_teardown) 587 return; 588 589 soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer); 590 } 591 592 static inline void 593 cdp_vdev_flush_peers(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 594 bool unmap_only) 595 { 596 if (!soc || !soc->ops) { 597 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 598 "%s: Invalid Instance:", __func__); 599 QDF_BUG(0); 600 return; 601 } 602 603 if (!soc->ops->cmn_drv_ops || 604 !soc->ops->cmn_drv_ops->txrx_vdev_flush_peers) 605 return; 606 607 soc->ops->cmn_drv_ops->txrx_vdev_flush_peers(vdev, unmap_only); 608 } 609 610 static inline void 611 cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap) 612 { 613 if (!soc || !soc->ops) { 614 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 615 "%s: Invalid Instance:", __func__); 616 QDF_BUG(0); 617 return; 618 } 619 620 if (!soc->ops->cmn_drv_ops || 621 !soc->ops->cmn_drv_ops->txrx_peer_delete) 622 return; 623 624 soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap); 625 } 626 627 static inline void 628 
cdp_peer_delete_sync(ol_txrx_soc_handle soc, void *peer, 629 QDF_STATUS(*delete_cb)( 630 uint8_t vdev_id, 631 uint32_t peerid_cnt, 632 uint16_t *peerid_list), 633 uint32_t bitmap) 634 { 635 if (!soc || !soc->ops) { 636 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 637 "%s: Invalid Instance:", __func__); 638 QDF_BUG(0); 639 return; 640 } 641 642 if (!soc->ops->cmn_drv_ops || 643 !soc->ops->cmn_drv_ops->txrx_peer_delete_sync) 644 return; 645 646 soc->ops->cmn_drv_ops->txrx_peer_delete_sync(peer, 647 delete_cb, 648 bitmap); 649 } 650 651 static inline int 652 cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 653 uint8_t smart_monitor) 654 { 655 if (!soc || !soc->ops) { 656 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 657 "%s: Invalid Instance:", __func__); 658 QDF_BUG(0); 659 return 0; 660 } 661 662 if (!soc->ops->cmn_drv_ops || 663 !soc->ops->cmn_drv_ops->txrx_set_monitor_mode) 664 return 0; 665 666 return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev, 667 smart_monitor); 668 } 669 670 static inline void 671 cdp_set_curchan(ol_txrx_soc_handle soc, 672 struct cdp_pdev *pdev, 673 uint32_t chan_mhz) 674 { 675 if (!soc || !soc->ops) { 676 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 677 "%s: Invalid Instance:", __func__); 678 QDF_BUG(0); 679 return; 680 } 681 682 if (!soc->ops->cmn_drv_ops || 683 !soc->ops->cmn_drv_ops->txrx_set_curchan) 684 return; 685 686 soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz); 687 } 688 689 static inline void 690 cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 691 void *filter, uint32_t num) 692 { 693 if (!soc || !soc->ops) { 694 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 695 "%s: Invalid Instance:", __func__); 696 QDF_BUG(0); 697 return; 698 } 699 700 if (!soc->ops->cmn_drv_ops || 701 !soc->ops->cmn_drv_ops->txrx_set_privacy_filters) 702 return; 703 704 soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev, 705 filter, num); 706 } 707 708 static inline int 709 
cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 710 struct cdp_monitor_filter *filter_val) 711 { 712 if (soc->ops->mon_ops->txrx_set_advance_monitor_filter) 713 return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev, 714 filter_val); 715 return 0; 716 } 717 718 719 /****************************************************************************** 720 * Data Interface (B Interface) 721 *****************************************************************************/ 722 static inline void 723 cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 724 void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev, 725 struct ol_txrx_ops *txrx_ops) 726 { 727 if (!soc || !soc->ops) { 728 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 729 "%s: Invalid Instance:", __func__); 730 QDF_BUG(0); 731 return; 732 } 733 734 if (!soc->ops->cmn_drv_ops || 735 !soc->ops->cmn_drv_ops->txrx_vdev_register) 736 return; 737 738 soc->ops->cmn_drv_ops->txrx_vdev_register(vdev, 739 osif_vdev, ctrl_vdev, txrx_ops); 740 } 741 742 static inline int 743 cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 744 qdf_nbuf_t tx_mgmt_frm, uint8_t type) 745 { 746 if (!soc || !soc->ops) { 747 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 748 "%s: Invalid Instance:", __func__); 749 QDF_BUG(0); 750 return 0; 751 } 752 753 if (!soc->ops->cmn_drv_ops || 754 !soc->ops->cmn_drv_ops->txrx_mgmt_send) 755 return 0; 756 757 return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev, 758 tx_mgmt_frm, type); 759 } 760 761 static inline int 762 cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 763 qdf_nbuf_t tx_mgmt_frm, uint8_t type, 764 uint8_t use_6mbps, uint16_t chanfreq) 765 { 766 if (!soc || !soc->ops) { 767 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 768 "%s: Invalid Instance:", __func__); 769 QDF_BUG(0); 770 return 0; 771 } 772 773 if (!soc->ops->cmn_drv_ops || 774 !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext) 775 return 0; 776 777 return 
soc->ops->cmn_drv_ops->txrx_mgmt_send_ext 778 (vdev, tx_mgmt_frm, type, use_6mbps, chanfreq); 779 } 780 781 782 static inline void 783 cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 784 uint8_t type, ol_txrx_mgmt_tx_cb download_cb, 785 ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt) 786 { 787 if (!soc || !soc->ops) { 788 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 789 "%s: Invalid Instance:", __func__); 790 QDF_BUG(0); 791 return; 792 } 793 794 if (!soc->ops->cmn_drv_ops || 795 !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set) 796 return; 797 798 soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set 799 (pdev, type, download_cb, ota_ack_cb, ctxt); 800 } 801 802 static inline void 803 cdp_peer_unmap_sync_cb_set(ol_txrx_soc_handle soc, 804 struct cdp_pdev *pdev, 805 QDF_STATUS(*unmap_resp_cb)( 806 uint8_t vdev_id, 807 uint32_t peerid_cnt, 808 uint16_t *peerid_list)) 809 { 810 if (!soc || !soc->ops) { 811 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 812 "%s: Invalid Instance:", __func__); 813 QDF_BUG(0); 814 return; 815 } 816 817 if (!soc->ops->cmn_drv_ops || 818 !soc->ops->cmn_drv_ops->txrx_peer_unmap_sync_cb_set) 819 return; 820 821 soc->ops->cmn_drv_ops->txrx_peer_unmap_sync_cb_set(pdev, unmap_resp_cb); 822 } 823 824 static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc, 825 struct cdp_pdev *pdev) 826 { 827 if (!soc || !soc->ops) { 828 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 829 "%s: Invalid Instance:", __func__); 830 QDF_BUG(0); 831 return 0; 832 } 833 834 if (!soc->ops->cmn_drv_ops || 835 !soc->ops->cmn_drv_ops->txrx_get_tx_pending) 836 return 0; 837 838 839 return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev); 840 } 841 842 static inline void 843 cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev, 844 ol_txrx_data_tx_cb callback, void *ctxt) 845 { 846 if (!soc || !soc->ops) { 847 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 848 "%s: Invalid Instance:", __func__); 849 QDF_BUG(0); 850 return; 851 } 852 
853 if (!soc->ops->cmn_drv_ops || 854 !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set) 855 return; 856 857 soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev, 858 callback, ctxt); 859 } 860 861 /****************************************************************************** 862 * Statistics and Debugging Interface (C Interface) 863 *****************************************************************************/ 864 /** 865 * External Device physical address types 866 * 867 * Currently, both MAC and IPA uController use the same size addresses 868 * and descriptors are exchanged between these two depending on the mode. 869 * 870 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA 871 * operations. However, external device physical address sizes 872 * may be different from host-specific physical address sizes. 873 * This calls for the following definitions for target devices 874 * (MAC, IPA uc). 875 */ 876 #if HTT_PADDR64 877 typedef uint64_t target_paddr_t; 878 #else 879 typedef uint32_t target_paddr_t; 880 #endif /*HTT_PADDR64 */ 881 882 static inline int 883 cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 884 int max_subfrms_ampdu, 885 int max_subfrms_amsdu) 886 { 887 if (!soc || !soc->ops) { 888 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 889 "%s: Invalid Instance:", __func__); 890 QDF_BUG(0); 891 return 0; 892 } 893 894 if (!soc->ops->cmn_drv_ops || 895 !soc->ops->cmn_drv_ops->txrx_aggr_cfg) 896 return 0; 897 898 return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev, 899 max_subfrms_ampdu, max_subfrms_amsdu); 900 } 901 902 static inline int 903 cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 904 struct ol_txrx_stats_req *req, bool per_vdev, 905 bool response_expected) 906 { 907 if (!soc || !soc->ops) { 908 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 909 "%s: Invalid Instance:", __func__); 910 QDF_BUG(0); 911 return 0; 912 } 913 914 if (!soc->ops->cmn_drv_ops || 915 
!soc->ops->cmn_drv_ops->txrx_fw_stats_get) 916 return 0; 917 918 return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req, 919 per_vdev, response_expected); 920 } 921 922 static inline int 923 cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs) 924 { 925 if (!soc || !soc->ops) { 926 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 927 "%s: Invalid Instance:", __func__); 928 QDF_BUG(0); 929 return 0; 930 } 931 932 if (!soc->ops->cmn_drv_ops || 933 !soc->ops->cmn_drv_ops->txrx_debug) 934 return 0; 935 936 return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs); 937 } 938 939 static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc, 940 struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val) 941 { 942 if (!soc || !soc->ops) { 943 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 944 "%s: Invalid Instance:", __func__); 945 QDF_BUG(0); 946 return; 947 } 948 949 if (!soc->ops->cmn_drv_ops || 950 !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg) 951 return; 952 953 soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev, 954 cfg_stats_type, cfg_val); 955 } 956 957 static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level) 958 { 959 if (!soc || !soc->ops) { 960 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 961 "%s: Invalid Instance:", __func__); 962 QDF_BUG(0); 963 return; 964 } 965 966 if (!soc->ops->cmn_drv_ops || 967 !soc->ops->cmn_drv_ops->txrx_print_level_set) 968 return; 969 970 soc->ops->cmn_drv_ops->txrx_print_level_set(level); 971 } 972 973 static inline uint8_t * 974 cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 975 { 976 if (!soc || !soc->ops) { 977 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 978 "%s: Invalid Instance:", __func__); 979 QDF_BUG(0); 980 return NULL; 981 } 982 983 if (!soc->ops->cmn_drv_ops || 984 !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr) 985 return NULL; 986 987 return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev); 988 989 } 990 991 /** 992 * 
cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of 993 * vdev 994 * @vdev: vdev handle 995 * 996 * Return: Handle to struct qdf_mac_addr 997 */ 998 static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr 999 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 1000 { 1001 if (!soc || !soc->ops) { 1002 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1003 "%s: Invalid Instance:", __func__); 1004 QDF_BUG(0); 1005 return NULL; 1006 } 1007 1008 if (!soc->ops->cmn_drv_ops || 1009 !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr) 1010 return NULL; 1011 1012 return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr 1013 (vdev); 1014 1015 } 1016 1017 /** 1018 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev 1019 * @vdev: vdev handle 1020 * 1021 * Return: Handle to pdev 1022 */ 1023 static inline struct cdp_pdev *cdp_get_pdev_from_vdev 1024 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 1025 { 1026 if (!soc || !soc->ops) { 1027 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1028 "%s: Invalid Instance:", __func__); 1029 QDF_BUG(0); 1030 return NULL; 1031 } 1032 1033 if (!soc->ops->cmn_drv_ops || 1034 !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev) 1035 return NULL; 1036 1037 return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev); 1038 } 1039 1040 /** 1041 * cdp_get_os_rx_handles_from_vdev() - Return os rx handles for a vdev 1042 * @soc: ol_txrx_soc_handle handle 1043 * @vdev: vdev for which os rx handles are needed 1044 * @stack_fn_p: pointer to stack function pointer 1045 * @osif_handle_p: pointer to ol_osif_vdev_handle 1046 * 1047 * Return: void 1048 */ 1049 static inline 1050 void cdp_get_os_rx_handles_from_vdev(ol_txrx_soc_handle soc, 1051 struct cdp_vdev *vdev, 1052 ol_txrx_rx_fp *stack_fn_p, 1053 ol_osif_vdev_handle *osif_handle_p) 1054 { 1055 if (!soc || !soc->ops) { 1056 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1057 "%s: Invalid Instance:", __func__); 1058 QDF_BUG(0); 1059 return; 1060 } 1061 1062 if 
(!soc->ops->cmn_drv_ops || 1063 !soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev) 1064 return; 1065 1066 soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev(vdev, 1067 stack_fn_p, 1068 osif_handle_p); 1069 } 1070 1071 /** 1072 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev 1073 * @vdev: vdev handle 1074 * 1075 * Return: Handle to control pdev 1076 */ 1077 static inline struct cdp_cfg * 1078 cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 1079 { 1080 if (!soc || !soc->ops) { 1081 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1082 "%s: Invalid Instance:", __func__); 1083 QDF_BUG(0); 1084 return NULL; 1085 } 1086 1087 if (!soc->ops->cmn_drv_ops || 1088 !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev) 1089 return NULL; 1090 1091 return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev 1092 (vdev); 1093 } 1094 1095 static inline struct cdp_vdev * 1096 cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 1097 uint8_t vdev_id) 1098 { 1099 if (!soc || !soc->ops) { 1100 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1101 "%s: Invalid Instance:", __func__); 1102 QDF_BUG(0); 1103 return NULL; 1104 } 1105 1106 if (!soc->ops->cmn_drv_ops || 1107 !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id) 1108 return NULL; 1109 1110 return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id 1111 (pdev, vdev_id); 1112 } 1113 1114 static inline struct cdp_vdev * 1115 cdp_get_mon_vdev_from_pdev(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) 1116 { 1117 if (!soc || !soc->ops) { 1118 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1119 "%s: Invalid Instance:", __func__); 1120 QDF_BUG(0); 1121 return NULL; 1122 } 1123 1124 if (!soc->ops->cmn_drv_ops || 1125 !soc->ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev) 1126 return NULL; 1127 1128 return soc->ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev 1129 (pdev); 1130 } 1131 1132 static inline void 1133 cdp_soc_detach(ol_txrx_soc_handle soc) 1134 { 1135 if 
(!soc || !soc->ops) { 1136 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1137 "%s: Invalid Instance:", __func__); 1138 QDF_BUG(0); 1139 return; 1140 } 1141 1142 if (!soc->ops->cmn_drv_ops || 1143 !soc->ops->cmn_drv_ops->txrx_soc_detach) 1144 return; 1145 1146 soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc); 1147 } 1148 1149 /** 1150 * cdp_soc_init() - Initialize txrx SOC 1151 * @soc: ol_txrx_soc_handle handle 1152 * @devid: Device ID 1153 * @hif_handle: Opaque HIF handle 1154 * @psoc: Opaque Objmgr handle 1155 * @htc_handle: Opaque HTC handle 1156 * @qdf_dev: QDF device 1157 * @dp_ol_if_ops: Offload Operations 1158 * 1159 * Return: DP SOC handle on success, NULL on failure 1160 */ 1161 static inline ol_txrx_soc_handle 1162 cdp_soc_init(ol_txrx_soc_handle soc, u_int16_t devid, void *hif_handle, 1163 void *psoc, void *htc_handle, qdf_device_t qdf_dev, 1164 struct ol_if_ops *dp_ol_if_ops) 1165 { 1166 if (!soc || !soc->ops) { 1167 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1168 "%s: Invalid Instance:", __func__); 1169 QDF_BUG(0); 1170 return NULL; 1171 } 1172 1173 if (!soc->ops->cmn_drv_ops || 1174 !soc->ops->cmn_drv_ops->txrx_soc_init) 1175 return NULL; 1176 1177 return soc->ops->cmn_drv_ops->txrx_soc_init(soc, psoc, 1178 hif_handle, 1179 htc_handle, qdf_dev, 1180 dp_ol_if_ops, devid); 1181 } 1182 1183 /** 1184 * cdp_soc_deinit() - Deinitialize txrx SOC 1185 * @soc: Opaque DP SOC handle 1186 * 1187 * Return: None 1188 */ 1189 static inline void 1190 cdp_soc_deinit(ol_txrx_soc_handle soc) 1191 { 1192 if (!soc || !soc->ops) { 1193 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1194 "%s: Invalid Instance:", __func__); 1195 QDF_BUG(0); 1196 return; 1197 } 1198 1199 if (!soc->ops->cmn_drv_ops || 1200 !soc->ops->cmn_drv_ops->txrx_soc_deinit) 1201 return; 1202 1203 soc->ops->cmn_drv_ops->txrx_soc_deinit((void *)soc); 1204 } 1205 1206 /** 1207 * cdp_tso_soc_attach() - TSO attach function 1208 * @soc: ol_txrx_soc_handle handle 1209 * 1210 * Reserve TSO 
descriptor buffers 1211 * 1212 * Return: QDF_STATUS_SUCCESS on Success or 1213 * QDF_STATUS_E_FAILURE on failure 1214 */ 1215 static inline QDF_STATUS 1216 cdp_tso_soc_attach(ol_txrx_soc_handle soc) 1217 { 1218 if (!soc || !soc->ops) { 1219 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1220 "%s: Invalid Instance:", __func__); 1221 QDF_BUG(0); 1222 return 0; 1223 } 1224 1225 if (!soc->ops->cmn_drv_ops || 1226 !soc->ops->cmn_drv_ops->txrx_tso_soc_attach) 1227 return 0; 1228 1229 return soc->ops->cmn_drv_ops->txrx_tso_soc_attach((void *)soc); 1230 } 1231 1232 /** 1233 * cdp_tso_soc_detach() - TSO detach function 1234 * @soc: ol_txrx_soc_handle handle 1235 * 1236 * Release TSO descriptor buffers 1237 * 1238 * Return: QDF_STATUS_SUCCESS on Success or 1239 * QDF_STATUS_E_FAILURE on failure 1240 */ 1241 static inline QDF_STATUS 1242 cdp_tso_soc_detach(ol_txrx_soc_handle soc) 1243 { 1244 if (!soc || !soc->ops) { 1245 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1246 "%s: Invalid Instance:", __func__); 1247 QDF_BUG(0); 1248 return 0; 1249 } 1250 1251 if (!soc->ops->cmn_drv_ops || 1252 !soc->ops->cmn_drv_ops->txrx_tso_soc_detach) 1253 return 0; 1254 1255 return soc->ops->cmn_drv_ops->txrx_tso_soc_detach((void *)soc); 1256 } 1257 1258 /** 1259 * cdp_addba_resp_tx_completion() - Indicate addba response tx 1260 * completion to dp to change tid state. 
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: tid
 * @status: Tx completion status
 *
 * Return: success/failure of tid update
 */
static inline int cdp_addba_resp_tx_completion(ol_txrx_soc_handle soc,
					       void *peer_handle,
					       uint8_t tid, int status)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_resp_tx_completion)
		return 0;

	return soc->ops->cmn_drv_ops->addba_resp_tx_completion(peer_handle, tid,
							       status);
}

/**
 * cdp_addba_requestprocess() - Process an incoming ADDBA request
 * @soc: soc handle
 * @peer_handle: peer handle
 * @dialogtoken: dialog token from the ADDBA frame
 * @tid: tid
 * @batimeout: block-ack timeout
 * @buffersize: block-ack buffer size
 * @startseqnum: starting sequence number
 *
 * Return: op-specific status, 0 when no op is registered
 */
static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t dialogtoken, uint16_t tid,
	uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_requestprocess)
		return 0;

	return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle,
			dialogtoken, tid, batimeout, buffersize, startseqnum);
}

/**
 * cdp_addba_responsesetup() - Retrieve parameters for the ADDBA response
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: tid
 * @dialogtoken: [out] dialog token
 * @statuscode: [out] status code
 * @buffersize: [out] block-ack buffer size
 * @batimeout: [out] block-ack timeout
 *
 * Return: None
 */
static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t tid, uint8_t *dialogtoken,
	uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_responsesetup)
		return;

	soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid,
			dialogtoken, statuscode, buffersize, batimeout);
}

/**
 * cdp_delba_process() - Process an incoming DELBA frame
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: tid
 * @reasoncode: reason code from the DELBA frame
 *
 * Return: op-specific status, 0 when no op is registered
 */
static inline int cdp_delba_process(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t reasoncode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_process)
		return 0;

	return soc->ops->cmn_drv_ops->delba_process(peer_handle,
						    tid, reasoncode);
}

/**
 * cdp_delba_tx_completion() - Handle delba tx completion
 * to update stats and retry transmission if failed.
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: Tid number
 * @status: Tx completion status
 *
 * Return: 0 on Success, 1 on failure
 */
static inline int cdp_delba_tx_completion(ol_txrx_soc_handle soc,
					  void *peer_handle,
					  uint8_t tid, int status)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_tx_completion)
		return 0;

	return soc->ops->cmn_drv_ops->delba_tx_completion(peer_handle,
							  tid, status);
}

/**
 * cdp_set_addbaresponse() - Set the status code sent in the ADDBA response
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: tid
 * @statuscode: status code to report
 *
 * Return: None
 */
static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t statuscode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_addba_response)
		return;

	soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode);
}

/**
 * cdp_get_peer_mac_addr_frm_id: function to return vdev id and peer
 * mac address
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which mac_address is required
 * @mac_addr: reference to mac address
 *
 * Return: vdev_id of the vap
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
			     uint8_t *mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return CDP_INVALID_VDEV_ID;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
		return CDP_INVALID_VDEV_ID;

	return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
				peer_id, mac_addr);
}

/**
 * cdp_set_vdev_dscp_tid_map(): function to set DSCP-tid map in the vap
 * @soc: soc handle
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_vdev *vdev, uint8_t map_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map)
		return;

	soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev,
				map_id);
}

/**
 * cdp_ath_get_total_per(): function to get hw retries
 * @soc : soc handle
 * @pdev: pdev handle
 *
 * Return: get hw retries
 */
static inline
int cdp_ath_get_total_per(ol_txrx_soc_handle soc,
			  struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_total_per)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_get_total_per(pdev);
}

/**
 * cdp_set_pdev_dscp_tid_map(): function to change tid values in DSCP-tid map
 * @pdev: pdev handle
 * @map_id: id of the tid map
 * @tos: index value in map that needs to be changed
 * @tid: tid value passed by user
 *
 * Return: void
 */
static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map)
		return;

	soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev,
			map_id, tos, tid);
}

/**
 * cdp_hmmc_tid_override_en(): Function to enable hmmc tid override.
 * @soc : soc handle
 * @pdev: pdev handle
 * @val: hmmc-dscp flag value
 *
 * Return: void
 */
static inline void cdp_hmmc_tid_override_en(ol_txrx_soc_handle soc,
					    struct cdp_pdev *pdev, bool val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->hmmc_tid_override_en)
		return;

	soc->ops->cmn_drv_ops->hmmc_tid_override_en(pdev, val);
}

/**
 * cdp_set_hmmc_tid_val(): Function to set hmmc tid value.
1522 * @soc : soc handle 1523 * @pdev: pdev handle 1524 * @tid: tid value 1525 * 1526 * Return: void 1527 */ 1528 static inline void cdp_set_hmmc_tid_val(ol_txrx_soc_handle soc, 1529 struct cdp_pdev *pdev, uint8_t tid) 1530 { 1531 if (!soc || !soc->ops) { 1532 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1533 "%s: Invalid Instance:", __func__); 1534 QDF_BUG(0); 1535 return; 1536 } 1537 1538 if (!soc->ops->cmn_drv_ops || 1539 !soc->ops->cmn_drv_ops->set_hmmc_tid_val) 1540 return; 1541 1542 soc->ops->cmn_drv_ops->set_hmmc_tid_val(pdev, tid); 1543 } 1544 1545 /** 1546 * cdp_flush_cache_rx_queue() - flush cache rx queue frame 1547 * 1548 * Return: None 1549 */ 1550 static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc) 1551 { 1552 if (!soc || !soc->ops) { 1553 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1554 "%s: Invalid Instance:", __func__); 1555 QDF_BUG(0); 1556 return; 1557 } 1558 1559 if (!soc->ops->cmn_drv_ops || 1560 !soc->ops->cmn_drv_ops->flush_cache_rx_queue) 1561 return; 1562 soc->ops->cmn_drv_ops->flush_cache_rx_queue(); 1563 } 1564 1565 /** 1566 * cdp_txrx_stats_request(): function to map to host and firmware statistics 1567 * @soc: soc handle 1568 * @vdev: virtual device 1569 * @req: stats request container 1570 * 1571 * return: status 1572 */ 1573 static inline 1574 int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 1575 struct cdp_txrx_stats_req *req) 1576 { 1577 if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) { 1578 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1579 "%s: Invalid Instance:", __func__); 1580 QDF_ASSERT(0); 1581 return 0; 1582 } 1583 1584 if (soc->ops->cmn_drv_ops->txrx_stats_request) 1585 return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req); 1586 1587 return 0; 1588 } 1589 1590 /** 1591 * cdp_txrx_intr_attach(): function to attach and configure interrupt 1592 * @soc: soc handle 1593 */ 1594 static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc) 1595 { 
1596 if (!soc || !soc->ops) { 1597 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1598 "%s: Invalid Instance:", __func__); 1599 QDF_BUG(0); 1600 return 0; 1601 } 1602 1603 if (!soc->ops->cmn_drv_ops || 1604 !soc->ops->cmn_drv_ops->txrx_intr_attach) 1605 return 0; 1606 1607 return soc->ops->cmn_drv_ops->txrx_intr_attach(soc); 1608 } 1609 1610 /** 1611 * cdp_txrx_intr_detach(): function to detach interrupt 1612 * @soc: soc handle 1613 */ 1614 static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc) 1615 { 1616 if (!soc || !soc->ops) { 1617 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1618 "%s: Invalid Instance:", __func__); 1619 QDF_BUG(0); 1620 return; 1621 } 1622 1623 if (!soc->ops->cmn_drv_ops || 1624 !soc->ops->cmn_drv_ops->txrx_intr_detach) 1625 return; 1626 1627 soc->ops->cmn_drv_ops->txrx_intr_detach(soc); 1628 } 1629 1630 /** 1631 * cdp_display_stats(): function to map to dump stats 1632 * @soc: soc handle 1633 * @value: statistics option 1634 */ 1635 static inline QDF_STATUS 1636 cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value, 1637 enum qdf_stats_verbosity_level level) 1638 { 1639 if (!soc || !soc->ops) { 1640 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1641 "%s: Invalid Instance:", __func__); 1642 QDF_BUG(0); 1643 return 0; 1644 } 1645 1646 if (!soc->ops->cmn_drv_ops || 1647 !soc->ops->cmn_drv_ops->display_stats) 1648 return 0; 1649 1650 return soc->ops->cmn_drv_ops->display_stats(soc, value, level); 1651 } 1652 1653 1654 /** 1655 * cdp_set_pn_check(): function to set pn check 1656 * @soc: soc handle 1657 * @sec_type: security type 1658 * #rx_pn: receive pn 1659 */ 1660 static inline int cdp_set_pn_check(ol_txrx_soc_handle soc, 1661 struct cdp_vdev *vdev, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn) 1662 { 1663 if (!soc || !soc->ops) { 1664 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1665 "%s: Invalid Instance:", __func__); 1666 QDF_BUG(0); 1667 return 0; 1668 } 1669 1670 if 
(!soc->ops->cmn_drv_ops || 1671 !soc->ops->cmn_drv_ops->set_pn_check) 1672 return 0; 1673 1674 soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle, 1675 sec_type, rx_pn); 1676 return 0; 1677 } 1678 1679 static inline int cdp_set_key(ol_txrx_soc_handle soc, 1680 struct cdp_peer *peer_handle, 1681 bool is_unicast, uint32_t *key) 1682 { 1683 if (!soc || !soc->ops) { 1684 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1685 "%s: Invalid Instance:", __func__); 1686 QDF_BUG(0); 1687 return 0; 1688 } 1689 1690 if (!soc->ops->ctrl_ops || 1691 !soc->ops->ctrl_ops->set_key) 1692 return 0; 1693 1694 soc->ops->ctrl_ops->set_key(peer_handle, 1695 is_unicast, key); 1696 return 0; 1697 } 1698 1699 /** 1700 * cdp_update_config_parameters(): function to propagate configuration 1701 * parameters to datapath 1702 * @soc: opaque soc handle 1703 * @cfg: configuration handle 1704 * 1705 * Return: status: 0 - Success, non-zero: Failure 1706 */ 1707 static inline 1708 QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc, 1709 struct cdp_config_params *cfg) 1710 { 1711 struct cdp_soc *psoc = (struct cdp_soc *)soc; 1712 1713 if (!soc || !soc->ops) { 1714 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1715 "%s: Invalid Instance:", __func__); 1716 QDF_BUG(0); 1717 return 0; 1718 } 1719 1720 if (!soc->ops->cmn_drv_ops || 1721 !soc->ops->cmn_drv_ops->update_config_parameters) 1722 return QDF_STATUS_SUCCESS; 1723 1724 return soc->ops->cmn_drv_ops->update_config_parameters(psoc, 1725 cfg); 1726 } 1727 1728 /** 1729 * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev 1730 * @soc: opaque soc handle 1731 * @pdev: data path pdev handle 1732 * 1733 * Return: opaque dp handle 1734 */ 1735 static inline void * 1736 cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev) 1737 { 1738 if (!soc || !soc->ops) { 1739 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1740 "%s: Invalid Instance:", __func__); 1741 QDF_BUG(0); 1742 return 0; 1743 } 1744 1745 if 
(soc->ops->cmn_drv_ops->get_dp_txrx_handle) 1746 return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev); 1747 1748 return 0; 1749 } 1750 1751 /** 1752 * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev 1753 * @soc: opaque soc handle 1754 * @pdev: data path pdev handle 1755 * @dp_hdl: opaque pointer for dp_txrx_handle 1756 * 1757 * Return: void 1758 */ 1759 static inline void 1760 cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl) 1761 { 1762 if (!soc || !soc->ops) { 1763 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1764 "%s: Invalid Instance:", __func__); 1765 QDF_BUG(0); 1766 return; 1767 } 1768 1769 if (!soc->ops->cmn_drv_ops || 1770 !soc->ops->cmn_drv_ops->set_dp_txrx_handle) 1771 return; 1772 1773 soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl); 1774 } 1775 1776 /* 1777 * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc 1778 * @soc: opaque soc handle 1779 * 1780 * Return: opaque extended dp handle 1781 */ 1782 static inline void * 1783 cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc) 1784 { 1785 if (!soc || !soc->ops) { 1786 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1787 "%s: Invalid Instance:", __func__); 1788 QDF_BUG(0); 1789 return NULL; 1790 } 1791 1792 if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle) 1793 return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle( 1794 (struct cdp_soc *) soc); 1795 1796 return NULL; 1797 } 1798 1799 /** 1800 * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc 1801 * @soc: opaque soc handle 1802 * @dp_hdl: opaque pointer for dp_txrx_handle 1803 * 1804 * Return: void 1805 */ 1806 static inline void 1807 cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle) 1808 { 1809 if (!soc || !soc->ops) { 1810 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1811 "%s: Invalid Instance:", __func__); 1812 QDF_BUG(0); 1813 return; 1814 } 1815 1816 if (!soc->ops->cmn_drv_ops || 1817 !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle) 1818 
return; 1819 1820 soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc, 1821 dp_handle); 1822 } 1823 1824 /** 1825 * cdp_tx_send() - enqueue frame for transmission 1826 * @soc: soc opaque handle 1827 * @vdev: VAP device 1828 * @nbuf: nbuf to be enqueued 1829 * 1830 * This API is used by Extended Datapath modules to enqueue frame for 1831 * transmission 1832 * 1833 * Return: void 1834 */ 1835 static inline void 1836 cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf) 1837 { 1838 if (!soc || !soc->ops) { 1839 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1840 "%s: Invalid Instance:", __func__); 1841 QDF_BUG(0); 1842 return; 1843 } 1844 1845 if (!soc->ops->cmn_drv_ops || 1846 !soc->ops->cmn_drv_ops->tx_send) 1847 return; 1848 1849 soc->ops->cmn_drv_ops->tx_send(vdev, nbuf); 1850 } 1851 1852 /* 1853 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev 1854 * @soc: opaque soc handle 1855 * @pdev: data path pdev handle 1856 * 1857 * Return: pdev_id 1858 */ 1859 static inline 1860 uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc, 1861 struct cdp_pdev *pdev) 1862 { 1863 if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev) 1864 return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev); 1865 return 0; 1866 } 1867 1868 /* 1869 * cdp_get_vow_config_frm_pdev() - return carrier_vow_config from pdev 1870 * @soc: opaque soc handle 1871 * @pdev: data path pdev handle 1872 * 1873 * Return: carrier_vow_config 1874 */ 1875 static inline 1876 bool cdp_get_vow_config_frm_pdev(ol_txrx_soc_handle soc, 1877 struct cdp_pdev *pdev) 1878 { 1879 if (soc->ops->cmn_drv_ops->txrx_get_vow_config_frm_pdev) 1880 return soc->ops->cmn_drv_ops->txrx_get_vow_config_frm_pdev( 1881 pdev); 1882 return 0; 1883 } 1884 1885 /** 1886 * cdp_pdev_set_chan_noise_floor() - Set channel noise floor to DP layer 1887 * @soc: opaque soc handle 1888 * @pdev: data path pdev handle 1889 * @chan_noise_floor: Channel Noise Floor (in dbM) obtained from control path 
1890 * 1891 * Return: None 1892 */ 1893 static inline 1894 void cdp_pdev_set_chan_noise_floor(ol_txrx_soc_handle soc, 1895 struct cdp_pdev *pdev, 1896 int16_t chan_noise_floor) 1897 { 1898 if (soc->ops->cmn_drv_ops->txrx_pdev_set_chan_noise_floor) 1899 return soc->ops->cmn_drv_ops->txrx_pdev_set_chan_noise_floor( 1900 pdev, chan_noise_floor); 1901 } 1902 1903 /** 1904 * cdp_set_nac() - set nac 1905 * @soc: opaque soc handle 1906 * @peer: data path peer handle 1907 * 1908 */ 1909 static inline 1910 void cdp_set_nac(ol_txrx_soc_handle soc, 1911 struct cdp_peer *peer) 1912 { 1913 if (soc->ops->cmn_drv_ops->txrx_set_nac) 1914 soc->ops->cmn_drv_ops->txrx_set_nac(peer); 1915 } 1916 1917 /** 1918 * cdp_set_pdev_tx_capture() - set pdev tx_capture 1919 * @soc: opaque soc handle 1920 * @pdev: data path pdev handle 1921 * @val: value of pdev_tx_capture 1922 * 1923 * Return: status: 0 - Success, non-zero: Failure 1924 */ 1925 static inline 1926 QDF_STATUS cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc, 1927 struct cdp_pdev *pdev, int val) 1928 { 1929 if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture) 1930 return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev, 1931 val); 1932 return QDF_STATUS_SUCCESS; 1933 } 1934 1935 /** 1936 * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id 1937 * @soc: opaque soc handle 1938 * @pdev: data path pdev handle 1939 * @peer_id: data path peer id 1940 * @peer_mac: peer_mac 1941 * 1942 * Return: void 1943 */ 1944 static inline 1945 void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc, 1946 struct cdp_pdev *pdev_handle, 1947 uint32_t peer_id, uint8_t *peer_mac) 1948 { 1949 if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id) 1950 soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id( 1951 pdev_handle, peer_id, peer_mac); 1952 } 1953 1954 /** 1955 * cdp_vdev_tx_lock() - acquire lock 1956 * @soc: opaque soc handle 1957 * @vdev: data path vdev handle 1958 * 1959 * Return: void 1960 */ 1961 static inline 1962 void 
cdp_vdev_tx_lock(ol_txrx_soc_handle soc, 1963 struct cdp_vdev *vdev) 1964 { 1965 if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock) 1966 soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev); 1967 } 1968 1969 /** 1970 * cdp_vdev_tx_unlock() - release lock 1971 * @soc: opaque soc handle 1972 * @vdev: data path vdev handle 1973 * 1974 * Return: void 1975 */ 1976 static inline 1977 void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc, 1978 struct cdp_vdev *vdev) 1979 { 1980 if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock) 1981 soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev); 1982 } 1983 1984 /** 1985 * cdp_ath_getstats() - get updated athstats 1986 * @soc: opaque soc handle 1987 * @dev: dp interface handle 1988 * @stats: cdp network device stats structure 1989 * @type: device type pdev/vdev 1990 * 1991 * Return: void 1992 */ 1993 static inline void cdp_ath_getstats(ol_txrx_soc_handle soc, 1994 void *dev, struct cdp_dev_stats *stats, 1995 uint8_t type) 1996 { 1997 if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats) 1998 soc->ops->cmn_drv_ops->txrx_ath_getstats(dev, stats, type); 1999 } 2000 2001 /** 2002 * cdp_set_gid_flag() - set groupid flag 2003 * @soc: opaque soc handle 2004 * @pdev: data path pdev handle 2005 * @mem_status: member status from grp management frame 2006 * @user_position: user position from grp management frame 2007 * 2008 * Return: void 2009 */ 2010 static inline 2011 void cdp_set_gid_flag(ol_txrx_soc_handle soc, 2012 struct cdp_pdev *pdev, u_int8_t *mem_status, 2013 u_int8_t *user_position) 2014 { 2015 if (soc->ops->cmn_drv_ops->txrx_set_gid_flag) 2016 soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status, user_position); 2017 } 2018 2019 /** 2020 * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version 2021 * @soc: opaque soc handle 2022 * @pdev: data path pdev handle 2023 * 2024 */ 2025 static inline 2026 uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc, 2027 struct cdp_pdev *pdev) 2028 { 2029 if 
(soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version) 2030 return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(pdev); 2031 return 0; 2032 } 2033 2034 /** 2035 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev 2036 * @soc: opaque soc handle 2037 * @ni: associated node 2038 * @force: number of frame in SW queue 2039 * Return: void 2040 */ 2041 static inline 2042 void cdp_if_mgmt_drain(ol_txrx_soc_handle soc, 2043 void *ni, int force) 2044 { 2045 if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain) 2046 soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force); 2047 } 2048 2049 /* cdp_peer_map_attach() - CDP API to allocate PEER map memory 2050 * @soc: opaque soc handle 2051 * @max_peers: number of peers created in FW 2052 * @max_ast_index: max number of AST index supported in FW 2053 * @peer_map_unmap_v2: flag indicates HTT peer map v2 is enabled in FW 2054 * 2055 * 2056 * Return: void 2057 */ 2058 static inline void 2059 cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers, 2060 uint32_t max_ast_index, bool peer_map_unmap_v2) 2061 { 2062 if (soc && soc->ops && soc->ops->cmn_drv_ops && 2063 soc->ops->cmn_drv_ops->txrx_peer_map_attach) 2064 soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc, max_peers, 2065 max_ast_index, 2066 peer_map_unmap_v2); 2067 } 2068 2069 /** 2070 2071 * cdp_pdev_set_ctrl_pdev() - set UMAC ctrl pdev to dp pdev 2072 * @soc: opaque soc handle 2073 * @pdev: opaque dp pdev handle 2074 * @ctrl_pdev: opaque ctrl pdev handle 2075 * 2076 * Return: void 2077 */ 2078 static inline void 2079 cdp_pdev_set_ctrl_pdev(ol_txrx_soc_handle soc, struct cdp_pdev *dp_pdev, 2080 struct cdp_ctrl_objmgr_pdev *ctrl_pdev) 2081 { 2082 if (soc && soc->ops && soc->ops->cmn_drv_ops && 2083 soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev) 2084 soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev(dp_pdev, 2085 ctrl_pdev); 2086 } 2087 2088 /* cdp_txrx_classify_and_update() - To classify the packet and update stats 2089 * @soc: opaque soc handle 2090 * @vdev: 
 * opaque dp vdev handle
 * @skb: data
 * @dir: rx or tx packet
 * @nbuf_class: packet classification object
 *
 * Return: 1 on success else return 0
 */
static inline int
cdp_txrx_classify_and_update(ol_txrx_soc_handle soc,
			     struct cdp_vdev *vdev, qdf_nbuf_t skb,
			     enum txrx_direction dir,
			     struct ol_txrx_nbuf_classify *nbuf_class)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_classify_update)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_classify_update(vdev,
							   skb,
							   dir, nbuf_class);
}

/**
 * cdp_get_dp_capabilities() - get DP capabilities
 * @soc: opaque soc handle
 * @dp_caps: enum of DP capabilities
 *
 * Return: bool
 */
static inline bool
cdp_get_dp_capabilities(struct cdp_soc_t *soc, enum cdp_capabilities dp_caps)
{
	if (soc && soc->ops && soc->ops->cmn_drv_ops &&
	    soc->ops->cmn_drv_ops->get_dp_capabilities)
		return soc->ops->cmn_drv_ops->get_dp_capabilities(soc, dp_caps);
	return false;
}

#ifdef RECEIVE_OFFLOAD
/**
 * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer
 * @soc - data path soc handle
 * @rx_ol_flush_cb - flush callback function pointer
 *
 * register rx offload flush callback function pointer
 *
 * return none
 */
static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc,
						  void (rx_ol_flush_cb)(void *))
{
	if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb)
		return soc->ops->rx_offld_ops->register_rx_offld_flush_cb(
				rx_ol_flush_cb);
}

/**
 * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function
 * @soc - data path soc handle
 *
 * deregister rx offload flush callback function pointer
 *
 * return none
 */
static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if (soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb)
		return soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb();
}
#endif /* RECEIVE_OFFLOAD */

/**
 * cdp_set_ba_timeout() - set ba aging timeout per AC
 *
 * @soc - pointer to the soc
 * @ac - Access category
 * @value - timeout value in millisec
 *
 * @return - void
 */
static inline void cdp_set_ba_timeout(ol_txrx_soc_handle soc,
				      uint8_t ac, uint32_t value)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout)
		return;

	soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout(soc, ac, value);
}

/**
 * cdp_get_ba_timeout() - return ba aging timeout per AC
 *
 * @soc - pointer to the soc
 * @ac - access category
 * @value - [out] timeout value in millisec
 *
 * @return - void
 */
static inline void cdp_get_ba_timeout(ol_txrx_soc_handle soc,
				      uint8_t ac, uint32_t *value)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout)
		return;

	soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout(soc, ac, value);
}

/**
 * cdp_cfg_get() - get cfg for dp enum
 *
 * @soc: pointer to the soc
 * @cfg: cfg enum
 *
 * Return - cfg value
 */
static inline uint32_t cdp_cfg_get(ol_txrx_soc_handle soc, enum cdp_dp_cfg cfg)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops || !soc->ops->cmn_drv_ops->txrx_get_cfg)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_get_cfg(soc, cfg);
}
#endif /* _CDP_TXRX_CMN_H_ */