1 /* 2 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * @file cdp_txrx_cmn.h 21 * @brief Define the host data path converged API functions 22 * called by the host control SW and the OS interface module 23 */ 24 #ifndef _CDP_TXRX_CMN_H_ 25 #define _CDP_TXRX_CMN_H_ 26 27 #include "qdf_types.h" 28 #include "qdf_nbuf.h" 29 #include "cdp_txrx_ops.h" 30 #include "cdp_txrx_handle.h" 31 #include "cdp_txrx_cmn_struct.h" 32 /****************************************************************************** 33 * 34 * Common Data Path Header File 35 * 36 *****************************************************************************/ 37 38 static inline int 39 cdp_soc_attach_target(ol_txrx_soc_handle soc) 40 { 41 if (!soc || !soc->ops) { 42 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 43 "%s: Invalid Instance:", __func__); 44 QDF_BUG(0); 45 return 0; 46 } 47 48 if (!soc->ops->cmn_drv_ops || 49 !soc->ops->cmn_drv_ops->txrx_soc_attach_target) 50 return 0; 51 52 return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc); 53 54 } 55 56 static inline int 57 cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc) 58 { 59 if (!soc || !soc->ops) { 60 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 61 "%s: 
Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc);
}

/* Push the NSS offload configuration down to the soc layer;
 * silently no-ops when the op is not registered. */
static inline void
cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg)
		return;

	soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config);
}

/* Attach a virtual device with the given mac address/id/opmode to @pdev;
 * returns the new vdev handle, or NULL when the instance/op is missing. */
static inline struct cdp_vdev *
cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_attach)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev,
		vdev_mac_addr, vdev_id, op_mode);
}
#ifndef CONFIG_WIN
/**
 * cdp_flow_pool_map() - Create flow pool for vdev
 * @soc: data path soc handle
 * @pdev: physical device the vdev belongs to
 * @vdev_id: vdev_id corresponding to vdev start
 *
 * Create per vdev flow pool.
 *
 * Return: QDF_STATUS from the flow-control handler, or
 *         QDF_STATUS_E_INVAL when the instance/op is missing
 */
static inline QDF_STATUS cdp_flow_pool_map(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return QDF_STATUS_E_INVAL;
	}

	if (!soc->ops->flowctl_ops ||
	    !soc->ops->flowctl_ops->flow_pool_map_handler)
		return QDF_STATUS_E_INVAL;

	return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev, vdev_id);
}

/**
 * cdp_flow_pool_unmap() - Delete flow pool
 * @soc: data path soc handle
 * @pdev: physical device the vdev belongs to
 * @vdev_id: vdev_id corresponding to vdev start
 *
 * Delete flow pool
 *
 * Return: none
 */
static inline void cdp_flow_pool_unmap(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->flowctl_ops ||
	    !soc->ops->flowctl_ops->flow_pool_unmap_handler)
		return;

	return soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev,
			vdev_id);
}
#endif

/* Detach @vdev; the lower layer invokes @callback(@cb_context) once the
 * detach has completed. */
static inline void
cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_detach)
		return;

	soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev,
		callback, cb_context);
}

/* Attach @pdev to the target; returns the op's result, or 0 when the
 * instance/op is missing. */
static inline int
cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
} 193 194 if (!soc->ops->cmn_drv_ops || 195 !soc->ops->cmn_drv_ops->txrx_pdev_attach_target) 196 return 0; 197 198 return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev); 199 } 200 201 static inline struct cdp_pdev *cdp_pdev_attach 202 (ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 203 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id) 204 { 205 if (!soc || !soc->ops) { 206 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 207 "%s: Invalid Instance:", __func__); 208 QDF_BUG(0); 209 return NULL; 210 } 211 212 if (!soc->ops->cmn_drv_ops || 213 !soc->ops->cmn_drv_ops->txrx_pdev_attach) 214 return NULL; 215 216 return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev, 217 htc_pdev, osdev, pdev_id); 218 } 219 220 static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc, 221 struct cdp_pdev *pdev) 222 { 223 if (!soc || !soc->ops) { 224 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 225 "%s: Invalid Instance:", __func__); 226 QDF_BUG(0); 227 return 0; 228 } 229 230 if (!soc->ops->cmn_drv_ops || 231 !soc->ops->cmn_drv_ops->txrx_pdev_post_attach) 232 return 0; 233 234 return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev); 235 } 236 237 static inline void 238 cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 239 { 240 if (!soc || !soc->ops) { 241 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 242 "%s: Invalid Instance:", __func__); 243 QDF_BUG(0); 244 return; 245 } 246 247 if (!soc->ops->cmn_drv_ops || 248 !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach) 249 return; 250 251 soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force); 252 } 253 254 static inline void 255 cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) 256 { 257 if (!soc || !soc->ops) { 258 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 259 "%s: Invalid Instance:", __func__); 260 QDF_BUG(0); 261 return; 262 } 263 264 if (!soc->ops->cmn_drv_ops || 265 !soc->ops->cmn_drv_ops->txrx_pdev_detach) 266 return; 
267 268 soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force); 269 } 270 271 static inline void *cdp_peer_create 272 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 273 uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer) 274 { 275 if (!soc || !soc->ops) { 276 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 277 "%s: Invalid Instance:", __func__); 278 QDF_BUG(0); 279 return NULL; 280 } 281 282 if (!soc->ops->cmn_drv_ops || 283 !soc->ops->cmn_drv_ops->txrx_peer_create) 284 return NULL; 285 286 return soc->ops->cmn_drv_ops->txrx_peer_create(vdev, 287 peer_mac_addr, ctrl_peer); 288 } 289 290 static inline void cdp_peer_setup 291 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) 292 { 293 if (!soc || !soc->ops) { 294 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 295 "%s: Invalid Instance:", __func__); 296 QDF_BUG(0); 297 return; 298 } 299 300 if (!soc->ops->cmn_drv_ops || 301 !soc->ops->cmn_drv_ops->txrx_peer_setup) 302 return; 303 304 soc->ops->cmn_drv_ops->txrx_peer_setup(vdev, 305 peer); 306 } 307 308 static inline void *cdp_peer_ast_hash_find 309 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr) 310 { 311 if (!soc || !soc->ops) { 312 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 313 "%s: Invalid Instance:", __func__); 314 QDF_BUG(0); 315 return NULL; 316 } 317 318 if (!soc->ops->cmn_drv_ops || 319 !soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find) 320 return NULL; 321 322 return soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find(soc, 323 ast_mac_addr); 324 } 325 326 static inline int cdp_peer_add_ast 327 (ol_txrx_soc_handle soc, struct cdp_peer *peer_handle, 328 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags) 329 { 330 if (!soc || !soc->ops) { 331 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 332 "%s: Invalid Instance:", __func__); 333 QDF_BUG(0); 334 return 0; 335 } 336 337 if (!soc->ops->cmn_drv_ops || 338 !soc->ops->cmn_drv_ops->txrx_peer_add_ast) 339 return 0; 340 341 return 
soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc, 342 peer_handle, 343 mac_addr, 344 type, 345 flags); 346 } 347 348 static inline void cdp_peer_reset_ast 349 (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, void *vdev_hdl) 350 { 351 352 if (!soc || !soc->ops) { 353 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 354 "%s: Invalid Instance:", __func__); 355 QDF_BUG(0); 356 return; 357 } 358 if (!soc->ops->cmn_drv_ops || 359 !soc->ops->cmn_drv_ops->txrx_peer_reset_ast) 360 return; 361 362 soc->ops->cmn_drv_ops->txrx_peer_reset_ast(soc, wds_macaddr, vdev_hdl); 363 } 364 365 static inline void cdp_peer_reset_ast_table 366 (ol_txrx_soc_handle soc, void *vdev_hdl) 367 { 368 if (!soc || !soc->ops) { 369 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 370 "%s: Invalid Instance:", __func__); 371 QDF_BUG(0); 372 return; 373 } 374 375 if (!soc->ops->cmn_drv_ops || 376 !soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table) 377 return; 378 379 soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table(soc, vdev_hdl); 380 } 381 382 static inline void cdp_peer_flush_ast_table 383 (ol_txrx_soc_handle soc) 384 { 385 if (!soc || !soc->ops) { 386 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 387 "%s: Invalid Instance:", __func__); 388 QDF_BUG(0); 389 return; 390 } 391 392 if (!soc->ops->cmn_drv_ops || 393 !soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table) 394 return; 395 396 soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table(soc); 397 } 398 399 static inline int cdp_peer_update_ast 400 (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, 401 struct cdp_peer *peer_handle, uint32_t flags) 402 { 403 if (!soc || !soc->ops) { 404 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 405 "%s: Invalid Instance:", __func__); 406 QDF_BUG(0); 407 return 0; 408 } 409 410 if (!soc->ops->cmn_drv_ops || 411 !soc->ops->cmn_drv_ops->txrx_peer_update_ast) 412 return 0; 413 414 415 return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc, 416 peer_handle, 417 wds_macaddr, 418 flags); 419 } 420 421 static inline void 
cdp_peer_del_ast 422 (ol_txrx_soc_handle soc, void *ast_handle) 423 { 424 if (!soc || !soc->ops) { 425 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 426 "%s: Invalid Instance:", __func__); 427 QDF_BUG(0); 428 return; 429 } 430 431 if (!soc->ops->cmn_drv_ops || 432 !soc->ops->cmn_drv_ops->txrx_peer_del_ast) 433 return; 434 435 soc->ops->cmn_drv_ops->txrx_peer_del_ast(soc, ast_handle); 436 } 437 438 439 static inline uint8_t cdp_peer_ast_get_pdev_id 440 (ol_txrx_soc_handle soc, void *ast_handle) 441 { 442 if (!soc || !soc->ops) { 443 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 444 "%s: Invalid Instance:", __func__); 445 QDF_BUG(0); 446 return 0xff; 447 } 448 449 if (!soc->ops->cmn_drv_ops || 450 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id) 451 return 0xff; 452 453 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id(soc, 454 ast_handle); 455 } 456 457 static inline uint8_t cdp_peer_ast_get_next_hop 458 (ol_txrx_soc_handle soc, void *ast_handle) 459 { 460 if (!soc || !soc->ops) { 461 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 462 "%s: Invalid Instance:", __func__); 463 QDF_BUG(0); 464 return 0xff; 465 } 466 467 if (!soc->ops->cmn_drv_ops || 468 !soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop) 469 return 0xff; 470 471 return soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop(soc, 472 ast_handle); 473 } 474 475 static inline void cdp_peer_ast_set_type 476 (ol_txrx_soc_handle soc, void *ast_handle, 477 enum cdp_txrx_ast_entry_type type) 478 { 479 if (!soc || !soc->ops) { 480 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 481 "%s: Invalid Instance:", __func__); 482 QDF_BUG(0); 483 return; 484 } 485 486 if (!soc->ops->cmn_drv_ops || 487 !soc->ops->cmn_drv_ops->txrx_peer_ast_set_type) 488 return; 489 490 soc->ops->cmn_drv_ops->txrx_peer_ast_set_type(soc, ast_handle, type); 491 } 492 493 static inline void cdp_peer_teardown 494 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) 495 { 496 if (!soc || !soc->ops) { 497 
QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 498 "%s: Invalid Instance:", __func__); 499 QDF_BUG(0); 500 return; 501 } 502 503 if (!soc->ops->cmn_drv_ops || 504 !soc->ops->cmn_drv_ops->txrx_peer_teardown) 505 return; 506 507 soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer); 508 } 509 510 static inline void 511 cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap) 512 { 513 if (!soc || !soc->ops) { 514 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 515 "%s: Invalid Instance:", __func__); 516 QDF_BUG(0); 517 return; 518 } 519 520 if (!soc->ops->cmn_drv_ops || 521 !soc->ops->cmn_drv_ops->txrx_peer_delete) 522 return; 523 524 soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap); 525 } 526 527 static inline int 528 cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 529 uint8_t smart_monitor) 530 { 531 if (!soc || !soc->ops) { 532 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 533 "%s: Invalid Instance:", __func__); 534 QDF_BUG(0); 535 return 0; 536 } 537 538 if (!soc->ops->cmn_drv_ops || 539 !soc->ops->cmn_drv_ops->txrx_set_monitor_mode) 540 return 0; 541 542 return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev, 543 smart_monitor); 544 } 545 546 static inline void 547 cdp_set_curchan(ol_txrx_soc_handle soc, 548 struct cdp_pdev *pdev, 549 uint32_t chan_mhz) 550 { 551 if (!soc || !soc->ops) { 552 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 553 "%s: Invalid Instance:", __func__); 554 QDF_BUG(0); 555 return; 556 } 557 558 if (!soc->ops->cmn_drv_ops || 559 !soc->ops->cmn_drv_ops->txrx_set_curchan) 560 return; 561 562 soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz); 563 } 564 565 static inline void 566 cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 567 void *filter, uint32_t num) 568 { 569 if (!soc || !soc->ops) { 570 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 571 "%s: Invalid Instance:", __func__); 572 QDF_BUG(0); 573 return; 574 } 575 576 if (!soc->ops->cmn_drv_ops || 
577 !soc->ops->cmn_drv_ops->txrx_set_privacy_filters) 578 return; 579 580 soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev, 581 filter, num); 582 } 583 584 static inline int 585 cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 586 struct cdp_monitor_filter *filter_val) 587 { 588 if (soc->ops->mon_ops->txrx_set_advance_monitor_filter) 589 return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev, 590 filter_val); 591 return 0; 592 } 593 594 595 /****************************************************************************** 596 * Data Interface (B Interface) 597 *****************************************************************************/ 598 static inline void 599 cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 600 void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev, 601 struct ol_txrx_ops *txrx_ops) 602 { 603 if (!soc || !soc->ops) { 604 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 605 "%s: Invalid Instance:", __func__); 606 QDF_BUG(0); 607 return; 608 } 609 610 if (!soc->ops->cmn_drv_ops || 611 !soc->ops->cmn_drv_ops->txrx_vdev_register) 612 return; 613 614 soc->ops->cmn_drv_ops->txrx_vdev_register(vdev, 615 osif_vdev, ctrl_vdev, txrx_ops); 616 } 617 618 static inline int 619 cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 620 qdf_nbuf_t tx_mgmt_frm, uint8_t type) 621 { 622 if (!soc || !soc->ops) { 623 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 624 "%s: Invalid Instance:", __func__); 625 QDF_BUG(0); 626 return 0; 627 } 628 629 if (!soc->ops->cmn_drv_ops || 630 !soc->ops->cmn_drv_ops->txrx_mgmt_send) 631 return 0; 632 633 return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev, 634 tx_mgmt_frm, type); 635 } 636 637 static inline int 638 cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 639 qdf_nbuf_t tx_mgmt_frm, uint8_t type, 640 uint8_t use_6mbps, uint16_t chanfreq) 641 { 642 if (!soc || !soc->ops) { 643 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 644 "%s: Invalid 
Instance:", __func__); 645 QDF_BUG(0); 646 return 0; 647 } 648 649 if (!soc->ops->cmn_drv_ops || 650 !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext) 651 return 0; 652 653 return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext 654 (vdev, tx_mgmt_frm, type, use_6mbps, chanfreq); 655 } 656 657 658 static inline void 659 cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 660 uint8_t type, ol_txrx_mgmt_tx_cb download_cb, 661 ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt) 662 { 663 if (!soc || !soc->ops) { 664 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 665 "%s: Invalid Instance:", __func__); 666 QDF_BUG(0); 667 return; 668 } 669 670 if (!soc->ops->cmn_drv_ops || 671 !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set) 672 return; 673 674 soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set 675 (pdev, type, download_cb, ota_ack_cb, ctxt); 676 } 677 678 static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc, 679 struct cdp_pdev *pdev) 680 { 681 if (!soc || !soc->ops) { 682 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 683 "%s: Invalid Instance:", __func__); 684 QDF_BUG(0); 685 return 0; 686 } 687 688 if (!soc->ops->cmn_drv_ops || 689 !soc->ops->cmn_drv_ops->txrx_get_tx_pending) 690 return 0; 691 692 693 return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev); 694 } 695 696 static inline void 697 cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev, 698 ol_txrx_data_tx_cb callback, void *ctxt) 699 { 700 if (!soc || !soc->ops) { 701 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 702 "%s: Invalid Instance:", __func__); 703 QDF_BUG(0); 704 return; 705 } 706 707 if (!soc->ops->cmn_drv_ops || 708 !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set) 709 return; 710 711 soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev, 712 callback, ctxt); 713 } 714 715 /****************************************************************************** 716 * Statistics and Debugging Interface (C Interface) 717 
*****************************************************************************/ 718 /** 719 * External Device physical address types 720 * 721 * Currently, both MAC and IPA uController use the same size addresses 722 * and descriptors are exchanged between these two depending on the mode. 723 * 724 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA 725 * operations. However, external device physical address sizes 726 * may be different from host-specific physical address sizes. 727 * This calls for the following definitions for target devices 728 * (MAC, IPA uc). 729 */ 730 #if HTT_PADDR64 731 typedef uint64_t target_paddr_t; 732 #else 733 typedef uint32_t target_paddr_t; 734 #endif /*HTT_PADDR64 */ 735 736 static inline int 737 cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 738 int max_subfrms_ampdu, 739 int max_subfrms_amsdu) 740 { 741 if (!soc || !soc->ops) { 742 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 743 "%s: Invalid Instance:", __func__); 744 QDF_BUG(0); 745 return 0; 746 } 747 748 if (!soc->ops->cmn_drv_ops || 749 !soc->ops->cmn_drv_ops->txrx_aggr_cfg) 750 return 0; 751 752 return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev, 753 max_subfrms_ampdu, max_subfrms_amsdu); 754 } 755 756 static inline int 757 cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 758 struct ol_txrx_stats_req *req, bool per_vdev, 759 bool response_expected) 760 { 761 if (!soc || !soc->ops) { 762 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 763 "%s: Invalid Instance:", __func__); 764 QDF_BUG(0); 765 return 0; 766 } 767 768 if (!soc->ops->cmn_drv_ops || 769 !soc->ops->cmn_drv_ops->txrx_fw_stats_get) 770 return 0; 771 772 return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req, 773 per_vdev, response_expected); 774 } 775 776 static inline int 777 cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs) 778 { 779 if (!soc || !soc->ops) { 780 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 781 "%s: 
Invalid Instance:", __func__); 782 QDF_BUG(0); 783 return 0; 784 } 785 786 if (!soc->ops->cmn_drv_ops || 787 !soc->ops->cmn_drv_ops->txrx_debug) 788 return 0; 789 790 return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs); 791 } 792 793 static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc, 794 struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val) 795 { 796 if (!soc || !soc->ops) { 797 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 798 "%s: Invalid Instance:", __func__); 799 QDF_BUG(0); 800 return; 801 } 802 803 if (!soc->ops->cmn_drv_ops || 804 !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg) 805 return; 806 807 soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev, 808 cfg_stats_type, cfg_val); 809 } 810 811 static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level) 812 { 813 if (!soc || !soc->ops) { 814 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 815 "%s: Invalid Instance:", __func__); 816 QDF_BUG(0); 817 return; 818 } 819 820 if (!soc->ops->cmn_drv_ops || 821 !soc->ops->cmn_drv_ops->txrx_print_level_set) 822 return; 823 824 soc->ops->cmn_drv_ops->txrx_print_level_set(level); 825 } 826 827 static inline uint8_t * 828 cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 829 { 830 if (!soc || !soc->ops) { 831 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 832 "%s: Invalid Instance:", __func__); 833 QDF_BUG(0); 834 return NULL; 835 } 836 837 if (!soc->ops->cmn_drv_ops || 838 !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr) 839 return NULL; 840 841 return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev); 842 843 } 844 845 /** 846 * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of 847 * vdev 848 * @vdev: vdev handle 849 * 850 * Return: Handle to struct qdf_mac_addr 851 */ 852 static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr 853 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 854 { 855 if (!soc || !soc->ops) { 856 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 
857 "%s: Invalid Instance:", __func__); 858 QDF_BUG(0); 859 return NULL; 860 } 861 862 if (!soc->ops->cmn_drv_ops || 863 !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr) 864 return NULL; 865 866 return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr 867 (vdev); 868 869 } 870 871 /** 872 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev 873 * @vdev: vdev handle 874 * 875 * Return: Handle to pdev 876 */ 877 static inline struct cdp_pdev *cdp_get_pdev_from_vdev 878 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 879 { 880 if (!soc || !soc->ops) { 881 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 882 "%s: Invalid Instance:", __func__); 883 QDF_BUG(0); 884 return NULL; 885 } 886 887 if (!soc->ops->cmn_drv_ops || 888 !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev) 889 return NULL; 890 891 return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev); 892 } 893 894 /** 895 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev 896 * @vdev: vdev handle 897 * 898 * Return: Handle to control pdev 899 */ 900 static inline struct cdp_cfg * 901 cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) 902 { 903 if (!soc || !soc->ops) { 904 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 905 "%s: Invalid Instance:", __func__); 906 QDF_BUG(0); 907 return NULL; 908 } 909 910 if (!soc->ops->cmn_drv_ops || 911 !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev) 912 return NULL; 913 914 return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev 915 (vdev); 916 } 917 918 static inline struct cdp_vdev * 919 cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, 920 uint8_t vdev_id) 921 { 922 if (!soc || !soc->ops) { 923 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 924 "%s: Invalid Instance:", __func__); 925 QDF_BUG(0); 926 return NULL; 927 } 928 929 if (!soc->ops->cmn_drv_ops || 930 !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id) 931 return NULL; 932 933 return 
soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id 934 (pdev, vdev_id); 935 } 936 937 static inline void 938 cdp_soc_detach(ol_txrx_soc_handle soc) 939 { 940 if (!soc || !soc->ops) { 941 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 942 "%s: Invalid Instance:", __func__); 943 QDF_BUG(0); 944 return; 945 } 946 947 if (!soc->ops->cmn_drv_ops || 948 !soc->ops->cmn_drv_ops->txrx_soc_detach) 949 return; 950 951 soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc); 952 } 953 954 static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc, 955 void *peer_handle, uint8_t dialogtoken, uint16_t tid, 956 uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum) 957 { 958 if (!soc || !soc->ops) { 959 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 960 "%s: Invalid Instance:", __func__); 961 QDF_BUG(0); 962 return 0; 963 } 964 965 if (!soc->ops->cmn_drv_ops || 966 !soc->ops->cmn_drv_ops->addba_requestprocess) 967 return 0; 968 969 return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle, 970 dialogtoken, tid, batimeout, buffersize, startseqnum); 971 } 972 973 static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc, 974 void *peer_handle, uint8_t tid, uint8_t *dialogtoken, 975 uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout) 976 { 977 if (!soc || !soc->ops) { 978 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 979 "%s: Invalid Instance:", __func__); 980 QDF_BUG(0); 981 return; 982 } 983 984 if (!soc->ops->cmn_drv_ops || 985 !soc->ops->cmn_drv_ops->addba_responsesetup) 986 return; 987 988 soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid, 989 dialogtoken, statuscode, buffersize, batimeout); 990 } 991 992 static inline int cdp_delba_process(ol_txrx_soc_handle soc, 993 void *peer_handle, int tid, uint16_t reasoncode) 994 { 995 if (!soc || !soc->ops) { 996 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 997 "%s: Invalid Instance:", __func__); 998 QDF_BUG(0); 999 return 0; 1000 } 1001 1002 if (!soc->ops->cmn_drv_ops 
||
	    !soc->ops->cmn_drv_ops->delba_process)
		return 0;

	return soc->ops->cmn_drv_ops->delba_process(peer_handle,
			tid, reasoncode);
}

/* Record the ADDBA response @statuscode to be sent for @tid of @peer_handle. */
static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t statuscode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_addba_response)
		return;

	soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode);
}

/**
 * cdp_get_peer_mac_addr_frm_id: function to return vdev id and peer
 * mac address
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which mac_address is required
 * @mac_addr: reference to mac address
 *
 * Return: vdev_id of the vap, or CDP_INVALID_VDEV_ID when the
 *         instance/op is missing
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
		uint8_t *mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return CDP_INVALID_VDEV_ID;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
		return CDP_INVALID_VDEV_ID;

	return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
			peer_id, mac_addr);
}

/**
 * cdp_set_vdev_dscp_tid_map(): function to set DSCP-tid map in the vap
 * @soc: SOC handle
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_vdev *vdev, uint8_t map_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if
(!soc->ops->cmn_drv_ops || 1073 !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map) 1074 return; 1075 1076 soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev, 1077 map_id); 1078 } 1079 1080 /** 1081 * cdp_set_pdev_dscp_tid_map(): function to change tid values in DSCP-tid map 1082 * @pdev: pdev handle 1083 * @map_id: id of the tid map 1084 * @tos: index value in map that needs to be changed 1085 * @tid: tid value passed by user 1086 * 1087 * Return: void 1088 */ 1089 static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc, 1090 struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid) 1091 { 1092 if (!soc || !soc->ops) { 1093 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1094 "%s: Invalid Instance:", __func__); 1095 QDF_BUG(0); 1096 return; 1097 } 1098 1099 if (!soc->ops->cmn_drv_ops || 1100 !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map) 1101 return; 1102 1103 soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev, 1104 map_id, tos, tid); 1105 } 1106 1107 /** 1108 * cdp_flush_cache_rx_queue() - flush cache rx queue frame 1109 * 1110 * Return: None 1111 */ 1112 static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc) 1113 { 1114 if (!soc || !soc->ops) { 1115 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1116 "%s: Invalid Instance:", __func__); 1117 QDF_BUG(0); 1118 return; 1119 } 1120 1121 if (!soc->ops->cmn_drv_ops || 1122 !soc->ops->cmn_drv_ops->flush_cache_rx_queue) 1123 return; 1124 soc->ops->cmn_drv_ops->flush_cache_rx_queue(); 1125 } 1126 1127 /** 1128 * cdp_txrx_stats_request(): function to map to host and firmware statistics 1129 * @soc: soc handle 1130 * @vdev: virtual device 1131 * @req: stats request container 1132 * 1133 * return: status 1134 */ 1135 static inline 1136 int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, 1137 struct cdp_txrx_stats_req *req) 1138 { 1139 if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) { 1140 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1141 "%s: Invalid 
Instance:", __func__); 1142 QDF_ASSERT(0); 1143 return 0; 1144 } 1145 1146 if (soc->ops->cmn_drv_ops->txrx_stats_request) 1147 return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req); 1148 1149 return 0; 1150 } 1151 1152 /** 1153 * cdp_txrx_intr_attach(): function to attach and configure interrupt 1154 * @soc: soc handle 1155 */ 1156 static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc) 1157 { 1158 if (!soc || !soc->ops) { 1159 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1160 "%s: Invalid Instance:", __func__); 1161 QDF_BUG(0); 1162 return 0; 1163 } 1164 1165 if (!soc->ops->cmn_drv_ops || 1166 !soc->ops->cmn_drv_ops->txrx_intr_attach) 1167 return 0; 1168 1169 return soc->ops->cmn_drv_ops->txrx_intr_attach(soc); 1170 } 1171 1172 /** 1173 * cdp_txrx_intr_detach(): function to detach interrupt 1174 * @soc: soc handle 1175 */ 1176 static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc) 1177 { 1178 if (!soc || !soc->ops) { 1179 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1180 "%s: Invalid Instance:", __func__); 1181 QDF_BUG(0); 1182 return; 1183 } 1184 1185 if (!soc->ops->cmn_drv_ops || 1186 !soc->ops->cmn_drv_ops->txrx_intr_detach) 1187 return; 1188 1189 soc->ops->cmn_drv_ops->txrx_intr_detach(soc); 1190 } 1191 1192 /** 1193 * cdp_display_stats(): function to map to dump stats 1194 * @soc: soc handle 1195 * @value: statistics option 1196 */ 1197 static inline QDF_STATUS 1198 cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value, 1199 enum qdf_stats_verbosity_level level) 1200 { 1201 if (!soc || !soc->ops) { 1202 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1203 "%s: Invalid Instance:", __func__); 1204 QDF_BUG(0); 1205 return 0; 1206 } 1207 1208 if (!soc->ops->cmn_drv_ops || 1209 !soc->ops->cmn_drv_ops->display_stats) 1210 return 0; 1211 1212 return soc->ops->cmn_drv_ops->display_stats(soc, value, level); 1213 } 1214 1215 1216 /** 1217 * cdp_set_pn_check(): function to set pn check 1218 * @soc: soc handle 1219 * 
@sec_type: security type 1220 * #rx_pn: receive pn 1221 */ 1222 static inline int cdp_set_pn_check(ol_txrx_soc_handle soc, 1223 struct cdp_vdev *vdev, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn) 1224 { 1225 if (!soc || !soc->ops) { 1226 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1227 "%s: Invalid Instance:", __func__); 1228 QDF_BUG(0); 1229 return 0; 1230 } 1231 1232 if (!soc->ops->cmn_drv_ops || 1233 !soc->ops->cmn_drv_ops->set_pn_check) 1234 return 0; 1235 1236 soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle, 1237 sec_type, rx_pn); 1238 return 0; 1239 } 1240 1241 static inline int cdp_set_key(ol_txrx_soc_handle soc, 1242 struct cdp_peer *peer_handle, 1243 bool is_unicast, uint32_t *key) 1244 { 1245 if (!soc || !soc->ops) { 1246 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1247 "%s: Invalid Instance:", __func__); 1248 QDF_BUG(0); 1249 return 0; 1250 } 1251 1252 if (!soc->ops->ctrl_ops || 1253 !soc->ops->ctrl_ops->set_key) 1254 return 0; 1255 1256 soc->ops->ctrl_ops->set_key(peer_handle, 1257 is_unicast, key); 1258 return 0; 1259 } 1260 1261 /** 1262 * cdp_update_config_parameters(): function to propagate configuration 1263 * parameters to datapath 1264 * @soc: opaque soc handle 1265 * @cfg: configuration handle 1266 * 1267 * Return: status: 0 - Success, non-zero: Failure 1268 */ 1269 static inline 1270 QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc, 1271 struct cdp_config_params *cfg) 1272 { 1273 struct cdp_soc *psoc = (struct cdp_soc *)soc; 1274 1275 if (!soc || !soc->ops) { 1276 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1277 "%s: Invalid Instance:", __func__); 1278 QDF_BUG(0); 1279 return 0; 1280 } 1281 1282 if (!soc->ops->cmn_drv_ops || 1283 !soc->ops->cmn_drv_ops->update_config_parameters) 1284 return QDF_STATUS_SUCCESS; 1285 1286 return soc->ops->cmn_drv_ops->update_config_parameters(psoc, 1287 cfg); 1288 } 1289 1290 /** 1291 * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle 
from pdev 1292 * @soc: opaque soc handle 1293 * @pdev: data path pdev handle 1294 * 1295 * Return: opaque dp handle 1296 */ 1297 static inline void * 1298 cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev) 1299 { 1300 if (!soc || !soc->ops) { 1301 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1302 "%s: Invalid Instance:", __func__); 1303 QDF_BUG(0); 1304 return 0; 1305 } 1306 1307 if (soc->ops->cmn_drv_ops->get_dp_txrx_handle) 1308 return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev); 1309 1310 return 0; 1311 } 1312 1313 /** 1314 * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev 1315 * @soc: opaque soc handle 1316 * @pdev: data path pdev handle 1317 * @dp_hdl: opaque pointer for dp_txrx_handle 1318 * 1319 * Return: void 1320 */ 1321 static inline void 1322 cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl) 1323 { 1324 if (!soc || !soc->ops) { 1325 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1326 "%s: Invalid Instance:", __func__); 1327 QDF_BUG(0); 1328 return; 1329 } 1330 1331 if (!soc->ops->cmn_drv_ops || 1332 !soc->ops->cmn_drv_ops->set_dp_txrx_handle) 1333 return; 1334 1335 soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl); 1336 } 1337 1338 /* 1339 * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc 1340 * @soc: opaque soc handle 1341 * 1342 * Return: opaque extended dp handle 1343 */ 1344 static inline void * 1345 cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc) 1346 { 1347 if (!soc || !soc->ops) { 1348 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1349 "%s: Invalid Instance:", __func__); 1350 QDF_BUG(0); 1351 return NULL; 1352 } 1353 1354 if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle) 1355 return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle( 1356 (struct cdp_soc *) soc); 1357 1358 return NULL; 1359 } 1360 1361 /** 1362 * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc 1363 * @soc: opaque soc handle 1364 * @dp_hdl: opaque pointer for 
dp_txrx_handle 1365 * 1366 * Return: void 1367 */ 1368 static inline void 1369 cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle) 1370 { 1371 if (!soc || !soc->ops) { 1372 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1373 "%s: Invalid Instance:", __func__); 1374 QDF_BUG(0); 1375 return; 1376 } 1377 1378 if (!soc->ops->cmn_drv_ops || 1379 !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle) 1380 return; 1381 1382 soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc, 1383 dp_handle); 1384 } 1385 1386 /** 1387 * cdp_tx_send() - enqueue frame for transmission 1388 * @soc: soc opaque handle 1389 * @vdev: VAP device 1390 * @nbuf: nbuf to be enqueued 1391 * 1392 * This API is used by Extended Datapath modules to enqueue frame for 1393 * transmission 1394 * 1395 * Return: void 1396 */ 1397 static inline void 1398 cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf) 1399 { 1400 if (!soc || !soc->ops) { 1401 QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, 1402 "%s: Invalid Instance:", __func__); 1403 QDF_BUG(0); 1404 return; 1405 } 1406 1407 if (!soc->ops->cmn_drv_ops || 1408 !soc->ops->cmn_drv_ops->tx_send) 1409 return; 1410 1411 soc->ops->cmn_drv_ops->tx_send(vdev, nbuf); 1412 } 1413 1414 /* 1415 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev 1416 * @soc: opaque soc handle 1417 * @pdev: data path pdev handle 1418 * 1419 * Return: pdev_id 1420 */ 1421 static inline 1422 uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc, 1423 struct cdp_pdev *pdev) 1424 { 1425 if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev) 1426 return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev); 1427 return 0; 1428 } 1429 1430 /** 1431 * cdp_set_nac() - set nac 1432 * @soc: opaque soc handle 1433 * @peer: data path peer handle 1434 * 1435 */ 1436 static inline 1437 void cdp_set_nac(ol_txrx_soc_handle soc, 1438 struct cdp_peer *peer) 1439 { 1440 if (soc->ops->cmn_drv_ops->txrx_set_nac) 1441 
soc->ops->cmn_drv_ops->txrx_set_nac(peer); 1442 } 1443 1444 /** 1445 * cdp_set_pdev_tx_capture() - set pdev tx_capture 1446 * @soc: opaque soc handle 1447 * @pdev: data path pdev handle 1448 * @val: value of pdev_tx_capture 1449 * 1450 * Return: void 1451 */ 1452 static inline 1453 void cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc, 1454 struct cdp_pdev *pdev, int val) 1455 { 1456 if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture) 1457 return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev, 1458 val); 1459 1460 } 1461 1462 /** 1463 * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id 1464 * @soc: opaque soc handle 1465 * @pdev: data path pdev handle 1466 * @peer_id: data path peer id 1467 * @peer_mac: peer_mac 1468 * 1469 * Return: void 1470 */ 1471 static inline 1472 void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc, 1473 struct cdp_pdev *pdev_handle, 1474 uint32_t peer_id, uint8_t *peer_mac) 1475 { 1476 if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id) 1477 soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id( 1478 pdev_handle, peer_id, peer_mac); 1479 } 1480 1481 /** 1482 * cdp_vdev_tx_lock() - acquire lock 1483 * @soc: opaque soc handle 1484 * @vdev: data path vdev handle 1485 * 1486 * Return: void 1487 */ 1488 static inline 1489 void cdp_vdev_tx_lock(ol_txrx_soc_handle soc, 1490 struct cdp_vdev *vdev) 1491 { 1492 if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock) 1493 soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev); 1494 } 1495 1496 /** 1497 * cdp_vdev_tx_unlock() - release lock 1498 * @soc: opaque soc handle 1499 * @vdev: data path vdev handle 1500 * 1501 * Return: void 1502 */ 1503 static inline 1504 void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc, 1505 struct cdp_vdev *vdev) 1506 { 1507 if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock) 1508 soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev); 1509 } 1510 1511 /** 1512 * cdp_ath_getstats() - get updated athstats 1513 * @soc: opaque soc handle 1514 * @dev: dp interface handle 1515 
* @stats: cdp network device stats structure 1516 * @type: device type pdev/vdev 1517 * 1518 * Return: void 1519 */ 1520 static inline void cdp_ath_getstats(ol_txrx_soc_handle soc, 1521 void *dev, struct cdp_dev_stats *stats, 1522 uint8_t type) 1523 { 1524 if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats) 1525 soc->ops->cmn_drv_ops->txrx_ath_getstats(dev, stats, type); 1526 } 1527 1528 /** 1529 * cdp_set_gid_flag() - set groupid flag 1530 * @soc: opaque soc handle 1531 * @pdev: data path pdev handle 1532 * @mem_status: member status from grp management frame 1533 * @user_position: user position from grp management frame 1534 * 1535 * Return: void 1536 */ 1537 static inline 1538 void cdp_set_gid_flag(ol_txrx_soc_handle soc, 1539 struct cdp_pdev *pdev, u_int8_t *mem_status, 1540 u_int8_t *user_position) 1541 { 1542 if (soc->ops->cmn_drv_ops->txrx_set_gid_flag) 1543 soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status, user_position); 1544 } 1545 1546 /** 1547 * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version 1548 * @soc: opaque soc handle 1549 * @pdev: data path pdev handle 1550 * 1551 */ 1552 static inline 1553 uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc, 1554 struct cdp_pdev *pdev) 1555 { 1556 if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version) 1557 return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(pdev); 1558 return 0; 1559 } 1560 1561 /** 1562 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev 1563 * @soc: opaque soc handle 1564 * @ni: associated node 1565 * @force: number of frame in SW queue 1566 * Return: void 1567 */ 1568 static inline 1569 void cdp_if_mgmt_drain(ol_txrx_soc_handle soc, 1570 void *ni, int force) 1571 { 1572 if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain) 1573 soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force); 1574 } 1575 1576 static inline void 1577 cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers) 1578 { 1579 if (soc && 
soc->ops && soc->ops->cmn_drv_ops && 1580 soc->ops->cmn_drv_ops->txrx_peer_map_attach) 1581 soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc, max_peers); 1582 } 1583 1584 /** 1585 1586 * cdp_pdev_set_ctrl_pdev() - set UMAC ctrl pdev to dp pdev 1587 * @soc: opaque soc handle 1588 * @pdev: opaque dp pdev handle 1589 * @ctrl_pdev: opaque ctrl pdev handle 1590 * 1591 * Return: void 1592 */ 1593 static inline void 1594 cdp_pdev_set_ctrl_pdev(ol_txrx_soc_handle soc, struct cdp_pdev *dp_pdev, 1595 struct cdp_ctrl_objmgr_pdev *ctrl_pdev) 1596 { 1597 if (soc && soc->ops && soc->ops->cmn_drv_ops && 1598 soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev) 1599 soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev(dp_pdev, 1600 ctrl_pdev); 1601 } 1602 1603 #ifdef RECEIVE_OFFLOAD 1604 /** 1605 * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer 1606 * @soc - data path soc handle 1607 * @pdev - device instance pointer 1608 * 1609 * register rx offload flush callback function pointer 1610 * 1611 * return none 1612 */ 1613 static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc, 1614 void (rx_ol_flush_cb)(void *)) 1615 { 1616 if (!soc || !soc->ops || !soc->ops->rx_offld_ops) { 1617 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, 1618 "%s invalid instance", __func__); 1619 return; 1620 } 1621 1622 if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb) 1623 return soc->ops->rx_offld_ops->register_rx_offld_flush_cb( 1624 rx_ol_flush_cb); 1625 } 1626 1627 /** 1628 * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function 1629 * @soc - data path soc handle 1630 * 1631 * deregister rx offload flush callback function pointer 1632 * 1633 * return none 1634 */ 1635 static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc) 1636 { 1637 if (!soc || !soc->ops || !soc->ops->rx_offld_ops) { 1638 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, 1639 "%s invalid instance", __func__); 1640 return; 1641 } 1642 1643 if 
(soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb) 1644 return soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb(); 1645 } 1646 #endif /* RECEIVE_OFFLOAD */ 1647 #endif /* _CDP_TXRX_CMN_H_ */ 1648