1 /* 2 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 3 * 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 /** 20 * @file cdp_txrx_ops.h 21 * @brief Define the host data path converged API functions 22 * called by the host control SW and the OS interface module 23 */ 24 #ifndef _CDP_TXRX_CMN_OPS_H_ 25 #define _CDP_TXRX_CMN_OPS_H_ 26 27 #include <cdp_txrx_cmn_struct.h> 28 #include <cdp_txrx_stats_struct.h> 29 #include "cdp_txrx_handle.h" 30 #include <cdp_txrx_mon_struct.h> 31 #include "wlan_objmgr_psoc_obj.h" 32 33 #ifdef IPA_OFFLOAD 34 #ifdef CONFIG_IPA_WDI_UNIFIED_API 35 #include <qdf_ipa_wdi3.h> 36 #else 37 #include <qdf_ipa.h> 38 #endif 39 #endif 40 41 /** 42 * bitmap values to indicate special handling of peer_delete 43 */ 44 #define CDP_PEER_DELETE_NO_SPECIAL 0 45 #define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1 46 47 /* same as ieee80211_nac_param */ 48 enum cdp_nac_param_cmd { 49 /* IEEE80211_NAC_PARAM_ADD */ 50 CDP_NAC_PARAM_ADD = 1, 51 /* IEEE80211_NAC_PARAM_DEL */ 52 CDP_NAC_PARAM_DEL, 53 /* IEEE80211_NAC_PARAM_LIST */ 54 CDP_NAC_PARAM_LIST, 55 }; 56 /****************************************************************************** 57 * 58 * Control Interface (A Interface) 59 * 60 
*****************************************************************************/ 61 62 struct cdp_cmn_ops { 63 64 int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc); 65 66 int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev); 67 68 struct cdp_vdev *(*txrx_vdev_attach) 69 (struct cdp_pdev *pdev, uint8_t *vdev_mac_addr, 70 uint8_t vdev_id, enum wlan_op_mode op_mode); 71 72 void (*txrx_vdev_detach) 73 (struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback, 74 void *cb_context); 75 76 struct cdp_pdev *(*txrx_pdev_attach) 77 (ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 78 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id); 79 80 int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev); 81 82 void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force); 83 84 void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force); 85 86 void *(*txrx_peer_create) 87 (struct cdp_vdev *vdev, uint8_t *peer_mac_addr, 88 struct cdp_ctrl_objmgr_peer *ctrl_peer); 89 90 void (*txrx_peer_setup) 91 (struct cdp_vdev *vdev_hdl, void *peer_hdl); 92 93 void (*txrx_peer_teardown) 94 (struct cdp_vdev *vdev_hdl, void *peer_hdl); 95 96 int (*txrx_peer_add_ast) 97 (ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl, 98 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, 99 uint32_t flags); 100 101 void (*txrx_peer_del_ast) 102 (ol_txrx_soc_handle soc, void *ast_hdl); 103 104 int (*txrx_peer_update_ast) 105 (ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl, 106 uint8_t *mac_addr, uint32_t flags); 107 108 109 void *(*txrx_peer_ast_hash_find) 110 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr); 111 112 uint8_t (*txrx_peer_ast_get_pdev_id) 113 (ol_txrx_soc_handle soc, void *ast_hdl); 114 115 uint8_t (*txrx_peer_ast_get_next_hop) 116 (ol_txrx_soc_handle soc, void *ast_hdl); 117 118 void (*txrx_peer_ast_set_type) 119 (ol_txrx_soc_handle soc, void *ast_hdl, 120 enum cdp_txrx_ast_entry_type type); 121 122 enum cdp_txrx_ast_entry_type (*txrx_peer_ast_get_type) 123 (ol_txrx_soc_handle 
soc, void *ast_hdl); 124 125 void (*txrx_peer_delete)(void *peer, uint32_t bitmap); 126 127 int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev, 128 uint8_t smart_monitor); 129 130 uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev); 131 132 void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev, 133 int16_t chan_noise_floor); 134 135 void (*txrx_set_nac)(struct cdp_peer *peer); 136 137 void (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val); 138 139 void (*txrx_get_peer_mac_from_peer_id) 140 (struct cdp_pdev *pdev_handle, 141 uint32_t peer_id, uint8_t *peer_mac); 142 143 void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev); 144 145 void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev); 146 147 void (*txrx_ath_getstats)(void *pdev, 148 struct cdp_dev_stats *stats, uint8_t type); 149 150 void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status, 151 u_int8_t *user_position); 152 153 uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev); 154 155 void (*txrx_if_mgmt_drain)(void *ni, int force); 156 157 void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz); 158 159 void (*txrx_set_privacy_filters) 160 (struct cdp_vdev *vdev, void *filter, uint32_t num); 161 162 /******************************************************************** 163 * Data Interface (B Interface) 164 ********************************************************************/ 165 166 void (*txrx_vdev_register)(struct cdp_vdev *vdev, 167 void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev, 168 struct ol_txrx_ops *txrx_ops); 169 170 int (*txrx_mgmt_send)(struct cdp_vdev *vdev, 171 qdf_nbuf_t tx_mgmt_frm, uint8_t type); 172 173 int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev, 174 qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps, 175 uint16_t chanfreq); 176 177 /** 178 * ol_txrx_mgmt_tx_cb - tx management delivery notification 179 * callback function 180 */ 181 182 void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type, 183 
ol_txrx_mgmt_tx_cb download_cb, 184 ol_txrx_mgmt_tx_cb ota_ack_cb, 185 void *ctxt); 186 187 int (*txrx_get_tx_pending)(struct cdp_pdev *pdev); 188 189 /** 190 * ol_txrx_data_tx_cb - Function registered with the data path 191 * that is called when tx frames marked as "no free" are 192 * done being transmitted 193 */ 194 195 void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev, 196 ol_txrx_data_tx_cb callback, void *ctxt); 197 198 /******************************************************************* 199 * Statistics and Debugging Interface (C Interface) 200 ********************************************************************/ 201 202 int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu, 203 int max_subfrms_amsdu); 204 205 A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev, 206 struct ol_txrx_stats_req *req, 207 bool per_vdev, bool response_expected); 208 209 int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs); 210 211 void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev, 212 uint8_t cfg_stats_type, uint32_t cfg_val); 213 214 void (*txrx_print_level_set)(unsigned level); 215 216 /** 217 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev 218 * @vdev: vdev handle 219 * 220 * Return: vdev mac address 221 */ 222 uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev); 223 224 /** 225 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of 226 * vdev 227 * @vdev: vdev handle 228 * 229 * Return: Handle to struct qdf_mac_addr 230 */ 231 struct qdf_mac_addr * 232 (*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev); 233 234 /** 235 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev 236 * @vdev: vdev handle 237 * 238 * Return: Handle to pdev 239 */ 240 struct cdp_pdev *(*txrx_get_pdev_from_vdev) 241 (struct cdp_vdev *vdev); 242 243 /** 244 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev 245 * @vdev: vdev handle 246 * 247 * Return: Handle to control pdev 248 */ 249 struct cdp_cfg * 250 
(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev); 251 252 struct cdp_vdev * 253 (*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev, 254 uint8_t vdev_id); 255 256 void (*txrx_soc_detach)(void *soc); 257 258 int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid, 259 int status); 260 261 int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken, 262 uint16_t tid, uint16_t batimeout, 263 uint16_t buffersize, 264 uint16_t startseqnum); 265 266 void (*addba_responsesetup)(void *peer_handle, uint8_t tid, 267 uint8_t *dialogtoken, uint16_t *statuscode, 268 uint16_t *buffersize, uint16_t *batimeout); 269 270 int (*delba_process)(void *peer_handle, 271 int tid, uint16_t reasoncode); 272 273 /** 274 * delba_tx_completion() - Indicate delba tx status 275 * @peer_handle: Peer handle 276 * @tid: Tid number 277 * @status: Tx completion status 278 * 279 * Return: 0 on Success, 1 on failure 280 */ 281 int (*delba_tx_completion)(void *peer_handle, 282 uint8_t tid, int status); 283 284 void (*set_addba_response)(void *peer_handle, 285 uint8_t tid, uint16_t statuscode); 286 287 uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle, 288 uint16_t peer_id, uint8_t *mac_addr); 289 290 void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle, 291 uint8_t map_id); 292 293 void (*flush_cache_rx_queue)(void); 294 void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id, 295 uint8_t tos, uint8_t tid); 296 297 QDF_STATUS (*txrx_stats_request)(struct cdp_vdev *vdev, 298 struct cdp_txrx_stats_req *req); 299 300 QDF_STATUS (*display_stats)(void *psoc, uint16_t value, 301 enum qdf_stats_verbosity_level level); 302 void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config); 303 304 int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc); 305 QDF_STATUS (*txrx_intr_attach)(void *soc); 306 void (*txrx_intr_detach)(void *soc); 307 void (*set_pn_check)(struct cdp_vdev *vdev, 308 struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, 309 uint32_t 
*rx_pn); 310 QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc, 311 struct cdp_config_params *params); 312 313 void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl); 314 void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl, 315 void *dp_txrx_hdl); 316 317 void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle); 318 void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle, 319 void *dp_txrx_handle); 320 321 void (*txrx_peer_reset_ast) 322 (ol_txrx_soc_handle soc, uint8_t *ast_macaddr, void *vdev_hdl); 323 324 void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc, 325 void *vdev_hdl); 326 327 void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc); 328 void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle, 329 uint8_t ac, uint32_t value); 330 void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle, 331 uint8_t ac, uint32_t *value); 332 333 QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc, 334 uint32_t num_peers); 335 336 void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl, 337 struct cdp_ctrl_objmgr_pdev *ctrl_pdev); 338 339 ol_txrx_tx_fp tx_send; 340 /** 341 * txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev 342 * to deliver pkt to stack. 343 * @vdev: vdev handle 344 * @stack_fn: pointer to - function pointer to deliver RX pkt to stack 345 * @osif_vdev: pointer to - osif vdev to deliver RX packet to. 
346 */ 347 void (*txrx_get_os_rx_handles_from_vdev) 348 (struct cdp_vdev *vdev, 349 ol_txrx_rx_fp *stack_fn, 350 ol_osif_vdev_handle *osif_vdev); 351 int (*txrx_classify_update) 352 (struct cdp_vdev *vdev, qdf_nbuf_t skb, 353 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class); 354 }; 355 356 struct cdp_ctrl_ops { 357 358 int 359 (*txrx_mempools_attach)(void *ctrl_pdev); 360 int 361 (*txrx_set_filter_neighbour_peers)( 362 struct cdp_pdev *pdev, 363 uint32_t val); 364 int 365 (*txrx_update_filter_neighbour_peers)( 366 struct cdp_vdev *vdev, 367 uint32_t cmd, uint8_t *macaddr); 368 /** 369 * @brief set the safemode of the device 370 * @details 371 * This flag is used to bypass the encrypt and decrypt processes when 372 * send and receive packets. It works like open AUTH mode, HW will 373 * ctreate all packets as non-encrypt frames because no key installed. 374 * For rx fragmented frames,it bypasses all the rx defragmentaion. 375 * 376 * @param vdev - the data virtual device object 377 * @param val - the safemode state 378 * @return - void 379 */ 380 381 void 382 (*txrx_set_safemode)( 383 struct cdp_vdev *vdev, 384 u_int32_t val); 385 /** 386 * @brief configure the drop unencrypted frame flag 387 * @details 388 * Rx related. 
When set this flag, all the unencrypted frames 389 * received over a secure connection will be discarded 390 * 391 * @param vdev - the data virtual device object 392 * @param val - flag 393 * @return - void 394 */ 395 void 396 (*txrx_set_drop_unenc)( 397 struct cdp_vdev *vdev, 398 u_int32_t val); 399 400 401 /** 402 * @brief set the Tx encapsulation type of the VDEV 403 * @details 404 * This will be used to populate the HTT desc packet type field 405 * during Tx 406 * @param vdev - the data virtual device object 407 * @param val - the Tx encap type 408 * @return - void 409 */ 410 void 411 (*txrx_set_tx_encap_type)( 412 struct cdp_vdev *vdev, 413 enum htt_cmn_pkt_type val); 414 /** 415 * @brief set the Rx decapsulation type of the VDEV 416 * @details 417 * This will be used to configure into firmware and hardware 418 * which format to decap all Rx packets into, for all peers under 419 * the VDEV. 420 * @param vdev - the data virtual device object 421 * @param val - the Rx decap mode 422 * @return - void 423 */ 424 void 425 (*txrx_set_vdev_rx_decap_type)( 426 struct cdp_vdev *vdev, 427 enum htt_cmn_pkt_type val); 428 429 /** 430 * @brief get the Rx decapsulation type of the VDEV 431 * 432 * @param vdev - the data virtual device object 433 * @return - the Rx decap type 434 */ 435 enum htt_cmn_pkt_type 436 (*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev); 437 438 /* Is this similar to ol_txrx_peer_state_update() in MCL */ 439 /** 440 * @brief Update the authorize peer object at association time 441 * @details 442 * For the host-based implementation of rate-control, it 443 * updates the peer/node-related parameters within rate-control 444 * context of the peer at association. 
445 * 446 * @param peer - pointer to the node's object 447 * @authorize - either to authorize or unauthorize peer 448 * 449 * @return none 450 */ 451 void 452 (*txrx_peer_authorize)(struct cdp_peer *peer, 453 u_int32_t authorize); 454 455 bool 456 (*txrx_set_inact_params)(struct cdp_pdev *pdev, 457 u_int16_t inact_check_interval, 458 u_int16_t inact_normal, 459 u_int16_t inact_overload); 460 bool 461 (*txrx_start_inact_timer)( 462 struct cdp_pdev *pdev, 463 bool enable); 464 465 466 /** 467 * @brief Set the overload status of the radio 468 * @details 469 * Set the overload status of the radio, updating the inactivity 470 * threshold and inactivity count for each node. 471 * 472 * @param pdev - the data physical device object 473 * @param overload - whether the radio is overloaded or not 474 */ 475 void (*txrx_set_overload)( 476 struct cdp_pdev *pdev, 477 bool overload); 478 /** 479 * @brief Check the inactivity status of the peer/node 480 * 481 * @param peer - pointer to the node's object 482 * @return true if the node is inactive; otherwise return false 483 */ 484 bool 485 (*txrx_peer_is_inact)(void *peer); 486 487 /** 488 * @brief Mark inactivity status of the peer/node 489 * @details 490 * If it becomes active, reset inactivity count to reload value; 491 * if the inactivity status changed, notify umac band steering. 492 * 493 * @param peer - pointer to the node's object 494 * @param inactive - whether the node is inactive or not 495 */ 496 void (*txrx_mark_peer_inact)( 497 void *peer, 498 bool inactive); 499 500 501 /* Should be ol_txrx_ctrl_api.h */ 502 void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val); 503 504 /** 505 * @brief setting mesh rx filter 506 * @details 507 * based on the bits enabled in the filter packets has to be dropped. 
508 * 509 * @param vdev - the data virtual device object 510 * @param val - value to set 511 */ 512 void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val); 513 514 void (*tx_flush_buffers)(struct cdp_vdev *vdev); 515 516 int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev); 517 518 void (*txrx_set_vdev_param)(struct cdp_vdev *vdev, 519 enum cdp_vdev_param_type param, uint32_t val); 520 521 void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value); 522 /** 523 * @brief Set the reo dest ring num of the radio 524 * @details 525 * Set the reo destination ring no on which we will receive 526 * pkts for this radio. 527 * 528 * @param pdev - the data physical device object 529 * @param reo_dest_ring_num - value ranges between 1 - 4 530 */ 531 void (*txrx_set_pdev_reo_dest)( 532 struct cdp_pdev *pdev, 533 enum cdp_host_reo_dest_ring reo_dest_ring_num); 534 535 /** 536 * @brief Get the reo dest ring num of the radio 537 * @details 538 * Get the reo destination ring no on which we will receive 539 * pkts for this radio. 
540 * 541 * @param pdev - the data physical device object 542 * @return the reo destination ring number 543 */ 544 enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)( 545 struct cdp_pdev *pdev); 546 547 int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub, 548 uint32_t event); 549 550 int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub, 551 uint32_t event); 552 int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx); 553 554 void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev, 555 uint8_t subtype, uint8_t tx_power); 556 557 void (*txrx_set_pdev_param)(struct cdp_pdev *pdev, 558 enum cdp_pdev_param_type type, uint8_t val); 559 void * (*txrx_get_pldev)(struct cdp_pdev *pdev); 560 561 #ifdef ATH_SUPPORT_NAC_RSSI 562 QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev, 563 enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr, 564 uint8_t chan_num); 565 QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev, 566 char *macaddr, 567 uint8_t *rssi); 568 #endif 569 void (*set_key)(struct cdp_peer *peer_handle, 570 bool is_unicast, uint32_t *key); 571 }; 572 573 struct cdp_me_ops { 574 575 u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone) 576 (struct cdp_pdev *pdev, u_int16_t buf_count); 577 578 u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)( 579 struct cdp_pdev *pdev, 580 u_int16_t buf_count); 581 582 u_int16_t 583 (*tx_get_mcast_buf_allocated_marked) 584 (struct cdp_pdev *pdev); 585 void 586 (*tx_me_alloc_descriptor)(struct cdp_pdev *pdev); 587 588 void 589 (*tx_me_free_descriptor)(struct cdp_pdev *pdev); 590 591 uint16_t 592 (*tx_me_convert_ucast)(struct cdp_vdev *vdev, 593 qdf_nbuf_t wbuf, u_int8_t newmac[][6], 594 uint8_t newmaccnt); 595 /* Should be a function pointer in ol_txrx_osif_ops{} */ 596 /** 597 * @brief notify mcast frame indication from FW. 598 * @details 599 * This notification will be used to convert 600 * multicast frame to unicast. 
601 * 602 * @param pdev - handle to the ctrl SW's physical device object 603 * @param vdev_id - ID of the virtual device received the special data 604 * @param msdu - the multicast msdu returned by FW for host inspect 605 */ 606 607 int (*mcast_notify)(struct cdp_pdev *pdev, 608 u_int8_t vdev_id, qdf_nbuf_t msdu); 609 }; 610 611 struct cdp_mon_ops { 612 613 void (*txrx_monitor_set_filter_ucast_data) 614 (struct cdp_pdev *, u_int8_t val); 615 void (*txrx_monitor_set_filter_mcast_data) 616 (struct cdp_pdev *, u_int8_t val); 617 void (*txrx_monitor_set_filter_non_data) 618 (struct cdp_pdev *, u_int8_t val); 619 620 bool (*txrx_monitor_get_filter_ucast_data) 621 (struct cdp_vdev *vdev_txrx_handle); 622 bool (*txrx_monitor_get_filter_mcast_data) 623 (struct cdp_vdev *vdev_txrx_handle); 624 bool (*txrx_monitor_get_filter_non_data) 625 (struct cdp_vdev *vdev_txrx_handle); 626 int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev); 627 628 /* HK advance monitor filter support */ 629 int (*txrx_set_advance_monitor_filter) 630 (struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val); 631 }; 632 633 struct cdp_host_stats_ops { 634 int (*txrx_host_stats_get)(struct cdp_vdev *vdev, 635 struct ol_txrx_stats_req *req); 636 637 void (*txrx_host_stats_clr)(struct cdp_vdev *vdev); 638 639 void (*txrx_host_ce_stats)(struct cdp_vdev *vdev); 640 641 int (*txrx_stats_publish)(struct cdp_pdev *pdev, 642 void *buf); 643 /** 644 * @brief Enable enhanced stats functionality. 645 * 646 * @param pdev - the physical device object 647 * @return - void 648 */ 649 void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev); 650 651 /** 652 * @brief Disable enhanced stats functionality. 653 * 654 * @param pdev - the physical device object 655 * @return - void 656 */ 657 void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev); 658 659 /** 660 * @brief Get the desired stats from the message. 
661 * 662 * @param pdev - the physical device object 663 * @param stats_base - stats buffer received from FW 664 * @param type - stats type. 665 * @return - pointer to requested stat identified by type 666 */ 667 uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev, 668 uint32_t *stats_base, uint32_t msg_len, uint8_t type); 669 void 670 (*tx_print_tso_stats)(struct cdp_vdev *vdev); 671 672 void 673 (*tx_rst_tso_stats)(struct cdp_vdev *vdev); 674 675 void 676 (*tx_print_sg_stats)(struct cdp_vdev *vdev); 677 678 void 679 (*tx_rst_sg_stats)(struct cdp_vdev *vdev); 680 681 void 682 (*print_rx_cksum_stats)(struct cdp_vdev *vdev); 683 684 void 685 (*rst_rx_cksum_stats)(struct cdp_vdev *vdev); 686 687 A_STATUS 688 (*txrx_host_me_stats)(struct cdp_vdev *vdev); 689 690 void 691 (*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr); 692 693 int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev, 694 struct ol_txrx_stats_req *req); 695 696 void 697 (*print_lro_stats)(struct cdp_vdev *vdev); 698 699 void 700 (*reset_lro_stats)(struct cdp_vdev *vdev); 701 702 void 703 (*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr, 704 uint32_t cap); 705 void 706 (*get_htt_stats)(struct cdp_pdev *pdev, void *data, 707 uint32_t data_len); 708 void 709 (*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data, 710 uint16_t stats_id); 711 struct cdp_peer_stats* 712 (*txrx_get_peer_stats)(struct cdp_peer *peer); 713 void 714 (*txrx_reset_peer_ald_stats)(struct cdp_peer *peer); 715 void 716 (*txrx_reset_peer_stats)(struct cdp_peer *peer); 717 int 718 (*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf, 719 bool is_aggregate); 720 int 721 (*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc, 722 void *data, uint32_t len, 723 uint32_t stats_id); 724 int 725 (*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle, 726 void *buffer); 727 }; 728 729 struct cdp_wds_ops { 730 void 731 (*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev, 732 u_int32_t val); 733 void 734 
(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer, 735 int wds_tx_ucast, int wds_tx_mcast); 736 int (*vdev_set_wds)(void *vdev, uint32_t val); 737 }; 738 739 struct cdp_raw_ops { 740 int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev); 741 742 void (*rsim_get_astentry)(struct cdp_vdev *vdev, 743 qdf_nbuf_t *pnbuf, 744 struct cdp_raw_ast *raw_ast); 745 }; 746 747 #ifdef CONFIG_WIN 748 struct cdp_pflow_ops { 749 uint32_t(*pflow_update_pdev_params)(void *, 750 enum _ol_ath_param_t, uint32_t, void *); 751 }; 752 #endif /* CONFIG_WIN */ 753 754 #define LRO_IPV4_SEED_ARR_SZ 5 755 #define LRO_IPV6_SEED_ARR_SZ 11 756 757 /** 758 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters 759 * @lro_enable: indicates whether rx_offld is enabled 760 * @tcp_flag: If the TCP flags from the packet do not match 761 * the values in this field after masking with TCP flags mask 762 * below, packet is not rx_offld eligible 763 * @tcp_flag_mask: field for comparing the TCP values provided 764 * above with the TCP flags field in the received packet 765 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id 766 * 5-tuple toeplitz hash for ipv4 packets 767 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id 768 * 5-tuple toeplitz hash for ipv6 packets 769 */ 770 struct cdp_lro_hash_config { 771 uint32_t lro_enable; 772 uint32_t tcp_flag:9, 773 tcp_flag_mask:9; 774 uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ]; 775 uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ]; 776 }; 777 778 struct ol_if_ops { 779 void 780 (*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 781 uint8_t *peer_macaddr, uint8_t vdev_id, 782 bool hash_based, uint8_t ring_num); 783 QDF_STATUS 784 (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 785 uint8_t vdev_id, uint8_t *peer_mac, 786 qdf_dma_addr_t hw_qdesc, int tid, 787 uint16_t queue_num, 788 uint8_t ba_window_size_valid, 789 uint16_t ba_window_size); 790 QDF_STATUS 791 
	(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
			uint8_t vdev_id, uint8_t *peer_macaddr,
			uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
			uint8_t *peer_macaddr);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	/* WDS (4-address) entry management for a peer */
	int (*peer_add_wds_entry)(void *ol_soc_handle,
			const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
			uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
			uint8_t *wds_macaddr);
	QDF_STATUS
	(*lro_hash_config)(struct wlan_objmgr_psoc *ctrl_psoc,
			struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
			uint8_t type);
#ifdef CONFIG_WIN
	uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	/* firmware peer map/unmap event handlers */
	int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
			uint8_t vdev_id, uint8_t *peer_mac_addr,
			enum cdp_txrx_ast_entry_type peer_type);
	int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);

	int (*get_dp_cfg_param)(void *ol_soc_handle, enum cdp_cfg_param_type param_num);

	void (*rx_mic_error)(void *ol_soc_handle,
			uint16_t vdev_id, void *wh);
	bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer,
			qdf_nbuf_t nbuf,
			uint16_t hdr_space);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);

	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
			u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	/* neighbour-aware-capture RSSI config; cmd is add/del/list
	 * (see enum cdp_nac_param_cmd)
	 */
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
			u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid,
			char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
			u_int8_t vdev_id,
			enum cdp_nac_param_cmd cmd, char *bssid);
#endif
	int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr);

	/**
	 * send_delba() - Send delba to peer
	 * @pdev_handle: Dp pdev handle
	 * @ctrl_peer: Peer handle
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 * @vdev_handle: Dp vdev handle
	 * @reason_code: Reason code carried in the delba
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(void *pdev_handle, void *ctrl_peer,
			uint8_t *peer_macaddr, uint8_t tid, void *vdev_handle,
			uint8_t reason_code);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};

#ifndef CONFIG_WIN
/* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer: set IBSS vdev heartbeat timer (seconds)
 * @set_wmm_param: push WMM parameters to the data path
 * @bad_peer_txctl_set_setting: configure bad-peer tx flow control
 * @bad_peer_txctl_update_threshold: update bad-peer tx control thresholds
 * @hl_tdls_flag_reset: set/clear the high-latency TDLS flag on a vdev
 * @tx_non_std: transmit a non-standard msdu list; returns unaccepted msdus
 * @get_vdev_id: return the numeric id of a vdev
 * @get_tx_ack_stats: return tx ack statistics for a vdev id
 * @set_wisa_mode: enable/disable WISA mode on a vdev
 * @txrx_data_stall_cb_register: register a data-stall detect callback
 * @txrx_data_stall_cb_deregister: deregister the data-stall callback
 * @txrx_post_data_stall_event: report a data-stall event
 * @runtime_suspend: runtime suspend of the pdev data path
 * @runtime_resume: runtime resume of the pdev data path
 * @get_opmode: return the operating mode of a vdev
 * @mark_first_wakeup_packet: mark the first packet after wakeup
 * @update_mac_id: associate a vdev id with a mac id
 * @flush_rx_frames: flush cached rx frames for a peer
 * @get_intra_bss_fwd_pkts_count: fetch intra-BSS forwarded tx/rx counters
 * @pkt_log_init: initialize packet log for a pdev
 * @pkt_log_con_service: connect the packet log service
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
			uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
			struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
			int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
			int level, int tput_thresh, int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb
			cb);
	void (*txrx_post_data_stall_event)(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
			uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
};

/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: read queueing and transmit delay (microseconds) per category
 * @tx_delay_hist: read tx delay histogram bin values for a category
 * @tx_packet_count: read sent and lost packet counts for a category
 * @tx_set_compute_interval: set the delay computation interval
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
			uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
			uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
			uint16_t *out_packet_count,
			uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
			uint32_t interval);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info: return pointers to the peer's last PN, its validity flag
 *               and the robust-mgmt-frame PN replay counter
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
			uint64_t **last_pn, uint32_t **rmf_pn_replays);
};

/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: enable/disable rx forwarding in the cfg context
 * @set_cfg_packet_log_enabled: record whether packet log is enabled
 * @cfg_attach: allocate and return a new cfg context
 * @vdev_rx_set_intrabss_fwd: enable/disable intra-BSS forwarding on a vdev
 * @is_rx_fwd_disabled: query whether rx forwarding is disabled for a vdev
 * @tx_set_is_mgmt_over_wmi_enabled: record whether mgmt tx goes over WMI
 * @is_high_latency: non-zero if the cfg context is for a high-latency bus
 * @set_flow_control_parameters: apply flow control parameters from @param
 * @set_flow_steering: enable/disable flow steering
 * @set_ptp_rx_opt_enabled: enable/disable PTP rx optimization
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
			uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
			uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
			void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
};

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler: create/attach a tx descriptor pool for a vdev
 * @flow_pool_unmap_handler: detach the tx descriptor pool of a vdev
 * @register_pause_cb: register the OS pause/unpause callback
 * @set_desc_global_pool_size: set the global tx descriptor pool size
 * @dump_flow_pool_info: print flow pool state for debugging
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
			struct cdp_pdev *pdev,
			uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
			struct cdp_pdev *pdev,
			uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
			tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);

	void (*dump_flow_pool_info)(void *);
};

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: register tx flow control callback(s)
 * @set_vdev_tx_desc_limit: set a per-vdev tx descriptor limit
 * @set_vdev_os_queue_status: set per-vdev OS queue status
 * @deregister_tx_flow_control_cb: remove the tx flow control callback
 * @flow_control_cb: invoke the OS flow control callback for a vdev
 * @get_tx_resource: check tx resource availability against watermarks
 * @ll_set_tx_pause_q_depth: set the tx pause queue depth for a vdev
 * @vdev_flush: flush all tx queues of a vdev
 * @vdev_pause: pause vdev tx for the given reason
 * @vdev_unpause: unpause vdev tx for the given reason
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
			tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(u8 vdev_id, u8 chan);
	int (*set_vdev_os_queue_status)(u8 vdev_id,
			enum netif_action_type action);
#else
	int (*register_tx_flow_control)(uint8_t vdev_id,
			ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
			ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
			unsigned int low_watermark,
			unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: fetch IPA resource parameters from the pdev
 * @ipa_set_doorbell_paddr: program the IPA doorbell physical address
 * @ipa_set_active: mark IPA uC active/inactive for the tx or rx direction
 * @ipa_op_response: handle an IPA uC operation response message
 * @ipa_register_op_cb: register a callback for IPA uC op messages
 * @ipa_get_stat: request IPA uC statistics
 * @ipa_tx_data_frame: send a data frame on behalf of IPA; returns the
 *                     nbuf back if it was not consumed
 * @ipa_set_uc_tx_partition_base: set the uC tx partition base value
 * @ipa_uc_get_share_stats: request uC sharing stats (FEATURE_METERING)
 * @ipa_uc_set_quota: set uC quota in bytes (FEATURE_METERING)
 * @ipa_enable_autonomy: enable IPA autonomy
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
			bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
			void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
			void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
			uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
			uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
			uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	/* unified WDI API variant: adds smmu flag and sys pipe parameters */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
			uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
			bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
			uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
			uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
			qdf_ipa_client_type_t prod_client,
			qdf_ipa_client_type_t cons_client,
			uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
			uint32_t max_supported_bw_mbps);
};
#endif

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: suspend the pdev data path for bus suspend
 * @bus_resume: resume the pdev data path after bus resume
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};

/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info: set OCB channel info on a vdev
 * @get_ocb_chan_info: get the OCB channel info of a vdev
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
			struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *
	(*get_ocb_chan_info)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_peer_ops - mcl
peer related ops
 * @register_peer: register a station described by @sta_desc
 * @clear_peer: clear peer state for a station id
 * @change_peer_state: move a peer to a new ol_txrx_peer_state
 * @peer_get_ref_by_addr: look up a peer by mac address and take a reference
 * @peer_release_ref: drop a reference taken by @peer_get_ref_by_addr
 * @find_peer_by_addr: look up a peer by mac address
 * @find_peer_by_addr_and_vdev: look up a peer by mac address within a vdev
 * @local_peer_id: return the local peer id of a peer
 * @peer_find_by_local_id: look up a peer by local peer id
 * @peer_state_update: update the state of the peer with @peer_addr
 * @get_vdevid: return the vdev id a peer belongs to
 * @get_vdev_by_sta_id: return the vdev a station id belongs to
 * @register_ocb_peer: register the OCB peer with @mac_addr
 * @peer_get_peer_mac_addr: return the mac address of a peer
 * @get_peer_state: return the current state of a peer
 * @get_vdev_for_peer: return the vdev of a peer
 * @update_ibss_add_peer_num_of_vdev: adjust the IBSS peer count of a vdev
 * @remove_peers_for_vdev: remove peers of a vdev, invoking @callback
 * @remove_peers_for_vdev_no_lock: remove peers of a vdev without locking
 * @copy_mac_addr_raw: copy @bss_addr into the vdev as raw mac address
 * @add_last_real_peer: re-add the last real peer of a vdev
 * @is_vdev_restore_last_peer: query whether the last peer must be restored
 * @update_last_real_peer: update last-real-peer bookkeeping
 * @peer_detach_force_delete: detach a peer with immediate forced deletion
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
			struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
			enum ol_txrx_peer_state sta_state,
			bool roam_synch_in_progress);
	void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
			u8 *peer_addr, uint8_t *peer_id,
			enum peer_debug_id_type debug_id);
	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
			uint8_t *peer_addr, uint8_t *peer_id);
	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
			struct cdp_vdev *vdev,
			uint8_t *peer_addr, uint8_t *peer_id);
	uint16_t (*local_peer_id)(void *peer);
	void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
			uint8_t local_peer_id);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
			uint8_t *peer_addr,
			enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
			uint8_t sta_id);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev *
	(*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
			int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
			ol_txrx_vdev_peer_remove_cb callback,
			void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
			ol_txrx_vdev_peer_remove_cb callback,
			void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
			struct cdp_vdev *vdev, uint8_t *peer_id);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
			uint8_t *peer_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
};

/**
 * struct cdp_throttle_ops - mcl tx throttle ops
 * @throttle_init_period: set the throttle period and duty-cycle levels
 * @throttle_set_level: set the current throttle level
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
			uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};

/**
 * struct cdp_mob_stats_ops - mcl mobile stats ops
 * @clear_stats: clear the stats selected by @bitmap
 * @stats: write stats for a vdev into @buffer (up to @buf_len bytes)
 */
struct cdp_mob_stats_ops {
	void (*clear_stats)(uint16_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
#endif /* CONFIG_WIN */

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl receive offload ops
 * @register_rx_offld_flush_cb: register the rx offload flush callback
 * @deregister_rx_offld_flush_cb: deregister the rx offload flush callback
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif

/**
 * struct cdp_ops - top-level table bundling all CDP ops tables
 * registered by a data path implementation
 */
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops
		*host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifndef CONFIG_WIN
	/* MCL-specific ops tables */
	struct cdp_misc_ops *misc_ops;
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
	struct cdp_bus_ops *bus_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_throttle_ops *throttle_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_tx_delay_ops *delay_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif /* CONFIG_WIN */
};
#endif