/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"

#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/

struct cdp_cmn_ops {

	int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);

	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);

	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		 uint8_t vdev_id, enum wlan_op_mode op_mode);

	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		 void *cb_context);

	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);

	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);

	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);

	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);

	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr,
		 struct cdp_ctrl_objmgr_peer *ctrl_peer);

	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		 uint32_t flags);

	void (*txrx_peer_del_ast)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, uint32_t flags);

	void *(*txrx_peer_ast_hash_find_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr);

	void *(*txrx_peer_ast_hash_find_by_pdevid)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id);

	uint8_t (*txrx_peer_ast_get_pdev_id)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	uint8_t (*txrx_peer_ast_get_next_hop)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	void (*txrx_peer_ast_set_type)
		(ol_txrx_soc_handle soc, void *ast_hdl,
		 enum cdp_txrx_ast_entry_type type);

#if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
	void (*txrx_peer_ast_set_cp_ctx)(ol_txrx_soc_handle soc,
					 void *ast_entry,
					 void *cp_ctx);

	void * (*txrx_peer_ast_get_cp_ctx)(ol_txrx_soc_handle soc,
					   void *ast_entry);

	bool (*txrx_peer_ast_get_wmi_sent)(ol_txrx_soc_handle soc,
					   void *ast_entry);

	void (*txrx_peer_ast_free_entry)(ol_txrx_soc_handle soc,
					 void *ast_entry);
#endif

	enum cdp_txrx_ast_entry_type (*txrx_peer_ast_get_type)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	struct cdp_peer* (*txrx_peer_ast_get_peer)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	uint32_t (*txrx_peer_ast_get_nexthop_peer_id)
		(ol_txrx_soc_handle soc, void *ast_hdl);

	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);

	int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
				     uint8_t smart_monitor);

	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);

	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
					       int16_t chan_noise_floor);

	void (*txrx_set_nac)(struct cdp_peer *peer);

	void (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);

	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		 uint32_t peer_id, uint8_t *peer_mac);

	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);

	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);

	void (*txrx_ath_getstats)(void *pdev,
				  struct cdp_dev_stats *stats, uint8_t type);

	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
				  u_int8_t *user_position);

	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);

	void (*txrx_if_mgmt_drain)(void *ni, int force);

	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);

	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter, uint32_t num);

	uint32_t (*txrx_get_cfg)(void *soc, enum cdp_dp_cfg cfg);

	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/

	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
			void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
			struct ol_txrx_ops *txrx_ops);

	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
			qdf_nbuf_t tx_mgmt_frm, uint8_t type);

	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
			qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
			uint16_t chanfreq);

	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */

	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
				    ol_txrx_mgmt_tx_cb download_cb,
				    ol_txrx_mgmt_tx_cb ota_ack_cb,
				    void *ctxt);

	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);

	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */

	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
				    ol_txrx_data_tx_cb callback, void *ctxt);

	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/

	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
			     int max_subfrms_amsdu);

	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
				      struct ol_txrx_stats_req *req,
				      bool per_vdev, bool response_expected);

	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);

	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
				  uint8_t cfg_stats_type, uint32_t cfg_val);

	void (*txrx_print_level_set)(unsigned level);

	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct
	 * qdf_mac_addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);

	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
					      uint8_t vdev_id);

	void (*txrx_soc_detach)(void *soc);

	int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
					int status);

	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
				    uint16_t tid, uint16_t batimeout,
				    uint16_t buffersize,
				    uint16_t startseqnum);

	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
				    uint8_t *dialogtoken, uint16_t *statuscode,
				    uint16_t *buffersize, uint16_t *batimeout);

	int (*delba_process)(void *peer_handle,
			     int tid, uint16_t reasoncode);

	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @peer_handle: Peer handle
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on Success, 1 on failure
	 */
	int (*delba_tx_completion)(void *peer_handle,
				   uint8_t tid, int status);

	void (*set_addba_response)(void *peer_handle,
				   uint8_t tid, uint16_t statuscode);

	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
					    uint16_t peer_id,
					    uint8_t *mac_addr);

	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
				      uint8_t map_id);
	int (*txrx_get_total_per)(struct cdp_pdev *pdev_handle);

	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
				      uint8_t tos, uint8_t tid);

	QDF_STATUS (*txrx_stats_request)(struct cdp_vdev *vdev,
					 struct cdp_txrx_stats_req *req);

	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
				    enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);

	int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	void (*set_pn_check)(struct cdp_vdev *vdev,
		struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
		uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
			struct cdp_config_params *params);

	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
			void *dp_txrx_hdl);

	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
			void *dp_txrx_handle);

	void (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr, void *vdev_hdl);

	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
					  void *vdev_hdl);

	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t *value);

	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
					   uint32_t num_peers,
					   bool peer_map_unmap_v2);

	void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl,
					struct cdp_ctrl_objmgr_pdev *ctrl_pdev);

	ol_txrx_tx_fp tx_send;
	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev
	 * to deliver pkt to stack.
	 * @vdev: vdev handle
	 * @stack_fn: pointer to - function pointer to deliver RX pkt to stack
	 * @osif_vdev: pointer to - osif vdev to deliver RX packet to.
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
					(struct cdp_vdev *vdev,
					 ol_txrx_rx_fp *stack_fn,
					 ol_osif_vdev_handle *osif_vdev);
	int (*txrx_classify_update)
		(struct cdp_vdev *vdev, qdf_nbuf_t skb,
		 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);

	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
				    enum cdp_capabilities dp_caps);
};

struct cdp_ctrl_ops {

	int
		(*txrx_mempools_attach)(void *ctrl_pdev);
	int
		(*txrx_set_filter_neighbour_peers)(
				struct cdp_pdev *pdev,
				uint32_t val);
	int
		(*txrx_update_filter_neighbour_peers)(
				struct cdp_vdev *vdev,
				uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * send and receive packets. It works like open AUTH mode, HW will
	 * create all packets as non-encrypt frames because no key installed.
	 * For rx fragmented frames, it bypasses all the rx defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */

	void
		(*txrx_set_safemode)(
				struct cdp_vdev *vdev,
				u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When set this flag, all the unencrypted frames
	 * received over a secure connection will be discarded
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
		(*txrx_set_drop_unenc)(
				struct cdp_vdev *vdev,
				u_int32_t val);


	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
		(*txrx_set_tx_encap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
		(*txrx_set_vdev_rx_decap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);

	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);

	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @authorize - either to authorize or unauthorize peer
	 *
	 * @return none
	 */
	void
		(*txrx_peer_authorize)(struct cdp_peer *peer,
				       u_int32_t authorize);

	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);

	/**
	 * @brief setting mesh rx filter
	 * @details
	 * based on the bits enabled in the filter packets has to be dropped.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);

	void (*tx_flush_buffers)(struct cdp_vdev *vdev);

	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);

	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
			enum cdp_vdev_param_type param, uint32_t val);

	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 */
	void (*txrx_set_pdev_reo_dest)(
			struct cdp_pdev *pdev,
			enum cdp_host_reo_dest_ring reo_dest_ring_num);

	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
			struct cdp_pdev *pdev);

	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
				  uint32_t event);

	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
				    uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);

	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
					    uint8_t subtype, uint8_t tx_power);

	void (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
				    enum cdp_pdev_param_type type,
				    uint8_t val);
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);

#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
		uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
						   char *macaddr,
						   uint8_t *rssi);
#endif
	void (*set_key)(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key);
};

struct cdp_me_ops {

	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);

	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
			struct cdp_pdev *pdev,
			u_int16_t buf_count);

	u_int16_t
		(*tx_get_mcast_buf_allocated_marked)
			(struct cdp_pdev *pdev);
	void
		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);

	void
		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);

	uint16_t
		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
			qdf_nbuf_t wbuf, u_int8_t newmac[][6],
			uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */

	int (*mcast_notify)(struct cdp_pdev *pdev,
			u_int8_t vdev_id, qdf_nbuf_t msdu);
};

struct cdp_mon_ops {

	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);

	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);

	/* HK advance monitor filter support */
	int (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
};

struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
				   struct ol_txrx_stats_req *req);

	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);

	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);

	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
				  void *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);

	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);

	/**
	 * @brief Get the desired stats from the message.
	 *
	 * @param pdev - the physical device object
	 * @param stats_base - stats buffer received from FW
	 * @param type - stats type.
	 * @return - pointer to requested stat identified by type
	 */
	uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
			uint32_t *stats_base, uint32_t msg_len, uint8_t type);
	void
		(*tx_print_tso_stats)(struct cdp_vdev *vdev);

	void
		(*tx_rst_tso_stats)(struct cdp_vdev *vdev);

	void
		(*tx_print_sg_stats)(struct cdp_vdev *vdev);

	void
		(*tx_rst_sg_stats)(struct cdp_vdev *vdev);

	void
		(*print_rx_cksum_stats)(struct cdp_vdev *vdev);

	void
		(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);

	A_STATUS
		(*txrx_host_me_stats)(struct cdp_vdev *vdev);

	void
		(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);

	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
					struct ol_txrx_stats_req *req);

	void
		(*print_lro_stats)(struct cdp_vdev *vdev);

	void
		(*reset_lro_stats)(struct cdp_vdev *vdev);

	void
		(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
				     uint32_t cap);
	void
		(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
				 uint32_t data_len);
	void
		(*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data,
					  uint16_t stats_id);
	struct cdp_peer_stats*
		(*txrx_get_peer_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_ald_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_stats)(struct cdp_peer *peer);
	int
		(*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf,
				       bool is_aggregate);
	int
		(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
						    void *data, uint32_t len,
						    uint32_t stats_id);
	int
		(*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle,
					    void *buffer);
	void
		(*txrx_update_vdev_stats)(struct cdp_vdev *vdev, void *buf,
					  uint16_t stats_id);
};

struct cdp_wds_ops {
	void
		(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
					  u_int32_t val);
	void
		(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
						  int wds_tx_ucast,
						  int wds_tx_mcast);
	int (*vdev_set_wds)(void *vdev, uint32_t val);
};

struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);

	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
				  qdf_nbuf_t *pnbuf,
				  struct cdp_raw_ast *raw_ast);
};

#ifdef CONFIG_WIN
struct cdp_pflow_ops {
	uint32_t(*pflow_update_pdev_params)(void *,
			enum _ol_ath_param_t, uint32_t, void *);
};
#endif /* CONFIG_WIN */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};

struct ol_if_ops {
	void
	(*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
				    uint8_t *peer_macaddr, uint8_t vdev_id,
				    bool hash_based, uint8_t ring_num);
	QDF_STATUS
	(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
				       uint8_t vdev_id, uint8_t *peer_mac,
				       qdf_dma_addr_t hw_qdesc, int tid,
				       uint16_t queue_num,
				       uint8_t ba_window_size_valid,
				       uint16_t ba_window_size);
	QDF_STATUS
	(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
					uint8_t vdev_id, uint8_t *peer_macaddr,
					uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
				 uint8_t *peer_macaddr);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(void *ol_soc_handle,
			const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
			uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
				   uint8_t *wds_macaddr);
#if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
	void (*peer_del_wds_cp_ctx)(void *cp_ctx);
#endif
	QDF_STATUS
	(*lro_hash_config)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
			   struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
				uint8_t type);
#ifdef CONFIG_WIN
	uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id,
			      uint16_t hw_peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      enum cdp_txrx_ast_entry_type peer_type,
			      uint32_t tx_ast_hashidx);
	int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);

	int (*get_dp_cfg_param)(void *ol_soc_handle,
				enum cdp_cfg_param_type param_num);

	void (*rx_mic_error)(void *ol_soc_handle,
			     uint16_t vdev_id, void *wh);
	bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer,
				   qdf_nbuf_t nbuf,
				   uint16_t hdr_space);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);

	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
				  u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid,
		char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid);
#endif
	int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr);

	/**
	 * send_delba() - Send delba to peer
	 * @pdev_handle: Dp pdev handle
	 * @ctrl_peer: Peer handle
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(void *pdev_handle, void *ctrl_peer,
			  uint8_t *peer_macaddr, uint8_t tid,
			  void *vdev_handle, uint8_t reason_code);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};

#ifndef CONFIG_WIN
/* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event
 * @runtime_suspend:
 * @runtime_resume:
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
						   uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
			      struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
					   int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
						int level, int tput_thresh,
						int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
				 enum ol_tx_spec tx_spec,
				 qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
						 uint64_t *fwd_tx_packets,
						 uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
};

/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
			 uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
			      uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
				uint16_t *out_packet_count,
				uint16_t *out_packet_loss_count,
				int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
					uint32_t interval);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
			    uint64_t **last_pn, uint32_t **rmf_pn_replays);
};

/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled:
 * @set_cfg_packet_log_enabled:
 * @cfg_attach:
 * @vdev_rx_set_intrabss_fwd:
 * @get_opmode:
 * @is_rx_fwd_disabled:
 * @tx_set_is_mgmt_over_wmi_enabled:
 * @is_high_latency:
 * @set_flow_control_parameters:
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
					uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
					   uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
					    void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
};

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
					    struct cdp_pdev *pdev,
					    uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
					struct cdp_pdev *pdev,
					uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
					tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);

	void (*dump_flow_pool_info)(void *);
};

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 * @ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
					tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(u8 vdev_id, u8 chan);
	int (*set_vdev_os_queue_status)(u8 vdev_id,
					enum netif_action_type action);
#else
	int (*register_tx_flow_control)(uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
				unsigned int low_watermark,
				unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
				     bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
		void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev,
					qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
					     uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
					     uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
				       uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
		bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
				  uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
				      qdf_ipa_client_type_t prod_client,
				      qdf_ipa_client_type_t cons_client,
				      uint8_t session_id,
				      bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
					 uint32_t max_supported_bw_mbps);
};
#endif

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend:
 * @bus_resume:
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};

/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info:
 * @get_ocb_chan_info:
 */
struct cdp_ocb_ops {
1081 void (*set_ocb_chan_info)(struct cdp_vdev *vdev, 1082 struct ol_txrx_ocb_set_chan ocb_set_chan); 1083 struct ol_txrx_ocb_chan_info * 1084 (*get_ocb_chan_info)(struct cdp_vdev *vdev); 1085 }; 1086 1087 /** 1088 * struct cdp_peer_ops - mcl peer related ops 1089 * @register_peer: 1090 * @clear_peer: 1091 * @cfg_attach: 1092 * @find_peer_by_addr: 1093 * @find_peer_by_addr_and_vdev: 1094 * @local_peer_id: 1095 * @peer_find_by_local_id: 1096 * @peer_state_update: 1097 * @get_vdevid: 1098 * @get_vdev_by_sta_id: 1099 * @register_ocb_peer: 1100 * @peer_get_peer_mac_addr: 1101 * @get_peer_state: 1102 * @get_vdev_for_peer: 1103 * @update_ibss_add_peer_num_of_vdev: 1104 * @remove_peers_for_vdev: 1105 * @remove_peers_for_vdev_no_lock: 1106 * @copy_mac_addr_raw: 1107 * @add_last_real_peer: 1108 * @is_vdev_restore_last_peer: 1109 * @update_last_real_peer: 1110 */ 1111 struct cdp_peer_ops { 1112 QDF_STATUS (*register_peer)(struct cdp_pdev *pdev, 1113 struct ol_txrx_desc_type *sta_desc); 1114 QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id); 1115 QDF_STATUS (*change_peer_state)(uint8_t sta_id, 1116 enum ol_txrx_peer_state sta_state, 1117 bool roam_synch_in_progress); 1118 void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev, 1119 u8 *peer_addr, uint8_t *peer_id, 1120 enum peer_debug_id_type debug_id); 1121 void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id); 1122 void * (*find_peer_by_addr)(struct cdp_pdev *pdev, 1123 uint8_t *peer_addr, uint8_t *peer_id); 1124 void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev, 1125 struct cdp_vdev *vdev, 1126 uint8_t *peer_addr, uint8_t *peer_id); 1127 uint16_t (*local_peer_id)(void *peer); 1128 void * (*peer_find_by_local_id)(struct cdp_pdev *pdev, 1129 uint8_t local_peer_id); 1130 QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev, 1131 uint8_t *peer_addr, 1132 enum ol_txrx_peer_state state); 1133 QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id); 1134 struct cdp_vdev * 
(*get_vdev_by_sta_id)(struct cdp_pdev *pdev, 1135 uint8_t sta_id); 1136 QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id); 1137 uint8_t * (*peer_get_peer_mac_addr)(void *peer); 1138 int (*get_peer_state)(void *peer); 1139 struct cdp_vdev * (*get_vdev_for_peer)(void *peer); 1140 int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev, 1141 int16_t peer_num_delta); 1142 void (*remove_peers_for_vdev)(struct cdp_vdev *vdev, 1143 ol_txrx_vdev_peer_remove_cb callback, 1144 void *callback_context, bool remove_last_peer); 1145 void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev, 1146 ol_txrx_vdev_peer_remove_cb callback, 1147 void *callback_context); 1148 void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr); 1149 void (*add_last_real_peer)(struct cdp_pdev *pdev, 1150 struct cdp_vdev *vdev, uint8_t *peer_id); 1151 bool (*is_vdev_restore_last_peer)(void *peer); 1152 void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer, 1153 uint8_t *peer_id, bool restore_last_peer); 1154 void (*peer_detach_force_delete)(void *peer); 1155 }; 1156 1157 /** 1158 * struct cdp_ocb_ops - mcl ocb ops 1159 * @throttle_init_period: 1160 * @throttle_set_level: 1161 */ 1162 struct cdp_throttle_ops { 1163 void (*throttle_init_period)(struct cdp_pdev *pdev, int period, 1164 uint8_t *dutycycle_level); 1165 void (*throttle_set_level)(struct cdp_pdev *pdev, int level); 1166 }; 1167 1168 /** 1169 * struct cdp_ocb_ops - mcl ocb ops 1170 * @clear_stats: 1171 * @stats: 1172 */ 1173 struct cdp_mob_stats_ops { 1174 void (*clear_stats)(uint16_t bitmap); 1175 int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len); 1176 }; 1177 #endif /* CONFIG_WIN */ 1178 1179 #ifdef RECEIVE_OFFLOAD 1180 /** 1181 * struct cdp_rx_offld_ops - mcl receive offload ops 1182 * @register_rx_offld_flush_cb: 1183 * @deregister_rx_offld_flush_cb: 1184 */ 1185 struct cdp_rx_offld_ops { 1186 void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *)); 1187 void 
(*deregister_rx_offld_flush_cb)(void); 1188 }; 1189 #endif 1190 1191 struct cdp_ops { 1192 struct cdp_cmn_ops *cmn_drv_ops; 1193 struct cdp_ctrl_ops *ctrl_ops; 1194 struct cdp_me_ops *me_ops; 1195 struct cdp_mon_ops *mon_ops; 1196 struct cdp_host_stats_ops *host_stats_ops; 1197 struct cdp_wds_ops *wds_ops; 1198 struct cdp_raw_ops *raw_ops; 1199 struct cdp_pflow_ops *pflow_ops; 1200 #ifndef CONFIG_WIN 1201 struct cdp_misc_ops *misc_ops; 1202 struct cdp_cfg_ops *cfg_ops; 1203 struct cdp_flowctl_ops *flowctl_ops; 1204 struct cdp_lflowctl_ops *l_flowctl_ops; 1205 #ifdef IPA_OFFLOAD 1206 struct cdp_ipa_ops *ipa_ops; 1207 #endif 1208 #ifdef RECEIVE_OFFLOAD 1209 struct cdp_rx_offld_ops *rx_offld_ops; 1210 #endif 1211 struct cdp_bus_ops *bus_ops; 1212 struct cdp_ocb_ops *ocb_ops; 1213 struct cdp_peer_ops *peer_ops; 1214 struct cdp_throttle_ops *throttle_ops; 1215 struct cdp_mob_stats_ops *mob_stats_ops; 1216 struct cdp_tx_delay_ops *delay_ops; 1217 struct cdp_pmf_ops *pmf_ops; 1218 #endif /* CONFIG_WIN */ 1219 }; 1220 #endif 1221