/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"

#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 * (passed as the @bitmap argument of txrx_peer_delete/txrx_peer_delete_sync)
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/

/**
 * struct cdp_cmn_ops - converged data-path ops common to all targets:
 * soc/pdev/vdev/peer attach/detach lifecycle, AST entry management,
 * monitor mode, tx/rx callback registration and stats/debug hooks.
 * All members are function pointers provided by the data-path
 * implementation; the cdp_* handles are opaque to callers.
 */
struct cdp_cmn_ops {

	QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);

	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);

	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		 uint8_t vdev_id, enum wlan_op_mode op_mode);

	/* @callback/@cb_context: invoked when the vdev delete completes */
	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		 void *cb_context);

	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);

	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);

	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);

	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);

	/**
	 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
	 * @pdev: Dp pdev handle
	 * @force: Force deinit or not
	 *
	 * Return: None
	 */
	void (*txrx_pdev_deinit)(struct cdp_pdev *pdev, int force);

	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr,
		 struct cdp_ctrl_objmgr_peer *ctrl_peer);

	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		 uint32_t flags);

	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, uint32_t flags);

	/* Returns true and fills @ast_entry_info when the AST entry exists */
	bool (*txrx_peer_get_ast_info_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 struct cdp_ast_entry_info *ast_entry_info);

	bool (*txrx_peer_get_ast_info_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info);

	QDF_STATUS (*txrx_peer_ast_delete_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 txrx_ast_free_cb callback,
		 void *cookie);

	QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 txrx_ast_free_cb callback,
		 void *cookie);

	/* @bitmap: CDP_PEER_DELETE_NO_SPECIAL / CDP_PEER_DO_NOT_START_UNMAP_TIMER */
	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);

	QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
					    uint8_t smart_monitor);
	void (*txrx_peer_delete_sync)(void *peer,
				      QDF_STATUS(*delete_cb)(
						uint8_t vdev_id,
						uint32_t peerid_cnt,
						uint16_t *peerid_list),
				      uint32_t bitmap);

	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
	bool (*txrx_get_vow_config_frm_pdev)(struct cdp_pdev *pdev);

	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
					       int16_t chan_noise_floor);

	void (*txrx_set_nac)(struct cdp_peer *peer);

	/**
	 * txrx_set_pdev_tx_capture() - callback to set pdev tx_capture
	 * @pdev: data path pdev handle
	 * @val: value of pdev_tx_capture
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);

	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		 uint32_t peer_id, uint8_t *peer_mac);

	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);

	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);

	void (*txrx_ath_getstats)(void *pdev,
				  struct cdp_dev_stats *stats, uint8_t type);

	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
				  u_int8_t *user_position);

	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);

	void (*txrx_if_mgmt_drain)(void *ni, int force);

	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);

	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter,
		 uint32_t num);

	uint32_t (*txrx_get_cfg)(void *soc, enum cdp_dp_cfg cfg);

	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/

	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
				   void *osif_vdev,
				   struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
				   struct ol_txrx_ops *txrx_ops);

	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
			      qdf_nbuf_t tx_mgmt_frm, uint8_t type);

	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
				  qdf_nbuf_t tx_mgmt_frm, uint8_t type,
				  uint8_t use_6mbps, uint16_t chanfreq);

	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */

	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
				    ol_txrx_mgmt_tx_cb download_cb,
				    ol_txrx_mgmt_tx_cb ota_ack_cb,
				    void *ctxt);

	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);

	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */

	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
				    ol_txrx_data_tx_cb callback, void *ctxt);

	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/

	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
			     int max_subfrms_amsdu);

	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
				      struct ol_txrx_stats_req *req,
				      bool per_vdev, bool response_expected);

	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);

	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
				  uint8_t cfg_stats_type, uint32_t cfg_val);

	void (*txrx_print_level_set)(unsigned level);

	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct
	 * qdf_mac_addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);

	/**
	 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
	 * @pdev: pdev handle
	 *
	 * Return: Handle to vdev
	 */
	struct cdp_vdev *
		(*txrx_get_mon_vdev_from_pdev)(struct cdp_pdev *pdev);

	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
					      uint8_t vdev_id);

	void (*txrx_soc_detach)(void *soc);

	/**
	 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 *
	 * Return: None
	 */
	void (*txrx_soc_deinit)(void *soc);

	/**
	 * txrx_soc_init() - Initialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 * @ctrl_psoc: Opaque control psoc handle
	 * @hif_handle: Opaque hif handle
	 * @htc_handle: Opaque htc handle
	 * @qdf_osdev: QDF device
	 * @ol_ops: Offload (OL_IF) callbacks table
	 * @device_id: Device id
	 *
	 * Return: opaque DP soc handle (void *); NULL presumably indicates
	 * failure — confirm with the implementation
	 */
	void *(*txrx_soc_init)(void *soc, void *ctrl_psoc, void *hif_handle,
			       HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			       struct ol_if_ops *ol_ops, uint16_t device_id);

	/**
	 * txrx_tso_soc_attach() - TSO attach handler triggered during
	 * dynamic tso activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_attach)(void *soc);

	/**
	 * txrx_tso_soc_detach() - TSO detach handler triggered during
	 * dynamic tso de-activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_detach)(void *soc);
	int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
					int status);

	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
				    uint16_t tid, uint16_t batimeout,
				    uint16_t buffersize,
				    uint16_t startseqnum);

	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
				    uint8_t *dialogtoken, uint16_t *statuscode,
				    uint16_t *buffersize, uint16_t *batimeout);

	int (*delba_process)(void *peer_handle,
			     int tid, uint16_t reasoncode);

	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @peer_handle: Peer handle
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on Success, 1 on failure
	 */
	int (*delba_tx_completion)(void *peer_handle,
				   uint8_t tid, int status);

	void (*set_addba_response)(void *peer_handle,
				   uint8_t tid, uint16_t statuscode);

	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
					    uint16_t peer_id,
					    uint8_t *mac_addr);

	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
				      uint8_t map_id);
	int (*txrx_get_total_per)(struct cdp_pdev *pdev_handle);

	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
				      uint8_t tos, uint8_t tid);
	void (*hmmc_tid_override_en)(struct cdp_pdev *pdev, bool val);
	void (*set_hmmc_tid_val)(struct cdp_pdev *pdev, uint8_t tid);

	QDF_STATUS (*txrx_stats_request)(struct cdp_vdev *vdev,
					 struct cdp_txrx_stats_req *req);

	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
				    enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);

	int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	void (*set_pn_check)(struct cdp_vdev *vdev,
			     struct cdp_peer *peer_handle,
			     enum cdp_sec_type sec_type,
			     uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
					       struct cdp_config_params *params);

	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
				   void *dp_txrx_hdl);

	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
				       void *dp_txrx_handle);

	void (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr, void *vdev_hdl);

	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
					  void *vdev_hdl);

	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t *value);

	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
					   uint32_t num_peers,
					   bool peer_map_unmap_v2);

	void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl,
					struct cdp_ctrl_objmgr_pdev *ctrl_pdev);

	/* Fast-path tx entry point (not a per-object callback) */
	ol_txrx_tx_fp tx_send;
	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev
	 * to deliver pkt to stack.
	 * @vdev: vdev handle
	 * @stack_fn: pointer to - function pointer to deliver RX pkt to stack
	 * @osif_vdev: pointer to - osif vdev to deliver RX packet to.
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
		(struct cdp_vdev *vdev,
		 ol_txrx_rx_fp *stack_fn,
		 ol_osif_vdev_handle *osif_vdev);
	int (*txrx_classify_update)
		(struct cdp_vdev *vdev, qdf_nbuf_t skb,
		 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);

	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
				    enum cdp_capabilities dp_caps);
};

/**
 * struct cdp_ctrl_ops - control-plane (A interface) ops: per-vdev/peer
 * configuration (safemode, encap/decap type, mesh mode, NAWDS, keys,
 * pdev/vdev parameters) and WDI event subscription.
 */
struct cdp_ctrl_ops {

	int
		(*txrx_mempools_attach)(void *ctrl_pdev);
	int
		(*txrx_set_filter_neighbour_peers)(
				struct cdp_pdev *pdev,
				uint32_t val);
	int
		(*txrx_update_filter_neighbour_peers)(
				struct cdp_vdev *vdev,
				uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * send and receive packets. It works like open AUTH mode, HW will
	 * create all packets as non-encrypt frames because no key installed.
	 * For rx fragmented frames, it bypasses all the rx defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */

	void
		(*txrx_set_safemode)(
				struct cdp_vdev *vdev,
				u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When set this flag, all the unencrypted frames
	 * received over a secure connection will be discarded
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
		(*txrx_set_drop_unenc)(
				struct cdp_vdev *vdev,
				u_int32_t val);


	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
		(*txrx_set_tx_encap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
		(*txrx_set_vdev_rx_decap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);

	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);

	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @authorize - either to authorize or unauthorize peer
	 *
	 * @return none
	 */
	void
		(*txrx_peer_authorize)(struct cdp_peer *peer,
				       u_int32_t authorize);

	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);

	/**
	 * @brief setting mesh rx filter
	 * @details
	 * based on the bits enabled in the filter, packets have to be
	 * dropped.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);

	void (*tx_flush_buffers)(struct cdp_vdev *vdev);

	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);

	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
				    enum cdp_vdev_param_type param,
				    uint32_t val);

	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 */
	void (*txrx_set_pdev_reo_dest)(
			struct cdp_pdev *pdev,
			enum cdp_host_reo_dest_ring reo_dest_ring_num);

	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
			struct cdp_pdev *pdev);

	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
				  uint32_t event);

	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
				    uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);

	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
					    uint8_t subtype, uint8_t tx_power);

	/**
	 * txrx_set_pdev_param() - callback to set pdev parameter
	 * @pdev: data path pdev handle
	 * @type: parameter type to set
	 * @val: parameter value
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
					  enum cdp_pdev_param_type type,
					  uint8_t val);
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);

#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
			enum cdp_nac_param_cmd cmd, char *bssid,
			char *client_macaddr, uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
						   char *macaddr,
						   uint8_t *rssi);
#endif
	void (*set_key)(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key);

	uint32_t (*txrx_get_vdev_param)(struct cdp_vdev *vdev,
					enum cdp_vdev_param_type param);
	int (*enable_peer_based_pktlog)(struct cdp_pdev
			*txrx_pdev_handle, char *macaddr, uint8_t enb_dsb);

};

/**
 * struct cdp_me_ops - mcast enhancement ops: mcast-clone descriptor
 * management and mcast-to-ucast conversion.
 */
struct cdp_me_ops {

	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);

	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
			struct cdp_pdev *pdev,
			u_int16_t buf_count);

	u_int16_t
		(*tx_get_mcast_buf_allocated_marked)
			(struct cdp_pdev *pdev);
	void
		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);

	void
		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);

	uint16_t
		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
				       qdf_nbuf_t wbuf, u_int8_t newmac[][6],
				       uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */

	int (*mcast_notify)(struct cdp_pdev *pdev,
			    u_int8_t vdev_id, qdf_nbuf_t msdu);

	uint16_t (*tx_me_find_ast_entry)(struct cdp_vdev *vdev,
					 uint8_t *da_mac_addr,
					 uint8_t *ra_mac_addr);
};

/**
 * struct cdp_mon_ops - monitor-mode ops: per-pdev rx filter set/get and
 * monitor mode reset.
 */
struct cdp_mon_ops {

	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);

	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	QDF_STATUS (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);

	/* HK advance monitor filter support */
	QDF_STATUS (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
};

/**
 * struct cdp_host_stats_ops - host statistics ops: fetch, print, clear
 * and publish per-soc/pdev/vdev/peer statistics.
 */
struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
				   struct ol_txrx_stats_req *req);

	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);

	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);

	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
				  void *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);

	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);

	/**
	 * @brief Get the desired stats from the message.
	 *
	 * @param pdev - the physical device object
	 * @param stats_base - stats buffer received from FW
	 * @param type - stats type.
	 * @return - pointer to requested stat identified by type
	 */
	uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
					  uint32_t *stats_base,
					  uint32_t msg_len, uint8_t type);
	void
		(*tx_print_tso_stats)(struct cdp_vdev *vdev);

	void
		(*tx_rst_tso_stats)(struct cdp_vdev *vdev);

	void
		(*tx_print_sg_stats)(struct cdp_vdev *vdev);

	void
		(*tx_rst_sg_stats)(struct cdp_vdev *vdev);

	void
		(*print_rx_cksum_stats)(struct cdp_vdev *vdev);

	void
		(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);

	A_STATUS
		(*txrx_host_me_stats)(struct cdp_vdev *vdev);

	void
		(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);

	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
					struct ol_txrx_stats_req *req);

	void
		(*print_lro_stats)(struct cdp_vdev *vdev);

	void
		(*reset_lro_stats)(struct cdp_vdev *vdev);

	void
		(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
				     uint32_t cap);
	void
		(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
				 uint32_t data_len);
	void
		(*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data,
					  uint16_t stats_id);
	struct cdp_peer_stats*
		(*txrx_get_peer_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_ald_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_stats)(struct cdp_peer *peer);
	int
		(*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf,
				       bool is_aggregate);
	int
		(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
						    void *data, uint32_t len,
						    uint32_t stats_id);
	int
		(*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle,
					    void *buffer);
	void
		(*txrx_update_vdev_stats)(struct cdp_vdev *vdev, void *buf,
					  uint16_t stats_id);
	int
		(*txrx_get_radio_stats)(struct cdp_pdev *pdev,
					void *buf);
	struct cdp_pdev_stats*
		(*txrx_get_pdev_stats)(struct cdp_pdev *pdev);
};

/**
 * struct cdp_wds_ops - WDS (wireless distribution system) ops: rx policy
 * and per-peer tx policy configuration.
 */
struct cdp_wds_ops {
	void
		(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
					  u_int32_t val);
	void
		(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
						  int wds_tx_ucast,
						  int wds_tx_mcast);
	int (*vdev_set_wds)(void *vdev, uint32_t val);
};

/* Raw (802.11 native wifi) mode ops */
struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);

	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
				  qdf_nbuf_t *pnbuf,
				  struct cdp_raw_ast *raw_ast);
};

#ifdef CONFIG_WIN
/* Peak-flow (WIN-only) pdev parameter update op */
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(void *,
					     enum _ol_ath_param_t,
					     uint32_t, void *);
};
#endif /* CONFIG_WIN */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};

/**
 * struct ol_if_ops - callbacks FROM the data path INTO the OL_IF/control
 * layer (reorder queue setup, WDS entry maintenance, peer map/unmap
 * events, error indications, etc.).
 */
struct ol_if_ops {
	void
		(*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
					    uint8_t *peer_macaddr,
					    uint8_t vdev_id,
					    bool hash_based, uint8_t ring_num);
	QDF_STATUS
		(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
					       uint8_t vdev_id,
					       uint8_t *peer_mac,
					       qdf_dma_addr_t hw_qdesc,
					       int tid,
					       uint16_t queue_num,
					       uint8_t ba_window_size_valid,
					       uint16_t ba_window_size);
	QDF_STATUS
		(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
						uint8_t vdev_id,
						uint8_t *peer_macaddr,
						uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t *peer_mac,
				 uint8_t *vdev_mac, enum wlan_op_mode opmode,
				 void *old_peer, void *new_peer);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(void *vdev_handle,
				  struct cdp_peer *peer_handle,
				  const uint8_t *dest_macaddr,
				  uint8_t *next_node_mac,
				  uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
				     uint8_t *dest_macaddr,
				     uint8_t *peer_macaddr,
				     uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
				   uint8_t *wds_macaddr);
	QDF_STATUS
		(*lro_hash_config)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
				   struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
				uint8_t type);
#ifdef CONFIG_WIN
	uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id,
			      uint16_t hw_peer_id, uint8_t vdev_id,
			      uint8_t *peer_mac_addr,
			      enum cdp_txrx_ast_entry_type peer_type,
			      uint32_t tx_ast_hashidx);
	int
		(*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);

	int (*get_dp_cfg_param)(void *ol_soc_handle,
				enum cdp_cfg_param_type param_num);

	void (*rx_mic_error)(void *ol_soc_handle,
			     uint16_t vdev_id, void *wh);
	bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer,
				   qdf_nbuf_t nbuf,
				   uint16_t hdr_space);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);

	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
				  u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
				      u_int8_t vdev_id,
				      enum cdp_nac_param_cmd cmd, char *bssid,
				      char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
					       u_int8_t vdev_id,
					       enum cdp_nac_param_cmd cmd,
					       char *bssid);
#endif
	int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr);

	/**
	 * send_delba() - Send delba to peer
	 * @pdev_handle: Dp pdev handle
	 * @ctrl_peer: Peer handle
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(void *pdev_handle, void *ctrl_peer,
			  uint8_t *peer_macaddr, uint8_t tid,
			  void *vdev_handle, uint8_t reason_code);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};

#ifndef CONFIG_WIN
/* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event:
 * @runtime_suspend:
 * @runtime_resume:
 * @register_pktdump_cb:
 * @unregister_pktdump_cb:
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
						   uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
			      struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
					   int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
						int level, int tput_thresh,
						int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
				 enum ol_tx_spec tx_spec,
				 qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
						 uint64_t *fwd_tx_packets,
						 uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
	int (*get_num_rx_contexts)(struct cdp_soc_t *soc);
	void (*register_pktdump_cb)(ol_txrx_pktdump_cb tx_cb,
				    ol_txrx_pktdump_cb rx_cb);
	void (*unregister_pktdump_cb)(void);
};

/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev,
			 uint32_t *queue_delay_microsec,
			 uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
			      uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
				uint16_t *out_packet_count,
				uint16_t *out_packet_loss_count,
				int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
					uint32_t interval);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
			    uint64_t **last_pn, uint32_t **rmf_pn_replays);
};

/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled:
 * @set_cfg_packet_log_enabled:
 * @cfg_attach:
 * @vdev_rx_set_intrabss_fwd:
 * @is_rx_fwd_disabled:
 * @tx_set_is_mgmt_over_wmi_enabled:
 * @is_high_latency:
 * @set_flow_control_parameters:
 * @set_flow_steering:
 * @set_ptp_rx_opt_enabled:
 * @set_new_htt_msg_format:
 * @set_peer_unmap_conf_support:
 * @get_peer_unmap_conf_support:
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
					uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
					   uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
					    void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_new_htt_msg_format)(uint8_t val);
	void (*set_peer_unmap_conf_support)(bool val);
	bool (*get_peer_unmap_conf_support)(void);
};

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
					    struct cdp_pdev *pdev,
					    uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
					struct cdp_pdev *pdev,
					uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
					tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);

	void (*dump_flow_pool_info)(void *);

	bool (*tx_desc_thresh_reached)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 * @ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
					tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(uint8_t vdev_id, uint8_t chan);
	int (*set_vdev_os_queue_status)(uint8_t vdev_id,
					enum netif_action_type action);
#else
	int (*register_tx_flow_control)(uint8_t vdev_id,
			ol_txrx_tx_flow_control_fp flowControl,
			void *osif_fc_ctx,
			ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
				unsigned int low_watermark,
				unsigned int high_watermark_offset);
int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth); 1090 void (*vdev_flush)(struct cdp_vdev *vdev); 1091 void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason); 1092 void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason); 1093 }; 1094 1095 #ifdef IPA_OFFLOAD 1096 /** 1097 * struct cdp_ipa_ops - mcl ipa data path ops 1098 * @ipa_get_resource: 1099 * @ipa_set_doorbell_paddr: 1100 * @ipa_set_active: 1101 * @ipa_op_response: 1102 * @ipa_register_op_cb: 1103 * @ipa_get_stat: 1104 * @ipa_tx_data_frame: 1105 */ 1106 struct cdp_ipa_ops { 1107 QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev); 1108 QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev); 1109 QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active, 1110 bool is_tx); 1111 QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg); 1112 QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev, 1113 void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt), 1114 void *usr_ctxt); 1115 QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev); 1116 qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb); 1117 void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev, 1118 uint32_t value); 1119 #ifdef FEATURE_METERING 1120 QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev, 1121 uint8_t reset_stats); 1122 QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev, 1123 uint64_t quota_bytes); 1124 #endif 1125 QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev); 1126 QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev); 1127 #ifdef CONFIG_IPA_WDI_UNIFIED_API 1128 QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb, 1129 void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, 1130 uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled, 1131 uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle, 1132 bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in); 1133 #else /* CONFIG_IPA_WDI_UNIFIED_API */ 1134 QDF_STATUS 
(*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb, 1135 void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, 1136 uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled, 1137 uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle); 1138 #endif /* CONFIG_IPA_WDI_UNIFIED_API */ 1139 QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle, 1140 uint32_t rx_pipe_handle); 1141 QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr, 1142 qdf_ipa_client_type_t prod_client, 1143 qdf_ipa_client_type_t cons_client, 1144 uint8_t session_id, bool is_ipv6_enabled); 1145 QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled); 1146 QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev); 1147 QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev); 1148 QDF_STATUS (*ipa_set_perf_level)(int client, 1149 uint32_t max_supported_bw_mbps); 1150 }; 1151 #endif 1152 1153 /** 1154 * struct cdp_bus_ops - mcl bus suspend/resume ops 1155 * @bus_suspend: 1156 * @bus_resume: 1157 */ 1158 struct cdp_bus_ops { 1159 QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev); 1160 QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev); 1161 }; 1162 1163 /** 1164 * struct cdp_ocb_ops - mcl ocb ops 1165 * @set_ocb_chan_info: 1166 * @get_ocb_chan_info: 1167 */ 1168 struct cdp_ocb_ops { 1169 void (*set_ocb_chan_info)(struct cdp_vdev *vdev, 1170 struct ol_txrx_ocb_set_chan ocb_set_chan); 1171 struct ol_txrx_ocb_chan_info * 1172 (*get_ocb_chan_info)(struct cdp_vdev *vdev); 1173 }; 1174 1175 /** 1176 * struct cdp_peer_ops - mcl peer related ops 1177 * @register_peer: 1178 * @clear_peer: 1179 * @cfg_attach: 1180 * @find_peer_by_addr: 1181 * @find_peer_by_addr_and_vdev: 1182 * @local_peer_id: 1183 * @peer_find_by_local_id: 1184 * @peer_state_update: 1185 * @get_vdevid: 1186 * @get_vdev_by_sta_id: 1187 * @register_ocb_peer: 1188 * @peer_get_peer_mac_addr: 1189 * @get_peer_state: 1190 * @get_vdev_for_peer: 1191 * @update_ibss_add_peer_num_of_vdev: 1192 * @remove_peers_for_vdev: 1193 * 
@remove_peers_for_vdev_no_lock: 1194 * @copy_mac_addr_raw: 1195 * @add_last_real_peer: 1196 * @is_vdev_restore_last_peer: 1197 * @update_last_real_peer: 1198 */ 1199 struct cdp_peer_ops { 1200 QDF_STATUS (*register_peer)(struct cdp_pdev *pdev, 1201 struct ol_txrx_desc_type *sta_desc); 1202 QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id); 1203 QDF_STATUS (*change_peer_state)(uint8_t sta_id, 1204 enum ol_txrx_peer_state sta_state, 1205 bool roam_synch_in_progress); 1206 void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev, 1207 uint8_t *peer_addr, uint8_t *peer_id, 1208 enum peer_debug_id_type debug_id); 1209 void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id); 1210 void * (*find_peer_by_addr)(struct cdp_pdev *pdev, 1211 uint8_t *peer_addr, uint8_t *peer_id); 1212 void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev, 1213 struct cdp_vdev *vdev, 1214 uint8_t *peer_addr, uint8_t *peer_id); 1215 uint16_t (*local_peer_id)(void *peer); 1216 void * (*peer_find_by_local_id)(struct cdp_pdev *pdev, 1217 uint8_t local_peer_id); 1218 QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev, 1219 uint8_t *peer_addr, 1220 enum ol_txrx_peer_state state); 1221 QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id); 1222 struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev, 1223 uint8_t sta_id); 1224 QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id); 1225 uint8_t * (*peer_get_peer_mac_addr)(void *peer); 1226 int (*get_peer_state)(void *peer); 1227 struct cdp_vdev * (*get_vdev_for_peer)(void *peer); 1228 int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev, 1229 int16_t peer_num_delta); 1230 void (*remove_peers_for_vdev)(struct cdp_vdev *vdev, 1231 ol_txrx_vdev_peer_remove_cb callback, 1232 void *callback_context, bool remove_last_peer); 1233 void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev, 1234 ol_txrx_vdev_peer_remove_cb callback, 1235 void *callback_context); 1236 void 
(*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr); 1237 void (*add_last_real_peer)(struct cdp_pdev *pdev, 1238 struct cdp_vdev *vdev, uint8_t *peer_id); 1239 bool (*is_vdev_restore_last_peer)(void *peer); 1240 void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer, 1241 uint8_t *peer_id, bool restore_last_peer); 1242 void (*peer_detach_force_delete)(void *peer); 1243 }; 1244 1245 /** 1246 * struct cdp_ocb_ops - mcl ocb ops 1247 * @throttle_init_period: 1248 * @throttle_set_level: 1249 */ 1250 struct cdp_throttle_ops { 1251 void (*throttle_init_period)(struct cdp_pdev *pdev, int period, 1252 uint8_t *dutycycle_level); 1253 void (*throttle_set_level)(struct cdp_pdev *pdev, int level); 1254 }; 1255 1256 /** 1257 * struct cdp_ocb_ops - mcl ocb ops 1258 * @clear_stats: 1259 * @stats: 1260 */ 1261 struct cdp_mob_stats_ops { 1262 void (*clear_stats)(uint16_t bitmap); 1263 int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len); 1264 }; 1265 #endif /* CONFIG_WIN */ 1266 1267 #ifdef RECEIVE_OFFLOAD 1268 /** 1269 * struct cdp_rx_offld_ops - mcl host receive offload ops 1270 * @register_rx_offld_flush_cb: 1271 * @deregister_rx_offld_flush_cb: 1272 */ 1273 struct cdp_rx_offld_ops { 1274 void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *)); 1275 void (*deregister_rx_offld_flush_cb)(void); 1276 }; 1277 #endif 1278 1279 struct cdp_ops { 1280 struct cdp_cmn_ops *cmn_drv_ops; 1281 struct cdp_ctrl_ops *ctrl_ops; 1282 struct cdp_me_ops *me_ops; 1283 struct cdp_mon_ops *mon_ops; 1284 struct cdp_host_stats_ops *host_stats_ops; 1285 struct cdp_wds_ops *wds_ops; 1286 struct cdp_raw_ops *raw_ops; 1287 struct cdp_pflow_ops *pflow_ops; 1288 #ifndef CONFIG_WIN 1289 struct cdp_misc_ops *misc_ops; 1290 struct cdp_cfg_ops *cfg_ops; 1291 struct cdp_flowctl_ops *flowctl_ops; 1292 struct cdp_lflowctl_ops *l_flowctl_ops; 1293 #ifdef IPA_OFFLOAD 1294 struct cdp_ipa_ops *ipa_ops; 1295 #endif 1296 #ifdef RECEIVE_OFFLOAD 1297 struct cdp_rx_offld_ops 
*rx_offld_ops; 1298 #endif 1299 struct cdp_bus_ops *bus_ops; 1300 struct cdp_ocb_ops *ocb_ops; 1301 struct cdp_peer_ops *peer_ops; 1302 struct cdp_throttle_ops *throttle_ops; 1303 struct cdp_mob_stats_ops *mob_stats_ops; 1304 struct cdp_tx_delay_ops *delay_ops; 1305 struct cdp_pmf_ops *pmf_ops; 1306 #endif /* CONFIG_WIN */ 1307 }; 1308 #endif 1309