1 /* 2 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. 3 * 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 /** 20 * @file cdp_txrx_ops.h 21 * @brief Define the host data path converged API functions 22 * called by the host control SW and the OS interface module 23 */ 24 #ifndef _CDP_TXRX_CMN_OPS_H_ 25 #define _CDP_TXRX_CMN_OPS_H_ 26 27 #include <cdp_txrx_cmn_struct.h> 28 #include <cdp_txrx_stats_struct.h> 29 #include "cdp_txrx_handle.h" 30 #include <cdp_txrx_mon_struct.h> 31 #include "wlan_objmgr_psoc_obj.h" 32 33 #ifdef IPA_OFFLOAD 34 #ifdef CONFIG_IPA_WDI_UNIFIED_API 35 #include <qdf_ipa_wdi3.h> 36 #else 37 #include <qdf_ipa.h> 38 #endif 39 #endif 40 41 /** 42 * bitmap values to indicate special handling of peer_delete 43 */ 44 #define CDP_PEER_DELETE_NO_SPECIAL 0 45 #define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1 46 47 /* same as ieee80211_nac_param */ 48 enum cdp_nac_param_cmd { 49 /* IEEE80211_NAC_PARAM_ADD */ 50 CDP_NAC_PARAM_ADD = 1, 51 /* IEEE80211_NAC_PARAM_DEL */ 52 CDP_NAC_PARAM_DEL, 53 /* IEEE80211_NAC_PARAM_LIST */ 54 CDP_NAC_PARAM_LIST, 55 }; 56 /****************************************************************************** 57 * 58 * Control Interface (A Interface) 59 * 60 
*****************************************************************************/ 61 62 struct cdp_cmn_ops { 63 64 QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc); 65 66 int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev); 67 68 struct cdp_vdev *(*txrx_vdev_attach) 69 (struct cdp_pdev *pdev, uint8_t *vdev_mac_addr, 70 uint8_t vdev_id, enum wlan_op_mode op_mode); 71 72 void (*txrx_vdev_detach) 73 (struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback, 74 void *cb_context); 75 76 struct cdp_pdev *(*txrx_pdev_attach) 77 (ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 78 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id); 79 80 int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev); 81 82 void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force); 83 84 void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force); 85 86 /** 87 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory 88 * @pdev: Dp pdev handle 89 * @force: Force deinit or not 90 * 91 * Return: None 92 */ 93 void (*txrx_pdev_deinit)(struct cdp_pdev *pdev, int force); 94 95 void *(*txrx_peer_create) 96 (struct cdp_vdev *vdev, uint8_t *peer_mac_addr, 97 struct cdp_ctrl_objmgr_peer *ctrl_peer); 98 99 void (*txrx_peer_setup) 100 (struct cdp_vdev *vdev_hdl, void *peer_hdl); 101 102 void (*txrx_cp_peer_del_response) 103 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev_hdl, 104 uint8_t *peer_mac_addr); 105 106 void (*txrx_peer_teardown) 107 (struct cdp_vdev *vdev_hdl, void *peer_hdl); 108 109 int (*txrx_peer_add_ast) 110 (ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl, 111 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, 112 uint32_t flags); 113 114 int (*txrx_peer_update_ast) 115 (ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl, 116 uint8_t *mac_addr, uint32_t flags); 117 118 bool (*txrx_peer_get_ast_info_by_soc) 119 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 120 struct cdp_ast_entry_info *ast_entry_info); 121 122 bool (*txrx_peer_get_ast_info_by_pdev) 123 
(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 124 uint8_t pdev_id, 125 struct cdp_ast_entry_info *ast_entry_info); 126 127 QDF_STATUS (*txrx_peer_ast_delete_by_soc) 128 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 129 txrx_ast_free_cb callback, 130 void *cookie); 131 132 QDF_STATUS (*txrx_peer_ast_delete_by_pdev) 133 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, 134 uint8_t pdev_id, 135 txrx_ast_free_cb callback, 136 void *cookie); 137 138 void (*txrx_peer_delete)(void *peer, uint32_t bitmap); 139 140 void (*txrx_vdev_flush_peers)(struct cdp_vdev *vdev, bool unmap_only); 141 142 QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_vdev *vdev, 143 uint8_t smart_monitor); 144 void (*txrx_peer_delete_sync)(void *peer, 145 QDF_STATUS(*delete_cb)( 146 uint8_t vdev_id, 147 uint32_t peerid_cnt, 148 uint16_t *peerid_list), 149 uint32_t bitmap); 150 151 void (*txrx_peer_unmap_sync_cb_set)(struct cdp_pdev *pdev, 152 QDF_STATUS(*unmap_resp_cb)( 153 uint8_t vdev_id, 154 uint32_t peerid_cnt, 155 uint16_t *peerid_list)); 156 157 uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev); 158 bool (*txrx_get_vow_config_frm_pdev)(struct cdp_pdev *pdev); 159 160 void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev, 161 int16_t chan_noise_floor); 162 163 void (*txrx_set_nac)(struct cdp_peer *peer); 164 165 /** 166 * txrx_set_pdev_tx_capture() - callback to set pdev tx_capture 167 * @soc: opaque soc handle 168 * @pdev: data path pdev handle 169 * @val: value of pdev_tx_capture 170 * 171 * Return: status: 0 - Success, non-zero: Failure 172 */ 173 QDF_STATUS (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val); 174 175 void (*txrx_get_peer_mac_from_peer_id) 176 (struct cdp_pdev *pdev_handle, 177 uint32_t peer_id, uint8_t *peer_mac); 178 179 void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev); 180 181 void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev); 182 183 void (*txrx_ath_getstats)(void *pdev, 184 struct cdp_dev_stats *stats, uint8_t type); 185 186 void 
(*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status, 187 u_int8_t *user_position); 188 189 uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev); 190 191 void (*txrx_if_mgmt_drain)(void *ni, int force); 192 193 void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz); 194 195 void (*txrx_set_privacy_filters) 196 (struct cdp_vdev *vdev, void *filter, uint32_t num); 197 198 uint32_t (*txrx_get_cfg)(void *soc, enum cdp_dp_cfg cfg); 199 200 /******************************************************************** 201 * Data Interface (B Interface) 202 ********************************************************************/ 203 204 void (*txrx_vdev_register)(struct cdp_vdev *vdev, 205 void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev, 206 struct ol_txrx_ops *txrx_ops); 207 208 int (*txrx_mgmt_send)(struct cdp_vdev *vdev, 209 qdf_nbuf_t tx_mgmt_frm, uint8_t type); 210 211 int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev, 212 qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps, 213 uint16_t chanfreq); 214 215 /** 216 * ol_txrx_mgmt_tx_cb - tx management delivery notification 217 * callback function 218 */ 219 220 void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type, 221 ol_txrx_mgmt_tx_cb download_cb, 222 ol_txrx_mgmt_tx_cb ota_ack_cb, 223 void *ctxt); 224 225 int (*txrx_get_tx_pending)(struct cdp_pdev *pdev); 226 227 /** 228 * ol_txrx_data_tx_cb - Function registered with the data path 229 * that is called when tx frames marked as "no free" are 230 * done being transmitted 231 */ 232 233 void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev, 234 ol_txrx_data_tx_cb callback, void *ctxt); 235 236 /******************************************************************* 237 * Statistics and Debugging Interface (C Interface) 238 ********************************************************************/ 239 240 int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu, 241 int max_subfrms_amsdu); 242 243 A_STATUS 
(*txrx_fw_stats_get)(struct cdp_vdev *vdev, 244 struct ol_txrx_stats_req *req, 245 bool per_vdev, bool response_expected); 246 247 int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs); 248 249 void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev, 250 uint8_t cfg_stats_type, uint32_t cfg_val); 251 252 void (*txrx_print_level_set)(unsigned level); 253 254 /** 255 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev 256 * @vdev: vdev handle 257 * 258 * Return: vdev mac address 259 */ 260 uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev); 261 262 /** 263 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of 264 * vdev 265 * @vdev: vdev handle 266 * 267 * Return: Handle to struct qdf_mac_addr 268 */ 269 struct qdf_mac_addr * 270 (*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev); 271 272 /** 273 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev 274 * @vdev: vdev handle 275 * 276 * Return: Handle to pdev 277 */ 278 struct cdp_pdev *(*txrx_get_pdev_from_vdev) 279 (struct cdp_vdev *vdev); 280 281 /** 282 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev 283 * @vdev: vdev handle 284 * 285 * Return: Handle to control pdev 286 */ 287 struct cdp_cfg * 288 (*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev); 289 290 /** 291 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev 292 * @pdev: pdev handle 293 * 294 * Return: Handle to vdev 295 */ 296 struct cdp_vdev * 297 (*txrx_get_mon_vdev_from_pdev)(struct cdp_pdev *pdev); 298 299 struct cdp_vdev * 300 (*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev, 301 uint8_t vdev_id); 302 303 void (*txrx_soc_detach)(void *soc); 304 305 /** 306 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory 307 * @soc: Opaque Dp handle 308 * 309 * Return: None 310 */ 311 void (*txrx_soc_deinit)(void *soc); 312 313 /** 314 * txrx_soc_init() - Initialize dp soc and dp ring memory 315 * @soc: Opaque Dp handle 316 * @htchdl: Opaque htc handle 317 * @hifhdl: Opaque hif 
handle 318 * 319 * Return: None 320 */ 321 void *(*txrx_soc_init)(void *soc, void *ctrl_psoc, void *hif_handle, 322 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, 323 struct ol_if_ops *ol_ops, uint16_t device_id); 324 325 /** 326 * txrx_tso_soc_attach() - TSO attach handler triggered during 327 * dynamic tso activation 328 * @soc: Opaque Dp handle 329 * 330 * Return: QDF status 331 */ 332 QDF_STATUS (*txrx_tso_soc_attach)(void *soc); 333 334 /** 335 * txrx_tso_soc_detach() - TSO detach handler triggered during 336 * dynamic tso de-activation 337 * @soc: Opaque Dp handle 338 * 339 * Return: QDF status 340 */ 341 QDF_STATUS (*txrx_tso_soc_detach)(void *soc); 342 int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid, 343 int status); 344 345 int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken, 346 uint16_t tid, uint16_t batimeout, 347 uint16_t buffersize, 348 uint16_t startseqnum); 349 350 void (*addba_responsesetup)(void *peer_handle, uint8_t tid, 351 uint8_t *dialogtoken, uint16_t *statuscode, 352 uint16_t *buffersize, uint16_t *batimeout); 353 354 int (*delba_process)(void *peer_handle, 355 int tid, uint16_t reasoncode); 356 357 /** 358 * delba_tx_completion() - Indicate delba tx status 359 * @peer_handle: Peer handle 360 * @tid: Tid number 361 * @status: Tx completion status 362 * 363 * Return: 0 on Success, 1 on failure 364 */ 365 int (*delba_tx_completion)(void *peer_handle, 366 uint8_t tid, int status); 367 368 void (*set_addba_response)(void *peer_handle, 369 uint8_t tid, uint16_t statuscode); 370 371 uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle, 372 uint16_t peer_id, uint8_t *mac_addr); 373 374 void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle, 375 uint8_t map_id); 376 int (*txrx_get_total_per)(struct cdp_pdev *pdev_handle); 377 378 void (*flush_cache_rx_queue)(void); 379 void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id, 380 uint8_t tos, uint8_t tid); 381 void 
(*hmmc_tid_override_en)(struct cdp_pdev *pdev, bool val); 382 void (*set_hmmc_tid_val)(struct cdp_pdev *pdev, uint8_t tid); 383 384 QDF_STATUS (*txrx_stats_request)(struct cdp_vdev *vdev, 385 struct cdp_txrx_stats_req *req); 386 387 QDF_STATUS (*display_stats)(void *psoc, uint16_t value, 388 enum qdf_stats_verbosity_level level); 389 void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config); 390 391 int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc); 392 QDF_STATUS (*txrx_intr_attach)(void *soc); 393 void (*txrx_intr_detach)(void *soc); 394 void (*set_pn_check)(struct cdp_vdev *vdev, 395 struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, 396 uint32_t *rx_pn); 397 QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc, 398 struct cdp_config_params *params); 399 400 void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl); 401 void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl, 402 void *dp_txrx_hdl); 403 404 void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle); 405 void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle, 406 void *dp_txrx_handle); 407 408 void (*map_pdev_to_lmac)(struct cdp_pdev *pdev_hdl, 409 uint32_t lmac_id); 410 411 void (*txrx_peer_reset_ast) 412 (ol_txrx_soc_handle soc, uint8_t *ast_macaddr, 413 uint8_t *peer_macaddr, void *vdev_hdl); 414 415 void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc, 416 void *vdev_hdl); 417 418 void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc); 419 void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle, 420 uint8_t ac, uint32_t value); 421 void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle, 422 uint8_t ac, uint32_t *value); 423 424 QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc, 425 uint32_t num_peers, 426 uint32_t max_ast_index, 427 bool peer_map_unmap_v2); 428 429 void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl, 430 struct cdp_ctrl_objmgr_pdev *ctrl_pdev); 431 432 ol_txrx_tx_fp tx_send; 433 /** 434 * 
txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev 435 * to deliver pkt to stack. 436 * @vdev: vdev handle 437 * @stack_fn: pointer to - function pointer to deliver RX pkt to stack 438 * @osif_vdev: pointer to - osif vdev to deliver RX packet to. 439 */ 440 void (*txrx_get_os_rx_handles_from_vdev) 441 (struct cdp_vdev *vdev, 442 ol_txrx_rx_fp *stack_fn, 443 ol_osif_vdev_handle *osif_vdev); 444 int (*txrx_classify_update) 445 (struct cdp_vdev *vdev, qdf_nbuf_t skb, 446 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class); 447 448 bool (*get_dp_capabilities)(struct cdp_soc_t *soc, 449 enum cdp_capabilities dp_caps); 450 void (*set_rate_stats_ctx)(struct cdp_soc_t *soc, void *ctx); 451 void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc); 452 void (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc, 453 struct cdp_pdev *pdev, 454 void *buf); 455 void (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc, 456 struct cdp_pdev *pdev); 457 QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_pdev *pdev, 458 uint8_t pcp, uint8_t tid); 459 QDF_STATUS (*set_pdev_tidmap_prty)(struct cdp_pdev *pdev, uint8_t prty); 460 QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_vdev *vdev, 461 uint8_t pcp, uint8_t tid); 462 QDF_STATUS (*set_vdev_tidmap_prty)(struct cdp_vdev *vdev, uint8_t prty); 463 QDF_STATUS (*set_vdev_tidmap_tbl_id)(struct cdp_vdev *vdev, 464 uint8_t mapid); 465 }; 466 467 struct cdp_ctrl_ops { 468 469 int 470 (*txrx_mempools_attach)(void *ctrl_pdev); 471 int 472 (*txrx_set_filter_neighbour_peers)( 473 struct cdp_pdev *pdev, 474 uint32_t val); 475 int 476 (*txrx_update_filter_neighbour_peers)( 477 struct cdp_vdev *vdev, 478 uint32_t cmd, uint8_t *macaddr); 479 /** 480 * @brief set the safemode of the device 481 * @details 482 * This flag is used to bypass the encrypt and decrypt processes when 483 * send and receive packets. It works like open AUTH mode, HW will 484 * ctreate all packets as non-encrypt frames because no key installed. 
485 * For rx fragmented frames,it bypasses all the rx defragmentaion. 486 * 487 * @param vdev - the data virtual device object 488 * @param val - the safemode state 489 * @return - void 490 */ 491 492 void 493 (*txrx_set_safemode)( 494 struct cdp_vdev *vdev, 495 u_int32_t val); 496 /** 497 * @brief configure the drop unencrypted frame flag 498 * @details 499 * Rx related. When set this flag, all the unencrypted frames 500 * received over a secure connection will be discarded 501 * 502 * @param vdev - the data virtual device object 503 * @param val - flag 504 * @return - void 505 */ 506 void 507 (*txrx_set_drop_unenc)( 508 struct cdp_vdev *vdev, 509 u_int32_t val); 510 511 512 /** 513 * @brief set the Tx encapsulation type of the VDEV 514 * @details 515 * This will be used to populate the HTT desc packet type field 516 * during Tx 517 * @param vdev - the data virtual device object 518 * @param val - the Tx encap type 519 * @return - void 520 */ 521 void 522 (*txrx_set_tx_encap_type)( 523 struct cdp_vdev *vdev, 524 enum htt_cmn_pkt_type val); 525 /** 526 * @brief set the Rx decapsulation type of the VDEV 527 * @details 528 * This will be used to configure into firmware and hardware 529 * which format to decap all Rx packets into, for all peers under 530 * the VDEV. 
531 * @param vdev - the data virtual device object 532 * @param val - the Rx decap mode 533 * @return - void 534 */ 535 void 536 (*txrx_set_vdev_rx_decap_type)( 537 struct cdp_vdev *vdev, 538 enum htt_cmn_pkt_type val); 539 540 /** 541 * @brief get the Rx decapsulation type of the VDEV 542 * 543 * @param vdev - the data virtual device object 544 * @return - the Rx decap type 545 */ 546 enum htt_cmn_pkt_type 547 (*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev); 548 549 /* Is this similar to ol_txrx_peer_state_update() in MCL */ 550 /** 551 * @brief Update the authorize peer object at association time 552 * @details 553 * For the host-based implementation of rate-control, it 554 * updates the peer/node-related parameters within rate-control 555 * context of the peer at association. 556 * 557 * @param peer - pointer to the node's object 558 * @authorize - either to authorize or unauthorize peer 559 * 560 * @return none 561 */ 562 void 563 (*txrx_peer_authorize)(struct cdp_peer *peer, 564 u_int32_t authorize); 565 566 /* Should be ol_txrx_ctrl_api.h */ 567 void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val); 568 569 /** 570 * @brief setting mesh rx filter 571 * @details 572 * based on the bits enabled in the filter packets has to be dropped. 573 * 574 * @param vdev - the data virtual device object 575 * @param val - value to set 576 */ 577 void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val); 578 579 void (*tx_flush_buffers)(struct cdp_vdev *vdev); 580 581 int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev); 582 583 void (*txrx_set_vdev_param)(struct cdp_vdev *vdev, 584 enum cdp_vdev_param_type param, uint32_t val); 585 586 void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value); 587 /** 588 * @brief Set the reo dest ring num of the radio 589 * @details 590 * Set the reo destination ring no on which we will receive 591 * pkts for this radio. 
592 * 593 * @param pdev - the data physical device object 594 * @param reo_dest_ring_num - value ranges between 1 - 4 595 */ 596 void (*txrx_set_pdev_reo_dest)( 597 struct cdp_pdev *pdev, 598 enum cdp_host_reo_dest_ring reo_dest_ring_num); 599 600 /** 601 * @brief Get the reo dest ring num of the radio 602 * @details 603 * Get the reo destination ring no on which we will receive 604 * pkts for this radio. 605 * 606 * @param pdev - the data physical device object 607 * @return the reo destination ring number 608 */ 609 enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)( 610 struct cdp_pdev *pdev); 611 612 int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub, 613 uint32_t event); 614 615 int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub, 616 uint32_t event); 617 int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx); 618 619 void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev, 620 uint8_t subtype, uint8_t tx_power); 621 622 /** 623 * txrx_set_pdev_param() - callback to set pdev parameter 624 * @soc: opaque soc handle 625 * @pdev: data path pdev handle 626 * @val: value of pdev_tx_capture 627 * 628 * Return: status: 0 - Success, non-zero: Failure 629 */ 630 QDF_STATUS (*txrx_set_pdev_param)(struct cdp_pdev *pdev, 631 enum cdp_pdev_param_type type, 632 uint8_t val); 633 void * (*txrx_get_pldev)(struct cdp_pdev *pdev); 634 635 #ifdef ATH_SUPPORT_NAC_RSSI 636 QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev, 637 enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr, 638 uint8_t chan_num); 639 QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev, 640 char *macaddr, 641 uint8_t *rssi); 642 #endif 643 void (*set_key)(struct cdp_peer *peer_handle, 644 bool is_unicast, uint32_t *key); 645 646 uint32_t (*txrx_get_vdev_param)(struct cdp_vdev *vdev, 647 enum cdp_vdev_param_type param); 648 int (*enable_peer_based_pktlog)(struct cdp_pdev 649 *txrx_pdev_handle, char *macaddr, uint8_t 
enb_dsb); 650 651 void (*calculate_delay_stats)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf); 652 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG 653 QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)( 654 struct cdp_pdev *txrx_pdev_handle, 655 uint32_t protocol_mask, uint16_t protocol_type, 656 uint16_t tag); 657 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS 658 void (*txrx_dump_pdev_rx_protocol_tag_stats)( 659 struct cdp_pdev *txrx_pdev_handle, 660 uint16_t protocol_type); 661 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */ 662 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ 663 }; 664 665 struct cdp_me_ops { 666 667 u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone) 668 (struct cdp_pdev *pdev, u_int16_t buf_count); 669 670 u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)( 671 struct cdp_pdev *pdev, 672 u_int16_t buf_count); 673 674 u_int16_t 675 (*tx_get_mcast_buf_allocated_marked) 676 (struct cdp_pdev *pdev); 677 void 678 (*tx_me_alloc_descriptor)(struct cdp_pdev *pdev); 679 680 void 681 (*tx_me_free_descriptor)(struct cdp_pdev *pdev); 682 683 uint16_t 684 (*tx_me_convert_ucast)(struct cdp_vdev *vdev, 685 qdf_nbuf_t wbuf, u_int8_t newmac[][6], 686 uint8_t newmaccnt); 687 /* Should be a function pointer in ol_txrx_osif_ops{} */ 688 /** 689 * @brief notify mcast frame indication from FW. 690 * @details 691 * This notification will be used to convert 692 * multicast frame to unicast. 
693 * 694 * @param pdev - handle to the ctrl SW's physical device object 695 * @param vdev_id - ID of the virtual device received the special data 696 * @param msdu - the multicast msdu returned by FW for host inspect 697 */ 698 699 int (*mcast_notify)(struct cdp_pdev *pdev, 700 u_int8_t vdev_id, qdf_nbuf_t msdu); 701 }; 702 703 struct cdp_mon_ops { 704 705 void (*txrx_monitor_set_filter_ucast_data) 706 (struct cdp_pdev *, u_int8_t val); 707 void (*txrx_monitor_set_filter_mcast_data) 708 (struct cdp_pdev *, u_int8_t val); 709 void (*txrx_monitor_set_filter_non_data) 710 (struct cdp_pdev *, u_int8_t val); 711 712 bool (*txrx_monitor_get_filter_ucast_data) 713 (struct cdp_vdev *vdev_txrx_handle); 714 bool (*txrx_monitor_get_filter_mcast_data) 715 (struct cdp_vdev *vdev_txrx_handle); 716 bool (*txrx_monitor_get_filter_non_data) 717 (struct cdp_vdev *vdev_txrx_handle); 718 QDF_STATUS (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev); 719 720 /* HK advance monitor filter support */ 721 QDF_STATUS (*txrx_set_advance_monitor_filter) 722 (struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val); 723 }; 724 725 struct cdp_host_stats_ops { 726 int (*txrx_host_stats_get)(struct cdp_vdev *vdev, 727 struct ol_txrx_stats_req *req); 728 729 void (*txrx_host_stats_clr)(struct cdp_vdev *vdev); 730 731 void (*txrx_host_ce_stats)(struct cdp_vdev *vdev); 732 733 int (*txrx_stats_publish)(struct cdp_pdev *pdev, 734 struct cdp_stats_extd *buf); 735 /** 736 * @brief Enable enhanced stats functionality. 737 * 738 * @param pdev - the physical device object 739 * @return - void 740 */ 741 void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev); 742 743 /** 744 * @brief Disable enhanced stats functionality. 745 * 746 * @param pdev - the physical device object 747 * @return - void 748 */ 749 void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev); 750 751 /** 752 * @brief Get the desired stats from the message. 
753 * 754 * @param pdev - the physical device object 755 * @param stats_base - stats buffer received from FW 756 * @param type - stats type. 757 * @return - pointer to requested stat identified by type 758 */ 759 uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev, 760 uint32_t *stats_base, uint32_t msg_len, uint8_t type); 761 void 762 (*tx_print_tso_stats)(struct cdp_vdev *vdev); 763 764 void 765 (*tx_rst_tso_stats)(struct cdp_vdev *vdev); 766 767 void 768 (*tx_print_sg_stats)(struct cdp_vdev *vdev); 769 770 void 771 (*tx_rst_sg_stats)(struct cdp_vdev *vdev); 772 773 void 774 (*print_rx_cksum_stats)(struct cdp_vdev *vdev); 775 776 void 777 (*rst_rx_cksum_stats)(struct cdp_vdev *vdev); 778 779 A_STATUS 780 (*txrx_host_me_stats)(struct cdp_vdev *vdev); 781 782 void 783 (*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr); 784 785 int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev, 786 struct ol_txrx_stats_req *req); 787 788 void 789 (*print_lro_stats)(struct cdp_vdev *vdev); 790 791 void 792 (*reset_lro_stats)(struct cdp_vdev *vdev); 793 794 void 795 (*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr, 796 uint32_t cap, uint32_t copy_stats); 797 void 798 (*get_htt_stats)(struct cdp_pdev *pdev, void *data, 799 uint32_t data_len); 800 void 801 (*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data, 802 uint16_t stats_id); 803 struct cdp_peer_stats* 804 (*txrx_get_peer_stats)(struct cdp_peer *peer); 805 void 806 (*txrx_reset_peer_ald_stats)(struct cdp_peer *peer); 807 void 808 (*txrx_reset_peer_stats)(struct cdp_peer *peer); 809 int 810 (*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf, 811 bool is_aggregate); 812 int 813 (*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc, 814 void *data, uint32_t len, 815 uint32_t stats_id); 816 int 817 (*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle, 818 void *buffer); 819 void 820 (*txrx_update_vdev_stats)(struct cdp_vdev *vdev, void *buf, 821 uint16_t stats_id); 822 int 823 
(*txrx_get_radio_stats)(struct cdp_pdev *pdev, 824 void *buf); 825 struct cdp_pdev_stats* 826 (*txrx_get_pdev_stats)(struct cdp_pdev *pdev); 827 int 828 (*txrx_get_ratekbps)(int preamb, int mcs, 829 int htflag, int gintval); 830 void 831 (*configure_rate_stats)(struct cdp_soc_t *soc, 832 uint8_t val); 833 }; 834 835 struct cdp_wds_ops { 836 void 837 (*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev, 838 u_int32_t val); 839 void 840 (*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer, 841 int wds_tx_ucast, int wds_tx_mcast); 842 int (*vdev_set_wds)(void *vdev, uint32_t val); 843 }; 844 845 struct cdp_raw_ops { 846 int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev); 847 848 void (*rsim_get_astentry)(struct cdp_vdev *vdev, 849 qdf_nbuf_t *pnbuf, 850 struct cdp_raw_ast *raw_ast); 851 }; 852 853 #ifdef PEER_FLOW_CONTROL 854 struct cdp_pflow_ops { 855 uint32_t(*pflow_update_pdev_params)(void *, 856 enum _ol_ath_param_t, uint32_t, void *); 857 }; 858 #endif /* PEER_FLOW_CONTROL */ 859 860 #define LRO_IPV4_SEED_ARR_SZ 5 861 #define LRO_IPV6_SEED_ARR_SZ 11 862 863 /** 864 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters 865 * @lro_enable: indicates whether rx_offld is enabled 866 * @tcp_flag: If the TCP flags from the packet do not match 867 * the values in this field after masking with TCP flags mask 868 * below, packet is not rx_offld eligible 869 * @tcp_flag_mask: field for comparing the TCP values provided 870 * above with the TCP flags field in the received packet 871 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id 872 * 5-tuple toeplitz hash for ipv4 packets 873 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id 874 * 5-tuple toeplitz hash for ipv6 packets 875 */ 876 struct cdp_lro_hash_config { 877 uint32_t lro_enable; 878 uint32_t tcp_flag:9, 879 tcp_flag_mask:9; 880 uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ]; 881 uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ]; 882 }; 883 884 struct ol_if_ops { 885 
void 886 (*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 887 uint8_t *peer_macaddr, uint8_t vdev_id, 888 bool hash_based, uint8_t ring_num); 889 QDF_STATUS 890 (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 891 uint8_t vdev_id, uint8_t *peer_mac, 892 qdf_dma_addr_t hw_qdesc, int tid, 893 uint16_t queue_num, 894 uint8_t ba_window_size_valid, 895 uint16_t ba_window_size); 896 QDF_STATUS 897 (*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 898 uint8_t vdev_id, uint8_t *peer_macaddr, 899 uint32_t tid_mask); 900 int (*peer_unref_delete)(void *scn_handle, uint8_t *peer_mac, 901 uint8_t *vdev_mac, enum wlan_op_mode opmode, 902 void *old_peer, void *new_peer); 903 bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc); 904 int (*peer_add_wds_entry)(void *vdev_handle, 905 struct cdp_peer *peer_handle, 906 const uint8_t *dest_macaddr, 907 uint8_t *next_node_mac, 908 uint32_t flags); 909 int (*peer_update_wds_entry)(void *ol_soc_handle, 910 uint8_t *dest_macaddr, uint8_t *peer_macaddr, 911 uint32_t flags); 912 void (*peer_del_wds_entry)(void *ol_soc_handle, 913 uint8_t *wds_macaddr, 914 uint8_t type); 915 QDF_STATUS 916 (*lro_hash_config)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev, 917 struct cdp_lro_hash_config *rx_offld_hash); 918 void (*update_dp_stats)(void *soc, void *stats, uint16_t id, 919 uint8_t type); 920 #ifdef CONFIG_MCL 921 uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh); 922 #else 923 uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg); 924 #endif 925 int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id, uint16_t hw_peer_id, 926 uint8_t vdev_id, uint8_t *peer_mac_addr, 927 enum cdp_txrx_ast_entry_type peer_type, 928 uint32_t tx_ast_hashidx); 929 int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id, 930 uint8_t vdev_id); 931 932 int (*get_dp_cfg_param)(void *ol_soc_handle, enum cdp_cfg_param_type param_num); 933 934 void (*rx_mic_error)(void *ol_soc_handle, 935 uint16_t 
vdev_id, void *wh); 936 bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer, 937 qdf_nbuf_t nbuf, 938 uint16_t hdr_space); 939 uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id); 940 941 void (*record_act_change)(struct wlan_objmgr_pdev *pdev, 942 u_int8_t *dstmac, bool active); 943 #ifdef ATH_SUPPORT_NAC_RSSI 944 int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev, 945 u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid, 946 char *client_macaddr, uint8_t chan_num); 947 int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev, 948 u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid); 949 #endif 950 int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr); 951 952 /** 953 * send_delba() - Send delba to peer 954 * @pdev_handle: Dp pdev handle 955 * @ctrl_peer: Peer handle 956 * @peer_macaddr: Peer mac addr 957 * @tid: Tid number 958 * 959 * Return: 0 for success, non-zero for failure 960 */ 961 int (*send_delba)(void *pdev_handle, void *ctrl_peer, 962 uint8_t *peer_macaddr, uint8_t tid, void *vdev_handle, 963 uint8_t reason_code); 964 int (*peer_delete_multiple_wds_entries)(void *vdev_handle, 965 uint8_t *dest_macaddr, 966 uint8_t *peer_macaddr, 967 uint32_t flags); 968 /* TODO: Add any other control path calls required to OL_IF/WMA layer */ 969 }; 970 971 #ifdef CONFIG_MCL 972 /* From here MCL specific OPs */ 973 /** 974 * struct cdp_misc_ops - mcl ops not classified 975 * @set_ibss_vdev_heart_beat_timer: 976 * @bad_peer_txctl_set_setting: 977 * @bad_peer_txctl_update_threshold: 978 * @hl_tdls_flag_reset: 979 * @tx_non_std: 980 * @get_vdev_id: 981 * @set_wisa_mode: 982 * @txrx_data_stall_cb_register: 983 * @txrx_data_stall_cb_deregister: 984 * @txrx_post_data_stall_event 985 * @runtime_suspend: 986 * @runtime_resume: 987 * @register_packetdump_cb: 988 * @unregister_packetdump_cb: 989 */ 990 struct cdp_misc_ops { 991 uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev, 992 uint16_t 
timer_value_sec); 993 void (*set_wmm_param)(struct cdp_pdev *cfg_pdev, 994 struct ol_tx_wmm_param_t wmm_param); 995 void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable, 996 int period, int txq_limit); 997 void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev, 998 int level, int tput_thresh, int tx_limit); 999 void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag); 1000 qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev, 1001 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list); 1002 uint16_t (*get_vdev_id)(struct cdp_vdev *vdev); 1003 uint32_t (*get_tx_ack_stats)(struct cdp_pdev *pdev, uint8_t vdev_id); 1004 QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable); 1005 QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb); 1006 QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb); 1007 void (*txrx_post_data_stall_event)( 1008 enum data_stall_log_event_indicator indicator, 1009 enum data_stall_log_event_type data_stall_type, 1010 uint32_t pdev_id, uint32_t vdev_id_bitmap, 1011 enum data_stall_log_recovery_type recovery_type); 1012 QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev); 1013 QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev); 1014 int (*get_opmode)(struct cdp_vdev *vdev); 1015 void (*mark_first_wakeup_packet)(uint8_t value); 1016 void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id); 1017 void (*flush_rx_frames)(void *peer, bool drop); 1018 A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id, 1019 uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets); 1020 void (*pkt_log_init)(struct cdp_pdev *handle, void *scn); 1021 void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn); 1022 int (*get_num_rx_contexts)(struct cdp_soc_t *soc); 1023 void (*register_pktdump_cb)(ol_txrx_pktdump_cb tx_cb, 1024 ol_txrx_pktdump_cb rx_cb); 1025 void (*unregister_pktdump_cb)(void); 1026 }; 1027 1028 /** 1029 * struct cdp_tx_delay_ops - mcl tx delay ops 1030 * @tx_delay: 1031 * @tx_delay_hist: 1032 * 
@tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
		uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
		uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
		uint16_t *out_packet_count,
		uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
		uint32_t interval);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
			    uint64_t **last_pn, uint32_t **rmf_pn_replays);
};

/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
 * @cfg_attach: hardcode the configuration parameters
 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
 *                      1 enabled, 0 disabled.
 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
 *                                   indicate that mgmt over wmi is enabled
 *                                   or not,
 *                                   1 for enabled, 0 for disable
 * @is_high_latency: get device is high or low latency device,
 *                   1 high latency bus, 0 low latency bus
 * @set_flow_control_parameters: set flow control parameters
 * @set_flow_steering: set flow_steering_enabled flag
 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
 * @set_new_htt_msg_format: set new_htt_msg_format flag
 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
 *                      1 enabled, 0 disabled.
 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
 *                      1 enabled, 0 disabled.
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
		uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
		uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
		void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_new_htt_msg_format)(uint8_t val);
	void (*set_peer_unmap_conf_support)(bool val);
	bool (*get_peer_unmap_conf_support)(void);
	void (*set_tx_compl_tsf64)(bool val);
	bool (*get_tx_compl_tsf64)(void);
};

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler:
 * @flow_pool_unmap_handler:
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 * @tx_desc_thresh_reached:
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
					    struct cdp_pdev *pdev,
					    uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
					struct cdp_pdev *pdev,
					uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
					tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);

	void (*dump_flow_pool_info)(void *);

	bool (*tx_desc_thresh_reached)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @set_vdev_tx_desc_limit:
 * @set_vdev_os_queue_status:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 *
@ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
					tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(uint8_t vdev_id, uint8_t chan);
	int (*set_vdev_os_queue_status)(uint8_t vdev_id,
					enum netif_action_type action);
#else
	int (*register_tx_flow_control)(uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
				unsigned int low_watermark,
				unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
		bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
		void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t
(*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
		uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
		uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
		uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	/* ipa_setup variant for the unified WDI API (extra smmu/sys/gsi args) */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
		bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in,
		bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
		uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
		qdf_ipa_client_type_t prod_client,
		qdf_ipa_client_type_t cons_client,
		uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
		uint32_t max_supported_bw_mbps);
	bool (*ipa_rx_intrabss_fwd)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf,
		bool *fwd_success);
};
#endif

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend:
 * @bus_resume:
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};

/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info:
 * @get_ocb_chan_info:
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
		struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *
	(*get_ocb_chan_info)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @change_peer_state:
 * @peer_get_ref_by_addr:
 * @peer_release_ref:
 * @find_peer_by_addr:
 * @find_peer_by_addr_and_vdev:
 * @local_peer_id:
 * @peer_find_by_local_id:
 * @peer_state_update:
 * @get_vdevid:
 * @get_vdev_by_sta_id:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @get_vdev_for_peer:
 * @update_ibss_add_peer_num_of_vdev:
 * @remove_peers_for_vdev:
 * @remove_peers_for_vdev_no_lock:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 * @peer_detach_force_delete:
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
		struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
		enum ol_txrx_peer_state sta_state,
		bool roam_synch_in_progress);
	void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
		uint8_t *peer_addr, uint8_t *peer_id,
		enum peer_debug_id_type debug_id);
	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
		uint8_t *peer_addr, uint8_t *peer_id);
	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
		struct cdp_vdev *vdev,
		uint8_t *peer_addr, uint8_t *peer_id);
	uint16_t (*local_peer_id)(void *peer);
	void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
		uint8_t local_peer_id);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
		uint8_t *peer_addr,
		enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
		uint8_t sta_id);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
		int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
		struct cdp_vdev *vdev, uint8_t *peer_id);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *vdev,
		uint8_t *peer_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
};

/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period:
 * @throttle_set_level:
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
		uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};

/**
 * struct cdp_mob_stats_ops - mcl mobile stats ops
 * @clear_stats:
 * @stats:
 */
struct cdp_mob_stats_ops {
	void (*clear_stats)(uint16_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
#endif /* CONFIG_MCL */

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb:
 * @deregister_rx_offld_flush_cb:
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif

/**
 * struct cdp_ops - aggregate of all converged data-path op tables
 *
 * Top-level dispatch table registered by a data-path implementation;
 * groups the converged op tables and, under CONFIG_MCL, the
 * MCL-specific op tables (IPA and receive-offload tables are present
 * only when their respective features are compiled in).
 */
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifdef CONFIG_MCL
	struct cdp_misc_ops *misc_ops;
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
	struct cdp_bus_ops *bus_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_throttle_ops *throttle_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_tx_delay_ops *delay_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif
};
#endif /* _CDP_TXRX_CMN_OPS_H_ */