/* 
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_IPA_H_
#define _DP_IPA_H_

#if defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_KIWI_V2)
/* Index into soc->tcl_data_ring[] */
#define IPA_TCL_DATA_RING_IDX 3
#else
#define IPA_TCL_DATA_RING_IDX 2
#endif
/* Index into soc->tx_comp_ring[] */
#define IPA_TX_COMP_RING_IDX IPA_TCL_DATA_RING_IDX

#ifdef IPA_OFFLOAD

#define DP_IPA_MAX_IFACE 3
/* Indices into the REO destination ring set reserved for IPA */
#define IPA_REO_DEST_RING_IDX 3
#define IPA_REO_DEST_RING_IDX_2 7

/* Index into the RX refill buffer ring used for the IPA RX path */
#define IPA_RX_REFILL_BUF_RING_IDX 2

/* Alternate REO destination / RX refill indices for the second RX pipe */
#define IPA_ALT_REO_DEST_RING_IDX 2
#define IPA_RX_ALT_REFILL_BUF_RING_IDX 3

/* Adding delay before disabling ipa pipes if any Tx Completions are pending.
 * Poll interval and overall timeout, in milliseconds.
 */
#define TX_COMP_DRAIN_WAIT_MS 50
#define TX_COMP_DRAIN_WAIT_TIMEOUT_MS 100

#ifdef IPA_WDI3_TX_TWO_PIPES
#if defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_KIWI_V2)
/* Index into soc->tcl_data_ring[] and soc->tx_comp_ring[] */
#define IPA_TX_ALT_RING_IDX 4
#define IPA_TX_ALT_COMP_RING_IDX IPA_TX_ALT_RING_IDX
#else /* !KIWI */
#define IPA_TX_ALT_RING_IDX 1
/*
 * must be same as IPA_TX_ALT_RING_IDX as tcl and wbm ring
 * are initialized with same index as a pair.
 */
#define IPA_TX_ALT_COMP_RING_IDX 1
#endif /* KIWI */

#define IPA_SESSION_ID_SHIFT 1
#endif /* IPA_WDI3_TX_TWO_PIPES */

/**
 * struct dp_ipa_uc_tx_hdr - full tx header registered to IPA hardware
 * @eth: ether II header
 */
struct dp_ipa_uc_tx_hdr {
	struct ethhdr eth;
} __packed;

/**
 * struct dp_ipa_uc_tx_vlan_hdr - full tx header with VLAN tag registered
 *				  to IPA hardware
 * @eth: 802.1Q (VLAN-tagged) ether header
 */
struct dp_ipa_uc_tx_vlan_hdr {
	struct vlan_ethhdr eth;
} __packed;

/**
 * struct dp_ipa_uc_rx_hdr - full rx header registered to IPA hardware
 * @eth: ether II header
 */
struct dp_ipa_uc_rx_hdr {
	struct ethhdr eth;
} __packed;

#define DP_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct dp_ipa_uc_tx_hdr)
#define DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN sizeof(struct dp_ipa_uc_tx_vlan_hdr)
#define DP_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct dp_ipa_uc_rx_hdr)
/* 28 <bytes of rx_msdu_end_tlv> + 16 <bytes of attn tlv> +
 * 52 <bytes of rx_mpdu_start_tlv> + <L2 Header>
 * i.e. 96 TLV bytes + 14-byte ether header = 110,
 * or   96 TLV bytes + 18-byte 802.1Q header = 114.
 */
#define DP_IPA_UC_WLAN_RX_HDR_LEN_AST 110
#define DP_IPA_UC_WLAN_RX_HDR_LEN_AST_VLAN 114
#define DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 0

/* IPA handle values: invalid marker plus the two valid handle slots */
#define DP_IPA_HDL_INVALID 0xFF
#define DP_IPA_HDL_FIRST 0
#define DP_IPA_HDL_SECOND 1
/**
 * wlan_ipa_get_hdl() - Get ipa handle from IPA component
 * @psoc: control psoc object
 * @pdev_id: pdev id
 *
 * IPA component will return the IPA handle based on pdev_id
 *
 * Return: IPA handle
 */
qdf_ipa_wdi_hdl_t wlan_ipa_get_hdl(void *psoc, uint8_t pdev_id);

/**
 * dp_ipa_get_resource() - Client request resource information
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * IPA client will request IPA UC related resource information
 * Resource information will be distributed to IPA module
 * All of the required resources should be pre-allocated
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * dp_ipa_set_doorbell_paddr() - Set doorbell register physical address to SRNG
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
 * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id);

/**
 * dp_ipa_iounmap_doorbell_vaddr() - unmap ipa RX db vaddr
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl,
					 uint8_t pdev_id);

/**
 * dp_ipa_uc_set_active() - Set active state of the uC data path
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @uc_active: true to activate, false to deactivate
 * @is_tx: true to select the Tx path, false for the Rx path
 *	   (per parameter names; confirm against implementation)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_uc_set_active(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				bool uc_active, bool is_tx);

/**
 * dp_ipa_op_response() - Handle OP command response from firmware
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @op_msg: op response message from firmware
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      uint8_t *op_msg);

/**
 * dp_ipa_register_op_cb() - Register OP handler function
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @op_cb: handler function pointer
 * @usr_ctxt: user context passed back to the handler
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				 ipa_uc_op_cb_type op_cb, void *usr_ctxt);

/**
 * dp_ipa_deregister_op_cb() - Deregister OP handler function
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Return: none
 */
void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * dp_ipa_get_stat() - Get firmware wdi status
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * dp_tx_send_ipa_data_frame() - send IPA data frame
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual device/interface id
 * @skb: skb
 *
 * Return: NULL on success (frame consumed); skb on failure so the
 *	   caller retains ownership
 */
qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				     qdf_nbuf_t skb);

/**
 * dp_ipa_enable_autonomy() - Enable autonomy RX path
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Set all RX packet route to IPA REO ring
 * Program Destination_Ring_Ctrl_IX_0 REO register to point IPA REO ring
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * dp_ipa_disable_autonomy() - Disable autonomy RX path
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Disable RX packet routing to IPA REO
 * Program Destination_Ring_Ctrl_IX_0 REO register to disable
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)
/**
 * dp_ipa_setup() - Setup and connect IPA pipes
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @ipa_i2w_cb: IPA to WLAN callback
 * @ipa_w2i_cb: WLAN to IPA callback
 * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
 * @ipa_desc_size: IPA descriptor size
 * @ipa_priv: handle to the HTT instance
 * @is_rm_enabled: Is IPA RM enabled or not
 * @tx_pipe_handle: pointer to Tx pipe handle
 * @rx_pipe_handle: pointer to Rx pipe handle
 * @is_smmu_enabled: Is SMMU enabled or not
 * @sys_in: parameters to setup sys pipe in mcc mode
 * @over_gsi: WDI-over-GSI mode flag (per parameter name; confirm
 *	      against implementation)
 * @hdl: IPA handle
 * @id: IPA instance id
 * @ipa_ast_notify_cb: IPA to WLAN callback for ast create and update
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle,
			bool is_smmu_enabled,
			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi,
			qdf_ipa_wdi_hdl_t hdl, qdf_ipa_wdi_hdl_t id,
			void *ipa_ast_notify_cb);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
/**
 * dp_ipa_setup() - Setup and connect IPA pipes
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @ipa_i2w_cb: IPA to WLAN callback
 * @ipa_w2i_cb: WLAN to IPA callback
 * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
 * @ipa_desc_size: IPA descriptor size
 * @ipa_priv: handle to the HTT instance
 * @is_rm_enabled: Is IPA RM enabled or not
 * @tx_pipe_handle: pointer to Tx pipe handle
 * @rx_pipe_handle: pointer to Rx pipe handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
/* Tear down the Tx/Rx pipe pair created by dp_ipa_setup() */
QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			  uint32_t tx_pipe_handle,
			  uint32_t rx_pipe_handle, qdf_ipa_wdi_hdl_t hdl);
/* Remove a header previously registered with IPA by name */
QDF_STATUS dp_ipa_remove_header(char *name);
/* Register per-interface partial header(s) with IPA for @ifname */
int dp_ipa_add_header_info(char *ifname, uint8_t *mac_addr,
			   uint8_t session_id, bool is_ipv6_enabled);
/* Register @ifname as an IPA interface (optionally with IPv6) */
int dp_ipa_register_interface(char *ifname, bool is_ipv6_enabled);
/* Set up an IPA interface binding prod/cons clients to @ifname */
QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
			      qdf_ipa_client_type_t prod_client,
			      qdf_ipa_client_type_t cons_client,
			      uint8_t session_id, bool is_ipv6_enabled,
			      qdf_ipa_wdi_hdl_t hdl);
/* Undo dp_ipa_setup_iface() for @ifname */
QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled,
				qdf_ipa_wdi_hdl_t hdl);

/**
 * dp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
 * @soc_hdl: handle to the soc
 * @pdev_id: pdev id number, to get the handle
 * @hdl: IPA handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       qdf_ipa_wdi_hdl_t hdl);

/**
 * dp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
 * @soc_hdl: handle to the soc
 * @pdev_id: pdev id number, to get the handle
 * @hdl: IPA handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				qdf_ipa_wdi_hdl_t hdl);
/* Request an IPA bandwidth/perf vote for @client */
QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps,
				 qdf_ipa_wdi_hdl_t hdl);

/**
 * dp_ipa_rx_intrabss_fwd() - Perform intra-bss fwd for IPA RX path
 *
 * @soc_hdl: data path soc handle
 * @vdev_id: virtual device/interface id
 * @nbuf: pointer to skb of ethernet packet received from IPA RX path
 * @fwd_success: pointer to indicate if skb succeeded in intra-bss TX
 *
 * This function performs intra-bss forwarding for WDI 3.0 IPA RX path.
 *
 * Return: true if packet is intra-bss fwd-ed and no need to pass to
 *	   network stack. false if packet needs to be passed to network stack.
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success);
/* Detach/attach IPA uC resources for @pdev */
int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev);
int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev);
/* Set up the SRNG ring resources used by the IPA datapath */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev);
/* Fill REO destination remap register values for the IPA RX route */
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
			 uint32_t *remap1, uint32_t *remap2);
/* Return true when running on an MDM class platform */
bool dp_ipa_is_mdm_platform(void);

/* Prepare an nbuf received from IPA for reinjection into the REO ring */
qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf);

/* Create (create=true) or release an SMMU mapping for one RX buffer;
 * @func/@line identify the caller for debug accounting.
 */
QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
					     qdf_nbuf_t nbuf,
					     uint32_t size,
					     bool create,
					     const char *func,
					     uint32_t line);

/**
 * dp_ipa_tx_buf_smmu_mapping() - Create SMMU mappings for IPA
 *				  allocated TX buffers
 * @soc_hdl: handle to the soc
 * @pdev_id: pdev id number, to get the handle
 * @func: caller function
 * @line: line number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_tx_buf_smmu_mapping(struct cdp_soc_t *soc_hdl,
				      uint8_t pdev_id, const char *func,
				      uint32_t line);

/**
 * dp_ipa_tx_buf_smmu_unmapping() - Release SMMU mappings for IPA
 *				    allocated TX buffers
 * @soc_hdl: handle to the soc
 * @pdev_id: pdev id number, to get the handle
 * @func: caller function
 * @line: line number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id, const char *func,
					uint32_t line);

#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
/* Serialize RX buffer SMMU map/unmap; no-op until the lock is initialized */
static inline void
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
{
	if (soc->ipa_rx_buf_map_lock_initialized)
		qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
}

static inline void
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
{
	if (soc->ipa_rx_buf_map_lock_initialized)
		qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
}
391 392 static inline void 393 dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc, 394 uint32_t reo_ring_num) 395 { 396 if (!soc->ipa_reo_ctx_lock_required[reo_ring_num]) 397 return; 398 399 qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock); 400 } 401 402 static inline void 403 dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc, 404 uint32_t reo_ring_num) 405 { 406 if (!soc->ipa_reo_ctx_lock_required[reo_ring_num]) 407 return; 408 409 qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock); 410 } 411 #else 412 413 static inline void 414 dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc) 415 { 416 } 417 418 static inline void 419 dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc) 420 { 421 } 422 423 static inline void 424 dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc, 425 uint32_t reo_ring_num) 426 { 427 } 428 429 static inline void 430 dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc, 431 uint32_t reo_ring_num) 432 { 433 } 434 #endif 435 436 #ifdef IPA_WDS_EASYMESH_FEATURE 437 /** 438 * dp_ipa_ast_create() - Create/update AST entry in AST table 439 * for learning/roaming packets from IPA 440 * @soc: data path soc handle 441 * @data: Structure used for updating the AST table 442 * 443 * Create/update AST entry in AST table for learning/roaming packets from IPA 444 * 445 * Return: QDF_STATUS 446 */ 447 QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl, 448 qdf_ipa_ast_info_type_t *data); 449 450 /** 451 * dp_ipa_ast_notify_cb() - Provide ast notify cb to IPA 452 * @pipe_in: WDI conn pipe in params 453 * @ipa_ast_notify_cb: ipa ast notify cb 454 * 455 * Return: None 456 */ 457 static inline void 458 dp_ipa_ast_notify_cb(qdf_ipa_wdi_conn_in_params_t *pipe_in, 459 void *ipa_ast_notify_cb) 460 { 461 QDF_IPA_WDI_CONN_IN_PARAMS_AST_NOTIFY(pipe_in) = ipa_ast_notify_cb; 462 } 463 #else 464 static inline void 465 dp_ipa_ast_notify_cb(qdf_ipa_wdi_conn_in_params_t *pipe_in, 466 void *ipa_ast_notify_cb) 467 { 468 } 469 #endif 470 471 #ifdef QCA_ENHANCED_STATS_SUPPORT 472 
/* Enhanced-stats accessors exposed to the IPA module */
QDF_STATUS dp_ipa_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
				      uint8_t *peer_mac,
				      struct cdp_peer_stats *peer_stats);
int dp_ipa_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			       void *buf, bool is_aggregate);
QDF_STATUS dp_ipa_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
				      struct cdp_pdev_stats *pdev_stats);
/* Update peer RX stats from an nbuf delivered via the IPA RX path */
QDF_STATUS dp_ipa_update_peer_rx_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
				       uint8_t *peer_mac, qdf_nbuf_t nbuf);
#endif
#else
/*
 * IPA_OFFLOAD disabled: inline no-op stubs so callers compile unchanged.
 * Success-returning stubs report QDF_STATUS_SUCCESS (0).
 */
static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* No IPA rings to set up; 0 indicates success to the ring-setup caller */
static inline int dp_ipa_ring_resource_setup(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
	return 0;
}

static inline QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
							   qdf_nbuf_t nbuf,
							   uint32_t size,
							   bool create,
							   const char *func,
							   uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

/* Mapping locks are meaningless without IPA offload */
static inline void
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
{
}

static inline void
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
{
}

static inline void
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
				uint32_t reo_ring_num)
{
}

static inline void
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
				  uint32_t reo_ring_num)
{
}

/* Nothing to reinject; hand the nbuf straight back to the caller */
static inline qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc,
						       qdf_nbuf_t nbuf)
{
	return nbuf;
}

static inline QDF_STATUS dp_ipa_tx_buf_smmu_mapping(struct cdp_soc_t *soc_hdl,
						    uint8_t pdev_id,
						    const char *func,
						    uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(struct cdp_soc_t *soc_hdl,
						      uint8_t pdev_id,
						      const char *func,
						      uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

#ifdef IPA_WDS_EASYMESH_FEATURE
static inline QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
					   qdf_ipa_ast_info_type_t *data)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#endif
#endif /* _DP_IPA_H_ */