1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <wlan_ipa_obj_mgmt_api.h> 21 #include <qdf_types.h> 22 #include <qdf_lock.h> 23 #include <qdf_net_types.h> 24 #include <qdf_lro.h> 25 #include <qdf_module.h> 26 #include <hal_hw_headers.h> 27 #include <hal_api.h> 28 #include <hif.h> 29 #include <htt.h> 30 #include <wdi_event.h> 31 #include <queue.h> 32 #include "dp_types.h" 33 #include "dp_rings.h" 34 #include "dp_internal.h" 35 #include "dp_tx.h" 36 #include "dp_tx_desc.h" 37 #include "dp_rx.h" 38 #ifdef DP_RATETABLE_SUPPORT 39 #include "dp_ratetable.h" 40 #endif 41 #include <cdp_txrx_handle.h> 42 #include <wlan_cfg.h> 43 #include <wlan_utility.h> 44 #include "cdp_txrx_cmn_struct.h" 45 #include "cdp_txrx_stats_struct.h" 46 #include "cdp_txrx_cmn_reg.h" 47 #include <qdf_util.h> 48 #include "dp_peer.h" 49 #include "htt_stats.h" 50 #include "dp_htt.h" 51 #ifdef WLAN_SUPPORT_RX_FISA 52 #include <wlan_dp_fisa_rx.h> 53 #endif 54 #include "htt_ppdu_stats.h" 55 #include "qdf_mem.h" /* qdf_mem_malloc,free */ 56 #include "cfg_ucfg_api.h" 57 #include <wlan_module_ids.h> 58 59 #ifdef 
QCA_LL_TX_FLOW_CONTROL_V2 60 #include "cdp_txrx_flow_ctrl_v2.h" 61 #else 62 63 static inline void 64 cdp_dump_flow_pool_info(struct cdp_soc_t *soc) 65 { 66 return; 67 } 68 #endif 69 #ifdef WIFI_MONITOR_SUPPORT 70 #include <dp_mon.h> 71 #endif 72 #include "dp_ipa.h" 73 #ifdef FEATURE_WDS 74 #include "dp_txrx_wds.h" 75 #endif 76 #ifdef WLAN_SUPPORT_MSCS 77 #include "dp_mscs.h" 78 #endif 79 #ifdef WLAN_SUPPORT_MESH_LATENCY 80 #include "dp_mesh_latency.h" 81 #endif 82 #ifdef WLAN_SUPPORT_SCS 83 #include "dp_scs.h" 84 #endif 85 #ifdef ATH_SUPPORT_IQUE 86 #include "dp_txrx_me.h" 87 #endif 88 #if defined(DP_CON_MON) 89 #ifndef REMOVE_PKT_LOG 90 #include <pktlog_ac_api.h> 91 #include <pktlog_ac.h> 92 #endif 93 #endif 94 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 95 #include <wlan_dp_swlm.h> 96 #endif 97 #ifdef CONFIG_SAWF_DEF_QUEUES 98 #include "dp_sawf.h" 99 #endif 100 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF 101 #include <target_if_dp.h> 102 #endif 103 104 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH) 105 #define SET_PEER_REF_CNT_ONE(_peer) \ 106 qdf_atomic_set(&(_peer)->ref_cnt, 1) 107 #else 108 #define SET_PEER_REF_CNT_ONE(_peer) 109 #endif 110 111 #ifdef WLAN_SYSFS_DP_STATS 112 /* sysfs event wait time for firmware stat request unit milliseconds */ 113 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000 114 #endif 115 116 #ifdef QCA_DP_TX_FW_METADATA_V2 117 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \ 118 HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val) 119 #else 120 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \ 121 HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) 122 #endif 123 124 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check, 125 MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS); 126 127 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check, 128 MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS); 129 130 void dp_configure_arch_ops(struct dp_soc *soc); 131 qdf_size_t dp_get_soc_context_size(uint16_t device_id); 132 133 /* 134 * The max size of cdp_peer_stats_param_t is limited 
to 16 bytes. 135 * If the buffer size is exceeding this size limit, 136 * dp_txrx_get_peer_stats is to be used instead. 137 */ 138 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size, 139 (sizeof(cdp_peer_stats_param_t) <= 16)); 140 141 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 142 /* 143 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS 144 * also should be updated accordingly 145 */ 146 QDF_COMPILE_TIME_ASSERT(num_intr_grps, 147 HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS); 148 149 /* 150 * HIF_EVENT_HIST_MAX should always be power of 2 151 */ 152 QDF_COMPILE_TIME_ASSERT(hif_event_history_size, 153 (HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0); 154 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 155 156 /* 157 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, 158 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated 159 */ 160 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs, 161 WLAN_CFG_INT_NUM_CONTEXTS_MAX >= 162 WLAN_CFG_INT_NUM_CONTEXTS); 163 164 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl); 165 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl); 166 167 static void dp_pdev_srng_deinit(struct dp_pdev *pdev); 168 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev); 169 static void dp_pdev_srng_free(struct dp_pdev *pdev); 170 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev); 171 172 static inline 173 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc, 174 struct cdp_pdev_attach_params *params); 175 176 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id); 177 178 static QDF_STATUS 179 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc, 180 HTC_HANDLE htc_handle, 181 qdf_device_t qdf_osdev, 182 uint8_t pdev_id); 183 184 static QDF_STATUS 185 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force); 186 187 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc); 188 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc); 189 190 static 
void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force); 191 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, 192 uint8_t pdev_id, 193 int force); 194 static struct dp_soc * 195 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 196 struct cdp_soc_attach_params *params); 197 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, 198 uint8_t vdev_id, 199 uint8_t *peer_mac_addr, 200 enum cdp_peer_type peer_type); 201 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl, 202 uint8_t vdev_id, 203 uint8_t *peer_mac, uint32_t bitmap, 204 enum cdp_peer_type peer_type); 205 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, 206 bool unmap_only, 207 bool mlo_peers_only); 208 #ifdef ENABLE_VERBOSE_DEBUG 209 bool is_dp_verbose_debug_enabled; 210 #endif 211 212 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) 213 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); 214 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 215 bool enable); 216 static inline void 217 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 218 struct cdp_cfr_rcc_stats *cfr_rcc_stats); 219 static inline void 220 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); 221 #endif 222 223 #ifdef DP_UMAC_HW_RESET_SUPPORT 224 static QDF_STATUS dp_umac_reset_action_trigger_recovery(struct dp_soc *soc); 225 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc); 226 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc); 227 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc); 228 #endif 229 230 #define MON_VDEV_TIMER_INIT 0x1 231 #define MON_VDEV_TIMER_RUNNING 0x2 232 233 #define DP_MCS_LENGTH (6*MAX_MCS) 234 235 #define DP_CURR_FW_STATS_AVAIL 19 236 #define DP_HTT_DBG_EXT_STATS_MAX 256 237 #define DP_MAX_SLEEP_TIME 100 238 #ifndef QCA_WIFI_3_0_EMU 239 #define SUSPEND_DRAIN_WAIT 500 240 #else 241 #define 
SUSPEND_DRAIN_WAIT 3000 242 #endif 243 244 #ifdef IPA_OFFLOAD 245 /* Exclude IPA rings from the interrupt context */ 246 #define TX_RING_MASK_VAL 0xb 247 #define RX_RING_MASK_VAL 0x7 248 #else 249 #define TX_RING_MASK_VAL 0xF 250 #define RX_RING_MASK_VAL 0xF 251 #endif 252 253 #define STR_MAXLEN 64 254 255 #define RNG_ERR "SRNG setup failed for" 256 257 /** 258 * enum dp_stats_type - Select the type of statistics 259 * @STATS_FW: Firmware-based statistic 260 * @STATS_HOST: Host-based statistic 261 * @STATS_TYPE_MAX: maximum enumeration 262 */ 263 enum dp_stats_type { 264 STATS_FW = 0, 265 STATS_HOST = 1, 266 STATS_TYPE_MAX = 2, 267 }; 268 269 /** 270 * enum dp_fw_stats - General Firmware statistics options 271 * @TXRX_FW_STATS_INVALID: statistic is not available 272 */ 273 enum dp_fw_stats { 274 TXRX_FW_STATS_INVALID = -1, 275 }; 276 277 /* 278 * dp_stats_mapping_table - Firmware and Host statistics 279 * currently supported 280 */ 281 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = { 282 {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID}, 283 {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID}, 284 {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID}, 285 {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID}, 286 {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID}, 287 {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID}, 288 {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID}, 289 {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID}, 290 {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID}, 291 {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID}, 292 {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID}, 293 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, 294 {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID}, 295 {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID}, 296 {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID}, 297 {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID}, 298 
{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID}, 299 {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID}, 300 {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID}, 301 /* Last ENUM for HTT FW STATS */ 302 {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID}, 303 {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS}, 304 {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS}, 305 {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS}, 306 {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS}, 307 {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS}, 308 {TXRX_FW_STATS_INVALID, TXRX_AST_STATS}, 309 {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS}, 310 {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS}, 311 {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS}, 312 {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS}, 313 {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS}, 314 {TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS}, 315 {TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS}, 316 {TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS}, 317 {TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS}, 318 {TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP}, 319 {TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP}, 320 {TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS}, 321 {HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}, 322 {HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID} 323 }; 324 325 /* MCL specific functions */ 326 #if defined(DP_CON_MON) 327 328 #ifdef IPA_OFFLOAD 329 /** 330 * dp_get_num_rx_contexts() - get number of RX contexts 331 * @soc_hdl: cdp opaque soc handle 332 * 333 * Return: number of RX contexts 334 */ 335 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl) 336 { 337 int num_rx_contexts; 338 uint32_t reo_ring_map; 339 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 340 341 reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx); 342 343 switch (soc->arch_id) { 344 case CDP_ARCH_TYPE_BE: 345 /* 2 REO rings are used for IPA */ 346 reo_ring_map &= ~(BIT(3) | BIT(7)); 347 348 break; 349 case 
CDP_ARCH_TYPE_LI: 350 /* 1 REO ring is used for IPA */ 351 reo_ring_map &= ~BIT(3); 352 break; 353 default: 354 dp_err("unknown arch_id 0x%x", soc->arch_id); 355 QDF_BUG(0); 356 } 357 /* 358 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled 359 * in future 360 */ 361 num_rx_contexts = qdf_get_hweight32(reo_ring_map); 362 363 return num_rx_contexts; 364 } 365 #else 366 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl) 367 { 368 int num_rx_contexts; 369 uint32_t reo_config; 370 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 371 372 reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx); 373 /* 374 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled 375 * in future 376 */ 377 num_rx_contexts = qdf_get_hweight32(reo_config); 378 379 return num_rx_contexts; 380 } 381 #endif 382 383 #endif 384 385 #ifdef FEATURE_MEC 386 void dp_peer_mec_flush_entries(struct dp_soc *soc) 387 { 388 unsigned int index; 389 struct dp_mec_entry *mecentry, *mecentry_next; 390 391 TAILQ_HEAD(, dp_mec_entry) free_list; 392 TAILQ_INIT(&free_list); 393 394 if (!soc->mec_hash.mask) 395 return; 396 397 if (!soc->mec_hash.bins) 398 return; 399 400 if (!qdf_atomic_read(&soc->mec_cnt)) 401 return; 402 403 qdf_spin_lock_bh(&soc->mec_lock); 404 for (index = 0; index <= soc->mec_hash.mask; index++) { 405 if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) { 406 TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index], 407 hash_list_elem, mecentry_next) { 408 dp_peer_mec_detach_entry(soc, mecentry, &free_list); 409 } 410 } 411 } 412 qdf_spin_unlock_bh(&soc->mec_lock); 413 414 dp_peer_mec_free_list(soc, &free_list); 415 } 416 417 /** 418 * dp_print_mec_stats() - Dump MEC entries in table 419 * @soc: Datapath soc handle 420 * 421 * Return: none 422 */ 423 static void dp_print_mec_stats(struct dp_soc *soc) 424 { 425 int i; 426 uint32_t index; 427 struct dp_mec_entry *mecentry = NULL, *mec_list; 428 uint32_t num_entries = 0; 429 430 DP_PRINT_STATS("MEC 
Stats:"); 431 DP_PRINT_STATS(" Entries Added = %d", soc->stats.mec.added); 432 DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.mec.deleted); 433 434 if (!qdf_atomic_read(&soc->mec_cnt)) 435 return; 436 437 mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY); 438 if (!mec_list) { 439 dp_peer_warn("%pK: failed to allocate mec_list", soc); 440 return; 441 } 442 443 DP_PRINT_STATS("MEC Table:"); 444 for (index = 0; index <= soc->mec_hash.mask; index++) { 445 qdf_spin_lock_bh(&soc->mec_lock); 446 if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) { 447 qdf_spin_unlock_bh(&soc->mec_lock); 448 continue; 449 } 450 451 TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], 452 hash_list_elem) { 453 qdf_mem_copy(&mec_list[num_entries], mecentry, 454 sizeof(*mecentry)); 455 num_entries++; 456 } 457 qdf_spin_unlock_bh(&soc->mec_lock); 458 } 459 460 if (!num_entries) { 461 qdf_mem_free(mec_list); 462 return; 463 } 464 465 for (i = 0; i < num_entries; i++) { 466 DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT 467 " is_active = %d pdev_id = %d vdev_id = %d", 468 i, 469 QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw), 470 mec_list[i].is_active, 471 mec_list[i].pdev_id, 472 mec_list[i].vdev_id); 473 } 474 qdf_mem_free(mec_list); 475 } 476 #else 477 static void dp_print_mec_stats(struct dp_soc *soc) 478 { 479 } 480 #endif 481 482 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl, 483 uint8_t vdev_id, 484 uint8_t *peer_mac, 485 uint8_t *mac_addr, 486 enum cdp_txrx_ast_entry_type type, 487 uint32_t flags) 488 { 489 int ret = -1; 490 QDF_STATUS status = QDF_STATUS_SUCCESS; 491 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl, 492 peer_mac, 0, vdev_id, 493 DP_MOD_ID_CDP); 494 495 if (!peer) { 496 dp_peer_debug("Peer is NULL!"); 497 return ret; 498 } 499 500 status = dp_peer_add_ast((struct dp_soc *)soc_hdl, 501 peer, 502 mac_addr, 503 type, 504 flags); 505 if ((status == QDF_STATUS_SUCCESS) || 506 (status == QDF_STATUS_E_ALREADY) || 507 (status == 
QDF_STATUS_E_AGAIN)) 508 ret = 0; 509 510 dp_hmwds_ast_add_notify(peer, mac_addr, 511 type, status, false); 512 513 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 514 515 return ret; 516 } 517 518 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl, 519 uint8_t vdev_id, 520 uint8_t *peer_mac, 521 uint8_t *wds_macaddr, 522 uint32_t flags) 523 { 524 int status = -1; 525 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 526 struct dp_ast_entry *ast_entry = NULL; 527 struct dp_peer *peer; 528 529 if (soc->ast_offload_support) 530 return status; 531 532 peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl, 533 peer_mac, 0, vdev_id, 534 DP_MOD_ID_CDP); 535 536 if (!peer) { 537 dp_peer_debug("Peer is NULL!"); 538 return status; 539 } 540 541 qdf_spin_lock_bh(&soc->ast_lock); 542 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr, 543 peer->vdev->pdev->pdev_id); 544 545 if (ast_entry) { 546 status = dp_peer_update_ast(soc, 547 peer, 548 ast_entry, flags); 549 } 550 qdf_spin_unlock_bh(&soc->ast_lock); 551 552 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 553 554 return status; 555 } 556 557 /** 558 * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer 559 * @soc: Datapath SOC handle 560 * @peer: DP peer 561 * @arg: callback argument 562 * 563 * Return: None 564 */ 565 static void 566 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg) 567 { 568 struct dp_ast_entry *ast_entry = NULL; 569 struct dp_ast_entry *tmp_ast_entry; 570 571 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) { 572 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) || 573 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) 574 dp_peer_del_ast(soc, ast_entry); 575 } 576 } 577 578 /** 579 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry 580 * @soc_hdl: Datapath SOC handle 581 * @wds_macaddr: WDS entry MAC Address 582 * @peer_mac_addr: WDS entry MAC Address 583 * @vdev_id: id of vdev handle 584 * 585 * Return: QDF_STATUS 586 */ 
587 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl, 588 uint8_t *wds_macaddr, 589 uint8_t *peer_mac_addr, 590 uint8_t vdev_id) 591 { 592 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 593 struct dp_ast_entry *ast_entry = NULL; 594 struct dp_peer *peer; 595 struct dp_pdev *pdev; 596 struct dp_vdev *vdev; 597 598 if (soc->ast_offload_support) 599 return QDF_STATUS_E_FAILURE; 600 601 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP); 602 603 if (!vdev) 604 return QDF_STATUS_E_FAILURE; 605 606 pdev = vdev->pdev; 607 608 if (peer_mac_addr) { 609 peer = dp_peer_find_hash_find(soc, peer_mac_addr, 610 0, vdev->vdev_id, 611 DP_MOD_ID_CDP); 612 if (!peer) { 613 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 614 return QDF_STATUS_E_FAILURE; 615 } 616 617 qdf_spin_lock_bh(&soc->ast_lock); 618 dp_peer_reset_ast_entries(soc, peer, NULL); 619 qdf_spin_unlock_bh(&soc->ast_lock); 620 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 621 } else if (wds_macaddr) { 622 qdf_spin_lock_bh(&soc->ast_lock); 623 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr, 624 pdev->pdev_id); 625 626 if (ast_entry) { 627 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) || 628 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) 629 dp_peer_del_ast(soc, ast_entry); 630 } 631 qdf_spin_unlock_bh(&soc->ast_lock); 632 } 633 634 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 635 return QDF_STATUS_SUCCESS; 636 } 637 638 /** 639 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry 640 * @soc_hdl: Datapath SOC handle 641 * @vdev_id: id of vdev object 642 * 643 * Return: QDF_STATUS 644 */ 645 static QDF_STATUS 646 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl, 647 uint8_t vdev_id) 648 { 649 struct dp_soc *soc = (struct dp_soc *) soc_hdl; 650 651 if (soc->ast_offload_support) 652 return QDF_STATUS_SUCCESS; 653 654 qdf_spin_lock_bh(&soc->ast_lock); 655 656 dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL, 657 DP_MOD_ID_CDP); 658 
qdf_spin_unlock_bh(&soc->ast_lock); 659 660 return QDF_STATUS_SUCCESS; 661 } 662 663 /** 664 * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer 665 * @soc: Datapath SOC 666 * @peer: Datapath peer 667 * @arg: arg to callback 668 * 669 * Return: None 670 */ 671 static void 672 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg) 673 { 674 struct dp_ast_entry *ase = NULL; 675 struct dp_ast_entry *temp_ase; 676 677 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) { 678 if ((ase->type == 679 CDP_TXRX_AST_TYPE_STATIC) || 680 (ase->type == 681 CDP_TXRX_AST_TYPE_SELF) || 682 (ase->type == 683 CDP_TXRX_AST_TYPE_STA_BSS)) 684 continue; 685 dp_peer_del_ast(soc, ase); 686 } 687 } 688 689 /** 690 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry 691 * @soc_hdl: Datapath SOC handle 692 * 693 * Return: None 694 */ 695 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl) 696 { 697 struct dp_soc *soc = (struct dp_soc *) soc_hdl; 698 699 qdf_spin_lock_bh(&soc->ast_lock); 700 701 dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL, 702 DP_MOD_ID_CDP); 703 704 qdf_spin_unlock_bh(&soc->ast_lock); 705 dp_peer_mec_flush_entries(soc); 706 } 707 708 #if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST) 709 /** 710 * dp_peer_send_wds_disconnect() - Send Disconnect event to IPA for each peer 711 * @soc: Datapath SOC 712 * @peer: Datapath peer 713 * 714 * Return: None 715 */ 716 static void 717 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer) 718 { 719 struct dp_ast_entry *ase = NULL; 720 struct dp_ast_entry *temp_ase; 721 722 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) { 723 if (ase->type == CDP_TXRX_AST_TYPE_WDS) { 724 soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc, 725 ase->mac_addr.raw, 726 ase->vdev_id); 727 } 728 } 729 } 730 #elif defined(FEATURE_AST) 731 static void 732 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer) 733 { 734 } 
735 #endif 736 737 /** 738 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table 739 * and return ast entry information 740 * of first ast entry found in the 741 * table with given mac address 742 * @soc_hdl: data path soc handle 743 * @ast_mac_addr: AST entry mac address 744 * @ast_entry_info: ast entry information 745 * 746 * Return: true if ast entry found with ast_mac_addr 747 * false if ast entry not found 748 */ 749 static bool dp_peer_get_ast_info_by_soc_wifi3 750 (struct cdp_soc_t *soc_hdl, 751 uint8_t *ast_mac_addr, 752 struct cdp_ast_entry_info *ast_entry_info) 753 { 754 struct dp_ast_entry *ast_entry = NULL; 755 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 756 struct dp_peer *peer = NULL; 757 758 if (soc->ast_offload_support) 759 return false; 760 761 qdf_spin_lock_bh(&soc->ast_lock); 762 763 ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr); 764 if ((!ast_entry) || 765 (ast_entry->delete_in_progress && !ast_entry->callback)) { 766 qdf_spin_unlock_bh(&soc->ast_lock); 767 return false; 768 } 769 770 peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id, 771 DP_MOD_ID_AST); 772 if (!peer) { 773 qdf_spin_unlock_bh(&soc->ast_lock); 774 return false; 775 } 776 777 ast_entry_info->type = ast_entry->type; 778 ast_entry_info->pdev_id = ast_entry->pdev_id; 779 ast_entry_info->vdev_id = ast_entry->vdev_id; 780 ast_entry_info->peer_id = ast_entry->peer_id; 781 qdf_mem_copy(&ast_entry_info->peer_mac_addr[0], 782 &peer->mac_addr.raw[0], 783 QDF_MAC_ADDR_SIZE); 784 dp_peer_unref_delete(peer, DP_MOD_ID_AST); 785 qdf_spin_unlock_bh(&soc->ast_lock); 786 return true; 787 } 788 789 /** 790 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table 791 * and return ast entry information 792 * if mac address and pdev_id matches 793 * @soc_hdl: data path soc handle 794 * @ast_mac_addr: AST entry mac address 795 * @pdev_id: pdev_id 796 * @ast_entry_info: ast entry information 797 * 798 * Return: true if ast entry found with ast_mac_addr 
799 * false if ast entry not found 800 */ 801 static bool dp_peer_get_ast_info_by_pdevid_wifi3 802 (struct cdp_soc_t *soc_hdl, 803 uint8_t *ast_mac_addr, 804 uint8_t pdev_id, 805 struct cdp_ast_entry_info *ast_entry_info) 806 { 807 struct dp_ast_entry *ast_entry; 808 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 809 struct dp_peer *peer = NULL; 810 811 if (soc->ast_offload_support) 812 return false; 813 814 qdf_spin_lock_bh(&soc->ast_lock); 815 816 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, 817 pdev_id); 818 819 if ((!ast_entry) || 820 (ast_entry->delete_in_progress && !ast_entry->callback)) { 821 qdf_spin_unlock_bh(&soc->ast_lock); 822 return false; 823 } 824 825 peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id, 826 DP_MOD_ID_AST); 827 if (!peer) { 828 qdf_spin_unlock_bh(&soc->ast_lock); 829 return false; 830 } 831 832 ast_entry_info->type = ast_entry->type; 833 ast_entry_info->pdev_id = ast_entry->pdev_id; 834 ast_entry_info->vdev_id = ast_entry->vdev_id; 835 ast_entry_info->peer_id = ast_entry->peer_id; 836 qdf_mem_copy(&ast_entry_info->peer_mac_addr[0], 837 &peer->mac_addr.raw[0], 838 QDF_MAC_ADDR_SIZE); 839 dp_peer_unref_delete(peer, DP_MOD_ID_AST); 840 qdf_spin_unlock_bh(&soc->ast_lock); 841 return true; 842 } 843 844 /** 845 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table 846 * with given mac address 847 * @soc_handle: data path soc handle 848 * @mac_addr: AST entry mac address 849 * @callback: callback function to called on ast delete response from FW 850 * @cookie: argument to be passed to callback 851 * 852 * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete 853 * is sent 854 * QDF_STATUS_E_INVAL false if ast entry not found 855 */ 856 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle, 857 uint8_t *mac_addr, 858 txrx_ast_free_cb callback, 859 void *cookie) 860 861 { 862 struct dp_soc *soc = (struct dp_soc *)soc_handle; 863 struct dp_ast_entry 
*ast_entry = NULL; 864 txrx_ast_free_cb cb = NULL; 865 void *arg = NULL; 866 867 if (soc->ast_offload_support) 868 return -QDF_STATUS_E_INVAL; 869 870 qdf_spin_lock_bh(&soc->ast_lock); 871 ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr); 872 if (!ast_entry) { 873 qdf_spin_unlock_bh(&soc->ast_lock); 874 return -QDF_STATUS_E_INVAL; 875 } 876 877 if (ast_entry->callback) { 878 cb = ast_entry->callback; 879 arg = ast_entry->cookie; 880 } 881 882 ast_entry->callback = callback; 883 ast_entry->cookie = cookie; 884 885 /* 886 * if delete_in_progress is set AST delete is sent to target 887 * and host is waiting for response should not send delete 888 * again 889 */ 890 if (!ast_entry->delete_in_progress) 891 dp_peer_del_ast(soc, ast_entry); 892 893 qdf_spin_unlock_bh(&soc->ast_lock); 894 if (cb) { 895 cb(soc->ctrl_psoc, 896 dp_soc_to_cdp_soc(soc), 897 arg, 898 CDP_TXRX_AST_DELETE_IN_PROGRESS); 899 } 900 return QDF_STATUS_SUCCESS; 901 } 902 903 /** 904 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash 905 * table if mac address and pdev_id matches 906 * @soc_handle: data path soc handle 907 * @mac_addr: AST entry mac address 908 * @pdev_id: pdev id 909 * @callback: callback function to called on ast delete response from FW 910 * @cookie: argument to be passed to callback 911 * 912 * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete 913 * is sent 914 * QDF_STATUS_E_INVAL false if ast entry not found 915 */ 916 917 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle, 918 uint8_t *mac_addr, 919 uint8_t pdev_id, 920 txrx_ast_free_cb callback, 921 void *cookie) 922 923 { 924 struct dp_soc *soc = (struct dp_soc *)soc_handle; 925 struct dp_ast_entry *ast_entry; 926 txrx_ast_free_cb cb = NULL; 927 void *arg = NULL; 928 929 if (soc->ast_offload_support) 930 return -QDF_STATUS_E_INVAL; 931 932 qdf_spin_lock_bh(&soc->ast_lock); 933 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id); 934 
935 if (!ast_entry) { 936 qdf_spin_unlock_bh(&soc->ast_lock); 937 return -QDF_STATUS_E_INVAL; 938 } 939 940 if (ast_entry->callback) { 941 cb = ast_entry->callback; 942 arg = ast_entry->cookie; 943 } 944 945 ast_entry->callback = callback; 946 ast_entry->cookie = cookie; 947 948 /* 949 * if delete_in_progress is set AST delete is sent to target 950 * and host is waiting for response should not sent delete 951 * again 952 */ 953 if (!ast_entry->delete_in_progress) 954 dp_peer_del_ast(soc, ast_entry); 955 956 qdf_spin_unlock_bh(&soc->ast_lock); 957 958 if (cb) { 959 cb(soc->ctrl_psoc, 960 dp_soc_to_cdp_soc(soc), 961 arg, 962 CDP_TXRX_AST_DELETE_IN_PROGRESS); 963 } 964 return QDF_STATUS_SUCCESS; 965 } 966 967 /** 968 * dp_peer_HMWDS_ast_entry_del() - delete the ast entry from soc AST hash 969 * table if HMWDS rem-addr command is issued 970 * 971 * @soc_handle: data path soc handle 972 * @vdev_id: vdev id 973 * @wds_macaddr: AST entry mac address to delete 974 * @type: cdp_txrx_ast_entry_type to send to FW 975 * @delete_in_fw: flag to indicate AST entry deletion in FW 976 * 977 * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete 978 * is sent 979 * QDF_STATUS_E_INVAL false if ast entry not found 980 */ 981 static QDF_STATUS dp_peer_HMWDS_ast_entry_del(struct cdp_soc_t *soc_handle, 982 uint8_t vdev_id, 983 uint8_t *wds_macaddr, 984 uint8_t type, 985 uint8_t delete_in_fw) 986 { 987 struct dp_soc *soc = (struct dp_soc *)soc_handle; 988 989 if (soc->ast_offload_support) { 990 dp_del_wds_entry_wrapper(soc, vdev_id, wds_macaddr, type, 991 delete_in_fw); 992 return QDF_STATUS_SUCCESS; 993 } 994 995 return -QDF_STATUS_E_INVAL; 996 } 997 998 #ifdef FEATURE_AST 999 /** 1000 * dp_print_mlo_ast_stats() - Print AST stats for MLO peers 1001 * 1002 * @soc: core DP soc context 1003 * 1004 * Return: void 1005 */ 1006 static void dp_print_mlo_ast_stats(struct dp_soc *soc) 1007 { 1008 if (soc->arch_ops.print_mlo_ast_stats) 1009 
soc->arch_ops.print_mlo_ast_stats(soc); 1010 } 1011 1012 void 1013 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg) 1014 { 1015 struct dp_ast_entry *ase, *tmp_ase; 1016 uint32_t num_entries = 0; 1017 char type[CDP_TXRX_AST_TYPE_MAX][10] = { 1018 "NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS", 1019 "DA", "HMWDS_SEC", "MLD"}; 1020 1021 DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) { 1022 DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT 1023 " peer_mac_addr = "QDF_MAC_ADDR_FMT 1024 " peer_id = %u" 1025 " type = %s" 1026 " next_hop = %d" 1027 " is_active = %d" 1028 " ast_idx = %d" 1029 " ast_hash = %d" 1030 " delete_in_progress = %d" 1031 " pdev_id = %d" 1032 " vdev_id = %d", 1033 ++num_entries, 1034 QDF_MAC_ADDR_REF(ase->mac_addr.raw), 1035 QDF_MAC_ADDR_REF(peer->mac_addr.raw), 1036 ase->peer_id, 1037 type[ase->type], 1038 ase->next_hop, 1039 ase->is_active, 1040 ase->ast_idx, 1041 ase->ast_hash_value, 1042 ase->delete_in_progress, 1043 ase->pdev_id, 1044 ase->vdev_id); 1045 } 1046 } 1047 1048 void dp_print_ast_stats(struct dp_soc *soc) 1049 { 1050 DP_PRINT_STATS("AST Stats:"); 1051 DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added); 1052 DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted); 1053 DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out); 1054 DP_PRINT_STATS(" Entries MAP ERR = %d", soc->stats.ast.map_err); 1055 DP_PRINT_STATS(" Entries Mismatch ERR = %d", 1056 soc->stats.ast.ast_mismatch); 1057 1058 DP_PRINT_STATS("AST Table:"); 1059 1060 qdf_spin_lock_bh(&soc->ast_lock); 1061 1062 dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL, 1063 DP_MOD_ID_GENERIC_STATS); 1064 1065 qdf_spin_unlock_bh(&soc->ast_lock); 1066 1067 dp_print_mlo_ast_stats(soc); 1068 } 1069 #else 1070 void dp_print_ast_stats(struct dp_soc *soc) 1071 { 1072 DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST"); 1073 return; 1074 } 1075 #endif 1076 1077 /** 1078 * dp_print_peer_info() - Dump peer info 1079 * @soc: 
Datapath soc handle 1080 * @peer: Datapath peer handle 1081 * @arg: argument to iter function 1082 * 1083 * Return: void 1084 */ 1085 static void 1086 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg) 1087 { 1088 struct dp_txrx_peer *txrx_peer = NULL; 1089 1090 txrx_peer = dp_get_txrx_peer(peer); 1091 if (!txrx_peer) 1092 return; 1093 1094 DP_PRINT_STATS(" peer id = %d" 1095 " peer_mac_addr = "QDF_MAC_ADDR_FMT 1096 " nawds_enabled = %d" 1097 " bss_peer = %d" 1098 " wds_enabled = %d" 1099 " tx_cap_enabled = %d" 1100 " rx_cap_enabled = %d", 1101 peer->peer_id, 1102 QDF_MAC_ADDR_REF(peer->mac_addr.raw), 1103 txrx_peer->nawds_enabled, 1104 txrx_peer->bss_peer, 1105 txrx_peer->wds_enabled, 1106 dp_monitor_is_tx_cap_enabled(peer), 1107 dp_monitor_is_rx_cap_enabled(peer)); 1108 } 1109 1110 /** 1111 * dp_print_peer_table() - Dump all Peer stats 1112 * @vdev: Datapath Vdev handle 1113 * 1114 * Return: void 1115 */ 1116 static void dp_print_peer_table(struct dp_vdev *vdev) 1117 { 1118 DP_PRINT_STATS("Dumping Peer Table Stats:"); 1119 dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL, 1120 DP_MOD_ID_GENERIC_STATS); 1121 } 1122 1123 #ifdef DP_MEM_PRE_ALLOC 1124 1125 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 1126 size_t ctxt_size) 1127 { 1128 void *ctxt_mem; 1129 1130 if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) { 1131 dp_warn("dp_prealloc_get_context null!"); 1132 goto dynamic_alloc; 1133 } 1134 1135 ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type, 1136 ctxt_size); 1137 1138 if (ctxt_mem) 1139 goto end; 1140 1141 dynamic_alloc: 1142 dp_info("switch to dynamic-alloc for type %d, size %zu", 1143 ctxt_type, ctxt_size); 1144 ctxt_mem = qdf_mem_malloc(ctxt_size); 1145 end: 1146 return ctxt_mem; 1147 } 1148 1149 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 1150 void *vaddr) 1151 { 1152 QDF_STATUS status; 1153 1154 if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) { 1155 
		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
								ctxt_type,
								vaddr);
	} else {
		dp_warn("dp_prealloc_put_context null!");
		status = QDF_STATUS_E_NOSUPPORT;
	}

	if (QDF_IS_STATUS_ERROR(status)) {
		/* Context was not from the pre-alloc pool: free dynamically */
		dp_info("Context type %d not pre-allocated", ctxt_type);
		qdf_mem_free(vaddr);
	}
}

/**
 * dp_srng_aligned_mem_alloc_consistent() - allocate SRNG ring memory from
 *	the consistent pre-alloc pool, falling back to a dynamic consistent
 *	allocation
 * @soc: Datapath soc handle
 * @srng: SRNG descriptor; alloc_size and the base vaddr/paddr fields are
 *	  filled in by the allocator
 * @ring_type: ring type the memory is allocated for
 *
 * Return: aligned virtual address of the ring memory, or NULL on failure
 */
static inline
void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
					   struct dp_srng *srng,
					   uint32_t ring_type)
{
	void *mem;

	qdf_assert(!srng->is_mem_prealloc);

	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
		dp_warn("dp_prealloc_get_consistent is null!");
		goto qdf;
	}

	mem =
		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
		(&srng->alloc_size,
		 &srng->base_vaddr_unaligned,
		 &srng->base_paddr_unaligned,
		 &srng->base_paddr_aligned,
		 DP_RING_BASE_ALIGN, ring_type);

	if (mem) {
		srng->is_mem_prealloc = true;
		goto end;
	}
	/* "qdf" label: fall back to the regular QDF consistent allocator */
qdf:
	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
					       &srng->base_vaddr_unaligned,
					       &srng->base_paddr_unaligned,
					       &srng->base_paddr_aligned,
					       DP_RING_BASE_ALIGN);
end:
	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
		srng, ring_type, srng->alloc_size, srng->num_entries);
	return mem;
}

/**
 * dp_srng_mem_free_consistent() - return SRNG ring memory to the pre-alloc
 *	pool, or release it through the consistent allocator
 * @soc: Datapath soc handle
 * @srng: SRNG descriptor whose backing memory is released
 *
 * Return: void
 */
static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	if (srng->is_mem_prealloc) {
		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
			dp_warn("dp_prealloc_put_consistent is null!");
			QDF_BUG(0);
			return;
		}
		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
			(srng->alloc_size,
			 srng->base_vaddr_unaligned,
			 srng->base_paddr_unaligned);

	} else {
		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
					srng->alloc_size,
					srng->base_vaddr_unaligned,
					srng->base_paddr_unaligned, 0);
	}
}

/**
 * dp_desc_multi_pages_mem_alloc() - allocate multi-page descriptor memory,
 *	preferring the pre-alloc pool and falling back to dynamic pages
 * @soc: Datapath soc handle
 * @desc_type: descriptor type the pages are for
 * @pages: multi-page descriptor filled in by the allocator
 * @element_size: size of one descriptor element
 * @element_num: number of descriptor elements
 * @memctxt: DMA context used for the dynamic fallback
 * @cacheable: true for cacheable pages
 *
 * Return: void (pages->num_pages is 0 on complete failure)
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint32_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable)
{
	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
		dp_warn("dp_get_multi_pages is null!");
		goto qdf;
	}

	pages->num_pages = 0;
	pages->is_mem_prealloc = 0;
	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
						element_size,
						element_num,
						pages,
						cacheable);
	if (pages->num_pages)
		goto end;

	/* "qdf" label: fall back to the regular QDF multi-page allocator */
qdf:
	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
				  element_num, memctxt, cacheable);
end:
	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
		desc_type, (int)element_size, element_num, cacheable);
}

/**
 * dp_desc_multi_pages_mem_free() - return multi-page descriptor memory to
 *	the pre-alloc pool, or free the dynamically allocated pages
 * @soc: Datapath soc handle
 * @desc_type: descriptor type the pages belong to
 * @pages: multi-page descriptor being released (zeroed on pool return)
 * @memctxt: DMA context used for the dynamic-free path
 * @cacheable: true for cacheable pages
 *
 * Return: void
 */
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable)
{
	if (pages->is_mem_prealloc) {
		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
			dp_warn("dp_put_multi_pages is null!");
			QDF_BUG(0);
			return;
		}

		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
		qdf_mem_zero(pages, sizeof(*pages));
	} else {
		qdf_mem_multi_pages_free(soc->osdev, pages,
					 memctxt, cacheable);
	}
}

#else

/* Non-pre-alloc build: plain consistent allocation, zero-initialized */
static inline
void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
					   struct dp_srng *srng,
					   uint32_t ring_type)

{
	void *mem;

	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
					       &srng->base_vaddr_unaligned,
					       &srng->base_paddr_unaligned,
					       &srng->base_paddr_aligned,
					       DP_RING_BASE_ALIGN);
	if (mem)
		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);

	return mem;
}

static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
}

#endif /* DP_MEM_PRE_ALLOC */

#ifdef QCA_SUPPORT_WDS_EXTENDED
bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return vdev->wds_ext_enabled;
}
#else
/* WDS-extended not compiled in: never enabled */
bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return false;
}
#endif

/**
 * dp_pdev_update_fast_rx_flag() - recompute pdev->rx_fast_flag; the fast Rx
 *	path is disabled when protocol tagging, NAWDS, multipass or mesh is
 *	active on the pdev/vdevs
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */
void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	uint8_t rx_fast_flag = true;

	/* Check if protocol tagging enable */
	if
	    (pdev->is_rx_protocol_tagging_enabled && !pdev->enhanced_stats_en) {
		rx_fast_flag = false;
		goto update_flag;
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		/* Check if any VDEV has NAWDS enabled */
		if (vdev->nawds_enabled) {
			rx_fast_flag = false;
			break;
		}

		/* Check if any VDEV has multipass enabled */
		if (vdev->multipass_en) {
			rx_fast_flag = false;
			break;
		}

		/* Check if any VDEV has mesh enabled */
		if (vdev->mesh_vdev) {
			rx_fast_flag = false;
			break;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

update_flag:
	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
	pdev->rx_fast_flag = rx_fast_flag;
}

/**
 * dp_srng_free() - release the memory backing an SRNG and reset the
 *	descriptor so it can be re-allocated
 * @soc: Datapath soc handle
 * @srng: SRNG descriptor to free
 *
 * Return: void
 */
void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
{
	if (srng->alloc_size && srng->base_vaddr_unaligned) {
		if (!srng->cached) {
			dp_srng_mem_free_consistent(soc, srng);
		} else {
			qdf_mem_free(srng->base_vaddr_unaligned);
		}
		srng->alloc_size = 0;
		srng->base_vaddr_unaligned = NULL;
	}
	srng->hal_srng = NULL;
}

qdf_export_symbol(dp_srng_free);

/**
 * dp_srng_init() - initialize an allocated SRNG via the arch-specific hook
 * @soc: Datapath soc handle
 * @srng: SRNG descriptor to initialize
 * @ring_type: ring type
 * @ring_num: ring number within the type
 * @mac_id: mac id the ring is associated with
 *
 * Return: QDF_STATUS from the arch-specific srng init
 */
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng, int ring_type,
			int ring_num, int mac_id)
{
	return soc->arch_ops.txrx_srng_init(soc, srng, ring_type,
					    ring_num, mac_id);
}

qdf_export_symbol(dp_srng_init);

/**
 * dp_srng_alloc() - allocate memory for an SRNG; entry count is clamped to
 *	the HAL maximum for the ring type
 * @soc: Datapath soc handle
 * @srng: SRNG descriptor to populate
 * @ring_type: ring type
 * @num_entries: requested number of entries
 * @cached: true to use cacheable (non-consistent) memory
 *
 * Return: QDF_STATUS_SUCCESS (also when already allocated) or
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
			 int ring_type, uint32_t num_entries,
			 bool cached)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	/* Idempotent: a second alloc on a live ring is a benign no-op */
	if (srng->base_vaddr_unaligned) {
		dp_init_err("%pK: Ring type: %d, is already allocated",
			    soc, ring_type);
		return QDF_STATUS_SUCCESS;
	}

	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
	srng->hal_srng = NULL;
	srng->alloc_size = num_entries * entry_size;
	srng->num_entries = num_entries;
	srng->cached = cached;

	if (!cached) {
		srng->base_vaddr_aligned =
			dp_srng_aligned_mem_alloc_consistent(soc,
							     srng,
							     ring_type);
	} else {
		srng->base_vaddr_aligned = qdf_aligned_malloc(
					&srng->alloc_size,
					&srng->base_vaddr_unaligned,
					&srng->base_paddr_unaligned,
					&srng->base_paddr_aligned,
					DP_RING_BASE_ALIGN);
	}

	if (!srng->base_vaddr_aligned)
		return QDF_STATUS_E_NOMEM;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_srng_alloc);

/**
 * dp_srng_deinit() - de-initialize an SRNG; PPE-DS interrupt teardown is
 *	skipped while a UMAC reset is in progress
 * @soc: Datapath soc handle
 * @srng: SRNG descriptor to de-initialize
 * @ring_type: ring type
 * @ring_num: ring number within the type
 *
 * Return: void
 */
void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
		    int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d not setup",
			    soc, ring_type, ring_num);
		return;
	}

	if (dp_check_umac_reset_in_progress(soc))
		goto srng_cleanup;

	if (soc->arch_ops.dp_free_ppeds_interrupts)
		soc->arch_ops.dp_free_ppeds_interrupts(soc, srng, ring_type,
						       ring_num);

srng_cleanup:
	hal_srng_cleanup(soc->hal_soc, srng->hal_srng,
			 dp_check_umac_reset_in_progress(soc));
	srng->hal_srng = NULL;
}

qdf_export_symbol(dp_srng_deinit);

/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * dp_srng_access_start() - start SRNG access, recording hp/tp into the HIF
 *	event history when an interrupt context is available
 * @int_ctx: DP interrupt context (NULL in non-interrupt callers)
 * @dp_soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle being accessed
 *
 * Return: result of dp_hal_srng_access_start()
 */
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	if (!int_ctx)
		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);

	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
}

/**
 * dp_srng_access_end() - end SRNG access, recording hp/tp into the HIF
 *	event history when an interrupt context is available
 * @int_ctx: DP interrupt context (NULL in non-interrupt callers)
 * @dp_soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle being accessed
 *
 * Return: result of dp_hal_srng_access_end()
 */
void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	if (!int_ctx)
		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);

	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
}

/* Record monitor-timer entry into the HIF event history */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}

/* Record monitor-timer exit into the HIF event history */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
#else

static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
}

static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
}

#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/**
 * dp_should_timer_irq_yield() - decide whether the poll timer should yield
 * @soc: Datapath soc handle
 * @work_done: work completed in the last pass (0 means nothing pending)
 * @start_time: log timestamp when the timer handler started
 *
 * Return: DP_TIMER_WORK_DONE when idle, DP_TIMER_TIME_EXHAUST when the
 *	   execution budget has elapsed, DP_TIMER_NO_YIELD otherwise
 */
enum timer_yield_status
dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
			  uint64_t start_time)
{
	uint64_t cur_time = qdf_get_log_timestamp();

	if (!work_done)
		return DP_TIMER_WORK_DONE;

	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
		return DP_TIMER_TIME_EXHAUST;

	return DP_TIMER_NO_YIELD;
}

qdf_export_symbol(dp_should_timer_irq_yield);

/**
 * dp_interrupt_timer() - poll-mode timer handler; services data-path SRNGs
 *	(full poll mode) or the monitor rings, then re-arms itself with a
 *	short or normal period depending on how much work remains
 * @arg: opaque pointer to the struct dp_soc
 *
 * Return: void
 */
void
dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done = 0, total_work_done = 0;
	int budget = 0xffff, i;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	/* Defaults past the last valid intr context; overwritten below when
	 * the monitor lmac is known.
	 */
	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	enum reg_wifi_band mon_band;
	int cpu = dp_srng_get_cpu();

	/*
	 * this logic makes all data path interfacing rings (UMAC/LMAC)
	 * and Monitor rings polling mode when NSS offload is disabled
	 */
	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				dp_service_srngs(&soc->intr_ctx[i], 0xffff,
						 cpu);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (dp_monitor_is_chan_band_known(pdev)) {
		mon_band = dp_monitor_get_chan_band(pdev);
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
							     lmac_iter,
							     remaining_quota);
			if (work_done) {
				budget -= work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* Re-arm quickly (1 ms) when work remains, else normal poll period */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}

#if defined(DP_INTR_POLL_BOTH)
/**
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (dp_is_monitor_mode_using_poll(soc) &&
	     soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() ==
	     QDF_GLOBAL_MONITOR_MODE)) {
		dp_info("Poll mode");
		return dp_soc_attach_poll(txrx_soc);
	} else {
		dp_info("Interrupt mode");
		return dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
/* Build forces poll mode unconditionally */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	return dp_soc_attach_poll(txrx_soc);
}
#else
/* Runtime config decides between poll and interrupt mode */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
		return dp_soc_attach_poll(txrx_soc);
	else
		return dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif

/**
 * dp_soc_ppeds_stop() - Stop PPE DS processing
 * @soc_handle: DP SOC handle
 *
 * Return: none
 */
static void dp_soc_ppeds_stop(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	if (soc->arch_ops.txrx_soc_ppeds_stop)
		soc->arch_ops.txrx_soc_ppeds_stop(soc);
}

#ifdef ENABLE_VERBOSE_DEBUG
/**
 * dp_enable_verbose_debug() - enable DP/HAL per-packet verbose tracing
 *	based on the per_pkt_trace config bitmap
 * @soc: Datapath soc handle
 *
 * Return: void
 */
void dp_enable_verbose_debug(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
		is_dp_verbose_debug_enabled = true;

	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
		hal_set_verbose_debug(true);
	else
		hal_set_verbose_debug(false);
}
#else
void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
#endif

/**
 * dp_lro_hash_setup() - build the LRO/GRO/RX-hash config (TCP flag mask and
 *	toeplitz hash keys) and send it to the FW via the lro_hash_config op
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when all of
 *	   LRO/GRO/RX-hash are disabled or the config op is missing, or the
 *	   FW-reported error status
 */
static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);

	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
						      pdev->pdev_id,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			    LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			    LRO_IPV6_SEED_ARR_SZ));

	return status;
}

#if \
defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/**
 * dp_reap_timer_init() - initialize the reap timer
 * @soc: data path SoC handle
 *
 * Return: void
 */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	dp_monitor_reap_timer_init(soc);
	dp_monitor_vdev_timer_init(soc);
}

/**
 * dp_reap_timer_deinit() - de-initialize the reap timer
 * @soc: data path SoC handle
 *
 * Return: void
 */
static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	dp_monitor_reap_timer_deinit(soc);
}
#else
/* WIN use case */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/* Configure LMAC rings in Polled mode */
	if (soc->lmac_polled_mode) {
		/*
		 * Timer to reap lmac rings.
		 */
		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
			       dp_service_lmac_rings, (void *)soc,
			       QDF_TIMER_TYPE_WAKE_APPS);
		soc->lmac_timer_init = 1;
		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
	}
}

static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	if (soc->lmac_timer_init) {
		qdf_timer_stop(&soc->lmac_reap_timer);
		qdf_timer_free(&soc->lmac_reap_timer);
		soc->lmac_timer_init = 0;
	}
}
#endif

#ifdef QCA_HOST2FW_RXBUF_RING
/**
 * dp_rxdma_ring_alloc() - allocate the RXDMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: 0 - success, > 0 - failure
 */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	int max_mac_rings;
	int i;
	int ring_size;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
	ring_size = wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);

	for (i = 0; i < max_mac_rings; i++) {
		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
				  RXDMA_BUF, ring_size, 0)) {
			dp_init_err("%pK: failed rx mac ring setup", soc);
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rxdma_ring_setup() - configure the RXDMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: 0 - success, > 0 - failure
 */
static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	int max_mac_rings;
	int i;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);

	for (i = 0; i < max_mac_rings; i++) {
		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
				 RXDMA_BUF, 1, i)) {
			dp_init_err("%pK: failed rx mac ring setup", soc);
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: void
 */
static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int i;

	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);

	dp_reap_timer_deinit(soc);
}

/**
 * dp_rxdma_ring_free() - Free the RXDMA rings
 * @pdev: Physical device handle
 *
 * Return: void
 */
static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
	int i;

	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
}

#else
1911 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev) 1912 { 1913 return QDF_STATUS_SUCCESS; 1914 } 1915 1916 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev) 1917 { 1918 return QDF_STATUS_SUCCESS; 1919 } 1920 1921 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev) 1922 { 1923 dp_reap_timer_deinit(soc); 1924 } 1925 1926 static void dp_rxdma_ring_free(struct dp_pdev *pdev) 1927 { 1928 } 1929 #endif 1930 1931 #ifdef IPA_OFFLOAD 1932 /** 1933 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring 1934 * @soc: data path instance 1935 * @pdev: core txrx pdev context 1936 * 1937 * Return: QDF_STATUS_SUCCESS: success 1938 * QDF_STATUS_E_RESOURCES: Error return 1939 */ 1940 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc, 1941 struct dp_pdev *pdev) 1942 { 1943 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 1944 int entries; 1945 1946 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { 1947 soc_cfg_ctx = soc->wlan_cfg_ctx; 1948 entries = 1949 wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx); 1950 1951 /* Setup second Rx refill buffer ring */ 1952 if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 1953 entries, 0)) { 1954 dp_init_err("%pK: dp_srng_alloc failed second" 1955 "rx refill ring", soc); 1956 return QDF_STATUS_E_FAILURE; 1957 } 1958 } 1959 1960 return QDF_STATUS_SUCCESS; 1961 } 1962 1963 #ifdef IPA_WDI3_VLAN_SUPPORT 1964 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc, 1965 struct dp_pdev *pdev) 1966 { 1967 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 1968 int entries; 1969 1970 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) && 1971 wlan_ipa_is_vlan_enabled()) { 1972 soc_cfg_ctx = soc->wlan_cfg_ctx; 1973 entries = 1974 wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx); 1975 1976 /* Setup second Rx refill buffer ring */ 1977 if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 1978 entries, 0)) { 1979 dp_init_err("%pK: alloc 
failed for 3rd rx refill ring",
				    soc);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_init_ipa_rx_alt_refill_buf_ring() - Init the third (VLAN) Rx refill
 *	buffer ring when IPA and VLAN support are enabled
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled()) {
		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
				 pdev->pdev_id)) {
			dp_init_err("%pK: init failed for 3rd rx refill ring",
				    soc);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/* Deinit the third (VLAN) Rx refill buffer ring, when applicable */
static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled())
		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
}

/* Free the third (VLAN) Rx refill buffer ring, when applicable */
static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled())
		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
}
#else
/* IPA_WDI3_VLAN_SUPPORT disabled: third refill ring helpers are no-ops */
static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
#endif

/**
 * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: void
 */
static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev
*pdev) 2053 { 2054 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2055 dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0); 2056 } 2057 2058 /** 2059 * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring 2060 * @soc: data path instance 2061 * @pdev: core txrx pdev context 2062 * 2063 * Return: QDF_STATUS_SUCCESS: success 2064 * QDF_STATUS_E_RESOURCES: Error return 2065 */ 2066 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc, 2067 struct dp_pdev *pdev) 2068 { 2069 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { 2070 if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 2071 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) { 2072 dp_init_err("%pK: dp_srng_init failed second" 2073 "rx refill ring", soc); 2074 return QDF_STATUS_E_FAILURE; 2075 } 2076 } 2077 2078 if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) { 2079 dp_deinit_ipa_rx_refill_buf_ring(soc, pdev); 2080 return QDF_STATUS_E_FAILURE; 2081 } 2082 2083 return QDF_STATUS_SUCCESS; 2084 } 2085 2086 /** 2087 * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring 2088 * @soc: data path instance 2089 * @pdev: core txrx pdev context 2090 * 2091 * Return: void 2092 */ 2093 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc, 2094 struct dp_pdev *pdev) 2095 { 2096 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2097 dp_srng_free(soc, &pdev->rx_refill_buf_ring2); 2098 } 2099 #else 2100 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc, 2101 struct dp_pdev *pdev) 2102 { 2103 return QDF_STATUS_SUCCESS; 2104 } 2105 2106 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc, 2107 struct dp_pdev *pdev) 2108 { 2109 return QDF_STATUS_SUCCESS; 2110 } 2111 2112 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc, 2113 struct dp_pdev *pdev) 2114 { 2115 } 2116 2117 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc, 2118 struct dp_pdev *pdev) 2119 { 2120 } 2121 2122 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct 
dp_soc *soc, 2123 struct dp_pdev *pdev) 2124 { 2125 return QDF_STATUS_SUCCESS; 2126 } 2127 2128 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc, 2129 struct dp_pdev *pdev) 2130 { 2131 } 2132 2133 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc, 2134 struct dp_pdev *pdev) 2135 { 2136 } 2137 #endif 2138 2139 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY 2140 2141 /** 2142 * dp_soc_cfg_history_attach() - Allocate and attach datapath config events 2143 * history 2144 * @soc: DP soc handle 2145 * 2146 * Return: None 2147 */ 2148 static void dp_soc_cfg_history_attach(struct dp_soc *soc) 2149 { 2150 dp_soc_frag_history_attach(soc, &soc->cfg_event_history, 2151 DP_CFG_EVT_HIST_MAX_SLOTS, 2152 DP_CFG_EVT_HIST_PER_SLOT_MAX, 2153 sizeof(struct dp_cfg_event), 2154 true, DP_CFG_EVENT_HIST_TYPE); 2155 } 2156 2157 /** 2158 * dp_soc_cfg_history_detach() - Detach and free DP config events history 2159 * @soc: DP soc handle 2160 * 2161 * Return: none 2162 */ 2163 static void dp_soc_cfg_history_detach(struct dp_soc *soc) 2164 { 2165 dp_soc_frag_history_detach(soc, &soc->cfg_event_history, 2166 DP_CFG_EVT_HIST_MAX_SLOTS, 2167 true, DP_CFG_EVENT_HIST_TYPE); 2168 } 2169 2170 #else 2171 static void dp_soc_cfg_history_attach(struct dp_soc *soc) 2172 { 2173 } 2174 2175 static void dp_soc_cfg_history_detach(struct dp_soc *soc) 2176 { 2177 } 2178 #endif 2179 2180 #ifdef DP_TX_HW_DESC_HISTORY 2181 /** 2182 * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history 2183 * 2184 * @soc: DP soc handle 2185 * 2186 * Return: None 2187 */ 2188 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc) 2189 { 2190 dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history, 2191 DP_TX_HW_DESC_HIST_MAX_SLOTS, 2192 DP_TX_HW_DESC_HIST_PER_SLOT_MAX, 2193 sizeof(struct dp_tx_hw_desc_evt), 2194 true, DP_TX_HW_DESC_HIST_TYPE); 2195 } 2196 2197 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc) 2198 { 2199 dp_soc_frag_history_detach(soc, 
&soc->tx_hw_desc_history, 2200 DP_TX_HW_DESC_HIST_MAX_SLOTS, 2201 true, DP_TX_HW_DESC_HIST_TYPE); 2202 } 2203 2204 #else /* DP_TX_HW_DESC_HISTORY */ 2205 static inline void 2206 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc) 2207 { 2208 } 2209 2210 static inline void 2211 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc) 2212 { 2213 } 2214 #endif /* DP_TX_HW_DESC_HISTORY */ 2215 2216 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 2217 #ifndef RX_DEFRAG_DO_NOT_REINJECT 2218 /** 2219 * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring 2220 * history. 2221 * @soc: DP soc handle 2222 * 2223 * Return: None 2224 */ 2225 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc) 2226 { 2227 soc->rx_reinject_ring_history = 2228 dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE, 2229 sizeof(struct dp_rx_reinject_history)); 2230 if (soc->rx_reinject_ring_history) 2231 qdf_atomic_init(&soc->rx_reinject_ring_history->index); 2232 } 2233 #else /* RX_DEFRAG_DO_NOT_REINJECT */ 2234 static inline void 2235 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc) 2236 { 2237 } 2238 #endif /* RX_DEFRAG_DO_NOT_REINJECT */ 2239 2240 /** 2241 * dp_soc_rx_history_attach() - Attach the ring history record buffers 2242 * @soc: DP soc structure 2243 * 2244 * This function allocates the memory for recording the rx ring, rx error 2245 * ring and the reinject ring entries. There is no error returned in case 2246 * of allocation failure since the record function checks if the history is 2247 * initialized or not. We do not want to fail the driver load in case of 2248 * failure to allocate memory for debug history. 
 *
 * Return: None
 */
static void dp_soc_rx_history_attach(struct dp_soc *soc)
{
	int i;
	uint32_t rx_ring_hist_size;
	uint32_t rx_refill_ring_hist_size;

	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);

	/* One history buffer per REO destination ring */
	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
		soc->rx_ring_history[i] = dp_context_alloc_mem(
				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
		if (soc->rx_ring_history[i])
			qdf_atomic_init(&soc->rx_ring_history[i]->index);
	}

	soc->rx_err_ring_history = dp_context_alloc_mem(
			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
	if (soc->rx_err_ring_history)
		qdf_atomic_init(&soc->rx_err_ring_history->index);

	dp_soc_rx_reinject_ring_history_attach(soc);

	/* One refill-ring history buffer per pdev */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
						soc,
						DP_RX_REFILL_RING_HIST_TYPE,
						rx_refill_ring_hist_size);

		if (soc->rx_refill_ring_history[i])
			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
	}
}

/**
 * dp_soc_rx_history_detach() - Free the ring history record buffers
 * @soc: DP soc structure
 *
 * Return: None
 */
static void dp_soc_rx_history_detach(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
				    soc->rx_ring_history[i]);

	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
			    soc->rx_err_ring_history);

	/*
	 * No need for a featurized detach since qdf_mem_free takes
	 * care of NULL pointer.
	 */
	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
			    soc->rx_reinject_ring_history);

	for (i = 0; i < MAX_PDEV_CNT; i++)
		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
				    soc->rx_refill_ring_history[i]);
}

#else
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
#endif

#ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
/**
 * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
 * buffer record history.
 * @soc: DP soc handle
 *
 * This function allocates memory to track the event for a monitor
 * status buffer, before its parsed and freed.
 *
 * Return: None
 */
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
				DP_MON_STATUS_BUF_HIST_TYPE,
				sizeof(struct dp_mon_status_ring_history));
	if (!soc->mon_status_ring_history) {
		/* Allocation failure is non-fatal; recording is skipped */
		dp_err("Failed to alloc memory for mon status ring history");
		return;
	}
}

/**
 * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
 * record history.
 * @soc: DP soc handle
 *
 * Return: None
 */
static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
			    soc->mon_status_ring_history);
}
#else
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
}

static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
}
#endif

#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
/**
 * dp_soc_tx_history_attach() - Attach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function allocates the memory for recording the tx tcl ring and
 * the tx comp ring entries. There is no error returned in case
 * of allocation failure since the record function checks if the history is
 * initialized or not. We do not want to fail the driver load in case of
 * failure to allocate memory for debug history.
 *
 * Return: None
 */
static void dp_soc_tx_history_attach(struct dp_soc *soc)
{
	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
				   DP_TX_TCL_HIST_MAX_SLOTS,
				   DP_TX_TCL_HIST_PER_SLOT_MAX,
				   sizeof(struct dp_tx_desc_event),
				   true, DP_TX_TCL_HIST_TYPE);
	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
				   DP_TX_COMP_HIST_MAX_SLOTS,
				   DP_TX_COMP_HIST_PER_SLOT_MAX,
				   sizeof(struct dp_tx_desc_event),
				   true, DP_TX_COMP_HIST_TYPE);
}

/**
 * dp_soc_tx_history_detach() - Detach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function frees the memory for recording the tx tcl ring and
 * the tx comp ring entries.
 *
 * Return: None
 */
static void dp_soc_tx_history_detach(struct dp_soc *soc)
{
	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
				   DP_TX_TCL_HIST_MAX_SLOTS,
				   true, DP_TX_TCL_HIST_TYPE);
	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
				   DP_TX_COMP_HIST_MAX_SLOTS,
				   true, DP_TX_COMP_HIST_TYPE);
}

#else
static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_fst_attach_wrapper() - Attach (or take a reference on) the RX FST
 * @soc: DP soc handle
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_rx_fst *rx_fst = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	/* for Lithium the below API is not registered
	 * hence fst attach happens for each pdev
	 */
	if (!soc->arch_ops.dp_get_rx_fst)
		return dp_rx_fst_attach(soc, pdev);

	rx_fst = soc->arch_ops.dp_get_rx_fst();

	/* for BE the FST attach is called only once per
	 * ML context. if rx_fst is already registered
	 * increase the ref count and return.
	 */
	if (rx_fst) {
		soc->rx_fst = rx_fst;
		pdev->rx_fst = rx_fst;
		soc->arch_ops.dp_rx_fst_ref();
	} else {
		ret = dp_rx_fst_attach(soc, pdev);
		if ((ret != QDF_STATUS_SUCCESS) &&
		    (ret != QDF_STATUS_E_NOSUPPORT))
			return ret;

		soc->arch_ops.dp_set_rx_fst(soc->rx_fst);
		soc->arch_ops.dp_rx_fst_ref();
	}
	return ret;
}

/**
 * dp_rx_fst_detach_wrapper() - Drop a reference on (or detach) the RX FST
 * @soc: DP soc handle
 * @pdev: DP pdev handle
 *
 * Return: None
 */
void
dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_rx_fst *rx_fst = NULL;

	/* for Lithium the below API is not registered
	 * hence fst detach happens for each pdev
	 */
	if (!soc->arch_ops.dp_get_rx_fst) {
		dp_rx_fst_detach(soc, pdev);
		return;
	}

	rx_fst = soc->arch_ops.dp_get_rx_fst();

	/* for BE the FST detach is called only when last
	 * ref count reaches 1.
	 */
	if (rx_fst) {
		if (soc->arch_ops.dp_rx_fst_deref() == 1)
			dp_rx_fst_detach(soc, pdev);
	}
	pdev->rx_fst = NULL;
}
#elif defined(WLAN_SUPPORT_RX_FISA)
QDF_STATUS
dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return dp_rx_fst_attach(soc, pdev);
}

void
dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
	dp_rx_fst_detach(soc, pdev);
}
#else
/* No flow tagging / FISA support: wrappers are no-ops */
QDF_STATUS
dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

void
dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif

/**
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @params: Params for PDEV attach
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
				struct cdp_pdev_attach_params *params)
{
	qdf_size_t pdev_context_size;
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = NULL;
	uint8_t pdev_id = params->pdev_id;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	QDF_STATUS ret;

	pdev_context_size =
		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
	if (pdev_context_size)
		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE,
					    pdev_context_size);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV memory allocation failed",
			    soc);
		goto fail0;
	}
	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
			  WLAN_MD_DP_PDEV, "dp_pdev");

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);

	if (!pdev->wlan_cfg_ctx) {
		dp_init_err("%pK: pdev cfg_attach failed", soc);
		goto fail1;
	}

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));

	pdev->soc = soc;
	pdev->pdev_id = pdev_id;
	soc->pdev_list[pdev_id] = pdev;

	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
	/*
	 * NOTE(review): pdev_count is incremented here but none of the
	 * failure paths below decrement it, while dp_pdev_detach() does a
	 * matching decrement on the success path — verify the counter stays
	 * consistent after a failed attach.
	 */
	soc->pdev_count++;

	/* Allocate memory for pdev srng rings */
	if (dp_pdev_srng_alloc(pdev)) {
		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
		goto fail2;
	}

	/* Setup second Rx refill buffer ring */
	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
			    soc);
		goto fail3;
	}

	/* Allocate memory for pdev rxdma rings */
	if (dp_rxdma_ring_alloc(soc, pdev)) {
		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
		goto fail4;
	}

	/* Rx specific init */
	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
		goto fail4;
	}

	if (dp_monitor_pdev_attach(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
		goto fail5;
	}

	soc->arch_ops.txrx_pdev_attach(pdev, params);

	/* Setup third Rx refill buffer ring */
	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
			    soc);
		/*
		 * NOTE(review): arch_ops.txrx_pdev_attach() succeeded above,
		 * but fail6 unwind does not call arch_ops.txrx_pdev_detach()
		 * — confirm the arch layer needs no teardown on this path.
		 */
		goto fail6;
	}

	ret = dp_rx_fst_attach_wrapper(soc, pdev);
	if ((ret != QDF_STATUS_SUCCESS) && (ret != QDF_STATUS_E_NOSUPPORT)) {
		dp_init_err("%pK: RX FST attach failed: pdev %d err %d",
			    soc, pdev_id, ret);
		goto fail7;
	}

	return QDF_STATUS_SUCCESS;

/* Unwind in reverse order of acquisition */
fail7:
	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
fail6:
	dp_monitor_pdev_detach(pdev);
fail5:
	dp_rx_pdev_desc_pool_free(pdev);
fail4:
	dp_rxdma_ring_free(pdev);
	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
fail3:
	dp_pdev_srng_free(pdev);
fail2:
	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
fail1:
	soc->pdev_list[pdev_id] = NULL;
	/*
	 * NOTE(review): pdev was obtained via dp_context_alloc_mem() but is
	 * released with qdf_mem_free() here (success-path teardown uses
	 * dp_context_free_mem()) — confirm the allocator pair is intentional.
	 */
	qdf_mem_free(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
 * @pdev: Datapath PDEV handle
 *
 * This is the last chance to flush all pending dp vdevs/peers,
 * some peer/vdev leak case like Non-SSR + peer unmap missing
 * will be covered here.
 *
 * Return: None
 */
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
	uint32_t i = 0;
	uint32_t num_vdevs = 0;
	struct dp_vdev *vdev = NULL;

	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
		return;

	/* Collect matching vdevs under the lock, flush them outside it */
	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
		      inactive_list_elem) {
		if (vdev->pdev != pdev)
			continue;

		vdev_arr[num_vdevs] = vdev;
		num_vdevs++;
		/* take reference to free */
		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
	}
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	for (i = 0; i < num_vdevs; i++) {
		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
	}
}

#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
 * for enable/disable of HW vdev stats
 * @soc: Datapath soc handle
 * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
 * @enable: flag to represent enable/disable of hw vdev stats
 *
 * Return: none
 */
static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
						   uint8_t pdev_id,
						   bool enable)
{
	/* Check SOC level config for HW offload vdev stats support */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev offload stats is disabled", soc);
		return;
	}

	/* Send HTT command to FW for enable of stats */
	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
}

/**
 * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
 * @soc: Datapath soc handle
 * @pdev_id: pdev_id (0,1,2)
 * @vdev_id_bitmask: bitmask with vdev_id(s) for which stats are to be
 *                   cleared on HW
 *
 * Return: none
 */
static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
	/* Check SOC level config for HW offload vdev stats support */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev offload stats is disabled", soc);
		return;
	}

	/* Send HTT command to FW for reset of stats */
	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
					 vdev_id_bitmask);
}
#else
static void
dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
				       bool enable)
{
}

static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
}
#endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */

/**
 * dp_pdev_deinit() - Deinit txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Return: None
 */
static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	qdf_nbuf_t curr_nbuf, next_nbuf;

	/* Guard against double deinit */
	if (pdev->pdev_deinit)
		return;

	dp_tx_me_exit(pdev);
	dp_rx_pdev_buffers_free(pdev);
	dp_rx_pdev_desc_pool_deinit(pdev);
	dp_pdev_bkp_stats_detach(pdev);
	qdf_event_destroy(&pdev->fw_peer_stats_event);
	qdf_event_destroy(&pdev->fw_stats_event);
	qdf_event_destroy(&pdev->fw_obss_stats_event);
	if (pdev->sojourn_buf)
		qdf_nbuf_free(pdev->sojourn_buf);

	dp_pdev_flush_pending_vdevs(pdev);
	dp_tx_desc_flush(pdev, NULL, true);

	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	dp_monitor_pdev_deinit(pdev);

	dp_pdev_srng_deinit(pdev);

	dp_ipa_uc_detach(pdev->soc, pdev);
	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
	dp_rxdma_ring_cleanup(pdev->soc, pdev);

	/* Drain the invalid-peer MSDU list */
	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}
	pdev->invalid_peer_head_msdu = NULL;
	pdev->invalid_peer_tail_msdu = NULL;

	dp_wdi_event_detach(pdev);
	pdev->pdev_deinit = 1;
}

/**
 * dp_pdev_deinit_wifi3() - Deinit txrx pdev
 * @psoc: Datapath psoc handle
 * @pdev_id: Id of datapath PDEV handle
 * @force: Force deinit
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
		     int force)
{
	struct dp_pdev *txrx_pdev;

	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
						       pdev_id);

	if (!txrx_pdev)
		return QDF_STATUS_E_FAILURE;

	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
 * @txrx_pdev: Datapath PDEV handle
 *
 * Return: None
 */
static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;

	dp_monitor_tx_capture_debugfs_init(pdev);

	/* debugfs init failure is logged but not propagated */
	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
	}
}

/**
 * dp_pdev_post_attach_wifi3() - attach txrx pdev post
 * @soc: Datapath soc handle
 * @pdev_id: pdev id of pdev
 *
 * Return: QDF_STATUS
 */
static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
				     uint8_t pdev_id)
{
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
			    (struct dp_soc *)soc, pdev_id);
		/* NOTE(review): returns QDF_STATUS values despite the
		 * declared 'int' return type — presumably intentional for
		 * the cdp ops table; verify against the callback signature.
		 */
		return QDF_STATUS_E_FAILURE;
	}

	dp_pdev_post_attach((struct cdp_pdev *)pdev);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_detach() - Complete rest of pdev detach
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Return: None
 */
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;

	dp_rx_fst_detach_wrapper(soc, pdev);
	dp_pdev_htt_stats_dbgfs_deinit(pdev);
	dp_rx_pdev_desc_pool_free(pdev);
	dp_monitor_pdev_detach(pdev);
	dp_rxdma_ring_free(pdev);
	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
	dp_pdev_srng_free(pdev);

	soc->pdev_count--;
	soc->pdev_list[pdev->pdev_id] = NULL;

	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
			     WLAN_MD_DP_PDEV, "dp_pdev");
	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
}

/**
 * dp_pdev_detach_wifi3() - detach txrx pdev
 * @psoc: Datapath soc handle
 * @pdev_id: pdev id of pdev
 * @force: Force detach
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
				       int force)
{
	struct dp_pdev *pdev;
	struct dp_soc *soc = (struct dp_soc *)psoc;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
						  pdev_id);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
			    (struct dp_soc *)psoc, pdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	soc->arch_ops.txrx_pdev_detach(pdev);

	dp_pdev_detach((struct cdp_pdev *)pdev, force);
	return QDF_STATUS_SUCCESS;
}

void
dp_soc_print_inactive_objects(struct dp_soc *soc)
{
	struct dp_peer *peer = NULL;
	struct dp_peer *tmp_peer = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_vdev *tmp_vdev = NULL;
	int i = 0;
	uint32_t count;

	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
	    TAILQ_EMPTY(&soc->inactive_vdev_list))
		return;

	/* Dump per-module reference counts of every leaked peer */
	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
			   inactive_list_elem, tmp_peer) {
		for (i = 0; i < DP_MOD_ID_MAX; i++) {
			count = qdf_atomic_read(&peer->mod_refs[i]);
			if (count)
				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
					       peer, i, count);
		}
	}

	/* Dump per-module reference counts of every leaked vdev */
	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
			   inactive_list_elem, tmp_vdev) {
		for (i = 0; i < DP_MOD_ID_MAX; i++) {
			count = qdf_atomic_read(&vdev->mod_refs[i]);
			if (count)
				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
					       vdev, i, count);
		}
	}
	/* Leaked objects at this point are fatal */
	QDF_BUG(0);
}

/**
 * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Return: None
 */
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	soc->arch_ops.txrx_soc_deinit(soc);
}

/**
 * dp_soc_detach() - Detach rest of txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Return: None
 */
static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	soc->arch_ops.txrx_soc_detach(soc);

	dp_runtime_deinit();

	dp_sysfs_deinitialize_stats(soc);
	dp_soc_swlm_detach(soc);
	dp_soc_tx_desc_sw_pools_free(soc);
	dp_soc_srng_free(soc);
	dp_hw_link_desc_ring_free(soc);
	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
	/* Free all debug history buffers */
	dp_soc_tx_hw_desc_history_detach(soc);
	dp_soc_tx_history_detach(soc);
	dp_soc_mon_status_ring_history_detach(soc);
	dp_soc_rx_history_detach(soc);
	dp_soc_cfg_history_detach(soc);

	if (!dp_monitor_modularized_enable()) {
		dp_mon_soc_detach_wrapper(soc);
	}

	qdf_mem_free(soc->cdp_soc.ops);
	qdf_mem_common_free(soc);
}

/**
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Return: None
 */
static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
{
	dp_soc_detach(txrx_soc);
}

#ifdef QCA_HOST2FW_RXBUF_RING
#ifdef IPA_WDI3_VLAN_SUPPORT
/* Set up the third Rx refill ring (VLAN-over-IPA) if it was allocated */
static inline
void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
				 struct dp_pdev *pdev,
				 uint8_t idx)
{
	if (pdev->rx_refill_buf_ring3.hal_srng)
		htt_srng_setup(soc->htt_handle, idx,
			       pdev->rx_refill_buf_ring3.hal_srng,
			       RXDMA_BUF);
}
#else
static inline
void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
				 struct dp_pdev *pdev,
				 uint8_t idx)
{ }
#endif

/**
 * dp_rxdma_ring_config() - configure the RX DMA rings
 * @soc: data path SoC handle
 *
 * This function is used to configure the MAC rings.
 * On MCL host provides buffers in Host2FW ring
 * FW refills (copies) buffers to the ring and updates
 * ring_idx in register
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			htt_srng_setup(soc->htt_handle, i,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* Second refill ring (IPA), only if allocated */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, i,
					       pdev->rx_refill_buf_ring2
					       .hal_srng,
					       RXDMA_BUF);

			dp_rxdma_setup_refill_ring3(soc, pdev, i);

			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
			dp_err("pdev_id %d max_mac_rings %d",
			       pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					       pdev->rx_mac_buf_ring[mac_id]
					       .hal_srng,
					       RXDMA_BUF);

				if (!soc->rxdma2sw_rings_not_supported)
					dp_htt_setup_rxdma_err_dst_ring(soc,
						mac_for_pdev, lmac_id);

				/* Configure monitor mode rings */
				status = dp_monitor_htt_srng_setup(soc, pdev,
								   lmac_id,
								   mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	dp_reap_timer_init(soc);
	return status;
}
#else
/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	/* Configure monitor mode rings */
	dp_monitor_soc_htt_srng_setup(soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rx_refill_buf_ring[lmac_id].
				       hal_srng, RXDMA_BUF);

		/* Configure monitor mode rings */
		dp_monitor_htt_srng_setup(soc, pdev,
					  lmac_id,
					  mac_for_pdev);
		if (!soc->rxdma2sw_rings_not_supported)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
				       RXDMA_DST);
	}

	dp_reap_timer_init(soc);
	return status;
}
#endif

/**
 * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
 *
 * This function is used to configure the FSE HW block in RX OLE on a
 * per pdev basis. Here, we will be programming parameters related to
 * the Flow Search Table.
 *
 * @soc: data path SoC handle
 *
 * Return: zero on success, non-zero on failure
 */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
static QDF_STATUS
dp_rx_target_fst_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		/* Flow search is not enabled if NSS offload is enabled */
		if (pdev &&
		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
			if (status != QDF_STATUS_SUCCESS)
				break;
		}
	}
	return status;
}
#elif defined(WLAN_SUPPORT_RX_FISA)
/**
 * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
 * @soc: SoC handle
 *
 * Return: Success
 */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	QDF_STATUS status;
	struct dp_rx_fst *fst = soc->rx_fst;

	/* Check if it is enabled in the INI */
	if (!soc->fisa_enable) {
		dp_err("RX FISA feature is disabled");
		return QDF_STATUS_E_NOSUPPORT;
	}

	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
		       status);
		return status;
	}

	/* If the FST lives in CMEM, publish its base address to HW */
	if (soc->fst_cmem_base) {
		soc->fst_in_cmem = true;
		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
					     soc->fst_cmem_base & 0xffffffff,
					     soc->fst_cmem_base >> 32);
	}
	return status;
}

#define FISA_MAX_TIMEOUT 0xffffffff
#define FISA_DISABLE_TIMEOUT 0
/* Send the FISA aggregation timeout configuration for pdev 0 to FW */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	struct dp_htt_rx_fisa_cfg fisa_config;

	fisa_config.pdev_id = 0;
	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;

	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
}

#else /* !WLAN_SUPPORT_RX_FISA */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_SUPPORT_RX_FISA */

#ifndef WLAN_SUPPORT_RX_FISA
/* FISA disabled: all helpers reduce to successful no-ops */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}

static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
{
}

static void dp_resume_fse_cache_flush(struct dp_soc *soc)
{
}
#endif /* !WLAN_SUPPORT_RX_FISA */

#ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */

#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
 * @soc: DP Tx/Rx handle
 *
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
{
	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
	QDF_STATUS status;

	/*
	 * Program RxDMA to override the reo destination indication
	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
	 * thereby driving the packet to REO2PPE ring.
	 * If the MSDU is spanning more than 1 buffer, then this
	 * override is not done.
	 */
	htt_cfg.override = 1;
	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
	htt_cfg.multi_buffer_msdu_override_en = 0;

	/*
	 * Override use_ppe to 0 in RxOLE for the following
	 * cases.
	 */
	htt_cfg.intra_bss_override = 1;
	htt_cfg.decap_raw_override = 1;
	htt_cfg.decap_nwifi_override = 1;
	htt_cfg.ip_frag_override = 1;

	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
	if (status != QDF_STATUS_SUCCESS)
		dp_err("RxOLE and RxDMA PPE config failed %d", status);

	return status;
}

#else
static inline
QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* WLAN_SUPPORT_PPEDS */

#ifdef DP_UMAC_HW_RESET_SUPPORT
/* Register the host-side callbacks for every UMAC reset action */
static void dp_register_umac_reset_handlers(struct dp_soc *soc)
{
	dp_umac_reset_register_rx_action_callback(soc,
					dp_umac_reset_action_trigger_recovery,
					UMAC_RESET_ACTION_DO_TRIGGER_RECOVERY);

	dp_umac_reset_register_rx_action_callback(soc,
		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);

	dp_umac_reset_register_rx_action_callback(soc,
					dp_umac_reset_handle_post_reset,
					UMAC_RESET_ACTION_DO_POST_RESET_START);

	dp_umac_reset_register_rx_action_callback(soc,
				dp_umac_reset_handle_post_reset_complete,
				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);

}
#else
static void dp_register_umac_reset_handlers(struct dp_soc *soc)
{
}
#endif
/**
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Opaque Datapath SOC handle
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS
dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct hal_reo_params reo_params;

	htt_soc_attach_target(soc->htt_handle);

	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
		return status;
	}

	status = dp_rxdma_ring_config(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup messages to target");
		return status;
	}

	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt ring config message to target");
		return status;
	}

	status = dp_soc_umac_reset_init(cdp_soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to initialize UMAC reset");
		return status;
	}

	dp_register_umac_reset_handlers(soc);

	status = dp_rx_target_fst_config(soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to send htt fst setup config message to target");
		return status;
	}

	/* FISA config only when FST setup fully succeeded (not E_NOSUPPORT) */
	if (status == QDF_STATUS_SUCCESS) {
		status = dp_rx_fisa_config(soc);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt FISA config message to target");
			return status;
		}
	}

	DP_STATS_INIT(soc);

	dp_runtime_init(soc);

	/* Enable HW vdev offload stats if feature is supported */
	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
				       soc->ctrl_psoc);
	/* Setup HW REO */
	qdf_mem_zero(&reo_params, sizeof(reo_params));

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/*
		 * Reo ring remap is not required if both radios
		 * are offloaded to NSS
		 */
		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
						   &reo_params.remap1,
						   &reo_params.remap2))
			reo_params.rx_hash_enabled = true;
		else
			reo_params.rx_hash_enabled = false;
	}

	/*
	 * set the
fragment destination ring 3421 */ 3422 dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring); 3423 3424 if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) 3425 reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE; 3426 3427 reo_params.reo_qref = &soc->reo_qref; 3428 hal_reo_setup(soc->hal_soc, &reo_params, 1); 3429 3430 hal_reo_set_err_dst_remap(soc->hal_soc); 3431 3432 soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc); 3433 3434 return QDF_STATUS_SUCCESS; 3435 } 3436 3437 /** 3438 * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table 3439 * @soc: SoC handle 3440 * @vdev: vdev handle 3441 * @vdev_id: vdev_id 3442 * 3443 * Return: None 3444 */ 3445 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc, 3446 struct dp_vdev *vdev, 3447 uint8_t vdev_id) 3448 { 3449 QDF_ASSERT(vdev_id <= MAX_VDEV_CNT); 3450 3451 qdf_spin_lock_bh(&soc->vdev_map_lock); 3452 3453 if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) != 3454 QDF_STATUS_SUCCESS) { 3455 dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u", 3456 soc, vdev, vdev_id); 3457 qdf_spin_unlock_bh(&soc->vdev_map_lock); 3458 return; 3459 } 3460 3461 if (!soc->vdev_id_map[vdev_id]) 3462 soc->vdev_id_map[vdev_id] = vdev; 3463 else 3464 QDF_ASSERT(0); 3465 3466 qdf_spin_unlock_bh(&soc->vdev_map_lock); 3467 } 3468 3469 /** 3470 * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table 3471 * @soc: SoC handle 3472 * @vdev: vdev handle 3473 * 3474 * Return: None 3475 */ 3476 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc, 3477 struct dp_vdev *vdev) 3478 { 3479 qdf_spin_lock_bh(&soc->vdev_map_lock); 3480 QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev); 3481 3482 soc->vdev_id_map[vdev->vdev_id] = NULL; 3483 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG); 3484 qdf_spin_unlock_bh(&soc->vdev_map_lock); 3485 } 3486 3487 /** 3488 * dp_vdev_pdev_list_add() - add vdev into pdev's list 3489 * @soc: soc handle 3490 * @pdev: pdev handle 3491 * @vdev: vdev handle 3492 * 3493 * 
Return: none 3494 */ 3495 static void dp_vdev_pdev_list_add(struct dp_soc *soc, 3496 struct dp_pdev *pdev, 3497 struct dp_vdev *vdev) 3498 { 3499 qdf_spin_lock_bh(&pdev->vdev_list_lock); 3500 if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) != 3501 QDF_STATUS_SUCCESS) { 3502 dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK", 3503 soc, vdev); 3504 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 3505 return; 3506 } 3507 /* add this vdev into the pdev's list */ 3508 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem); 3509 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 3510 } 3511 3512 /** 3513 * dp_vdev_pdev_list_remove() - remove vdev from pdev's list 3514 * @soc: SoC handle 3515 * @pdev: pdev handle 3516 * @vdev: VDEV handle 3517 * 3518 * Return: none 3519 */ 3520 static void dp_vdev_pdev_list_remove(struct dp_soc *soc, 3521 struct dp_pdev *pdev, 3522 struct dp_vdev *vdev) 3523 { 3524 uint8_t found = 0; 3525 struct dp_vdev *tmpvdev = NULL; 3526 3527 qdf_spin_lock_bh(&pdev->vdev_list_lock); 3528 TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) { 3529 if (tmpvdev == vdev) { 3530 found = 1; 3531 break; 3532 } 3533 } 3534 3535 if (found) { 3536 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem); 3537 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG); 3538 } else { 3539 dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK", 3540 soc, vdev, pdev, &pdev->vdev_list); 3541 QDF_ASSERT(0); 3542 } 3543 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 3544 } 3545 3546 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 3547 /** 3548 * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol 3549 * @vdev: Datapath VDEV handle 3550 * 3551 * Return: None 3552 */ 3553 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev) 3554 { 3555 vdev->osif_rx_eapol = NULL; 3556 } 3557 3558 /** 3559 * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol 3560 * @vdev: DP vdev handle 3561 * @txrx_ops: Tx and Rx operations 3562 * 3563 * Return: None 3564 */ 
static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
}
#else
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
}

static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
/* Copy the MLD MAC address, when one is provided, into the vdev */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
	if (vdev_info->mld_mac_addr)
		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
}
#else
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{

}
#endif

#ifdef DP_TRAFFIC_END_INDICATION
/**
 * dp_tx_vdev_traffic_end_indication_attach() - Initialize data end indication
 *                                              related members in VDEV
 * @vdev: DP vdev handle
 *
 * Return: None
 */
static inline void
dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
{
	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
}

/**
 * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
 *                                              related members in VDEV
 * @vdev: DP vdev handle
 *
 * Return: None
 */
static inline void
dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
{
	qdf_nbuf_t nbuf;

	/* drain and free any packets still queued for end indication */
	while ((nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q)) != NULL)
		qdf_nbuf_free(nbuf);
}
#else
static inline void
dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
{}

static inline void
dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
{}
#endif

/**
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @cdp_soc: CDP SoC context
 * @pdev_id: PDEV ID for vdev creation
 * @vdev_info: parameters used for vdev creation
 *
 * Return: status
 */
static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t pdev_id,
				       struct cdp_vdev_info *vdev_info)
{
	int i = 0;
	qdf_size_t vdev_context_size;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_vdev *vdev;
	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
	uint8_t vdev_id = vdev_info->vdev_id;
	enum wlan_op_mode op_mode = vdev_info->op_mode;
	enum wlan_op_subtype subtype = vdev_info->subtype;
	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;

	/* vdev context size is architecture specific */
	vdev_context_size =
		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
	vdev = qdf_mem_malloc(vdev_context_size);

	/* qdf_mem_free(NULL) is a no-op, so freeing here is safe even if
	 * the allocation above failed
	 */
	if (!pdev) {
		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
			    cdp_soc, pdev_id);
		qdf_mem_free(vdev);
		goto fail0;
	}

	if (!vdev) {
		dp_init_err("%pK: DP VDEV memory allocation failed",
			    cdp_soc);
		goto fail0;
	}

	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
			  WLAN_MD_DP_VDEV, "dp_vdev");

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->vdev_stats_id = vdev_stats_id;
	vdev->opmode = op_mode;
	vdev->subtype = subtype;
	vdev->osdev = soc->osdev;

	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
	vdev->multipass_en = false;
	vdev->wrap_vdev = false;
	dp_vdev_init_rx_eapol(vdev);
	qdf_atomic_init(&vdev->ref_cnt);
	for (i = 0; i < DP_MOD_ID_MAX; i++)
		qdf_atomic_init(&vdev->mod_refs[i]);

	/* Take one reference for create */
	qdf_atomic_inc(&vdev->ref_cnt);
	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
	vdev->num_peers = 0;
#ifdef notyet
	vdev->filters_num = 0;
#endif
	vdev->lmac_id = pdev->lmac_id;

	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);

	dp_vdev_save_mld_addr(vdev, vdev_info);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	qdf_spinlock_create(&vdev->peer_list_lock);
	TAILQ_INIT(&vdev->peer_list);
	dp_peer_multipass_list_init(vdev);
	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
		   soc->intr_mode == DP_INTR_MSI &&
		   wlan_op_mode_monitor == vdev->opmode) {
		/* Timer to reap status ring in mission mode */
		dp_monitor_vdev_timer_start(soc);
	}

	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);

	/* monitor vdevs take an early-return path: no tx/rx datapath setup */
	if (wlan_op_mode_monitor == vdev->opmode) {
		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
			dp_monitor_pdev_set_mon_vdev(vdev);
			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->igmp_mcast_enhanc_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
	vdev->prev_tx_enq_tstamp = 0;
	vdev->prev_rx_deliver_tstamp = 0;
	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
	dp_tx_vdev_traffic_end_indication_attach(vdev);

	dp_vdev_pdev_list_add(soc, pdev, vdev);
	pdev->vdev_count++;

	if (wlan_op_mode_sta != vdev->opmode &&
	    wlan_op_mode_ndi != vdev->opmode)
		vdev->ap_bridge_enabled = true;
	else
		vdev->ap_bridge_enabled = false;
	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
		     cdp_soc, vdev->ap_bridge_enabled);

	dp_tx_vdev_attach(vdev);

	dp_monitor_vdev_attach(vdev);
	/* LRO hash is configured once per pdev, on the first vdev */
	if (!pdev->is_lro_hash_configured) {
		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
			pdev->is_lro_hash_configured = true;
		else
			dp_err("LRO hash setup failure!");
	}

	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_ATTACH, vdev);
	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT") vdev_id %d", vdev,
		QDF_MAC_ADDR_REF(vdev->mac_addr.raw), vdev->vdev_id);
	DP_STATS_INIT(vdev);

	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
		goto fail0;

	/* STA vdevs get a self peer created with the vdev MAC address */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);

	dp_pdev_update_fast_rx_flag(soc, pdev);

	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_vdev_fetch_tx_handler() - Fetch Tx handlers
 * @vdev: struct dp_vdev *
 * @soc: struct dp_soc *
 * @ctx: struct ol_txrx_hardtart_ctxt *
 */
static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
					    struct dp_soc *soc,
					    struct ol_txrx_hardtart_ctxt *ctx)
{
	/* Enable vdev_id check only for ap, if flag is enabled */
	if (vdev->mesh_vdev)
		ctx->tx = dp_tx_send_mesh;
	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
		 (vdev->opmode == wlan_op_mode_ap)) {
		ctx->tx = dp_tx_send_vdev_id_check;
		ctx->tx_fast = dp_tx_send_vdev_id_check;
	} else {
		ctx->tx = dp_tx_send;
		ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
3820 } 3821 3822 /* Avoid check in regular exception Path */ 3823 if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) && 3824 (vdev->opmode == wlan_op_mode_ap)) 3825 ctx->tx_exception = dp_tx_send_exception_vdev_id_check; 3826 else 3827 ctx->tx_exception = dp_tx_send_exception; 3828 } 3829 3830 /** 3831 * dp_vdev_register_tx_handler() - Register Tx handler 3832 * @vdev: struct dp_vdev * 3833 * @soc: struct dp_soc * 3834 * @txrx_ops: struct ol_txrx_ops * 3835 */ 3836 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev, 3837 struct dp_soc *soc, 3838 struct ol_txrx_ops *txrx_ops) 3839 { 3840 struct ol_txrx_hardtart_ctxt ctx = {0}; 3841 3842 dp_vdev_fetch_tx_handler(vdev, soc, &ctx); 3843 3844 txrx_ops->tx.tx = ctx.tx; 3845 txrx_ops->tx.tx_fast = ctx.tx_fast; 3846 txrx_ops->tx.tx_exception = ctx.tx_exception; 3847 3848 dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d", 3849 wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx), 3850 vdev->opmode, vdev->vdev_id); 3851 } 3852 #else /* QCA_HOST_MODE_WIFI_DISABLED */ 3853 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev, 3854 struct dp_soc *soc, 3855 struct ol_txrx_ops *txrx_ops) 3856 { 3857 } 3858 3859 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev, 3860 struct dp_soc *soc, 3861 struct ol_txrx_hardtart_ctxt *ctx) 3862 { 3863 } 3864 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 3865 3866 /** 3867 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer 3868 * @soc_hdl: Datapath soc handle 3869 * @vdev_id: id of Datapath VDEV handle 3870 * @osif_vdev: OSIF vdev handle 3871 * @txrx_ops: Tx and Rx operations 3872 * 3873 * Return: DP VDEV handle on success, NULL on failure 3874 */ 3875 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl, 3876 uint8_t vdev_id, 3877 ol_osif_vdev_handle osif_vdev, 3878 struct ol_txrx_ops *txrx_ops) 3879 { 3880 struct dp_soc *soc = 
cdp_soc_t_to_dp_soc(soc_hdl); 3881 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 3882 DP_MOD_ID_CDP); 3883 3884 if (!vdev) 3885 return QDF_STATUS_E_FAILURE; 3886 3887 vdev->osif_vdev = osif_vdev; 3888 vdev->osif_rx = txrx_ops->rx.rx; 3889 vdev->osif_rx_stack = txrx_ops->rx.rx_stack; 3890 vdev->osif_rx_flush = txrx_ops->rx.rx_flush; 3891 vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush; 3892 vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap; 3893 vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx; 3894 vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush; 3895 vdev->osif_get_key = txrx_ops->get_key; 3896 dp_monitor_vdev_register_osif(vdev, txrx_ops); 3897 vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext; 3898 vdev->tx_comp = txrx_ops->tx.tx_comp; 3899 vdev->stats_cb = txrx_ops->rx.stats_rx; 3900 vdev->tx_classify_critical_pkt_cb = 3901 txrx_ops->tx.tx_classify_critical_pkt_cb; 3902 #ifdef notyet 3903 #if ATH_SUPPORT_WAPI 3904 vdev->osif_check_wai = txrx_ops->rx.wai_check; 3905 #endif 3906 #endif 3907 #ifdef UMAC_SUPPORT_PROXY_ARP 3908 vdev->osif_proxy_arp = txrx_ops->proxy_arp; 3909 #endif 3910 vdev->me_convert = txrx_ops->me_convert; 3911 vdev->get_tsf_time = txrx_ops->get_tsf_time; 3912 3913 dp_vdev_register_rx_eapol(vdev, txrx_ops); 3914 3915 dp_vdev_register_tx_handler(vdev, soc, txrx_ops); 3916 3917 dp_init_info("%pK: DP Vdev Register success", soc); 3918 3919 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 3920 return QDF_STATUS_SUCCESS; 3921 } 3922 3923 #ifdef WLAN_FEATURE_11BE_MLO 3924 void dp_peer_delete(struct dp_soc *soc, 3925 struct dp_peer *peer, 3926 void *arg) 3927 { 3928 if (!peer->valid) 3929 return; 3930 3931 dp_peer_delete_wifi3((struct cdp_soc_t *)soc, 3932 peer->vdev->vdev_id, 3933 peer->mac_addr.raw, 0, 3934 peer->peer_type); 3935 } 3936 #else 3937 void dp_peer_delete(struct dp_soc *soc, 3938 struct dp_peer *peer, 3939 void *arg) 3940 { 3941 if (!peer->valid) 3942 return; 3943 3944 dp_peer_delete_wifi3((struct cdp_soc_t *)soc, 
3945 peer->vdev->vdev_id, 3946 peer->mac_addr.raw, 0, 3947 CDP_LINK_PEER_TYPE); 3948 } 3949 #endif 3950 3951 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 3952 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg) 3953 { 3954 if (!peer->valid) 3955 return; 3956 3957 if (IS_MLO_DP_LINK_PEER(peer)) 3958 dp_peer_delete_wifi3((struct cdp_soc_t *)soc, 3959 peer->vdev->vdev_id, 3960 peer->mac_addr.raw, 0, 3961 CDP_LINK_PEER_TYPE); 3962 } 3963 #else 3964 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg) 3965 { 3966 } 3967 #endif 3968 /** 3969 * dp_vdev_flush_peers() - Forcibily Flush peers of vdev 3970 * @vdev_handle: Datapath VDEV handle 3971 * @unmap_only: Flag to indicate "only unmap" 3972 * @mlo_peers_only: true if only MLO peers should be flushed 3973 * 3974 * Return: void 3975 */ 3976 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, 3977 bool unmap_only, 3978 bool mlo_peers_only) 3979 { 3980 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; 3981 struct dp_pdev *pdev = vdev->pdev; 3982 struct dp_soc *soc = pdev->soc; 3983 struct dp_peer *peer; 3984 uint32_t i = 0; 3985 3986 3987 if (!unmap_only) { 3988 if (!mlo_peers_only) 3989 dp_vdev_iterate_peer_lock_safe(vdev, 3990 dp_peer_delete, 3991 NULL, 3992 DP_MOD_ID_CDP); 3993 else 3994 dp_vdev_iterate_peer_lock_safe(vdev, 3995 dp_mlo_peer_delete, 3996 NULL, 3997 DP_MOD_ID_CDP); 3998 } 3999 4000 for (i = 0; i < soc->max_peer_id ; i++) { 4001 peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP); 4002 4003 if (!peer) 4004 continue; 4005 4006 if (peer->vdev != vdev) { 4007 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4008 continue; 4009 } 4010 4011 if (!mlo_peers_only) { 4012 dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap", 4013 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 4014 dp_rx_peer_unmap_handler(soc, i, 4015 vdev->vdev_id, 4016 peer->mac_addr.raw, 0, 4017 DP_PEER_WDS_COUNT_INVALID); 4018 SET_PEER_REF_CNT_ONE(peer); 4019 } else if 
(IS_MLO_DP_LINK_PEER(peer) || 4020 IS_MLO_DP_MLD_PEER(peer)) { 4021 dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap", 4022 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 4023 dp_rx_peer_unmap_handler(soc, i, 4024 vdev->vdev_id, 4025 peer->mac_addr.raw, 0, 4026 DP_PEER_WDS_COUNT_INVALID); 4027 SET_PEER_REF_CNT_ONE(peer); 4028 } 4029 4030 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4031 } 4032 } 4033 4034 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT 4035 /** 4036 * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id 4037 * @soc_hdl: Datapath soc handle 4038 * @vdev_stats_id: Address of vdev_stats_id 4039 * 4040 * Return: QDF_STATUS 4041 */ 4042 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl, 4043 uint8_t *vdev_stats_id) 4044 { 4045 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 4046 uint8_t id = 0; 4047 4048 if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { 4049 *vdev_stats_id = CDP_INVALID_VDEV_STATS_ID; 4050 return QDF_STATUS_E_FAILURE; 4051 } 4052 4053 while (id < CDP_MAX_VDEV_STATS_ID) { 4054 if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) { 4055 *vdev_stats_id = id; 4056 return QDF_STATUS_SUCCESS; 4057 } 4058 id++; 4059 } 4060 4061 *vdev_stats_id = CDP_INVALID_VDEV_STATS_ID; 4062 return QDF_STATUS_E_FAILURE; 4063 } 4064 4065 /** 4066 * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc 4067 * @soc_hdl: Datapath soc handle 4068 * @vdev_stats_id: vdev_stats_id to reset in dp_soc 4069 * 4070 * Return: none 4071 */ 4072 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl, 4073 uint8_t vdev_stats_id) 4074 { 4075 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 4076 4077 if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) || 4078 (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID)) 4079 return; 4080 4081 qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map); 4082 } 4083 #else 4084 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc, 4085 uint8_t vdev_stats_id) 
{}
#endif
/**
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @cdp_soc: Datapath soc handle
 * @vdev_id: VDEV Id
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 */
static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t vdev_id,
				       ol_txrx_vdev_delete_cb callback,
				       void *cb_context)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_peer *vap_self_peer = NULL;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	soc->arch_ops.txrx_vdev_detach(soc, vdev);

	pdev = vdev->pdev;

	/* delete the vdev's self peer (and its self AST entry) first */
	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
							DP_MOD_ID_CONFIG);
	if (vap_self_peer) {
		qdf_spin_lock_bh(&soc->ast_lock);
		if (vap_self_peer->self_ast_entry) {
			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
			vap_self_peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
				     vap_self_peer->mac_addr.raw, 0,
				     CDP_LINK_PEER_TYPE);
		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
	}

	/*
	 * If Target is hung, flush all peers before detaching vdev
	 * this will free all references held due to missing
	 * unmap commands from Target
	 */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);

	/* indicate that the vdev needs to be deleted */
	vdev->delete.pending = 1;
	dp_rx_vdev_detach(vdev);
	/*
	 * move it after dp_rx_vdev_detach(),
	 * as the call back done in dp_rx_vdev_detach()
	 * still need to get vdev pointer by vdev_id.
	 */
	dp_vdev_id_map_tbl_remove(soc, vdev);

	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);

	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);

	dp_tx_vdev_multipass_deinit(vdev);
	dp_tx_vdev_traffic_end_indication_detach(vdev);

	if (vdev->vdev_dp_ext_handle) {
		qdf_mem_free(vdev->vdev_dp_ext_handle);
		vdev->vdev_dp_ext_handle = NULL;
	}
	/* the delete callback fires once the last vdev reference is gone */
	vdev->delete.callback = callback;
	vdev->delete.context = cb_context;

	if (vdev->opmode != wlan_op_mode_monitor)
		dp_vdev_pdev_list_remove(soc, pdev, vdev);

	pdev->vdev_count--;
	/* release reference taken above for find */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_DETACH, vdev);
	dp_info("detach vdev %pK id %d pending refs %d",
		vdev, vdev->vdev_id, qdf_atomic_read(&vdev->ref_cnt));

	/* release reference taken at dp_vdev_create */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
 * @vdev: Target DP vdev handle
 * @peer: DP peer handle to be checked
 * @peer_mac_addr: Target peer mac address
 * @peer_type: Target peer type
 *
 * Return: true - if match, false - not match
 */
static inline
bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
			  struct dp_peer *peer,
			  uint8_t *peer_mac_addr,
			  enum cdp_peer_type peer_type)
{
	/* with MLO the peer type must also match, so a link peer is never
	 * reused as an MLD peer or vice versa
	 */
	if (peer->bss_peer && (peer->vdev == vdev) &&
	    (peer->peer_type == peer_type) &&
	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
			 QDF_MAC_ADDR_SIZE) == 0))
		return true;

	return false;
}
#else
static inline
bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
			  struct dp_peer *peer,
			  uint8_t *peer_mac_addr,
			  enum cdp_peer_type peer_type)
{
	if (peer->bss_peer && (peer->vdev == vdev) &&
	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
			 QDF_MAC_ADDR_SIZE) == 0))
		return true;

	return false;
}
#endif

/* Look up a reusable (inactive) bss peer for @vdev; on success the peer is
 * removed from the inactive list and returned holding a DP_MOD_ID_CONFIG
 * reference, otherwise NULL.
 */
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr,
						enum cdp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct dp_soc *soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
		      inactive_list_elem) {

		/* reuse bss peer only when vdev matches*/
		if (is_dp_peer_can_reuse(vdev, peer,
					 peer_mac_addr, peer_type)) {
			/* increment ref count for cdp_peer_create*/
			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
						QDF_STATUS_SUCCESS) {
				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
					     inactive_list_elem);
				qdf_spin_unlock_bh
					(&soc->inactive_peer_list_lock);
				return peer;
			}
		}
	}

	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
	return NULL;
}

#ifdef FEATURE_AST
/* Remove a stale WDS AST entry left behind after a STA roamed between APs */
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       struct dp_pdev *pdev,
					       uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast_entry;

	/* AST handling is owned by the target in offload mode */
	if (soc->ast_offload_support)
		return;

	qdf_spin_lock_bh(&soc->ast_lock);
	if (soc->ast_override_support)
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
							    pdev->pdev_id);
	else
		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	/* only next-hop (WDS) entries not already being deleted qualify */
	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       struct dp_pdev *pdev,
					       uint8_t *peer_mac_addr)
{
}
#endif

#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer
 * @soc: Datapath soc handle
 * @txrx_peer: Datapath peer handle
 *
 * Return: none
 */
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	txrx_peer->hw_txrx_stats_en =
		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
}
#else
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	txrx_peer->hw_txrx_stats_en = 0;
}
#endif

/* Tear down and free the dp_txrx_peer attached to @peer, notifying the
 * WDI peer-delete event and releasing every stats context it owns.
 */
static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_pdev *pdev;
	struct cdp_txrx_peer_params_update params = {0};

	/* dp_txrx_peer exists for mld peer and legacy peer */
	if (peer->txrx_peer) {
		txrx_peer = peer->txrx_peer;
		peer->txrx_peer = NULL;
		pdev = txrx_peer->vdev->pdev;

		params.osif_vdev = (void *)peer->vdev->osif_vdev;
		params.peer_mac = peer->mac_addr.raw;

		dp_wdi_event_handler(WDI_EVENT_PEER_DELETE, soc,
				     (void *)&params, peer->peer_id,
				     WDI_NO_VAL, pdev->pdev_id);

		dp_peer_defrag_rx_tids_deinit(txrx_peer);
		/*
		 * Deallocate the extended stats contenxt
		 */
		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
		dp_peer_rx_bufq_resources_deinit(txrx_peer);
		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);

		qdf_mem_free(txrx_peer);
	}

	return QDF_STATUS_SUCCESS;
}

/* Number of dp_peer_stats entries to allocate for this peer */
static inline
uint8_t dp_txrx_peer_calculate_stats_size(struct dp_soc
 *soc,
					  struct dp_peer *peer)
{
	/* MLD peers with per-link peer stats enabled need one entry per
	 * possible link plus one aggregate entry; everyone else needs one
	 */
	if ((wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx)) &&
	    IS_MLO_DP_MLD_PEER(peer)) {
		return (DP_MAX_MLO_LINKS + 1);
	}
	return 1;
}

/* Allocate and initialize the dp_txrx_peer for @peer, including its
 * inline stats array and optional delay/jitter/sawf stats contexts,
 * then announce it via the WDI txrx-peer-create event.
 */
static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_pdev *pdev;
	struct cdp_txrx_peer_params_update params = {0};
	uint8_t stats_arr_size = 0;

	stats_arr_size = dp_txrx_peer_calculate_stats_size(soc, peer);

	/* the stats array is allocated inline, right after the peer */
	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer) +
							  (stats_arr_size *
							   sizeof(struct dp_peer_stats)));

	if (!txrx_peer)
		return QDF_STATUS_E_NOMEM; /* failure */

	txrx_peer->peer_id = HTT_INVALID_PEER;
	/* initialize the peer_id */
	txrx_peer->vdev = peer->vdev;
	pdev = peer->vdev->pdev;
	txrx_peer->stats_arr_size = stats_arr_size;

	DP_TXRX_PEER_STATS_INIT(txrx_peer,
				(txrx_peer->stats_arr_size *
				 sizeof(struct dp_peer_stats)));

	if (!IS_DP_LEGACY_PEER(peer))
		txrx_peer->is_mld_peer = 1;

	dp_wds_ext_peer_init(txrx_peer);
	dp_peer_rx_bufq_resources_init(txrx_peer);
	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
	/*
	 * Allocate peer extended stats context. Fall through in
	 * case of failure as its not an implicit requirement to have
	 * this object for regular statistics updates.
	 */
	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
	    QDF_STATUS_SUCCESS)
		dp_warn("peer delay_stats ctx alloc failed");

	/*
	 * Alloctate memory for jitter stats. Fall through in
	 * case of failure as its not an implicit requirement to have
	 * this object for regular statistics updates.
	 */
	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
	    QDF_STATUS_SUCCESS)
		dp_warn("peer jitter_stats ctx alloc failed");

	dp_set_peer_isolation(txrx_peer, false);

	dp_peer_defrag_rx_tids_init(txrx_peer);

	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
		dp_warn("peer sawf stats alloc failed");

	dp_txrx_peer_attach_add(soc, peer, txrx_peer);

	params.peer_mac = peer->mac_addr.raw;
	params.osif_vdev = (void *)peer->vdev->osif_vdev;
	params.chip_id = dp_mlo_get_chip_id(soc);
	params.pdev_id = peer->vdev->pdev->pdev_id;

	dp_wdi_event_handler(WDI_EVENT_TXRX_PEER_CREATE, soc,
			     (void *)&params, peer->peer_id,
			     WDI_NO_VAL, params.pdev_id);

	return QDF_STATUS_SUCCESS;
}

/* Reset all counters of a (possibly reused) dp_txrx_peer to zero */
static inline
void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
{
	if (!txrx_peer)
		return;

	txrx_peer->tx_failed = 0;
	txrx_peer->comp_pkt.num = 0;
	txrx_peer->comp_pkt.bytes = 0;
	txrx_peer->to_stack.num = 0;
	txrx_peer->to_stack.bytes = 0;

	DP_TXRX_PEER_STATS_CLR(txrx_peer,
			       (txrx_peer->stats_arr_size *
				sizeof(struct dp_peer_stats)));
	dp_peer_delay_stats_ctx_clr(txrx_peer);
	dp_peer_jitter_stats_ctx_clr(txrx_peer);
}

/**
 * dp_peer_create_wifi3() - attach txrx peer
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @peer_mac_addr: Peer MAC address
 * @peer_type: link or MLD peer type
 *
 * Return: 0 on success, -1 on failure
 */
static QDF_STATUS
dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
{
	struct dp_peer *peer;
	int i;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
	struct dp_vdev *vdev = NULL;

	if
(!peer_mac_addr) 4466 return QDF_STATUS_E_FAILURE; 4467 4468 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP); 4469 4470 if (!vdev) 4471 return QDF_STATUS_E_FAILURE; 4472 4473 pdev = vdev->pdev; 4474 soc = pdev->soc; 4475 4476 /* 4477 * If a peer entry with given MAC address already exists, 4478 * reuse the peer and reset the state of peer. 4479 */ 4480 peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type); 4481 4482 if (peer) { 4483 qdf_atomic_init(&peer->is_default_route_set); 4484 dp_peer_cleanup(vdev, peer); 4485 4486 dp_peer_vdev_list_add(soc, vdev, peer); 4487 dp_peer_find_hash_add(soc, peer); 4488 4489 if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) { 4490 dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")", 4491 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 4492 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4493 return QDF_STATUS_E_FAILURE; 4494 } 4495 4496 if (IS_MLO_DP_MLD_PEER(peer)) 4497 dp_mld_peer_init_link_peers_info(peer); 4498 4499 qdf_spin_lock_bh(&soc->ast_lock); 4500 dp_peer_delete_ast_entries(soc, peer); 4501 qdf_spin_unlock_bh(&soc->ast_lock); 4502 4503 if ((vdev->opmode == wlan_op_mode_sta) && 4504 !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0], 4505 QDF_MAC_ADDR_SIZE)) { 4506 ast_type = CDP_TXRX_AST_TYPE_SELF; 4507 } 4508 dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0); 4509 4510 peer->valid = 1; 4511 peer->is_tdls_peer = false; 4512 dp_local_peer_id_alloc(pdev, peer); 4513 4514 qdf_spinlock_create(&peer->peer_info_lock); 4515 4516 DP_STATS_INIT(peer); 4517 4518 /* 4519 * In tx_monitor mode, filter may be set for unassociated peer 4520 * when unassociated peer get associated peer need to 4521 * update tx_cap_enabled flag to support peer filter. 
4522 */ 4523 if (!IS_MLO_DP_MLD_PEER(peer)) { 4524 dp_monitor_peer_tx_capture_filter_check(pdev, peer); 4525 dp_monitor_peer_reset_stats(soc, peer); 4526 } 4527 4528 if (peer->txrx_peer) { 4529 dp_peer_rx_bufq_resources_init(peer->txrx_peer); 4530 dp_txrx_peer_stats_clr(peer->txrx_peer); 4531 dp_set_peer_isolation(peer->txrx_peer, false); 4532 dp_wds_ext_peer_init(peer->txrx_peer); 4533 dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer); 4534 } 4535 4536 dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE, 4537 peer, vdev, 1); 4538 dp_info("vdev %pK Reused peer %pK ("QDF_MAC_ADDR_FMT 4539 ") vdev_ref_cnt " 4540 "%d peer_ref_cnt: %d", 4541 vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), 4542 qdf_atomic_read(&vdev->ref_cnt), 4543 qdf_atomic_read(&peer->ref_cnt)); 4544 dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT); 4545 4546 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4547 return QDF_STATUS_SUCCESS; 4548 } else { 4549 /* 4550 * When a STA roams from RPTR AP to ROOT AP and vice versa, we 4551 * need to remove the AST entry which was earlier added as a WDS 4552 * entry. 
4553 * If an AST entry exists, but no peer entry exists with a given 4554 * MAC addresses, we could deduce it as a WDS entry 4555 */ 4556 dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr); 4557 } 4558 4559 #ifdef notyet 4560 peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev, 4561 soc->mempool_ol_ath_peer); 4562 #else 4563 peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer)); 4564 #endif 4565 wlan_minidump_log(peer, 4566 sizeof(*peer), 4567 soc->ctrl_psoc, 4568 WLAN_MD_DP_PEER, "dp_peer"); 4569 if (!peer) { 4570 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4571 return QDF_STATUS_E_FAILURE; /* failure */ 4572 } 4573 4574 qdf_mem_zero(peer, sizeof(struct dp_peer)); 4575 4576 /* store provided params */ 4577 peer->vdev = vdev; 4578 4579 /* initialize the peer_id */ 4580 peer->peer_id = HTT_INVALID_PEER; 4581 4582 qdf_mem_copy( 4583 &peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE); 4584 4585 DP_PEER_SET_TYPE(peer, peer_type); 4586 if (IS_MLO_DP_MLD_PEER(peer)) { 4587 if (dp_txrx_peer_attach(soc, peer) != 4588 QDF_STATUS_SUCCESS) 4589 goto fail; /* failure */ 4590 4591 dp_mld_peer_init_link_peers_info(peer); 4592 } else if (dp_monitor_peer_attach(soc, peer) != 4593 QDF_STATUS_SUCCESS) 4594 dp_warn("peer monitor ctx alloc failed"); 4595 4596 TAILQ_INIT(&peer->ast_entry_list); 4597 4598 /* get the vdev reference for new peer */ 4599 dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD); 4600 4601 if ((vdev->opmode == wlan_op_mode_sta) && 4602 !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0], 4603 QDF_MAC_ADDR_SIZE)) { 4604 ast_type = CDP_TXRX_AST_TYPE_SELF; 4605 } 4606 qdf_spinlock_create(&peer->peer_state_lock); 4607 dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0); 4608 qdf_spinlock_create(&peer->peer_info_lock); 4609 4610 /* reset the ast index to flowid table */ 4611 dp_peer_reset_flowq_map(peer); 4612 4613 qdf_atomic_init(&peer->ref_cnt); 4614 4615 for (i = 0; i < DP_MOD_ID_MAX; i++) 4616 qdf_atomic_init(&peer->mod_refs[i]); 4617 4618 /* keep one 
reference for attach */ 4619 qdf_atomic_inc(&peer->ref_cnt); 4620 qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]); 4621 4622 dp_peer_vdev_list_add(soc, vdev, peer); 4623 4624 /* TODO: See if hash based search is required */ 4625 dp_peer_find_hash_add(soc, peer); 4626 4627 /* Initialize the peer state */ 4628 peer->state = OL_TXRX_PEER_STATE_DISC; 4629 4630 dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE, 4631 peer, vdev, 0); 4632 dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") vdev_ref_cnt " 4633 "%d peer_ref_cnt: %d", 4634 vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), 4635 qdf_atomic_read(&vdev->ref_cnt), 4636 qdf_atomic_read(&peer->ref_cnt)); 4637 /* 4638 * For every peer MAp message search and set if bss_peer 4639 */ 4640 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw, 4641 QDF_MAC_ADDR_SIZE) == 0 && 4642 (wlan_op_mode_sta != vdev->opmode)) { 4643 dp_info("vdev bss_peer!!"); 4644 peer->bss_peer = 1; 4645 if (peer->txrx_peer) 4646 peer->txrx_peer->bss_peer = 1; 4647 } 4648 4649 if (wlan_op_mode_sta == vdev->opmode && 4650 qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw, 4651 QDF_MAC_ADDR_SIZE) == 0) { 4652 peer->sta_self_peer = 1; 4653 } 4654 4655 if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) { 4656 dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")", 4657 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 4658 goto fail; 4659 } 4660 4661 peer->valid = 1; 4662 dp_local_peer_id_alloc(pdev, peer); 4663 DP_STATS_INIT(peer); 4664 4665 if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS) 4666 dp_warn("peer sawf context alloc failed"); 4667 4668 dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT); 4669 4670 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4671 4672 return QDF_STATUS_SUCCESS; 4673 fail: 4674 qdf_mem_free(peer); 4675 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4676 4677 return QDF_STATUS_E_FAILURE; 4678 } 4679 4680 QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer) 
4681 { 4682 /* txrx_peer might exist already in peer reuse case */ 4683 if (peer->txrx_peer) 4684 return QDF_STATUS_SUCCESS; 4685 4686 if (dp_txrx_peer_attach(soc, peer) != 4687 QDF_STATUS_SUCCESS) { 4688 dp_err("peer txrx ctx alloc failed"); 4689 return QDF_STATUS_E_FAILURE; 4690 } 4691 4692 return QDF_STATUS_SUCCESS; 4693 } 4694 4695 #ifdef WLAN_FEATURE_11BE_MLO 4696 QDF_STATUS dp_peer_mlo_setup( 4697 struct dp_soc *soc, 4698 struct dp_peer *peer, 4699 uint8_t vdev_id, 4700 struct cdp_peer_setup_info *setup_info) 4701 { 4702 struct dp_peer *mld_peer = NULL; 4703 struct cdp_txrx_peer_params_update params = {0}; 4704 4705 /* Non-MLO connection, do nothing */ 4706 if (!setup_info || !setup_info->mld_peer_mac) 4707 return QDF_STATUS_SUCCESS; 4708 4709 dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_MLO_SETUP, 4710 peer, NULL, vdev_id, setup_info); 4711 dp_info("link peer: " QDF_MAC_ADDR_FMT "mld peer: " QDF_MAC_ADDR_FMT 4712 "first_link %d, primary_link %d", 4713 QDF_MAC_ADDR_REF(peer->mac_addr.raw), 4714 QDF_MAC_ADDR_REF(setup_info->mld_peer_mac), 4715 setup_info->is_first_link, 4716 setup_info->is_primary_link); 4717 4718 /* if this is the first link peer */ 4719 if (setup_info->is_first_link) 4720 /* create MLD peer */ 4721 dp_peer_create_wifi3((struct cdp_soc_t *)soc, 4722 vdev_id, 4723 setup_info->mld_peer_mac, 4724 CDP_MLD_PEER_TYPE); 4725 4726 if (peer->vdev->opmode == wlan_op_mode_sta && 4727 setup_info->is_primary_link) { 4728 struct cdp_txrx_peer_params_update params = {0}; 4729 4730 params.chip_id = dp_mlo_get_chip_id(soc); 4731 params.pdev_id = peer->vdev->pdev->pdev_id; 4732 params.osif_vdev = peer->vdev->osif_vdev; 4733 4734 dp_wdi_event_handler( 4735 WDI_EVENT_STA_PRIMARY_UMAC_UPDATE, 4736 soc, 4737 (void *)¶ms, peer->peer_id, 4738 WDI_NO_VAL, params.pdev_id); 4739 } 4740 4741 peer->first_link = setup_info->is_first_link; 4742 peer->primary_link = setup_info->is_primary_link; 4743 mld_peer = dp_mld_peer_find_hash_find(soc, 4744 
setup_info->mld_peer_mac, 4745 0, vdev_id, DP_MOD_ID_CDP); 4746 if (mld_peer) { 4747 if (setup_info->is_first_link) { 4748 /* assign rx_tid to mld peer */ 4749 mld_peer->rx_tid = peer->rx_tid; 4750 /* no cdp_peer_setup for MLD peer, 4751 * set it for addba processing 4752 */ 4753 qdf_atomic_set(&mld_peer->is_default_route_set, 1); 4754 } else { 4755 /* free link peer original rx_tids mem */ 4756 dp_peer_rx_tids_destroy(peer); 4757 /* assign mld peer rx_tid to link peer */ 4758 peer->rx_tid = mld_peer->rx_tid; 4759 } 4760 4761 if (setup_info->is_primary_link && 4762 !setup_info->is_first_link) { 4763 struct dp_vdev *prev_vdev; 4764 /* 4765 * if first link is not the primary link, 4766 * then need to change mld_peer->vdev as 4767 * primary link dp_vdev is not same one 4768 * during mld peer creation. 4769 */ 4770 prev_vdev = mld_peer->vdev; 4771 dp_info("Primary link is not the first link. vdev: %pK," 4772 "vdev_id %d vdev_ref_cnt %d", 4773 mld_peer->vdev, vdev_id, 4774 qdf_atomic_read(&mld_peer->vdev->ref_cnt)); 4775 /* release the ref to original dp_vdev */ 4776 dp_vdev_unref_delete(soc, mld_peer->vdev, 4777 DP_MOD_ID_CHILD); 4778 /* 4779 * get the ref to new dp_vdev, 4780 * increase dp_vdev ref_cnt 4781 */ 4782 mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 4783 DP_MOD_ID_CHILD); 4784 mld_peer->txrx_peer->vdev = mld_peer->vdev; 4785 4786 dp_cfg_event_record_mlo_setup_vdev_update_evt( 4787 soc, mld_peer, prev_vdev, 4788 mld_peer->vdev); 4789 4790 params.osif_vdev = (void *)peer->vdev->osif_vdev; 4791 params.peer_mac = mld_peer->mac_addr.raw; 4792 params.chip_id = dp_mlo_get_chip_id(soc); 4793 params.pdev_id = peer->vdev->pdev->pdev_id; 4794 4795 dp_wdi_event_handler( 4796 WDI_EVENT_PEER_PRIMARY_UMAC_UPDATE, 4797 soc, (void *)¶ms, peer->peer_id, 4798 WDI_NO_VAL, params.pdev_id); 4799 } 4800 4801 /* associate mld and link peer */ 4802 dp_link_peer_add_mld_peer(peer, mld_peer); 4803 dp_mld_peer_add_link_peer(mld_peer, peer); 4804 4805 
		mld_peer->txrx_peer->is_mld_peer = 1;
		/* drop the hash-find reference taken above */
		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
	} else {
		peer->mld_peer = NULL;
		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_mlo_peer_authorize() - authorize MLO peer
 * @soc: soc handle
 * @peer: pointer to link peer
 *
 * The MLD peer is authorized only when every one of its link peers is
 * authorized; otherwise it is explicitly de-authorized.
 *
 * Return: void
 */
static void dp_mlo_peer_authorize(struct dp_soc *soc,
				  struct dp_peer *peer)
{
	int i;
	struct dp_peer *link_peer = NULL;
	struct dp_peer *mld_peer = peer->mld_peer;
	struct dp_mld_link_peers link_peers_info;

	if (!mld_peer)
		return;

	/* get link peers with reference */
	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
					    &link_peers_info,
					    DP_MOD_ID_CDP);

	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];

		/* any unauthorized link de-authorizes the MLD peer */
		if (!link_peer->authorize) {
			dp_release_link_peers_ref(&link_peers_info,
						  DP_MOD_ID_CDP);
			mld_peer->authorize = false;
			return;
		}
	}

	/* if we are here all link peers are authorized,
	 * authorize ml_peer also
	 */
	mld_peer->authorize = true;

	/* release link peers reference */
	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
}
#endif

/**
 * dp_peer_setup_wifi3_wrapper() - initialize the peer
 * @soc_hdl: soc handle object
 * @vdev_id : vdev_id of vdev object
 * @peer_mac: Peer's mac address
 * @setup_info: peer setup info for MLO
 *
 * Thin dispatch to the arch-specific (LI/BE) peer-setup implementation.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_peer_setup_wifi3_wrapper(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    uint8_t *peer_mac,
			    struct cdp_peer_setup_info *setup_info)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	return soc->arch_ops.txrx_peer_setup(soc_hdl,
					     vdev_id,
					     peer_mac, setup_info);
}

/**
 * dp_cp_peer_del_resp_handler() - Handle the peer delete response
 * @soc_hdl: Datapath SOC handle
 * @vdev_id: id of virtual device object
 * @mac_addr: Mac address of the peer
 *
 * Tears down the AST entry for @mac_addr once its deletion is in
 * progress and it is no longer mapped to a live peer, then invokes any
 * registered free callback outside the AST lock.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
					      uint8_t vdev_id,
					      uint8_t *mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie;

	/* with AST offload the host does not own the AST table */
	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry =
		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
						vdev_id);

	/* in case of qwrap we have multiple BSS peers
	 * with same mac address
	 *
	 * AST entry for this mac address will be created
	 * only for one peer hence it will be NULL here
	 */
	if ((!ast_entry || !ast_entry->delete_in_progress) ||
	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);

	/* detach the callback so it can run after the lock is dropped */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	soc->num_ast_entries--;
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}
	qdf_mem_free(ast_entry);

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_SUPPORT_MSCS
/**
 * dp_record_mscs_params() - Record MSCS parameters sent by the STA in
 * the MSCS Request to the AP.
 * @soc_hdl: Datapath soc handle
 * @peer_mac: STA Mac address
 * @vdev_id: ID of the vdev handle
 * @mscs_params: Structure having MSCS parameters obtained
 * from handshake
 * @active: Flag to set MSCS active/inactive
 *
 * The AP makes a note of these parameters while comparing the MSDUs
 * sent by the STA, to send the downlink traffic with correct User
 * priority.
 *
 * Return: QDF_STATUS - Success/Invalid
 */
static QDF_STATUS
dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
		      bool active)
{
	struct dp_peer *peer;
	struct dp_peer *tgt_peer;
	QDF_STATUS status = QDF_STATUS_E_INVAL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);

	if (!peer) {
		dp_err("Peer is NULL!");
		goto fail;
	}

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (!tgt_peer)
		goto fail;

	if (!active) {
		dp_info("MSCS Procedure is terminated");
		tgt_peer->mscs_active = active;
		/*
		 * NOTE(review): termination takes the 'fail' exit with status
		 * still QDF_STATUS_E_INVAL — confirm callers expect a
		 * non-success return for a successful termination.
		 */
		goto fail;
	}

	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
		/* Populate entries inside IPV4 database first */
		tgt_peer->mscs_ipv4_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		tgt_peer->mscs_ipv4_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		tgt_peer->mscs_ipv4_parameter.classifier_mask =
			mscs_params->classifier_mask;

		/* Populate entries inside IPV6 database */
		tgt_peer->mscs_ipv6_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		tgt_peer->mscs_ipv6_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		tgt_peer->mscs_ipv6_parameter.classifier_mask =
			mscs_params->classifier_mask;
		tgt_peer->mscs_active = 1;
		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
			"\tUser priority limit = %x\tClassifier mask = %x",
			QDF_MAC_ADDR_REF(peer_mac),
			mscs_params->classifier_type,
			tgt_peer->mscs_ipv4_parameter.user_priority_bitmap,
			tgt_peer->mscs_ipv4_parameter.user_priority_limit,
			tgt_peer->mscs_ipv4_parameter.classifier_mask);
	}

	status = QDF_STATUS_SUCCESS;
fail:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif

/**
 * dp_get_sec_type() - Get the security type
 * @soc: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * return sec_type: Security type (0 when the peer cannot be found)
 */
static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
			   uint8_t *peer_mac, uint8_t sec_idx)
{
	int sec_type = 0;
	struct dp_peer *peer =
		dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
					       peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);

	if (!peer) {
		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
		return sec_type;
	}

	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
		return sec_type;
	}
	sec_type = peer->txrx_peer->security[sec_idx].sec_type;

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return sec_type;
}

/**
 * dp_peer_authorize() - authorize txrx peer
 * @soc_hdl: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @authorize: non-zero to authorize, zero to de-authorize
 *
 * Return: QDF_STATUS
 *
 */
static QDF_STATUS
dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		  uint8_t *peer_mac, uint32_t authorize)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
							      0, vdev_id,
							      DP_MOD_ID_CDP);

	if (!peer) {
		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
		status = QDF_STATUS_E_FAILURE;
	} else {
		peer->authorize = authorize ? 1 : 0;
		if (peer->txrx_peer)
			peer->txrx_peer->authorize = peer->authorize;

		/* de-authorizing flushes any pending rx fragments */
		if (!peer->authorize)
			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);

		dp_mlo_peer_authorize(soc, peer);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return status;
}

/**
 * dp_peer_get_authorize() - get peer authorize status
 * @soc_hdl: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 *
 * Return: true is peer is authorized, false otherwise
 */
static bool
dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	bool authorize = false;
	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
						      0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer) {
		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
		return authorize;
	}

	authorize = peer->authorize;
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return authorize;
}

/**
 * dp_vdev_unref_delete() - drop a module reference on a vdev and free
 *			    it when the last reference goes away
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @mod_id: module id of the caller's reference
 *
 * When the overall ref count hits zero the vdev must already be marked
 * delete-pending; the vdev is detached, unlinked from the inactive list,
 * freed, and the registered delete callback (if any) is invoked last.
 */
void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
			  enum dp_mod_id mod_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;
	uint8_t vdev_id = vdev->vdev_id;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_vdev *tmp_vdev = NULL;
	uint8_t found = 0;

	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);

	/* Return if this is not the last reference*/
	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
		return;

	/*
	 * This should be set as last reference need to released
	 * after cdp_vdev_detach() is called
	 *
	 * if this assert is hit there is a ref count issue
	 */
	QDF_ASSERT(vdev->delete.pending);

	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));

	/* monitor vdevs have no tx rings / flow pools to tear down */
	if (wlan_op_mode_monitor == vdev->opmode) {
		dp_monitor_vdev_delete(soc, vdev);
		goto free_vdev;
	}

	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);
	dp_monitor_vdev_detach(vdev);

free_vdev:
	qdf_spinlock_destroy(&vdev->peer_list_lock);

	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
		      inactive_list_elem) {
		if (tmp_vdev == vdev) {
			found = 1;
			break;
		}
	}
	if (found)
		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
			     inactive_list_elem);
	/* delete this peer from the list */
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_UNREF_DEL,
				     vdev);
	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
			     WLAN_MD_DP_VDEV, "dp_vdev");
	qdf_mem_free(vdev);
	vdev = NULL;

	/* run the callback only after the vdev memory is released */
	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}

qdf_export_symbol(dp_vdev_unref_delete);

/**
 * dp_peer_unref_delete() - drop a module reference on a peer and free
 *			    it when the last reference goes away
 * @peer: datapath peer handle
 * @mod_id: module id of the caller's reference
 *
 * On the last reference: the peer must already be unmapped
 * (peer_id == HTT_INVALID_PEER); it is unlinked from the inactive list,
 * cleaned up, freed, and the CHILD vdev reference taken at peer create
 * is released.
 */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint16_t peer_id;
	struct dp_peer *tmp_peer;
	bool found = false;

	if (mod_id > DP_MOD_ID_RX)
		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		QDF_ASSERT(peer_id == HTT_INVALID_PEER);

		dp_peer_info("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));

		dp_peer_sawf_ctx_free(soc, peer);

		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
				     WLAN_MD_DP_PEER, "dp_peer");

		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
			      inactive_list_elem) {
			if (tmp_peer == peer) {
				found = 1;
				break;
			}
		}
		if (found)
			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
				     inactive_list_elem);
		/* delete this peer from the list */
		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);

		if (!IS_MLO_DP_MLD_PEER(peer))
			dp_monitor_peer_detach(soc, peer);

		qdf_spinlock_destroy(&peer->peer_state_lock);

		dp_txrx_peer_detach(soc, peer);
		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_UNREF_DEL,
					     peer, vdev, 0);
		qdf_mem_free(peer);

		/*
		 * Decrement ref count taken at peer create
		 */
		dp_peer_info("Deleted peer. Unref vdev %pK, vdev_ref_cnt %d",
			     vdev, qdf_atomic_read(&vdev->ref_cnt));
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
	}
}

qdf_export_symbol(dp_peer_unref_delete);

/**
 * dp_txrx_peer_unref_delete() - cdp_txrx-facing wrapper that releases a
 *				 txrx ref handle via dp_peer_unref_delete()
 * @handle: opaque txrx reference handle (really a struct dp_peer *)
 * @mod_id: module id of the caller's reference
 */
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
			       enum dp_mod_id mod_id)
{
	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
}

qdf_export_symbol(dp_txrx_peer_unref_delete);

/**
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @soc_hdl: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @bitmap: bitmap indicating special handling of request.
 * @peer_type: peer type (link or MLD)
 *
 */
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap,
				       enum cdp_peer_type peer_type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer;
	struct cdp_peer_info peer_info = { 0 };
	struct dp_vdev *vdev = NULL;

	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
				 false, peer_type);
	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (!peer->valid) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer_mac));
		return QDF_STATUS_E_ALREADY;
	}

	vdev = peer->vdev;

	if (!vdev) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	/* mark invalid first so no new lookups hand this peer out */
	peer->valid = 0;

	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_DELETE, peer,
				     vdev, 0);
	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ") pending-refs %d",
		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		     qdf_atomic_read(&peer->ref_cnt));

	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);

	dp_local_peer_id_free(peer->vdev->pdev, peer);

	/* Drop all rx packets before deleting peer */
	dp_clear_peer_internal(soc, peer);

	qdf_spinlock_destroy(&peer->peer_info_lock);
	dp_peer_multipass_list_remove(peer);

	/* remove the reference to the peer from the hash table */
	dp_peer_find_hash_remove(soc, peer);

	dp_peer_vdev_list_remove(soc, vdev, peer);

	dp_peer_mlo_delete(peer);

	/* park the peer on the inactive list until the final unref */
	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
			  inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	/*
	 * Remove the reference taken above
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_update_roaming_peer_wifi3() - record roaming-peer auth status on vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the vdev
 * @peer_mac: MAC of the roaming peer
 * @auth_status: authorization status reported by the control plane
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
					       uint8_t vdev_id,
					       uint8_t *peer_mac,
					       uint32_t auth_status)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev->roaming_peer_status = auth_status;
	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
		     QDF_MAC_ADDR_SIZE);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_get_vdev_mac_addr_wifi3() - Get the MAC address of a vdev
 * @soc_hdl: Datapath soc handle
 * @vdev_id: virtual interface id
 *
 * Return: MAC address on success, NULL on failure.
5400 * 5401 */ 5402 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl, 5403 uint8_t vdev_id) 5404 { 5405 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5406 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 5407 DP_MOD_ID_CDP); 5408 uint8_t *mac = NULL; 5409 5410 if (!vdev) 5411 return NULL; 5412 5413 mac = vdev->mac_addr.raw; 5414 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5415 5416 return mac; 5417 } 5418 5419 /** 5420 * dp_vdev_set_wds() - Enable per packet stats 5421 * @soc_hdl: DP soc handle 5422 * @vdev_id: id of DP VDEV handle 5423 * @val: value 5424 * 5425 * Return: none 5426 */ 5427 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5428 uint32_t val) 5429 { 5430 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5431 struct dp_vdev *vdev = 5432 dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id, 5433 DP_MOD_ID_CDP); 5434 5435 if (!vdev) 5436 return QDF_STATUS_E_FAILURE; 5437 5438 vdev->wds_enabled = val; 5439 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5440 5441 return QDF_STATUS_SUCCESS; 5442 } 5443 5444 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id) 5445 { 5446 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5447 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 5448 DP_MOD_ID_CDP); 5449 int opmode; 5450 5451 if (!vdev) { 5452 dp_err_rl("vdev for id %d is NULL", vdev_id); 5453 return -EINVAL; 5454 } 5455 opmode = vdev->opmode; 5456 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5457 5458 return opmode; 5459 } 5460 5461 /** 5462 * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev 5463 * @soc_hdl: ol_txrx_soc_handle handle 5464 * @vdev_id: vdev id for which os rx handles are needed 5465 * @stack_fn_p: pointer to stack function pointer 5466 * @osif_vdev_p: pointer to ol_osif_vdev_handle 5467 * 5468 * Return: void 5469 */ 5470 static 5471 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl, 5472 uint8_t vdev_id, 5473 
ol_txrx_rx_fp *stack_fn_p, 5474 ol_osif_vdev_handle *osif_vdev_p) 5475 { 5476 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5477 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 5478 DP_MOD_ID_CDP); 5479 5480 if (qdf_unlikely(!vdev)) { 5481 *stack_fn_p = NULL; 5482 *osif_vdev_p = NULL; 5483 return; 5484 } 5485 *stack_fn_p = vdev->osif_rx_stack; 5486 *osif_vdev_p = vdev->osif_vdev; 5487 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5488 } 5489 5490 /** 5491 * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of vdev 5492 * @soc_hdl: datapath soc handle 5493 * @vdev_id: virtual device/interface id 5494 * 5495 * Return: Handle to control pdev 5496 */ 5497 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3( 5498 struct cdp_soc_t *soc_hdl, 5499 uint8_t vdev_id) 5500 { 5501 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5502 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 5503 DP_MOD_ID_CDP); 5504 struct dp_pdev *pdev; 5505 5506 if (!vdev) 5507 return NULL; 5508 5509 pdev = vdev->pdev; 5510 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5511 return pdev ? 
(struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL; 5512 } 5513 5514 int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle) 5515 { 5516 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 5517 5518 return qdf_atomic_read(&pdev->num_tx_outstanding); 5519 } 5520 5521 /** 5522 * dp_get_peer_mac_from_peer_id() - get peer mac 5523 * @soc: CDP SoC handle 5524 * @peer_id: Peer ID 5525 * @peer_mac: MAC addr of PEER 5526 * 5527 * Return: QDF_STATUS 5528 */ 5529 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc, 5530 uint32_t peer_id, 5531 uint8_t *peer_mac) 5532 { 5533 struct dp_peer *peer; 5534 5535 if (soc && peer_mac) { 5536 peer = dp_peer_get_ref_by_id((struct dp_soc *)soc, 5537 (uint16_t)peer_id, 5538 DP_MOD_ID_CDP); 5539 if (peer) { 5540 qdf_mem_copy(peer_mac, peer->mac_addr.raw, 5541 QDF_MAC_ADDR_SIZE); 5542 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5543 return QDF_STATUS_SUCCESS; 5544 } 5545 } 5546 5547 return QDF_STATUS_E_FAILURE; 5548 } 5549 5550 #ifdef MESH_MODE_SUPPORT 5551 static 5552 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val) 5553 { 5554 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl; 5555 5556 dp_cdp_info("%pK: val %d", vdev->pdev->soc, val); 5557 vdev->mesh_vdev = val; 5558 if (val) 5559 vdev->skip_sw_tid_classification |= 5560 DP_TX_MESH_ENABLED; 5561 else 5562 vdev->skip_sw_tid_classification &= 5563 ~DP_TX_MESH_ENABLED; 5564 } 5565 5566 /** 5567 * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter 5568 * @vdev_hdl: virtual device object 5569 * @val: value to be set 5570 * 5571 * Return: void 5572 */ 5573 static 5574 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val) 5575 { 5576 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl; 5577 5578 dp_cdp_info("%pK: val %d", vdev->pdev->soc, val); 5579 vdev->mesh_rx_filter = val; 5580 } 5581 #endif 5582 5583 /** 5584 * dp_vdev_set_hlos_tid_override() - to set hlos tid override 5585 * @vdev: virtual device object 5586 * @val: value to be set 5587 
 *
 * Return: void
 */
static
void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
{
	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
	if (val)
		vdev->skip_sw_tid_classification |=
			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
	else
		vdev->skip_sw_tid_classification &=
			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
}

/**
 * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
 * @vdev_hdl: virtual device object
 *
 * Return: 1 if this flag is set
 */
static
uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	return !!(vdev->skip_sw_tid_classification &
		  DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
/**
 * dp_enable_vdev_peer_protocol_count() - enable/disable peer protocol
 *					  count tracking on a vdev
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @enable: enable/disable flag
 *
 * Return: void
 */
static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
					       int8_t vdev_id,
					       bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return;

	dp_info("enable %d vdev_id %d", enable, vdev_id);
	vdev->peer_protocol_count_track = enable;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_enable_vdev_peer_protocol_drop_mask() - set peer protocol drop mask
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @drop_mask: drop mask value to be configured
 *
 * Return: void
 */
static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
						   int8_t vdev_id,
						   int drop_mask)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return;

	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
	vdev->peer_protocol_count_dropmask = drop_mask;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_is_vdev_peer_protocol_count_enabled() - query peer protocol count
 *					      tracking state
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 *
 * Return: non-zero when tracking is enabled; 0 otherwise (also 0 when
 *	   @vdev_id cannot be resolved)
 */
static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
						  int8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	int peer_protocol_count_track;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return 0;

	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
		vdev_id);
	/* Snapshot before dropping the vdev reference */
	peer_protocol_count_track =
		vdev->peer_protocol_count_track;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return peer_protocol_count_track;
}

/**
 * dp_get_vdev_peer_protocol_drop_mask() - get peer protocol drop mask
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 *
 * Return: configured drop mask; 0 when @vdev_id cannot be resolved
 */
static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
					       int8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	int peer_protocol_count_dropmask;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return 0;

	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
		vdev_id);
	/* Snapshot before dropping the vdev reference */
	peer_protocol_count_dropmask =
		vdev->peer_protocol_count_dropmask;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return peer_protocol_count_dropmask;
}

#endif

/* Check whether @data is a pdev currently registered on @soc */
bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
{
	uint8_t pdev_count;

	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
		if (soc->pdev_list[pdev_count] &&
		    soc->pdev_list[pdev_count] == data)
			return true;
	}
	return false;
}

/* Aggregate ingress, per-peer and rate stats of @vdev into @vdev_stats */
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
			     struct cdp_vdev_stats *vdev_stats)
{
	if (!vdev || !vdev->pdev)
		return;

	dp_update_vdev_ingress_stats(vdev);

	/* Start from the vdev's own stats, then fold in every peer */
	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));

	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);

	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     vdev_stats, vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
}

/* Re-aggregate pdev-level TX/RX stats from all vdevs attached to @pdev */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   pdev->soc);
		return;
	}

	soc = pdev->soc;

	/* Clear the aggregated counters before re-accumulating */
	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));

	if (dp_monitor_is_enable_mcopy_mode(pdev))
		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);

	/* Hold the vdev list lock so vdevs cannot detach mid-iteration */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}

/**
 * dp_vdev_getstats() - get vdev packet level stats
 * @vdev_handle: Datapath VDEV handle
 * @stats: cdp network device stats structure
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
				   struct cdp_dev_stats *stats)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	soc = pdev->soc;
	/* Atomic alloc: may be invoked from contexts that cannot sleep */
	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
		       soc);
		return QDF_STATUS_E_FAILURE;
	}

	dp_aggregate_vdev_stats(vdev, vdev_stats);

	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;

	stats->tx_errors = vdev_stats->tx.tx_failed;
	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
			    vdev_stats->tx_i.sg.dropped_host.num +
			    vdev_stats->tx_i.mcast_en.dropped_map_error +
			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
			    vdev_stats->tx.nawds_mcast_drop;

	/* With HW vdev stats offload, RX counters come from the ingress
	 * (reo/null-queue/eapol) buckets instead of the to-stack counters.
	 */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		stats->rx_packets = vdev_stats->rx.to_stack.num;
		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
	} else {
		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
				    vdev_stats->rx_i.null_q_desc_pkt.num +
				    vdev_stats->rx_i.routed_eapol_pkt.num;
		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
	}

	stats->rx_errors = vdev_stats->rx.err.mic_err +
			   vdev_stats->rx.err.decrypt_err +
			   vdev_stats->rx.err.fcserr +
			   vdev_stats->rx.err.pn_err +
			   vdev_stats->rx.err.oor_err +
			   vdev_stats->rx.err.jump_2k_err +
			   vdev_stats->rx.err.rxdma_wifi_parse_err;

	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
			    vdev_stats->rx.multipass_rx_pkt_drop +
			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
			    vdev_stats->rx.policy_check_drop +
			    vdev_stats->rx.nawds_mcast_drop +
			    vdev_stats->rx.mcast_3addr_drop +
			    vdev_stats->rx.ppeds_drop.num;

	qdf_mem_free(vdev_stats);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_getstats() - get pdev packet level stats
 * @pdev_handle: Datapath PDEV handle
 * @stats: cdp network device stats structure
 *
 * Return: void
 */
static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
			     struct cdp_dev_stats *stats)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	dp_aggregate_pdev_stats(pdev);

	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;

	stats->tx_errors = pdev->stats.tx.tx_failed;
	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
			    pdev->stats.tx_i.sg.dropped_host.num +
			    pdev->stats.tx_i.mcast_en.dropped_map_error +
			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
			    pdev->stats.tx.nawds_mcast_drop +
			    pdev->stats.tso_stats.dropped_host.num;

	/* Same HW-offload split as dp_vdev_getstats() */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
		stats->rx_packets = pdev->stats.rx.to_stack.num;
		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
	} else {
		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
				    pdev->stats.rx_i.null_q_desc_pkt.num +
				    pdev->stats.rx_i.routed_eapol_pkt.num;
		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
	}

	stats->rx_errors = pdev->stats.err.ip_csum_err +
			   pdev->stats.err.tcp_udp_csum_err +
			   pdev->stats.rx.err.mic_err +
			   pdev->stats.rx.err.decrypt_err +
			   pdev->stats.rx.err.fcserr +
			   pdev->stats.rx.err.pn_err +
			   pdev->stats.rx.err.oor_err +
			   pdev->stats.rx.err.jump_2k_err +
			   pdev->stats.rx.err.rxdma_wifi_parse_err;
	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
			    pdev->stats.dropped.mec +
			    pdev->stats.dropped.mesh_filter +
			    pdev->stats.dropped.wifi_parse +
			    pdev->stats.dropped.mon_rx_drop +
			    pdev->stats.dropped.mon_radiotap_update_err +
			    pdev->stats.rx.mec_drop.num +
			    pdev->stats.rx.ppeds_drop.num +
			    pdev->stats.rx.multipass_rx_pkt_drop +
			    pdev->stats.rx.peer_unauth_rx_pkt_drop +
			    pdev->stats.rx.policy_check_drop +
			    pdev->stats.rx.nawds_mcast_drop +
			    pdev->stats.rx.mcast_3addr_drop;
}

/**
 * dp_get_device_stats() - get interface level packet stats
 * @soc_hdl: soc handle
 * @id: vdev_id or pdev_id based on type
 * @stats: cdp network device stats structure
 * @type: device type pdev/vdev
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
				      struct cdp_dev_stats *stats,
				      uint8_t type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_vdev *vdev;

	switch (type) {
	case UPDATE_VDEV_STATS:
		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);

		if (vdev) {
			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
						  stats);
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		}
		return status;
	case UPDATE_PDEV_STATS:
		{
			struct dp_pdev *pdev =
				dp_get_pdev_from_soc_pdev_id_wifi3(
						(struct dp_soc *)soc,
						 id);
			if (pdev) {
				dp_pdev_getstats((struct cdp_pdev *)pdev,
						 stats);
				return QDF_STATUS_SUCCESS;
			}
		}
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "apstats cannot be updated for this input "
			  "type %d", type);
		break;
	}

	return QDF_STATUS_E_FAILURE;
}

/* Map a HAL ring type to a printable name for stats/debug output */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
{
	switch (ring_type) {
	case REO_DST:
		return "Reo_dst";
	case REO_EXCEPTION:
		return "Reo_exception";
	case REO_CMD:
		return "Reo_cmd";
	case REO_REINJECT:
		return "Reo_reinject";
	case REO_STATUS:
		return "Reo_status";
	case WBM2SW_RELEASE:
		return "wbm2sw_release";
	case TCL_DATA:
		return "tcl_data";
	case TCL_CMD_CREDIT:
		return "tcl_cmd_credit";
	case TCL_STATUS:
		return "tcl_status";
	case SW2WBM_RELEASE:
		return "sw2wbm_release";
	case RXDMA_BUF:
		return "Rxdma_buf";
	case RXDMA_DST:
		return "Rxdma_dst";
	case RXDMA_MONITOR_BUF:
		return "Rxdma_monitor_buf";
	case RXDMA_MONITOR_DESC:
		return "Rxdma_monitor_desc";
	case RXDMA_MONITOR_STATUS:
		return "Rxdma_monitor_status";
	case RXDMA_MONITOR_DST:
		return "Rxdma_monitor_destination";
	case WBM_IDLE_LINK:
		return "WBM_hw_idle_link";
	case PPE2TCL:
		return "PPE2TCL";
	case REO2PPE:
		return "REO2PPE";
	case TX_MONITOR_DST:
		return "tx_monitor_destination";
	case TX_MONITOR_BUF:
		return "tx_monitor_buf";
	default:
		dp_err("Invalid ring type");
		break;
	}
	return "Invalid";
}

/* Print per-context NAPI stats via the HIF layer */
void dp_print_napi_stats(struct dp_soc *soc)
{
	hif_print_napi_stats(soc->hif_handle);
}

/**
 * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function (unused)
 *
 * Return: void
 */
static inline void
dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
			    struct dp_peer *peer,
			    void *arg)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *tgt_peer = NULL;
	struct cdp_interface_peer_stats peer_stats_intf;

	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));

	DP_STATS_CLR(peer);
	/* Clear monitor peer stats */
	dp_monitor_peer_reset_stats(soc, peer);

	/* Clear MLD peer stats only when link peer is primary */
	if (dp_peer_is_primary_link_peer(peer)) {
		tgt_peer = dp_get_tgt_peer_from_peer(peer);
		if (tgt_peer) {
			DP_STATS_CLR(tgt_peer);
			txrx_peer = tgt_peer->txrx_peer;
			dp_txrx_peer_stats_clr(txrx_peer);
		}
	}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	/* Publish the zeroed stats so WDI listeners observe the reset */
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
			     &peer_stats_intf, peer->peer_id,
			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
#endif
}

#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
/* Clear usage-watermark stats on every REO destination ring */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
	int ring;

	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
				soc->reo_dest_ring[ring].hal_srng);
}
#else
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
}
#endif

/**
 * dp_txrx_host_stats_clr() - Reinitialize the txrx stats
 * @vdev: DP_VDEV handle
 * @soc: DP_SOC handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
{
	if (!vdev || !vdev->pdev)
		return QDF_STATUS_E_FAILURE;

	/*
	 * if NSS offload is enabled, then send message
	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
	 * then clear host statistics.
	 */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (soc->cdp_soc.ol_ops->nss_stats_clr)
			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
							   vdev->vdev_id);
	}

	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
					      (1 << vdev->vdev_id));

	DP_STATS_CLR(vdev->pdev);
	DP_STATS_CLR(vdev->pdev->soc);
	DP_STATS_CLR(vdev);

	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);

	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
			     DP_MOD_ID_GENERIC_STATS);

	dp_srng_clear_ring_usage_wm_stats(soc);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     &vdev->stats, vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_peer_calibr_stats()- Get peer calibrated stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 *
 * Return: none
 */
static inline
void dp_get_peer_calibr_stats(struct dp_peer *peer,
			      struct cdp_peer_stats *peer_stats)
{
	struct dp_peer *tgt_peer;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (!tgt_peer)
		return;

	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
	peer_stats->tx.tx_bytes_success_last =
				tgt_peer->stats.tx.tx_bytes_success_last;
	peer_stats->tx.tx_data_success_last =
				tgt_peer->stats.tx.tx_data_success_last;
	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
	peer_stats->tx.tx_data_ucast_last =
				tgt_peer->stats.tx.tx_data_ucast_last;
	peer_stats->tx.tx_data_ucast_rate =
				tgt_peer->stats.tx.tx_data_ucast_rate;
	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
	peer_stats->rx.rx_bytes_success_last =
				tgt_peer->stats.rx.rx_bytes_success_last;
	peer_stats->rx.rx_data_success_last =
				tgt_peer->stats.rx.rx_data_success_last;
	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
}

/**
 * dp_get_peer_basic_stats()- Get peer basic stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 *
 * Return: none
 */
static inline
void dp_get_peer_basic_stats(struct dp_peer *peer,
			     struct cdp_peer_stats *peer_stats)
{
	struct dp_txrx_peer *txrx_peer;

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer)
		return;

	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
}

#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 *
 * Return: none
 */
static inline
void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
			       struct cdp_peer_stats *peer_stats)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_peer_per_pkt_stats *per_pkt_stats;
	uint8_t inx = 0, link_id = 0;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	uint8_t stats_arr_size;

	txrx_peer = dp_get_txrx_peer(peer);
	pdev = peer->vdev->pdev;

	if (!txrx_peer)
		return;

	/* Non-link peers accumulate every stats bucket; an MLO link peer
	 * contributes only the bucket of its own HW link.
	 */
	if (!IS_MLO_DP_LINK_PEER(peer)) {
		stats_arr_size = txrx_peer->stats_arr_size;
		for (inx = 0; inx < stats_arr_size; inx++) {
			per_pkt_stats = &txrx_peer->stats[inx].per_pkt_stats;
			DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
		}
	} else {
		soc = pdev->soc;
		link_id = dp_get_peer_hw_link_id(soc, pdev);
		per_pkt_stats =
			&txrx_peer->stats[link_id].per_pkt_stats;
		DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
	}
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_get_peer_extd_stats()- Get peer extd stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 *
 * Return: none
 */
static inline
void dp_get_peer_extd_stats(struct dp_peer *peer,
			    struct cdp_peer_stats *peer_stats)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		/* For an MLD peer fold in the stats of every link peer */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
						  peer_stats,
						  UPDATE_PEER_STATS);
		}
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_get_stats(soc, peer, peer_stats,
					  UPDATE_PEER_STATS);
	}
}
#else
static inline
void dp_get_peer_extd_stats(struct dp_peer *peer,
			    struct cdp_peer_stats *peer_stats)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;

	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
}
#endif
#else
/* Non-enhanced stats: only the first stats bucket is maintained */
static inline
void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
			       struct cdp_peer_stats *peer_stats)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_peer_per_pkt_stats *per_pkt_stats;

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer)
		return;

	per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats;
	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
}

static inline
void dp_get_peer_extd_stats(struct dp_peer *peer,
			    struct cdp_peer_stats *peer_stats)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_peer_extd_stats *extd_stats;

	txrx_peer = dp_get_txrx_peer(peer);
	if (qdf_unlikely(!txrx_peer)) {
		dp_err_rl("txrx_peer NULL");
		return;
	}

	extd_stats = &txrx_peer->stats[0].extd_stats;
	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
}
#endif

/**
 * dp_get_peer_tx_per()- Get peer packet error ratio
 * @peer_stats: buffer for peer stats
 *
 * Return: none
 */
static inline
void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
{
	/* PER = retries / (successes + retries), in percent */
	if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0)
		peer_stats->tx.per = (peer_stats->tx.retries * 100) /
				  (peer_stats->tx.tx_success.num +
				   peer_stats->tx.retries);
	else
		peer_stats->tx.per = 0;
}

/* Fill @peer_stats with calibrated, basic, per-pkt and extended stats */
void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
{
	dp_get_peer_calibr_stats(peer, peer_stats);

	dp_get_peer_basic_stats(peer, peer_stats);

	dp_get_peer_per_pkt_stats(peer, peer_stats);

	dp_get_peer_extd_stats(peer, peer_stats);

	dp_get_peer_tx_per(peer_stats);
}

/**
 * dp_get_host_peer_stats()- function to print peer stats
 * @soc: dp_soc handle
 * @mac_addr: mac address of the peer
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
{
	struct dp_peer *peer = NULL;
	struct cdp_peer_stats *peer_stats = NULL;
	struct cdp_peer_info peer_info = { 0 };

	if (!mac_addr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL peer mac addr\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
				 CDP_WILD_PEER_TYPE);

	peer =
	       dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
					 DP_MOD_ID_CDP);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
	if (!peer_stats) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Memory allocation failed for cdp_peer_stats\n",
			  __func__);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));

	dp_get_peer_stats(peer, peer_stats);
	dp_print_peer_stats(peer, peer_stats);

	dp_peer_rxtid_stats(dp_get_tgt_peer_from_peer(peer),
			    dp_rx_tid_stats_cb, NULL);

	qdf_mem_free(peer_stats);
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_stats_help() - Helper function for Txrx_Stats
 *
 * Return: None
 */
static void dp_txrx_stats_help(void)
{
	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
	dp_info("stats_option:");
	dp_info("  1 -- HTT Tx Statistics");
	dp_info("  2 -- HTT Rx Statistics");
	dp_info("  3 -- HTT Tx HW Queue Statistics");
	dp_info("  4 -- HTT Tx HW Sched Statistics");
	dp_info("  5 -- HTT Error Statistics");
	dp_info("  6 -- HTT TQM Statistics");
	dp_info("  7 -- HTT TQM CMDQ Statistics");
	dp_info("  8 -- HTT TX_DE_CMN Statistics");
	dp_info("  9 -- HTT Tx Rate Statistics");
	dp_info(" 10 -- HTT Rx Rate Statistics");
	dp_info(" 11 -- HTT Peer Statistics");
	dp_info(" 12 -- HTT Tx SelfGen Statistics");
	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
	dp_info(" 15 -- HTT SRNG Statistics");
	dp_info(" 16 -- HTT SFM Info Statistics");
	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
	dp_info(" 18 -- HTT Peer List Details");
	dp_info(" 20 -- Clear Host Statistics");
	dp_info(" 21 -- Host Rx Rate Statistics");
	dp_info(" 22 -- Host Tx Rate Statistics");
	dp_info(" 23 -- Host Tx Statistics");
	dp_info(" 24 -- Host Rx Statistics");
	dp_info(" 25 -- Host AST Statistics");
	dp_info(" 26 -- Host SRNG PTR Statistics");
	dp_info(" 27 -- Host Mon Statistics");
	dp_info(" 28 -- Host REO Queue Statistics");
	dp_info(" 29 -- Host Soc cfg param Statistics");
	dp_info(" 30 -- Host pdev cfg param Statistics");
	dp_info(" 31 -- Host NAPI stats");
	dp_info(" 32 -- Host Interrupt stats");
	dp_info(" 33 -- Host FISA stats");
	dp_info(" 34 -- Host Register Work stats");
	dp_info(" 35 -- HW REO Queue stats");
	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
	dp_info(" 37 -- Host SRNG usage watermark stats");
}

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_rst_skel_enable_update() - Update skel dbg flag for umac reset
 * @soc: dp soc handle
 * @en: enable/disable
 *
 * Return: void
 */
static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
{
	soc->umac_reset_ctx.skel_enable = en;
	dp_cdp_debug("UMAC HW reset debug skeleton code enabled :%u",
		     soc->umac_reset_ctx.skel_enable);
}

/**
 * dp_umac_rst_skel_enable_get() - Get skel dbg flag for umac reset
 * @soc: dp soc handle
 *
 * Return: enable/disable flag
 */
static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
{
	return soc->umac_reset_ctx.skel_enable;
}
#else
static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
{
}

static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
{
	return false;
}
#endif

/**
 * dp_print_host_stats()- Function to print the stats aggregated at host
 * @vdev: DP_VDEV handle
 * @req: host stats type
 * @soc: dp soc handler
 *
 * Return: 0 on success, print error message in case of failure
 */
static int
dp_print_host_stats(struct dp_vdev *vdev,
		    struct cdp_txrx_stats_req *req,
		    struct dp_soc *soc)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	enum cdp_host_txrx_stats type =
			dp_stats_mapping_table[req->stats][STATS_HOST];

	dp_aggregate_pdev_stats(pdev);

	switch (type) {
	case TXRX_CLEAR_STATS:
		dp_txrx_host_stats_clr(vdev, soc);
		break;
	case TXRX_RX_RATE_STATS:
		dp_print_rx_rates(vdev);
		break;
	case TXRX_TX_RATE_STATS:
		dp_print_tx_rates(vdev);
		break;
	case TXRX_TX_HOST_STATS:
		dp_print_pdev_tx_stats(pdev);
		dp_print_soc_tx_stats(pdev->soc);
		dp_print_global_desc_count();
		break;
	case TXRX_RX_HOST_STATS:
		dp_print_pdev_rx_stats(pdev);
		dp_print_soc_rx_stats(pdev->soc);
		break;
	case TXRX_AST_STATS:
		dp_print_ast_stats(pdev->soc);
		dp_print_mec_stats(pdev->soc);
		dp_print_peer_table(vdev);
		break;
	case TXRX_SRNG_PTR_STATS:
		dp_print_ring_stats(pdev);
		break;
	case TXRX_RX_MON_STATS:
		dp_monitor_print_pdev_rx_mon_stats(pdev);
		break;
	case TXRX_REO_QUEUE_STATS:
		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
				       req->peer_addr);
		break;
	case TXRX_SOC_CFG_PARAMS:
		dp_print_soc_cfg_params(pdev->soc);
		break;
	case TXRX_PDEV_CFG_PARAMS:
		dp_print_pdev_cfg_params(pdev);
		break;
	case TXRX_NAPI_STATS:
		dp_print_napi_stats(pdev->soc);
		break;
	case TXRX_SOC_INTERRUPT_STATS:
		dp_print_soc_interrupt_stats(pdev->soc);
		break;
	case TXRX_SOC_FSE_STATS:
		dp_rx_dump_fisa_table(pdev->soc);
		break;
	case TXRX_HAL_REG_WRITE_STATS:
		hal_dump_reg_write_stats(pdev->soc->hal_soc);
		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
		break;
	case
	     TXRX_SOC_REO_HW_DESC_DUMP:
		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
					 vdev->vdev_id);
		break;
	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
		break;
	case TXRX_SRNG_USAGE_WM_STATS:
		/* Dump usage watermark stats for all SRNGs */
		dp_dump_srng_high_wm_stats(soc, 0xFF);
		break;
	default:
		dp_info("Wrong Input For TxRx Host Stats");
		dp_txrx_stats_help();
		break;
	}
	return 0;
}

/**
 * dp_pdev_tid_stats_ingress_inc() - increment ingress_stack counter
 * @pdev: pdev handle
 * @val: increase in value
 *
 * Return: void
 */
static void
dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
{
	pdev->stats.tid_stats.ingress_stack += val;
}

/**
 * dp_pdev_tid_stats_osif_drop() - increment osif_drop counter
 * @pdev: pdev handle
 * @val: increase in value
 *
 * Return: void
 */
static void
dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
{
	pdev->stats.tid_stats.osif_drop += val;
}

/**
 * dp_get_fw_peer_stats()- function to print peer stats
 * @soc: soc handle
 * @pdev_id: id of the pdev handle
 * @mac_addr: mac address of the peer
 * @cap: Type of htt stats requested
 * @is_wait: if set, wait on completion from firmware response
 *
 * Currently Supporting only MAC ID based requests Only
 *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
 *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
 *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
		     uint8_t *mac_addr,
		     uint32_t cap, uint32_t is_wait)
{
	int i;
	uint32_t config_param0 = 0;
	uint32_t config_param1 = 0;
	uint32_t config_param2 = 0;
	uint32_t config_param3 = 0;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
	/* Encode the requested stats mode (@cap) as a bit in param0 */
	config_param0 |= (1 << (cap + 1));

	/* Request all peer-stats TLV types */
	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
		config_param1 |= (1 << i);
	}

	/* Pack the peer MAC: bytes 0-3 into param2, bytes 4-5 into param3 */
	config_param2 |= (mac_addr[0] & 0x000000ff);
	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);

	config_param3 |= (mac_addr[4] & 0x000000ff);
	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);

	if (is_wait) {
		/* Block (bounded) until FW answers the stats request */
		qdf_event_reset(&pdev->fw_peer_stats_event);
		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
					  config_param0, config_param1,
					  config_param2, config_param3,
					  0, DBG_STATS_COOKIE_DP_STATS, 0);
		qdf_wait_single_event(&pdev->fw_peer_stats_event,
				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
	} else {
		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
					  config_param0, config_param1,
					  config_param2, config_param3,
					  0, DBG_STATS_COOKIE_DEFAULT, 0);
	}

	return QDF_STATUS_SUCCESS;
}

/* This struct definition will be removed from here
 * once it get added in FW headers
 */
struct httstats_cmd_req {
	uint32_t config_param0;
	uint32_t config_param1;
	uint32_t config_param2;
	uint32_t config_param3;
	int cookie;
	u_int8_t stats_id;
};

/**
 * dp_get_htt_stats: function to process the httstas request
 * @soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @data: pointer to request data
 * @data_len: length for request data
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
		 uint32_t data_len)
{
	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* Caller must hand in exactly one httstats_cmd_req */
	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
				  req->config_param0, req->config_param1,
				  req->config_param2, req->config_param3,
				  req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_set_pdev_tidmap_prty_wifi3() - update tidmap priority in pdev
 * @pdev: DP_PDEV handle
 * @prio: tidmap priority value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
						uint8_t prio)
{
	struct dp_soc *soc = pdev->soc;

	soc->tidmap_prty = prio;

	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_peer_param: function to get parameters in peer
 * @cdp_soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @param: parameter type to be set
 * @val: address of buffer
 *
 * NOTE(review): stub — no peer parameter is read back; @val is left
 * untouched and success is always reported.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
				    uint8_t *peer_mac,
				    enum cdp_peer_param_type param,
				    cdp_config_param_type *val)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_set_peer_param: function to set parameters in peer
 * @cdp_soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: 0 for success. nonzero for failure.
6732 */ 6733 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 6734 uint8_t *peer_mac, 6735 enum cdp_peer_param_type param, 6736 cdp_config_param_type val) 6737 { 6738 struct dp_peer *peer = 6739 dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc, 6740 peer_mac, 0, vdev_id, 6741 DP_MOD_ID_CDP); 6742 struct dp_txrx_peer *txrx_peer; 6743 6744 if (!peer) 6745 return QDF_STATUS_E_FAILURE; 6746 6747 txrx_peer = peer->txrx_peer; 6748 if (!txrx_peer) { 6749 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 6750 return QDF_STATUS_E_FAILURE; 6751 } 6752 6753 switch (param) { 6754 case CDP_CONFIG_NAWDS: 6755 txrx_peer->nawds_enabled = val.cdp_peer_param_nawds; 6756 break; 6757 case CDP_CONFIG_ISOLATION: 6758 dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation); 6759 break; 6760 case CDP_CONFIG_IN_TWT: 6761 txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt); 6762 break; 6763 default: 6764 break; 6765 } 6766 6767 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 6768 6769 return QDF_STATUS_SUCCESS; 6770 } 6771 6772 /** 6773 * dp_get_pdev_param() - function to get parameters from pdev 6774 * @cdp_soc: DP soc handle 6775 * @pdev_id: id of pdev handle 6776 * @param: parameter type to be get 6777 * @val: buffer for value 6778 * 6779 * Return: status 6780 */ 6781 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, 6782 enum cdp_pdev_param_type param, 6783 cdp_config_param_type *val) 6784 { 6785 struct cdp_pdev *pdev = (struct cdp_pdev *) 6786 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 6787 pdev_id); 6788 if (!pdev) 6789 return QDF_STATUS_E_FAILURE; 6790 6791 switch (param) { 6792 case CDP_CONFIG_VOW: 6793 val->cdp_pdev_param_cfg_vow = 6794 ((struct dp_pdev *)pdev)->delay_stats_flag; 6795 break; 6796 case CDP_TX_PENDING: 6797 val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev); 6798 break; 6799 case CDP_FILTER_MCAST_DATA: 6800 val->cdp_pdev_param_fltr_mcast = 6801 
dp_monitor_pdev_get_filter_mcast_data(pdev); 6802 break; 6803 case CDP_FILTER_NO_DATA: 6804 val->cdp_pdev_param_fltr_none = 6805 dp_monitor_pdev_get_filter_non_data(pdev); 6806 break; 6807 case CDP_FILTER_UCAST_DATA: 6808 val->cdp_pdev_param_fltr_ucast = 6809 dp_monitor_pdev_get_filter_ucast_data(pdev); 6810 break; 6811 case CDP_MONITOR_CHANNEL: 6812 val->cdp_pdev_param_monitor_chan = 6813 dp_monitor_get_chan_num((struct dp_pdev *)pdev); 6814 break; 6815 case CDP_MONITOR_FREQUENCY: 6816 val->cdp_pdev_param_mon_freq = 6817 dp_monitor_get_chan_freq((struct dp_pdev *)pdev); 6818 break; 6819 default: 6820 return QDF_STATUS_E_FAILURE; 6821 } 6822 6823 return QDF_STATUS_SUCCESS; 6824 } 6825 6826 /** 6827 * dp_set_pdev_param() - function to set parameters in pdev 6828 * @cdp_soc: DP soc handle 6829 * @pdev_id: id of pdev handle 6830 * @param: parameter type to be set 6831 * @val: value of parameter to be set 6832 * 6833 * Return: 0 for success. nonzero for failure. 6834 */ 6835 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, 6836 enum cdp_pdev_param_type param, 6837 cdp_config_param_type val) 6838 { 6839 int target_type; 6840 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 6841 struct dp_pdev *pdev = 6842 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 6843 pdev_id); 6844 enum reg_wifi_band chan_band; 6845 6846 if (!pdev) 6847 return QDF_STATUS_E_FAILURE; 6848 6849 target_type = hal_get_target_type(soc->hal_soc); 6850 switch (target_type) { 6851 case TARGET_TYPE_QCA6750: 6852 case TARGET_TYPE_WCN6450: 6853 pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID; 6854 pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID; 6855 pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID; 6856 break; 6857 case TARGET_TYPE_KIWI: 6858 case TARGET_TYPE_MANGO: 6859 case TARGET_TYPE_PEACH: 6860 pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID; 6861 pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID; 
6862 pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID; 6863 break; 6864 default: 6865 pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID; 6866 pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID; 6867 pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID; 6868 break; 6869 } 6870 6871 switch (param) { 6872 case CDP_CONFIG_TX_CAPTURE: 6873 return dp_monitor_config_debug_sniffer(pdev, 6874 val.cdp_pdev_param_tx_capture); 6875 case CDP_CONFIG_DEBUG_SNIFFER: 6876 return dp_monitor_config_debug_sniffer(pdev, 6877 val.cdp_pdev_param_dbg_snf); 6878 case CDP_CONFIG_BPR_ENABLE: 6879 return dp_monitor_set_bpr_enable(pdev, 6880 val.cdp_pdev_param_bpr_enable); 6881 case CDP_CONFIG_PRIMARY_RADIO: 6882 pdev->is_primary = val.cdp_pdev_param_primary_radio; 6883 break; 6884 case CDP_CONFIG_CAPTURE_LATENCY: 6885 pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy; 6886 break; 6887 case CDP_INGRESS_STATS: 6888 dp_pdev_tid_stats_ingress_inc(pdev, 6889 val.cdp_pdev_param_ingrs_stats); 6890 break; 6891 case CDP_OSIF_DROP: 6892 dp_pdev_tid_stats_osif_drop(pdev, 6893 val.cdp_pdev_param_osif_drop); 6894 break; 6895 case CDP_CONFIG_ENH_RX_CAPTURE: 6896 return dp_monitor_config_enh_rx_capture(pdev, 6897 val.cdp_pdev_param_en_rx_cap); 6898 case CDP_CONFIG_ENH_TX_CAPTURE: 6899 return dp_monitor_config_enh_tx_capture(pdev, 6900 val.cdp_pdev_param_en_tx_cap); 6901 case CDP_CONFIG_HMMC_TID_OVERRIDE: 6902 pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd; 6903 break; 6904 case CDP_CONFIG_HMMC_TID_VALUE: 6905 pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid; 6906 break; 6907 case CDP_CHAN_NOISE_FLOOR: 6908 pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr; 6909 break; 6910 case CDP_TIDMAP_PRTY: 6911 dp_set_pdev_tidmap_prty_wifi3(pdev, 6912 val.cdp_pdev_param_tidmap_prty); 6913 break; 6914 case CDP_FILTER_NEIGH_PEERS: 6915 dp_monitor_set_filter_neigh_peers(pdev, 6916 val.cdp_pdev_param_fltr_neigh_peers); 6917 break; 6918 case 
CDP_MONITOR_CHANNEL: 6919 dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan); 6920 break; 6921 case CDP_MONITOR_FREQUENCY: 6922 chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq); 6923 dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq); 6924 dp_monitor_set_chan_band(pdev, chan_band); 6925 break; 6926 case CDP_CONFIG_BSS_COLOR: 6927 dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color); 6928 break; 6929 case CDP_SET_ATF_STATS_ENABLE: 6930 dp_monitor_set_atf_stats_enable(pdev, 6931 val.cdp_pdev_param_atf_stats_enable); 6932 break; 6933 case CDP_CONFIG_SPECIAL_VAP: 6934 dp_monitor_pdev_config_scan_spcl_vap(pdev, 6935 val.cdp_pdev_param_config_special_vap); 6936 dp_monitor_vdev_set_monitor_mode_buf_rings(pdev); 6937 break; 6938 case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE: 6939 dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev, 6940 val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable); 6941 break; 6942 case CDP_CONFIG_ENHANCED_STATS_ENABLE: 6943 pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable; 6944 break; 6945 case CDP_ISOLATION: 6946 pdev->isolation = val.cdp_pdev_param_isolation; 6947 break; 6948 case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE: 6949 return dp_monitor_config_undecoded_metadata_capture(pdev, 6950 val.cdp_pdev_param_undecoded_metadata_enable); 6951 break; 6952 default: 6953 return QDF_STATUS_E_INVAL; 6954 } 6955 return QDF_STATUS_SUCCESS; 6956 } 6957 6958 #ifdef QCA_UNDECODED_METADATA_SUPPORT 6959 static 6960 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc, 6961 uint8_t pdev_id, uint32_t mask, 6962 uint32_t mask_cont) 6963 { 6964 struct dp_pdev *pdev = 6965 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 6966 pdev_id); 6967 6968 if (!pdev) 6969 return QDF_STATUS_E_FAILURE; 6970 6971 return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev, 6972 mask, mask_cont); 6973 } 6974 6975 static 6976 QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t 
*cdp_soc, 6977 uint8_t pdev_id, uint32_t *mask, 6978 uint32_t *mask_cont) 6979 { 6980 struct dp_pdev *pdev = 6981 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 6982 pdev_id); 6983 6984 if (!pdev) 6985 return QDF_STATUS_E_FAILURE; 6986 6987 return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev, 6988 mask, mask_cont); 6989 } 6990 #endif 6991 6992 #ifdef QCA_PEER_EXT_STATS 6993 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc, 6994 qdf_nbuf_t nbuf) 6995 { 6996 struct dp_peer *peer = NULL; 6997 uint16_t peer_id, ring_id; 6998 uint8_t tid = qdf_nbuf_get_tid_val(nbuf); 6999 struct dp_peer_delay_stats *delay_stats = NULL; 7000 7001 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 7002 if (peer_id > soc->max_peer_id) 7003 return; 7004 7005 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP); 7006 if (qdf_unlikely(!peer)) 7007 return; 7008 7009 if (qdf_unlikely(!peer->txrx_peer)) { 7010 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 7011 return; 7012 } 7013 7014 if (qdf_likely(peer->txrx_peer->delay_stats)) { 7015 delay_stats = peer->txrx_peer->delay_stats; 7016 ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); 7017 dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id], 7018 nbuf); 7019 } 7020 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 7021 } 7022 #else 7023 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc, 7024 qdf_nbuf_t nbuf) 7025 { 7026 } 7027 #endif 7028 7029 /** 7030 * dp_calculate_delay_stats() - function to get rx delay stats 7031 * @cdp_soc: DP soc handle 7032 * @vdev_id: id of DP vdev handle 7033 * @nbuf: skb 7034 * 7035 * Return: QDF_STATUS 7036 */ 7037 static QDF_STATUS 7038 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 7039 qdf_nbuf_t nbuf) 7040 { 7041 struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc); 7042 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 7043 DP_MOD_ID_CDP); 7044 7045 if (!vdev) 7046 return QDF_STATUS_SUCCESS; 7047 7048 if (vdev->pdev->delay_stats_flag) 7049 
dp_rx_compute_delay(vdev, nbuf); 7050 else 7051 dp_rx_update_peer_delay_stats(soc, nbuf); 7052 7053 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 7054 return QDF_STATUS_SUCCESS; 7055 } 7056 7057 /** 7058 * dp_get_vdev_param() - function to get parameters from vdev 7059 * @cdp_soc: DP soc handle 7060 * @vdev_id: id of DP vdev handle 7061 * @param: parameter type to get value 7062 * @val: buffer address 7063 * 7064 * Return: status 7065 */ 7066 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 7067 enum cdp_vdev_param_type param, 7068 cdp_config_param_type *val) 7069 { 7070 struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc); 7071 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 7072 DP_MOD_ID_CDP); 7073 7074 if (!vdev) 7075 return QDF_STATUS_E_FAILURE; 7076 7077 switch (param) { 7078 case CDP_ENABLE_WDS: 7079 val->cdp_vdev_param_wds = vdev->wds_enabled; 7080 break; 7081 case CDP_ENABLE_MEC: 7082 val->cdp_vdev_param_mec = vdev->mec_enabled; 7083 break; 7084 case CDP_ENABLE_DA_WAR: 7085 val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled; 7086 break; 7087 case CDP_ENABLE_IGMP_MCAST_EN: 7088 val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en; 7089 break; 7090 case CDP_ENABLE_MCAST_EN: 7091 val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en; 7092 break; 7093 case CDP_ENABLE_HLOS_TID_OVERRIDE: 7094 val->cdp_vdev_param_hlos_tid_override = 7095 dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev); 7096 break; 7097 case CDP_ENABLE_PEER_AUTHORIZE: 7098 val->cdp_vdev_param_peer_authorize = 7099 vdev->peer_authorize; 7100 break; 7101 case CDP_TX_ENCAP_TYPE: 7102 val->cdp_vdev_param_tx_encap = vdev->tx_encap_type; 7103 break; 7104 case CDP_ENABLE_CIPHER: 7105 val->cdp_vdev_param_cipher_en = vdev->sec_type; 7106 break; 7107 #ifdef WLAN_SUPPORT_MESH_LATENCY 7108 case CDP_ENABLE_PEER_TID_LATENCY: 7109 val->cdp_vdev_param_peer_tid_latency_enable = 7110 vdev->peer_tid_latency_enabled; 7111 break; 7112 case 
CDP_SET_VAP_MESH_TID: 7113 val->cdp_vdev_param_mesh_tid = 7114 vdev->mesh_tid_latency_config.latency_tid; 7115 break; 7116 #endif 7117 case CDP_DROP_3ADDR_MCAST: 7118 val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast; 7119 break; 7120 case CDP_SET_MCAST_VDEV: 7121 soc->arch_ops.txrx_get_vdev_mcast_param(soc, vdev, val); 7122 break; 7123 #ifdef QCA_SUPPORT_WDS_EXTENDED 7124 case CDP_DROP_TX_MCAST: 7125 val->cdp_drop_tx_mcast = vdev->drop_tx_mcast; 7126 break; 7127 #endif 7128 7129 #ifdef MESH_MODE_SUPPORT 7130 case CDP_MESH_RX_FILTER: 7131 val->cdp_vdev_param_mesh_rx_filter = vdev->mesh_rx_filter; 7132 break; 7133 case CDP_MESH_MODE: 7134 val->cdp_vdev_param_mesh_mode = vdev->mesh_vdev; 7135 break; 7136 #endif 7137 case CDP_ENABLE_NAWDS: 7138 val->cdp_vdev_param_nawds = vdev->nawds_enabled; 7139 break; 7140 7141 case CDP_ENABLE_WRAP: 7142 val->cdp_vdev_param_wrap = vdev->wrap_vdev; 7143 break; 7144 7145 #ifdef DP_TRAFFIC_END_INDICATION 7146 case CDP_ENABLE_TRAFFIC_END_INDICATION: 7147 val->cdp_vdev_param_traffic_end_ind = vdev->traffic_end_ind_en; 7148 break; 7149 #endif 7150 7151 default: 7152 dp_cdp_err("%pK: param value %d is wrong", 7153 soc, param); 7154 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 7155 return QDF_STATUS_E_FAILURE; 7156 } 7157 7158 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 7159 return QDF_STATUS_SUCCESS; 7160 } 7161 7162 /** 7163 * dp_set_vdev_param() - function to set parameters in vdev 7164 * @cdp_soc: DP soc handle 7165 * @vdev_id: id of DP vdev handle 7166 * @param: parameter type to get value 7167 * @val: value 7168 * 7169 * Return: QDF_STATUS 7170 */ 7171 static QDF_STATUS 7172 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 7173 enum cdp_vdev_param_type param, cdp_config_param_type val) 7174 { 7175 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc; 7176 struct dp_vdev *vdev = 7177 dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP); 7178 uint32_t var = 0; 7179 7180 if (!vdev) 7181 return QDF_STATUS_E_FAILURE; 7182 
7183 switch (param) { 7184 case CDP_ENABLE_WDS: 7185 dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n", 7186 dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id); 7187 vdev->wds_enabled = val.cdp_vdev_param_wds; 7188 break; 7189 case CDP_ENABLE_MEC: 7190 dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n", 7191 dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id); 7192 vdev->mec_enabled = val.cdp_vdev_param_mec; 7193 break; 7194 case CDP_ENABLE_DA_WAR: 7195 dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n", 7196 dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id); 7197 vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war; 7198 dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *) 7199 vdev->pdev->soc)); 7200 break; 7201 case CDP_ENABLE_NAWDS: 7202 vdev->nawds_enabled = val.cdp_vdev_param_nawds; 7203 break; 7204 case CDP_ENABLE_MCAST_EN: 7205 vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en; 7206 break; 7207 case CDP_ENABLE_IGMP_MCAST_EN: 7208 vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en; 7209 break; 7210 case CDP_ENABLE_PROXYSTA: 7211 vdev->proxysta_vdev = val.cdp_vdev_param_proxysta; 7212 break; 7213 case CDP_UPDATE_TDLS_FLAGS: 7214 vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags; 7215 break; 7216 case CDP_CFG_WDS_AGING_TIMER: 7217 var = val.cdp_vdev_param_aging_tmr; 7218 if (!var) 7219 qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer); 7220 else if (var != vdev->wds_aging_timer_val) 7221 qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var); 7222 7223 vdev->wds_aging_timer_val = var; 7224 break; 7225 case CDP_ENABLE_AP_BRIDGE: 7226 if (wlan_op_mode_sta != vdev->opmode) 7227 vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en; 7228 else 7229 vdev->ap_bridge_enabled = false; 7230 break; 7231 case CDP_ENABLE_CIPHER: 7232 vdev->sec_type = val.cdp_vdev_param_cipher_en; 7233 break; 7234 case CDP_ENABLE_QWRAP_ISOLATION: 7235 vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation; 7236 break; 7237 case 
CDP_UPDATE_MULTIPASS: 7238 vdev->multipass_en = val.cdp_vdev_param_update_multipass; 7239 break; 7240 case CDP_TX_ENCAP_TYPE: 7241 vdev->tx_encap_type = val.cdp_vdev_param_tx_encap; 7242 break; 7243 case CDP_RX_DECAP_TYPE: 7244 vdev->rx_decap_type = val.cdp_vdev_param_rx_decap; 7245 break; 7246 case CDP_TID_VDEV_PRTY: 7247 vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty; 7248 break; 7249 case CDP_TIDMAP_TBL_ID: 7250 vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id; 7251 break; 7252 #ifdef MESH_MODE_SUPPORT 7253 case CDP_MESH_RX_FILTER: 7254 dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev, 7255 val.cdp_vdev_param_mesh_rx_filter); 7256 break; 7257 case CDP_MESH_MODE: 7258 dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev, 7259 val.cdp_vdev_param_mesh_mode); 7260 break; 7261 #endif 7262 case CDP_ENABLE_HLOS_TID_OVERRIDE: 7263 dp_info("vdev_id %d enable hlod tid override %d", vdev_id, 7264 val.cdp_vdev_param_hlos_tid_override); 7265 dp_vdev_set_hlos_tid_override(vdev, 7266 val.cdp_vdev_param_hlos_tid_override); 7267 break; 7268 #ifdef QCA_SUPPORT_WDS_EXTENDED 7269 case CDP_CFG_WDS_EXT: 7270 if (vdev->opmode == wlan_op_mode_ap) 7271 vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext; 7272 break; 7273 case CDP_DROP_TX_MCAST: 7274 dp_info("vdev_id %d drop tx mcast :%d", vdev_id, 7275 val.cdp_drop_tx_mcast); 7276 vdev->drop_tx_mcast = val.cdp_drop_tx_mcast; 7277 break; 7278 #endif 7279 case CDP_ENABLE_PEER_AUTHORIZE: 7280 vdev->peer_authorize = val.cdp_vdev_param_peer_authorize; 7281 break; 7282 #ifdef WLAN_SUPPORT_MESH_LATENCY 7283 case CDP_ENABLE_PEER_TID_LATENCY: 7284 dp_info("vdev_id %d enable peer tid latency %d", vdev_id, 7285 val.cdp_vdev_param_peer_tid_latency_enable); 7286 vdev->peer_tid_latency_enabled = 7287 val.cdp_vdev_param_peer_tid_latency_enable; 7288 break; 7289 case CDP_SET_VAP_MESH_TID: 7290 dp_info("vdev_id %d enable peer tid latency %d", vdev_id, 7291 val.cdp_vdev_param_mesh_tid); 7292 vdev->mesh_tid_latency_config.latency_tid 7293 = 
val.cdp_vdev_param_mesh_tid; 7294 break; 7295 #endif 7296 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE 7297 case CDP_SKIP_BAR_UPDATE_AP: 7298 dp_info("vdev_id %d skip BAR update: %u", vdev_id, 7299 val.cdp_skip_bar_update); 7300 vdev->skip_bar_update = val.cdp_skip_bar_update; 7301 vdev->skip_bar_update_last_ts = 0; 7302 break; 7303 #endif 7304 case CDP_DROP_3ADDR_MCAST: 7305 dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id, 7306 val.cdp_drop_3addr_mcast); 7307 vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast; 7308 break; 7309 case CDP_ENABLE_WRAP: 7310 vdev->wrap_vdev = val.cdp_vdev_param_wrap; 7311 break; 7312 #ifdef DP_TRAFFIC_END_INDICATION 7313 case CDP_ENABLE_TRAFFIC_END_INDICATION: 7314 vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind; 7315 break; 7316 #endif 7317 #ifdef FEATURE_DIRECT_LINK 7318 case CDP_VDEV_TX_TO_FW: 7319 dp_info("vdev_id %d to_fw :%d", vdev_id, val.cdp_vdev_tx_to_fw); 7320 vdev->to_fw = val.cdp_vdev_tx_to_fw; 7321 break; 7322 #endif 7323 default: 7324 break; 7325 } 7326 7327 dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev); 7328 dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val); 7329 7330 /* Update PDEV flags as VDEV flags are updated */ 7331 dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev); 7332 dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP); 7333 7334 return QDF_STATUS_SUCCESS; 7335 } 7336 7337 #if defined(FEATURE_WLAN_TDLS) && defined(WLAN_FEATURE_11BE_MLO) 7338 /** 7339 * dp_update_mlo_vdev_for_tdls() - update mlo vdev configuration 7340 * for TDLS 7341 * @cdp_soc: DP soc handle 7342 * @vdev_id: id of DP vdev handle 7343 * @param: parameter type for vdev 7344 * @val: value 7345 * 7346 * If TDLS connection is from secondary vdev, then copy osif_vdev from 7347 * primary vdev to support RX, update TX bank register info for primary 7348 * vdev as well. 7349 * If TDLS connection is from primary vdev, same as before. 
7350 * 7351 * Return: None 7352 */ 7353 static void 7354 dp_update_mlo_vdev_for_tdls(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 7355 enum cdp_vdev_param_type param, 7356 cdp_config_param_type val) 7357 { 7358 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 7359 struct dp_peer *peer; 7360 struct dp_peer *tmp_peer; 7361 struct dp_peer *mld_peer; 7362 struct dp_vdev *vdev = NULL; 7363 struct dp_vdev *pri_vdev = NULL; 7364 uint8_t pri_vdev_id = CDP_INVALID_VDEV_ID; 7365 7366 if (param != CDP_UPDATE_TDLS_FLAGS) 7367 return; 7368 7369 dp_info("update TDLS flag for vdev_id %d, val %d", 7370 vdev_id, val.cdp_vdev_param_tdls_flags); 7371 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC); 7372 /* only check for STA mode vdev */ 7373 if (!vdev || vdev->opmode != wlan_op_mode_sta) { 7374 dp_info("vdev is not as expected for TDLS"); 7375 goto comp_ret; 7376 } 7377 7378 /* Find primary vdev_id */ 7379 qdf_spin_lock_bh(&vdev->peer_list_lock); 7380 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 7381 peer_list_elem, 7382 tmp_peer) { 7383 if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) == 7384 QDF_STATUS_SUCCESS) { 7385 /* do check only if MLO link peer exist */ 7386 if (IS_MLO_DP_LINK_PEER(peer)) { 7387 mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer); 7388 pri_vdev_id = mld_peer->vdev->vdev_id; 7389 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 7390 break; 7391 } 7392 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 7393 } 7394 } 7395 qdf_spin_unlock_bh(&vdev->peer_list_lock); 7396 7397 if (pri_vdev_id != CDP_INVALID_VDEV_ID) 7398 pri_vdev = dp_vdev_get_ref_by_id(soc, pri_vdev_id, 7399 DP_MOD_ID_MISC); 7400 7401 /* If current vdev is not same as primary vdev */ 7402 if (pri_vdev && pri_vdev != vdev) { 7403 dp_info("primary vdev [%d] %pK different with vdev [%d] %pK", 7404 pri_vdev->vdev_id, pri_vdev, 7405 vdev->vdev_id, vdev); 7406 /* update osif_vdev to support RX for vdev */ 7407 vdev->osif_vdev = pri_vdev->osif_vdev; 7408 dp_set_vdev_param(cdp_soc, pri_vdev->vdev_id, 7409 
CDP_UPDATE_TDLS_FLAGS, val); 7410 } 7411 7412 comp_ret: 7413 if (pri_vdev) 7414 dp_vdev_unref_delete(soc, pri_vdev, DP_MOD_ID_MISC); 7415 if (vdev) 7416 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC); 7417 } 7418 7419 static QDF_STATUS 7420 dp_set_vdev_param_wrapper(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 7421 enum cdp_vdev_param_type param, 7422 cdp_config_param_type val) 7423 { 7424 dp_update_mlo_vdev_for_tdls(cdp_soc, vdev_id, param, val); 7425 7426 return dp_set_vdev_param(cdp_soc, vdev_id, param, val); 7427 } 7428 #else 7429 static QDF_STATUS 7430 dp_set_vdev_param_wrapper(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 7431 enum cdp_vdev_param_type param, 7432 cdp_config_param_type val) 7433 { 7434 return dp_set_vdev_param(cdp_soc, vdev_id, param, val); 7435 } 7436 #endif 7437 7438 /** 7439 * dp_set_psoc_param: function to set parameters in psoc 7440 * @cdp_soc: DP soc handle 7441 * @param: parameter type to be set 7442 * @val: value of parameter to be set 7443 * 7444 * Return: QDF_STATUS 7445 */ 7446 static QDF_STATUS 7447 dp_set_psoc_param(struct cdp_soc_t *cdp_soc, 7448 enum cdp_psoc_param_type param, cdp_config_param_type val) 7449 { 7450 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 7451 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx; 7452 7453 switch (param) { 7454 case CDP_ENABLE_RATE_STATS: 7455 soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats; 7456 break; 7457 case CDP_SET_NSS_CFG: 7458 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, 7459 val.cdp_psoc_param_en_nss_cfg); 7460 /* 7461 * TODO: masked out based on the per offloaded radio 7462 */ 7463 switch (val.cdp_psoc_param_en_nss_cfg) { 7464 case dp_nss_cfg_default: 7465 break; 7466 case dp_nss_cfg_first_radio: 7467 /* 7468 * This configuration is valid for single band radio which 7469 * is also NSS offload. 
7470 */ 7471 case dp_nss_cfg_dbdc: 7472 case dp_nss_cfg_dbtc: 7473 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0); 7474 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0); 7475 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0); 7476 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0); 7477 break; 7478 default: 7479 dp_cdp_err("%pK: Invalid offload config %d", 7480 soc, val.cdp_psoc_param_en_nss_cfg); 7481 } 7482 7483 dp_cdp_err("%pK: nss-wifi<0> nss config is enabled" 7484 , soc); 7485 break; 7486 case CDP_SET_PREFERRED_HW_MODE: 7487 soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode; 7488 break; 7489 case CDP_IPA_ENABLE: 7490 soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled; 7491 break; 7492 case CDP_CFG_VDEV_STATS_HW_OFFLOAD: 7493 wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx, 7494 val.cdp_psoc_param_vdev_stats_hw_offload); 7495 break; 7496 case CDP_SAWF_ENABLE: 7497 wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled); 7498 break; 7499 case CDP_UMAC_RST_SKEL_ENABLE: 7500 dp_umac_rst_skel_enable_update(soc, val.cdp_umac_rst_skel); 7501 break; 7502 case CDP_UMAC_RESET_STATS: 7503 dp_umac_reset_stats_print(soc); 7504 break; 7505 case CDP_SAWF_STATS: 7506 wlan_cfg_set_sawf_stats_config(wlan_cfg_ctx, 7507 val.cdp_sawf_stats); 7508 break; 7509 default: 7510 break; 7511 } 7512 7513 return QDF_STATUS_SUCCESS; 7514 } 7515 7516 /** 7517 * dp_get_psoc_param: function to get parameters in soc 7518 * @cdp_soc: DP soc handle 7519 * @param: parameter type to be set 7520 * @val: address of buffer 7521 * 7522 * Return: status 7523 */ 7524 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc, 7525 enum cdp_psoc_param_type param, 7526 cdp_config_param_type *val) 7527 { 7528 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 7529 7530 if (!soc) 7531 return QDF_STATUS_E_FAILURE; 7532 7533 switch (param) { 7534 case CDP_CFG_PEER_EXT_STATS: 7535 val->cdp_psoc_param_pext_stats = 7536 wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx); 7537 break; 7538 case 
CDP_CFG_VDEV_STATS_HW_OFFLOAD: 7539 val->cdp_psoc_param_vdev_stats_hw_offload = 7540 wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx); 7541 break; 7542 case CDP_UMAC_RST_SKEL_ENABLE: 7543 val->cdp_umac_rst_skel = dp_umac_rst_skel_enable_get(soc); 7544 break; 7545 default: 7546 dp_warn("Invalid param"); 7547 break; 7548 } 7549 7550 return QDF_STATUS_SUCCESS; 7551 } 7552 7553 /** 7554 * dp_set_vdev_dscp_tid_map_wifi3() - Update Map ID selected for particular vdev 7555 * @cdp_soc: CDP SOC handle 7556 * @vdev_id: id of DP_VDEV handle 7557 * @map_id:ID of map that needs to be updated 7558 * 7559 * Return: QDF_STATUS 7560 */ 7561 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc, 7562 uint8_t vdev_id, 7563 uint8_t map_id) 7564 { 7565 cdp_config_param_type val; 7566 struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc); 7567 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 7568 DP_MOD_ID_CDP); 7569 if (vdev) { 7570 vdev->dscp_tid_map_id = map_id; 7571 val.cdp_vdev_param_dscp_tid_map_id = map_id; 7572 soc->arch_ops.txrx_set_vdev_param(soc, 7573 vdev, 7574 CDP_UPDATE_DSCP_TO_TID_MAP, 7575 val); 7576 /* Update flag for transmit tid classification */ 7577 if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map) 7578 vdev->skip_sw_tid_classification |= 7579 DP_TX_HW_DSCP_TID_MAP_VALID; 7580 else 7581 vdev->skip_sw_tid_classification &= 7582 ~DP_TX_HW_DSCP_TID_MAP_VALID; 7583 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 7584 return QDF_STATUS_SUCCESS; 7585 } 7586 7587 return QDF_STATUS_E_FAILURE; 7588 } 7589 7590 #ifdef DP_RATETABLE_SUPPORT 7591 static int dp_txrx_get_ratekbps(int preamb, int mcs, 7592 int htflag, int gintval) 7593 { 7594 uint32_t rix; 7595 uint16_t ratecode; 7596 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 7597 7598 return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1, 7599 (uint8_t)preamb, 1, punc_mode, 7600 &rix, &ratecode); 7601 } 7602 #else 7603 static int dp_txrx_get_ratekbps(int preamb, int mcs, 
7604 int htflag, int gintval) 7605 { 7606 return 0; 7607 } 7608 #endif 7609 7610 /** 7611 * dp_txrx_get_pdev_stats() - Returns cdp_pdev_stats 7612 * @soc: DP soc handle 7613 * @pdev_id: id of DP pdev handle 7614 * @pdev_stats: buffer to copy to 7615 * 7616 * Return: status success/failure 7617 */ 7618 static QDF_STATUS 7619 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id, 7620 struct cdp_pdev_stats *pdev_stats) 7621 { 7622 struct dp_pdev *pdev = 7623 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 7624 pdev_id); 7625 if (!pdev) 7626 return QDF_STATUS_E_FAILURE; 7627 7628 dp_aggregate_pdev_stats(pdev); 7629 7630 qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats)); 7631 return QDF_STATUS_SUCCESS; 7632 } 7633 7634 /** 7635 * dp_txrx_update_vdev_me_stats() - Update vdev ME stats sent from CDP 7636 * @vdev: DP vdev handle 7637 * @buf: buffer containing specific stats structure 7638 * 7639 * Return: void 7640 */ 7641 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev, 7642 void *buf) 7643 { 7644 struct cdp_tx_ingress_stats *host_stats = NULL; 7645 7646 if (!buf) { 7647 dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc); 7648 return; 7649 } 7650 host_stats = (struct cdp_tx_ingress_stats *)buf; 7651 7652 DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 7653 host_stats->mcast_en.mcast_pkt.num, 7654 host_stats->mcast_en.mcast_pkt.bytes); 7655 DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 7656 host_stats->mcast_en.dropped_map_error); 7657 DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac, 7658 host_stats->mcast_en.dropped_self_mac); 7659 DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail, 7660 host_stats->mcast_en.dropped_send_fail); 7661 DP_STATS_INC(vdev, tx_i.mcast_en.ucast, 7662 host_stats->mcast_en.ucast); 7663 DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 7664 host_stats->mcast_en.fail_seg_alloc); 7665 DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 7666 host_stats->mcast_en.clone_fail); 7667 } 7668 7669 
/**
 * dp_txrx_update_vdev_igmp_me_stats() - Update vdev IGMP ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure
 *
 * Return: void
 */
static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
					      void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	/* Fold the caller-provided IGMP multicast-enhancement counters
	 * into this vdev's ingress stats.
	 */
	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
		     host_stats->igmp_mcast_en.igmp_rcvd);
	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
		     host_stats->igmp_mcast_en.igmp_ucast_converted);
}

/**
 * dp_txrx_update_vdev_host_stats() - Update stats sent through CDP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @buf: buffer containing specific stats structure
 * @stats_id: stats type
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
						 uint8_t vdev_id,
						 void *buf,
						 uint16_t stats_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	/* Takes a vdev reference; released before every successful return */
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_cdp_err("%pK: Invalid vdev handle", soc);
		return QDF_STATUS_E_FAILURE;
	}

	switch (stats_id) {
	case DP_VDEV_STATS_PKT_CNT_ONLY:
		/* nothing to accumulate for packet-count-only updates */
		break;
	case DP_VDEV_STATS_TX_ME:
		dp_txrx_update_vdev_me_stats(vdev, buf);
		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
		break;
	default:
		qdf_info("Invalid stats_id %d", stats_id);
		break;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_get_peer_stats() - will return cdp_peer_stats
 * @soc: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER
handle 7737 * @peer_stats: buffer to copy to 7738 * 7739 * Return: status success/failure 7740 */ 7741 static QDF_STATUS 7742 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id, 7743 uint8_t *peer_mac, struct cdp_peer_stats *peer_stats) 7744 { 7745 struct dp_peer *peer = NULL; 7746 struct cdp_peer_info peer_info = { 0 }; 7747 7748 DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false, 7749 CDP_WILD_PEER_TYPE); 7750 7751 peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info, 7752 DP_MOD_ID_CDP); 7753 7754 qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats)); 7755 7756 if (!peer) 7757 return QDF_STATUS_E_FAILURE; 7758 7759 dp_get_peer_stats(peer, peer_stats); 7760 7761 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 7762 7763 return QDF_STATUS_SUCCESS; 7764 } 7765 7766 /** 7767 * dp_txrx_get_peer_stats_param() - will return specified cdp_peer_stats 7768 * @soc: soc handle 7769 * @vdev_id: vdev_id of vdev object 7770 * @peer_mac: mac address of the peer 7771 * @type: enum of required stats 7772 * @buf: buffer to hold the value 7773 * 7774 * Return: status success/failure 7775 */ 7776 static QDF_STATUS 7777 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id, 7778 uint8_t *peer_mac, enum cdp_peer_stats_type type, 7779 cdp_peer_stats_param_t *buf) 7780 { 7781 QDF_STATUS ret; 7782 struct dp_peer *peer = NULL; 7783 struct cdp_peer_info peer_info = { 0 }; 7784 7785 DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false, 7786 CDP_WILD_PEER_TYPE); 7787 7788 peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info, 7789 DP_MOD_ID_CDP); 7790 7791 if (!peer) { 7792 dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT, 7793 soc, QDF_MAC_ADDR_REF(peer_mac)); 7794 return QDF_STATUS_E_FAILURE; 7795 } 7796 7797 if (type >= cdp_peer_per_pkt_stats_min && 7798 type < cdp_peer_per_pkt_stats_max) { 7799 ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf); 7800 } else if (type >= cdp_peer_extd_stats_min && 7801 type 
< cdp_peer_extd_stats_max) {
		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
	} else {
		dp_err("%pK: Invalid stat type requested", soc);
		ret = QDF_STATUS_E_FAILURE;
	}

	/* Drop the reference taken by the hash-find wrapper */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return ret;
}

/**
 * dp_txrx_reset_peer_stats() - reset cdp_peer_stats for particular peer
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 *
 * Return: QDF_STATUS
 */
#ifdef WLAN_FEATURE_11BE_MLO
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);

	/* For an MLD peer, also clear each link peer on its own soc;
	 * the link-peer list is taken and released under DP_MOD_ID_CDP.
	 */
	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;

			DP_STATS_CLR(link_peer);
			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
		}

		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_reset_stats(soc, peer);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#else
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer =
			dp_peer_find_hash_find((struct dp_soc *)soc,
					       peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);
	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#endif

/**
 * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats
 * @soc_hdl: CDP SoC handle
 * @vdev_id: vdev Id
 * @buf: buffer for vdev stats
 * @is_aggregate: are aggregate stats being collected
 *
 * Return: int (0 on success, 1 if the vdev could not be found)
 */
static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				  void *buf, bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct cdp_vdev_stats *vdev_stats;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return 1;

	vdev_stats = (struct cdp_vdev_stats *)buf;

	if (is_aggregate) {
		dp_aggregate_vdev_stats(vdev, buf);
	} else {
		/* NOTE(review): copy size is sizeof(vdev->stats); assumes the
		 * caller's cdp_vdev_stats buffer is at least that large —
		 * confirm against the CDP layout.
		 */
		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 0;
}

/**
 * dp_get_total_per() - get total per
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: % error rate using retries per packet and success packets
 */
static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return 0;

	dp_aggregate_pdev_stats(pdev);
	/* Guard the division: no traffic at all means 0% error rate */
	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
		return 0;
	return ((pdev->stats.tx.retries * 100) /
		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
}

/**
 * dp_txrx_stats_publish() - publish pdev stats into a buffer
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 * @buf: to hold pdev_stats
 *
 * Return: int
 */
static int
dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
		      struct cdp_stats_extd *buf)
{
	struct cdp_txrx_stats_req req = {0,};
	QDF_STATUS status;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return TXRX_STATS_LEVEL_OFF;

	/* Only one FW stats request may be in flight per pdev */
	if (pdev->pending_fw_stats_response)
		return TXRX_STATS_LEVEL_OFF;

	dp_aggregate_pdev_stats(pdev);

	/* Two HTT requests (TX then RX) share one completion event; the
	 * handler fills pdev->stats as TLVs arrive and signals when done.
	 */
	pdev->pending_fw_stats_response = true;
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
	pdev->fw_stats_tlv_bitmap_rcvd = 0;
	qdf_event_reset(&pdev->fw_stats_event);
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				  req.param1, req.param2, req.param3, 0,
				  req.cookie_val, 0);

	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				  req.param1, req.param2, req.param3, 0,
				  req.cookie_val, 0);

	status =
		qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);

	if (status != QDF_STATUS_SUCCESS) {
		if (status == QDF_STATUS_E_TIMEOUT)
			qdf_debug("TIMEOUT_OCCURS");
		pdev->pending_fw_stats_response = false;
		return TXRX_STATS_LEVEL_OFF;
	}
	/* NOTE(review): copies cdp_pdev_stats bytes into a cdp_stats_extd
	 * buffer; assumes the latter starts with / contains the former —
	 * confirm the struct layout.
	 */
	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
	pdev->pending_fw_stats_response = false;

	return TXRX_STATS_LEVEL;
}

/**
 * dp_get_obss_stats() - Get Pdev OBSS stats from Fw
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 * @buf: to hold pdev obss stats
 * @req: Pointer to CDP TxRx stats
 *
 * Return: status
 */
static
QDF_STATUS
dp_get_obss_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
		  struct cdp_pdev_obss_pd_stats_tlv *buf,
		  struct cdp_txrx_stats_req *req)
{
	QDF_STATUS status;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	/* Only one OBSS stats request may be in flight per pdev */
	if (pdev->pending_fw_obss_stats_response)
		return QDF_STATUS_E_AGAIN;

	pdev->pending_fw_obss_stats_response = true;
	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
	req->cookie_val = DBG_STATS_COOKIE_HTT_OBSS;
	qdf_event_reset(&pdev->fw_obss_stats_event);
	status = dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
					   req->param1, req->param2,
					   req->param3, 0, req->cookie_val,
					   req->mac_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		/* Send failed: clear the pending flag before bailing out */
		pdev->pending_fw_obss_stats_response = false;
		return status;
	}
	status =
		qdf_wait_single_event(&pdev->fw_obss_stats_event,
				      DP_MAX_SLEEP_TIME);

	if (status != QDF_STATUS_SUCCESS) {
		if (status == QDF_STATUS_E_TIMEOUT)
			qdf_debug("TIMEOUT_OCCURS");
		pdev->pending_fw_obss_stats_response = false;
		return QDF_STATUS_E_TIMEOUT;
	}
	qdf_mem_copy(buf, &pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
		     sizeof(struct cdp_pdev_obss_pd_stats_tlv));
	pdev->pending_fw_obss_stats_response = false;
	return status;
}

/**
 * dp_clear_pdev_obss_pd_stats() - Clear pdev obss stats
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 * @req: Pointer to CDP TxRx stats request mac_id will be
 *	 pre-filled and should not be overwritten
 *
 * Return: status
 */
static QDF_STATUS
dp_clear_pdev_obss_pd_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
			    struct cdp_txrx_stats_req *req)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	uint32_t cookie_val =
DBG_STATS_COOKIE_DEFAULT;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	/*
	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
	 * from param0 to param3 according to below rule:
	 *
	 * PARAM:
	 *   - config_param0 : start_offset (stats type)
	 *   - config_param1 : stats bmask from start offset
	 *   - config_param2 : stats bmask from start offset + 32
	 *   - config_param3 : stats bmask from start offset + 64
	 */
	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_RESET;
	req->param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
	req->param1 = 0x00000001;

	return dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
					 req->param1, req->param2, req->param3,
					 0, cookie_val, req->mac_id);
}

/**
 * dp_set_pdev_dscp_tid_map_wifi3() - update dscp tid map in pdev
 * @soc_handle: soc handle
 * @pdev_id: id of DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: index value in map
 * @tid: tid value passed by the user
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
			       uint8_t pdev_id,
			       uint8_t map_id,
			       uint8_t tos, uint8_t tid)
{
	uint8_t dscp;
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* Extract the 6-bit DSCP field from the ToS byte */
	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	/* NOTE(review): the SW map is written before map_id is validated
	 * against num_hw_dscp_tid_map below; on the failure path the SW
	 * table still holds the new tid — confirm this is intended.
	 */
	pdev->dscp_tid_map[map_id][dscp] = tid;

	if (map_id < soc->num_hw_dscp_tid_map)
		hal_tx_update_dscp_tid(soc->hal_soc, tid,
				       map_id, dscp);
	else
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_SYSFS_DP_STATS
/**
 * dp_sysfs_event_trigger() - Trigger event to wait for firmware
 * stats request response.
 * @soc: soc handle
 * @cookie_val: cookie value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	/* wait for firmware response for sysfs stats request */
	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
		if (!soc) {
			dp_cdp_err("soc is NULL");
			return QDF_STATUS_E_FAILURE;
		}
		/* wait for event completion */
		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
		if (status == QDF_STATUS_SUCCESS)
			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
		else if (status == QDF_STATUS_E_TIMEOUT)
			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
		else
			dp_cdp_warn("sysfs_txrx_fw_request_done event error code %d", status);
	}

	return status;
}
#else /* WLAN_SYSFS_DP_STATS */
static QDF_STATUS
dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SYSFS_DP_STATS */

/**
 * dp_fw_stats_process() - Process TXRX FW stats request.
 * @vdev: DP VDEV handle
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_fw_stats_process(struct dp_vdev *vdev,
		    struct cdp_txrx_stats_req *req)
{
	struct dp_pdev *pdev = NULL;
	struct dp_soc *soc = NULL;
	uint32_t stats = req->stats;
	uint8_t mac_id = req->mac_id;
	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;

	if (!vdev) {
		DP_TRACE(NONE, "VDEV not found");
		return QDF_STATUS_E_FAILURE;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		DP_TRACE(NONE, "PDEV not found");
		return QDF_STATUS_E_FAILURE;
	}

	soc = pdev->soc;
	if (!soc) {
		DP_TRACE(NONE, "soc not found");
		return QDF_STATUS_E_FAILURE;
	}

	/* In case request is from host sysfs for displaying stats on console */
	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
		cookie_val = DBG_SYSFS_STATS_COOKIE;

	/*
	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
	 * from param0 to param3 according to below rule:
	 *
	 * PARAM:
	 *   - config_param0 : start_offset (stats type)
	 *   - config_param1 : stats bmask from start offset
	 *   - config_param2 : stats bmask from start offset + 32
	 *   - config_param3 : stats bmask from start offset + 64
	 */
	if (req->stats == CDP_TXRX_STATS_0) {
		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
		req->param1 = 0xFFFFFFFF;
		req->param2 = 0xFFFFFFFF;
		req->param3 = 0xFFFFFFFF;
	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
		/* MU stats are scoped to this vdev via the vdev-id mask */
		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
	}

	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
		dp_h2t_ext_stats_msg_send(pdev,
					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
					  req->param0, req->param1, req->param2,
					  req->param3, 0, cookie_val,
					  mac_id);
	} else {
		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
					  req->param1, req->param2, req->param3,
					  0, cookie_val, mac_id);
	}

	/* Blocks waiting for the FW response only for sysfs-cookie requests */
	dp_sysfs_event_trigger(soc, cookie_val);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_stats_request - function to map to firmware and host stats
 * @soc_handle: soc handle
 * @vdev_id: virtual device ID
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
				 uint8_t vdev_id,
				 struct cdp_txrx_stats_req *req)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
	int host_stats;
	int fw_stats;
	enum cdp_stats stats;
	int num_stats;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_E_INVAL;

	if (!vdev || !req) {
		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
		dp_err("Invalid mac id request");
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	stats = req->stats;
	if (stats >= CDP_TXRX_MAX_STATS) {
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	/*
	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
	 * has to be updated if new FW HTT stats added
	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;

	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);

	if (stats >= num_stats) {
		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	req->stats = stats;
	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
		stats, fw_stats, host_stats);

	if (fw_stats != TXRX_FW_STATS_INVALID) {
		/* update request with FW
stats type */ 8304 req->stats = fw_stats; 8305 status = dp_fw_stats_process(vdev, req); 8306 } else if ((host_stats != TXRX_HOST_STATS_INVALID) && 8307 (host_stats <= TXRX_HOST_STATS_MAX)) 8308 status = dp_print_host_stats(vdev, req, soc); 8309 else 8310 dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc); 8311 fail0: 8312 if (vdev) 8313 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 8314 return status; 8315 } 8316 8317 /** 8318 * dp_soc_notify_asserted_soc() - API to notify asserted soc info 8319 * @psoc: CDP soc handle 8320 * 8321 * Return: QDF_STATUS 8322 */ 8323 static QDF_STATUS dp_soc_notify_asserted_soc(struct cdp_soc_t *psoc) 8324 { 8325 struct dp_soc *soc = (struct dp_soc *)psoc; 8326 8327 if (!soc) { 8328 dp_cdp_err("%pK: soc is NULL", soc); 8329 return QDF_STATUS_E_INVAL; 8330 } 8331 8332 return dp_umac_reset_notify_asserted_soc(soc); 8333 } 8334 8335 /** 8336 * dp_txrx_dump_stats() - Dump statistics 8337 * @psoc: CDP soc handle 8338 * @value: Statistics option 8339 * @level: verbosity level 8340 */ 8341 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value, 8342 enum qdf_stats_verbosity_level level) 8343 { 8344 struct dp_soc *soc = 8345 (struct dp_soc *)psoc; 8346 QDF_STATUS status = QDF_STATUS_SUCCESS; 8347 8348 if (!soc) { 8349 dp_cdp_err("%pK: soc is NULL", soc); 8350 return QDF_STATUS_E_INVAL; 8351 } 8352 8353 switch (value) { 8354 case CDP_TXRX_PATH_STATS: 8355 dp_txrx_path_stats(soc); 8356 dp_print_soc_interrupt_stats(soc); 8357 hal_dump_reg_write_stats(soc->hal_soc); 8358 dp_pdev_print_tx_delay_stats(soc); 8359 /* Dump usage watermark stats for core TX/RX SRNGs */ 8360 dp_dump_srng_high_wm_stats(soc, (1 << REO_DST)); 8361 dp_print_fisa_stats(soc); 8362 break; 8363 8364 case CDP_RX_RING_STATS: 8365 dp_print_per_ring_stats(soc); 8366 break; 8367 8368 case CDP_TXRX_TSO_STATS: 8369 dp_print_tso_stats(soc, level); 8370 break; 8371 8372 case CDP_DUMP_TX_FLOW_POOL_INFO: 8373 if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH) 8374 
cdp_dump_flow_pool_info((struct cdp_soc_t *)soc); 8375 else 8376 dp_tx_dump_flow_pool_info_compact(soc); 8377 break; 8378 8379 case CDP_DP_NAPI_STATS: 8380 dp_print_napi_stats(soc); 8381 break; 8382 8383 case CDP_TXRX_DESC_STATS: 8384 /* TODO: NOT IMPLEMENTED */ 8385 break; 8386 8387 case CDP_DP_RX_FISA_STATS: 8388 dp_rx_dump_fisa_stats(soc); 8389 break; 8390 8391 case CDP_DP_SWLM_STATS: 8392 dp_print_swlm_stats(soc); 8393 break; 8394 8395 case CDP_DP_TX_HW_LATENCY_STATS: 8396 dp_pdev_print_tx_delay_stats(soc); 8397 break; 8398 8399 default: 8400 status = QDF_STATUS_E_INVAL; 8401 break; 8402 } 8403 8404 return status; 8405 8406 } 8407 8408 #ifdef WLAN_SYSFS_DP_STATS 8409 static 8410 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id, 8411 uint32_t *stat_type) 8412 { 8413 qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock); 8414 *stat_type = soc->sysfs_config->stat_type_requested; 8415 *mac_id = soc->sysfs_config->mac_id; 8416 8417 qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock); 8418 } 8419 8420 static 8421 void dp_sysfs_update_config_buf_params(struct dp_soc *soc, 8422 uint32_t curr_len, 8423 uint32_t max_buf_len, 8424 char *buf) 8425 { 8426 qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer); 8427 /* set sysfs_config parameters */ 8428 soc->sysfs_config->buf = buf; 8429 soc->sysfs_config->curr_buffer_length = curr_len; 8430 soc->sysfs_config->max_buffer_length = max_buf_len; 8431 qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer); 8432 } 8433 8434 static 8435 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl, 8436 char *buf, uint32_t buf_size) 8437 { 8438 uint32_t mac_id = 0; 8439 uint32_t stat_type = 0; 8440 uint32_t fw_stats = 0; 8441 uint32_t host_stats = 0; 8442 enum cdp_stats stats; 8443 struct cdp_txrx_stats_req req; 8444 uint32_t num_stats; 8445 struct dp_soc *soc = NULL; 8446 8447 if (!soc_hdl) { 8448 dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl); 8449 return QDF_STATUS_E_INVAL; 8450 } 8451 8452 
soc = cdp_soc_t_to_dp_soc(soc_hdl); 8453 8454 if (!soc) { 8455 dp_cdp_err("%pK: soc is NULL", soc); 8456 return QDF_STATUS_E_INVAL; 8457 } 8458 8459 dp_sysfs_get_stat_type(soc, &mac_id, &stat_type); 8460 8461 stats = stat_type; 8462 if (stats >= CDP_TXRX_MAX_STATS) { 8463 dp_cdp_info("sysfs stat type requested is invalid"); 8464 return QDF_STATUS_E_INVAL; 8465 } 8466 /* 8467 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available 8468 * has to be updated if new FW HTT stats added 8469 */ 8470 if (stats > CDP_TXRX_MAX_STATS) 8471 stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX; 8472 8473 num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table); 8474 8475 if (stats >= num_stats) { 8476 dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d", 8477 soc, stats, num_stats); 8478 return QDF_STATUS_E_INVAL; 8479 } 8480 8481 /* build request */ 8482 fw_stats = dp_stats_mapping_table[stats][STATS_FW]; 8483 host_stats = dp_stats_mapping_table[stats][STATS_HOST]; 8484 8485 req.stats = stat_type; 8486 req.mac_id = mac_id; 8487 /* request stats to be printed */ 8488 qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock); 8489 8490 if (fw_stats != TXRX_FW_STATS_INVALID) { 8491 /* update request with FW stats type */ 8492 req.cookie_val = DBG_SYSFS_STATS_COOKIE; 8493 } else if ((host_stats != TXRX_HOST_STATS_INVALID) && 8494 (host_stats <= TXRX_HOST_STATS_MAX)) { 8495 req.cookie_val = DBG_STATS_COOKIE_DEFAULT; 8496 soc->sysfs_config->process_id = qdf_get_current_pid(); 8497 soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED; 8498 } 8499 8500 dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf); 8501 8502 dp_txrx_stats_request(soc_hdl, mac_id, &req); 8503 soc->sysfs_config->process_id = 0; 8504 soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED; 8505 8506 dp_sysfs_update_config_buf_params(soc, 0, 0, NULL); 8507 8508 qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock); 8509 return QDF_STATUS_SUCCESS; 8510 } 8511 8512 static 8513 QDF_STATUS 
dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl, 8514 uint32_t stat_type, uint32_t mac_id) 8515 { 8516 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 8517 8518 if (!soc_hdl) { 8519 dp_cdp_err("%pK: soc is NULL", soc); 8520 return QDF_STATUS_E_INVAL; 8521 } 8522 8523 qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock); 8524 8525 soc->sysfs_config->stat_type_requested = stat_type; 8526 soc->sysfs_config->mac_id = mac_id; 8527 8528 qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock); 8529 8530 return QDF_STATUS_SUCCESS; 8531 } 8532 8533 static 8534 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl) 8535 { 8536 struct dp_soc *soc; 8537 QDF_STATUS status; 8538 8539 if (!soc_hdl) { 8540 dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl); 8541 return QDF_STATUS_E_INVAL; 8542 } 8543 8544 soc = soc_hdl; 8545 8546 soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config)); 8547 if (!soc->sysfs_config) { 8548 dp_cdp_err("failed to allocate memory for sysfs_config no memory"); 8549 return QDF_STATUS_E_NOMEM; 8550 } 8551 8552 status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done); 8553 /* create event for fw stats request from sysfs */ 8554 if (status != QDF_STATUS_SUCCESS) { 8555 dp_cdp_err("failed to create event sysfs_txrx_fw_request_done"); 8556 qdf_mem_free(soc->sysfs_config); 8557 soc->sysfs_config = NULL; 8558 return QDF_STATUS_E_FAILURE; 8559 } 8560 8561 qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock); 8562 qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock); 8563 qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer); 8564 8565 return QDF_STATUS_SUCCESS; 8566 } 8567 8568 static 8569 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl) 8570 { 8571 struct dp_soc *soc; 8572 QDF_STATUS status; 8573 8574 if (!soc_hdl) { 8575 dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl); 8576 return QDF_STATUS_E_INVAL; 8577 } 8578 8579 soc = soc_hdl; 8580 if (!soc->sysfs_config) { 8581 dp_cdp_err("soc->sysfs_config is 
NULL"); 8582 return QDF_STATUS_E_FAILURE; 8583 } 8584 8585 status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done); 8586 if (status != QDF_STATUS_SUCCESS) 8587 dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done"); 8588 8589 qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock); 8590 qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock); 8591 qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer); 8592 8593 qdf_mem_free(soc->sysfs_config); 8594 8595 return QDF_STATUS_SUCCESS; 8596 } 8597 8598 #else /* WLAN_SYSFS_DP_STATS */ 8599 8600 static 8601 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl) 8602 { 8603 return QDF_STATUS_SUCCESS; 8604 } 8605 8606 static 8607 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl) 8608 { 8609 return QDF_STATUS_SUCCESS; 8610 } 8611 #endif /* WLAN_SYSFS_DP_STATS */ 8612 8613 /** 8614 * dp_txrx_clear_dump_stats() - clear dumpStats 8615 * @soc_hdl: soc handle 8616 * @pdev_id: pdev ID 8617 * @value: stats option 8618 * 8619 * Return: 0 - Success, non-zero - failure 8620 */ 8621 static 8622 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 8623 uint8_t value) 8624 { 8625 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 8626 QDF_STATUS status = QDF_STATUS_SUCCESS; 8627 8628 if (!soc) { 8629 dp_err("soc is NULL"); 8630 return QDF_STATUS_E_INVAL; 8631 } 8632 8633 switch (value) { 8634 case CDP_TXRX_TSO_STATS: 8635 dp_txrx_clear_tso_stats(soc); 8636 break; 8637 8638 case CDP_DP_TX_HW_LATENCY_STATS: 8639 dp_pdev_clear_tx_delay_stats(soc); 8640 break; 8641 8642 default: 8643 status = QDF_STATUS_E_INVAL; 8644 break; 8645 } 8646 8647 return status; 8648 } 8649 8650 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 8651 /** 8652 * dp_update_flow_control_parameters() - API to store datapath 8653 * config parameters 8654 * @soc: soc handle 8655 * @params: ini parameter handle 8656 * 8657 * Return: void 8658 */ 8659 static inline 8660 void 
dp_update_flow_control_parameters(struct dp_soc *soc,
				  struct cdp_config_params *params)
{
	/* Propagate ini-provided flow-control thresholds into wlan_cfg */
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
					params->tx_flow_stop_queue_threshold;
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
					params->tx_flow_start_queue_offset;
}
#else
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				       struct cdp_config_params *params)
{
}
#endif

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
#define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024

/* Max packet limit for RX REAP Loop (dp_rx_process) */
#define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024

/**
 * dp_update_rx_soft_irq_limit_params() - store per-loop packet limits and
 * derive whether end-of-loop "more data" checks should be enabled
 * @soc: DP soc handle
 * @params: ini parameter handle
 */
static
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
				params->tx_comp_loop_pkt_limit;

	/* EOL data check only pays off when the loop can stop early, i.e.
	 * when the configured limit is below the hard maximum.
	 */
	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
	else
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;

	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
				params->rx_reap_loop_pkt_limit;

	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
	else
		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;

	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
				params->rx_hp_oos_update_limit;

	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
}

#else
static inline 8716 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc, 8717 struct cdp_config_params *params) 8718 { } 8719 8720 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ 8721 8722 /** 8723 * dp_update_config_parameters() - API to store datapath 8724 * config parameters 8725 * @psoc: soc handle 8726 * @params: ini parameter handle 8727 * 8728 * Return: status 8729 */ 8730 static 8731 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc, 8732 struct cdp_config_params *params) 8733 { 8734 struct dp_soc *soc = (struct dp_soc *)psoc; 8735 8736 if (!(soc)) { 8737 dp_cdp_err("%pK: Invalid handle", soc); 8738 return QDF_STATUS_E_INVAL; 8739 } 8740 8741 soc->wlan_cfg_ctx->tso_enabled = params->tso_enable; 8742 soc->wlan_cfg_ctx->lro_enabled = params->lro_enable; 8743 soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable; 8744 soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload = 8745 params->p2p_tcp_udp_checksumoffload; 8746 soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload = 8747 params->nan_tcp_udp_checksumoffload; 8748 soc->wlan_cfg_ctx->tcp_udp_checksumoffload = 8749 params->tcp_udp_checksumoffload; 8750 soc->wlan_cfg_ctx->napi_enabled = params->napi_enable; 8751 soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable; 8752 soc->wlan_cfg_ctx->gro_enabled = params->gro_enable; 8753 8754 dp_update_rx_soft_irq_limit_params(soc, params); 8755 dp_update_flow_control_parameters(soc, params); 8756 8757 return QDF_STATUS_SUCCESS; 8758 } 8759 8760 static struct cdp_wds_ops dp_ops_wds = { 8761 .vdev_set_wds = dp_vdev_set_wds, 8762 #ifdef WDS_VENDOR_EXTENSION 8763 .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy, 8764 .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update, 8765 #endif 8766 }; 8767 8768 /** 8769 * dp_txrx_data_tx_cb_set() - set the callback for non standard tx 8770 * @soc_hdl: datapath soc handle 8771 * @vdev_id: virtual interface id 8772 * @callback: callback function 8773 * @ctxt: callback context 8774 * 8775 */ 8776 static void 8777 
dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return;

	vdev->tx_non_std_data_callback.func = callback;
	vdev->tx_non_std_data_callback.ctxt = ctxt;

	/* Drop the reference taken by dp_vdev_get_ref_by_id() above */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
 * @soc: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 *
 * Return: opaque pointer to dp txrx handle
 */
static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	if (qdf_unlikely(!pdev))
		return NULL;

	return pdev->dp_txrx_handle;
}

/**
 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
 * @soc: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static void
dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
			   void *dp_txrx_hdl)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return;

	pdev->dp_txrx_handle = dp_txrx_hdl;
}

/**
 * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 *
 * Return: opaque pointer to dp txrx handle
 */
static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
				       uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	void *dp_ext_handle;

	if (!vdev)
		return NULL;
	dp_ext_handle = vdev->vdev_dp_ext_handle;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return dp_ext_handle;
}

/**
 * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 * @size: size of advance dp handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
			  uint16_t size)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	void *dp_ext_handle;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	dp_ext_handle = qdf_mem_malloc(size);

	if (!dp_ext_handle) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	/* NOTE(review): an existing vdev_dp_ext_handle is overwritten
	 * without being freed here -- confirm callers never set it twice.
	 */
	vdev->vdev_dp_ext_handle = dp_ext_handle;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
 *                            connection for this vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev ID
 * @action: Add/Delete action
 *
 * Return: QDF_STATUS.
 */
static QDF_STATUS
dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       enum vdev_ll_conn_actions action)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_err("LL connection action for invalid vdev %d", vdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	switch (action) {
	case CDP_VDEV_LL_CONN_ADD:
		vdev->num_latency_critical_conn++;
		break;

	case CDP_VDEV_LL_CONN_DEL:
		vdev->num_latency_critical_conn--;
		break;

	default:
		/* Unknown action is logged but still returns success */
		dp_err("LL connection action invalid %d", action);
		break;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
 * @soc_hdl: CDP Soc handle
 * @value: Enable/Disable value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
					 uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	if (!soc->swlm.is_init) {
		dp_err("SWLM is not initialized");
		return QDF_STATUS_E_FAILURE;
	}

	soc->swlm.is_enabled = !!value;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
 * @soc_hdl: CDP Soc handle
 *
 * Return: SWLM enabled state (non-zero when enabled)
 */
static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	return soc->swlm.is_enabled;
}
#endif

/**
 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
 * @soc_handle: datapath soc handle
 *
 * Return: opaque pointer to external dp (non-core DP)
 */
static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return soc->external_txrx_handle;
}

/**
 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
 * @soc_handle: datapath soc handle
 * @txrx_handle: opaque pointer to external dp (non-core DP)
 *
 * Return: void
 */
static void
dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->external_txrx_handle = txrx_handle;
}

/**
 * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the datapath pdev handle
 * @lmac_id: lmac id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_soc_map_pdev_to_lmac
	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	 uint32_t lmac_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	/* Record the lmac index for this host pdev */
	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
				pdev_id,
				lmac_id);

	/*Set host PDEV ID for lmac_id*/
	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
			      pdev_id,
			      lmac_id);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the datapath pdev handle
 * @lmac_id: lmac id
 *
 * In the event of a dynamic mode change, update the pdev to lmac mapping
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_soc_handle_pdev_mode_change
	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	 uint32_t lmac_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev = NULL;
	uint8_t hw_pdev_id, mac_id;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
								  pdev_id);
	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_FAILURE;

	pdev->lmac_id = lmac_id;
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);

	/*Set host PDEV ID for lmac_id*/
	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
			      pdev->pdev_id,
			      lmac_id);

	hw_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(soc,
						       pdev->pdev_id);

	/*
	 * When NSS offload is enabled, send pdev_id->lmac_id
	 * and pdev_id to hw_pdev_id to NSS FW
	 */
	if (nss_config) {
		mac_id = pdev->lmac_id;
		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
			soc->cdp_soc.ol_ops->
				pdev_update_lmac_n_target_pdev_id(
				soc->ctrl_psoc,
				&pdev_id, &mac_id, &hw_pdev_id);
	}

	/* Propagate the new mapping to every vdev on this pdev */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
					       hw_pdev_id);
		vdev->lmac_id = pdev->lmac_id;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_set_pdev_status_down() - set pdev down/up status
 * @soc: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @is_pdev_down: pdev down/up status
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
			    bool is_pdev_down)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	pdev->is_pdev_down = is_pdev_down;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_cfg_capabilities() - get dp capabilities
 * @soc_handle: datapath soc handle
 * @dp_caps: enum for dp capabilities
 *
 * Return: bool to determine if dp caps is enabled
 */
static bool
dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
			enum cdp_capabilities dp_caps)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
}

#ifdef FEATURE_AST
/* Tear down a peer: mark it for logical delete and flush its AST entries */
static QDF_STATUS
dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer =
		dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				       DP_MOD_ID_CDP);

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);

	/* AST entries must be deleted under the soc AST lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_send_wds_disconnect(soc, peer);
	dp_peer_delete_ast_entries(soc, peer);
	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif

#ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
 * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
 * @soc: cdp_soc handle
 * @pdev_id: id of cdp_pdev handle
 * @protocol_type: protocol type for which stats should be
 *                 displayed
 *
 * Return: none
 */
static inline void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
				   uint16_t protocol_type)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should be
 * applied to the desired protocol type packets
 * @soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates what protocol types
 * are enabled for tagging. zero indicates disable feature, non-zero indicates
 * enable feature
 * @protocol_type: new protocol type for which the tag is being added
 * @tag: user configured tag for the new protocol
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_update_pdev_rx_protocol_tag(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint32_t enable_rx_protocol_tag,
			       uint16_t protocol_type,
			       uint16_t tag)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_set_rx_flow_tag() - add/delete a flow
 * @cdp_soc: CDP soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow tuple that is to be added to/deleted from flow search table
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
		   struct cdp_rx_flow_info *flow_info)
{
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for
 * given flow 5-tuple
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow 5-tuple for which stats should be displayed
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
			  struct cdp_rx_flow_info *flow_info)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

/* Allocate peer/AST lookup tables once target capabilities are known */
static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
					   uint32_t max_peers,
					   uint32_t max_ast_index,
					   uint8_t peer_map_unmap_versions)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status;

	soc->max_peers = max_peers;

	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);

	status = soc->arch_ops.txrx_peer_map_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failure in allocating peer tables");
		return QDF_STATUS_E_FAILURE;
	}

	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
		max_peers, soc->max_peer_id, max_ast_index);

	status = dp_peer_find_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("Peer find attach failure");
		goto fail;
	}

	soc->peer_map_unmap_versions = peer_map_unmap_versions;
	soc->peer_map_attach_success = TRUE;

	return QDF_STATUS_SUCCESS;
fail:
	/* Unwind the arch-specific peer map allocation on failure */
	soc->arch_ops.txrx_peer_map_detach(soc);

	return status;
}

/* Store a soc level parameter advertised by the target/control path */
static QDF_STATUS dp_soc_set_param(struct cdp_soc_t *soc_hdl,
				   enum cdp_soc_param_t param,
				   uint32_t value)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	switch (param) {
	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
		soc->num_msdu_exception_desc = value;
		dp_info("num_msdu exception_desc %u",
			value);
		break;
	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
		/* Honor FW capability only if ini also enables CMEM FST */
		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
			soc->fst_in_cmem = !!value;
		dp_info("FW supports CMEM FSE %u", value);
		break;
	case DP_SOC_PARAM_MAX_AST_AGEOUT:
		soc->max_ast_ageout_count = value;
		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
		break;
	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
		soc->eapol_over_control_port = value;
		dp_info("Eapol over control_port:%d",
			soc->eapol_over_control_port);
		break;
	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
		soc->multi_peer_grp_cmd_supported = value;
		dp_info("Multi Peer group command support:%d",
			soc->multi_peer_grp_cmd_supported);
		break;
	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
		soc->features.rssi_dbm_conv_support = value;
		dp_info("Rssi dbm conversion support:%u",
			soc->features.rssi_dbm_conv_support);
		break;
	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
		soc->features.umac_hw_reset_support = value;
		dp_info("UMAC HW reset support :%u",
			soc->features.umac_hw_reset_support);
		break;
	default:
		dp_info("not handled param %d ", param);
		break;
	}

	return QDF_STATUS_SUCCESS;
}

/* Cache the control-path rate stats context in the soc */
static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
				      void *stats_ctx)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_peer_flush_rate_stats_req() - Flush peer rate stats
 * @soc: Datapath SOC handle
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
static void
dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
			     void *arg)
{
	/* Skip self peer */
	if (!qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
			 QDF_MAC_ADDR_SIZE))
		return;

	dp_wdi_event_handler(
		WDI_EVENT_FLUSH_RATE_STATS_REQ,
		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
		peer->peer_id,
		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
}

/**
 * dp_flush_rate_stats_req() - Flush peer rate stats in pdev
 * @soc_hdl: Datapath SOC handle
 * @pdev_id: pdev_id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* Issue a flush request for every (non-self) peer on this pdev */
	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
			     DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_get_peer_extd_rate_link_stats() - function to get peer
 * extended rate and link stats
 * @soc_hdl: dp soc handler
 * @mac_addr: mac address of peer
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
{
	uint8_t i;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct cdp_peer_info peer_info = { 0 };

	if (!mac_addr) {
		dp_err("NULL peer mac addr\n");
		return QDF_STATUS_E_FAILURE;
	}

	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
				 CDP_WILD_PEER_TYPE);

	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
	if (!peer) {
		dp_err("Invalid peer\n");
		return QDF_STATUS_E_FAILURE;
	}

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* MLD peer: fan the flush request out to every link peer */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			dp_wdi_event_handler(WDI_EVENT_FLUSH_RATE_STATS_REQ,
					     link_peer_soc,
					     dp_monitor_peer_get_peerstats_ctx
					     (link_peer_soc, link_peer),
					     link_peer->peer_id,
					     WDI_NO_VAL,
					     link_peer->vdev->pdev->pdev_id);
		}
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_wdi_event_handler(
				WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
				dp_monitor_peer_get_peerstats_ctx(soc, peer),
				peer->peer_id,
				WDI_NO_VAL, peer->vdev->pdev->pdev_id);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#else
/* Non-MLO build: single lookup over all vdevs, single flush event */
static QDF_STATUS
dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
{
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	if (!mac_addr) {
		dp_err("NULL peer mac addr\n");
		return QDF_STATUS_E_FAILURE;
	}

	peer = dp_peer_find_hash_find(soc, mac_addr, 0,
				      DP_VDEV_ALL, DP_MOD_ID_CDP);
	if (!peer) {
		dp_err("Invalid peer\n");
		return QDF_STATUS_E_FAILURE;
	}

	dp_wdi_event_handler(
			WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
			dp_monitor_peer_get_peerstats_ctx(soc, peer),
			peer->peer_id,
			WDI_NO_VAL, peer->vdev->pdev->pdev_id);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#endif
#else
static inline QDF_STATUS
dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/* Return the monitor peerstats context for a peer (NULL for MLD peers) */
static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id,
				       uint8_t *mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer;
	void *peerstats_ctx = NULL;

	if (mac_addr) {
		peer = dp_peer_find_hash_find(soc, mac_addr,
					      0, vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer)
			return NULL;

		if (!IS_MLO_DP_MLD_PEER(peer))
			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
									  peer);

		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return peerstats_ctx;
}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/* Post a peer rate stats flush event to WDI subscribers */
static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
					   uint8_t pdev_id,
					   void *buf)
{
	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
			     (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev_id);
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/* Return the cached control-path rate stats context */
static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return soc->rate_stats_ctx;
}

/**
 * dp_get_cfg() - get dp cfg
 * @soc: cdp soc handle
 * @cfg: cfg enum
 *
 * Return: cfg value (0 for unknown cfg enums)
 */
static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc;
	uint32_t value = 0;

	switch (cfg) {
	case cfg_dp_enable_data_stall:
		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
		break;
	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
		break;
	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
		break;
	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
		break;
	case cfg_dp_disable_legacy_mode_csum_offload:
		value = dpsoc->wlan_cfg_ctx->
					legacy_mode_checksumoffload_disable;
		break;
	case cfg_dp_tso_enable:
		value = dpsoc->wlan_cfg_ctx->tso_enabled;
		break;
	case cfg_dp_lro_enable:
		value = dpsoc->wlan_cfg_ctx->lro_enabled;
		break;
	case cfg_dp_gro_enable:
		value = dpsoc->wlan_cfg_ctx->gro_enabled;
		break;
	case cfg_dp_tc_based_dyn_gro_enable:
		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
		break;
	case cfg_dp_tc_ingress_prio:
		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
		break;
	case cfg_dp_sg_enable:
		value = dpsoc->wlan_cfg_ctx->sg_enabled;
		break;
	case cfg_dp_tx_flow_start_queue_offset:
		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
		break;
	case cfg_dp_tx_flow_stop_queue_threshold:
		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
		break;
	case cfg_dp_disable_intra_bss_fwd:
		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
		break;
	case cfg_dp_pktlog_buffer_size:
		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
		break;
	case cfg_dp_wow_check_rx_pending:
		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
		break;
	default:
		value = 0;
	}

	return value;
}

#ifdef PEER_FLOW_CONTROL
/**
 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
 * @soc_handle: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @param: ol ath params
 * @value: value of the flag
 * @buff: Buffer to be passed
 *
 * Implemented this function same as legacy function. In legacy code, single
 * function is used to display stats and update pdev params.
 *
 * Return: 0 for success. nonzero for failure.
 */
static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
					       uint8_t pdev_id,
					       enum _dp_param_t param,
					       uint32_t value, void *buff)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (qdf_unlikely(!pdev))
		return 1;

	soc = pdev->soc;
	if (!soc)
		return 1;

	switch (param) {
#ifdef QCA_ENH_V3_STATS_SUPPORT
	case DP_PARAM_VIDEO_DELAY_STATS_FC:
		if (value)
			pdev->delay_stats_flag = true;
		else
			pdev->delay_stats_flag = false;
		break;
	case DP_PARAM_VIDEO_STATS_FC:
		qdf_print("------- TID Stats ------\n");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------\n");
		dp_pdev_print_delay_stats(pdev);
		qdf_print("------ Rx Error Stats ------\n");
		dp_pdev_print_rx_error_stats(pdev);
		break;
#endif
	case DP_PARAM_TOTAL_Q_SIZE:
		{
			uint32_t tx_min, tx_max;

			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

			/* buff == NULL means "set", non-NULL means "get" */
			if (!buff) {
				if ((value >= tx_min) && (value <= tx_max)) {
					pdev->num_tx_allowed = value;
				} else {
					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
						   soc, tx_min, tx_max);
					break;
				}
			} else {
				*(int *)buff = pdev->num_tx_allowed;
			}
		}
		break;
	default:
		dp_tx_info("%pK: not handled param %d ", soc, param);
		break;
	}

	return 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_reset_interrupt_ring_masks() - Reset rx interrupt masks
 * @soc: dp soc handle
 *
 * Return: void
 */
static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
{
	struct dp_intr_bkp *intr_bkp;
	struct dp_intr *intr_ctx;
	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	int i;

	intr_bkp =
	(struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) *
						    num_ctxt);

	/* Allocation failure is fatal in the middle of a UMAC reset */
	qdf_assert_always(intr_bkp);

	soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp;
	for (i = 0; i < num_ctxt; i++) {
		intr_ctx = &soc->intr_ctx[i];

		/* Save the current ring masks ... */
		intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask;
		intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask;
		intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask;
		intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask;
		intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask;
		intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask;
		intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask;
		intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask;
		intr_bkp->host2rxdma_mon_ring_mask =
					intr_ctx->host2rxdma_mon_ring_mask;
		intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask;

		/* ... then zero them so no further ring processing happens */
		intr_ctx->tx_ring_mask = 0;
		intr_ctx->rx_ring_mask = 0;
		intr_ctx->rx_mon_ring_mask = 0;
		intr_ctx->rx_err_ring_mask = 0;
		intr_ctx->rx_wbm_rel_ring_mask = 0;
		intr_ctx->reo_status_ring_mask = 0;
		intr_ctx->rxdma2host_ring_mask = 0;
		intr_ctx->host2rxdma_ring_mask = 0;
		intr_ctx->host2rxdma_mon_ring_mask = 0;
		intr_ctx->tx_mon_ring_mask = 0;

		intr_bkp++;
	}
}

/**
 * dp_restore_interrupt_ring_masks() - Restore rx interrupt masks
 * @soc: dp soc handle
 *
 * Return: void
 */
static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
{
	struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
	struct dp_intr_bkp *intr_bkp_base = intr_bkp;
	struct dp_intr *intr_ctx;
	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	int i;

	/* Nothing to restore if no backup was taken */
	if (!intr_bkp)
		return;

	for (i = 0; i < num_ctxt; i++) {
		intr_ctx = &soc->intr_ctx[i];

		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
		intr_ctx->host2rxdma_mon_ring_mask =
					intr_bkp->host2rxdma_mon_ring_mask;
		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;

		intr_bkp++;
	}

	qdf_mem_free(intr_bkp_base);
	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
}

/**
 * dp_resume_tx_hardstart() - Restore the old Tx hardstart functions
 * @soc: dp soc handle
 *
 * Return: void
 */
static void dp_resume_tx_hardstart(struct dp_soc *soc)
{
	struct dp_vdev *vdev;
	struct ol_txrx_hardtart_ctxt ctxt = {0};
	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
	int i;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			uint8_t vdev_id = vdev->vdev_id;

			/* Re-fetch each vdev's real tx handlers */
			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
								    vdev_id,
								    &ctxt);
		}
	}
}

/**
 * dp_pause_tx_hardstart() - Register Tx hardstart functions to drop packets
 * @soc: dp soc handle
 *
 * Return: void
 */
static void dp_pause_tx_hardstart(struct dp_soc *soc)
{
	struct dp_vdev *vdev;
	/* NOTE(review): ctxt is not zero-initialized here, unlike
	 * dp_resume_tx_hardstart() -- confirm every field consumed by
	 * dp_update_tx_hardstart() is assigned below.
	 */
	struct ol_txrx_hardtart_ctxt ctxt;
	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
	int i;

	ctxt.tx = &dp_tx_drop;
	ctxt.tx_fast = &dp_tx_drop;
	ctxt.tx_exception = &dp_tx_exc_drop;
9808 9809 for (i = 0; i < MAX_PDEV_CNT; i++) { 9810 struct dp_pdev *pdev = soc->pdev_list[i]; 9811 9812 if (!pdev) 9813 continue; 9814 9815 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 9816 uint8_t vdev_id = vdev->vdev_id; 9817 9818 soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc, 9819 vdev_id, 9820 &ctxt); 9821 } 9822 } 9823 } 9824 9825 /** 9826 * dp_unregister_notify_umac_pre_reset_fw_callback() - unregister notify_fw_cb 9827 * @soc: dp soc handle 9828 * 9829 * Return: void 9830 */ 9831 static inline 9832 void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc) 9833 { 9834 soc->notify_fw_callback = NULL; 9835 } 9836 9837 /** 9838 * dp_check_n_notify_umac_prereset_done() - Send pre reset done to firmware 9839 * @soc: dp soc handle 9840 * 9841 * Return: void 9842 */ 9843 static inline 9844 void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc) 9845 { 9846 /* Some Cpu(s) is processing the umac rings*/ 9847 if (soc->service_rings_running) 9848 return; 9849 9850 /* Notify the firmware that Umac pre reset is complete */ 9851 dp_umac_reset_notify_action_completion(soc, 9852 UMAC_RESET_ACTION_DO_PRE_RESET); 9853 9854 /* Unregister the callback */ 9855 dp_unregister_notify_umac_pre_reset_fw_callback(soc); 9856 } 9857 9858 /** 9859 * dp_register_notify_umac_pre_reset_fw_callback() - register notify_fw_cb 9860 * @soc: dp soc handle 9861 * 9862 * Return: void 9863 */ 9864 static inline 9865 void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc) 9866 { 9867 soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done; 9868 } 9869 9870 #ifdef DP_UMAC_HW_HARD_RESET 9871 /** 9872 * dp_set_umac_regs() - Reinitialize host umac registers 9873 * @soc: dp soc handle 9874 * 9875 * Return: void 9876 */ 9877 static void dp_set_umac_regs(struct dp_soc *soc) 9878 { 9879 int i; 9880 struct hal_reo_params reo_params; 9881 9882 qdf_mem_zero(&reo_params, sizeof(reo_params)); 9883 9884 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 
9885 if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0, 9886 &reo_params.remap1, 9887 &reo_params.remap2)) 9888 reo_params.rx_hash_enabled = true; 9889 else 9890 reo_params.rx_hash_enabled = false; 9891 } 9892 9893 reo_params.reo_qref = &soc->reo_qref; 9894 hal_reo_setup(soc->hal_soc, &reo_params, 0); 9895 9896 soc->arch_ops.dp_cc_reg_cfg_init(soc, true); 9897 9898 for (i = 0; i < PCP_TID_MAP_MAX; i++) 9899 hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i); 9900 9901 for (i = 0; i < MAX_PDEV_CNT; i++) { 9902 struct dp_vdev *vdev = NULL; 9903 struct dp_pdev *pdev = soc->pdev_list[i]; 9904 9905 if (!pdev) 9906 continue; 9907 9908 for (i = 0; i < soc->num_hw_dscp_tid_map; i++) 9909 hal_tx_set_dscp_tid_map(soc->hal_soc, 9910 pdev->dscp_tid_map[i], i); 9911 9912 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 9913 soc->arch_ops.dp_bank_reconfig(soc, vdev); 9914 soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc, 9915 vdev); 9916 } 9917 } 9918 } 9919 #else 9920 static void dp_set_umac_regs(struct dp_soc *soc) 9921 { 9922 } 9923 #endif 9924 9925 /** 9926 * dp_reinit_rings() - Reinitialize host managed rings 9927 * @soc: dp soc handle 9928 * 9929 * Return: QDF_STATUS 9930 */ 9931 static void dp_reinit_rings(struct dp_soc *soc) 9932 { 9933 unsigned long end; 9934 9935 dp_soc_srng_deinit(soc); 9936 dp_hw_link_desc_ring_deinit(soc); 9937 9938 /* Busy wait for 2 ms to make sure the rings are in idle state 9939 * before we enable them again 9940 */ 9941 end = jiffies + msecs_to_jiffies(2); 9942 while (time_before(jiffies, end)) 9943 ; 9944 9945 dp_hw_link_desc_ring_init(soc); 9946 dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID); 9947 dp_soc_srng_init(soc); 9948 } 9949 9950 /** 9951 * dp_umac_reset_action_trigger_recovery() - Handle FW Umac recovery trigger 9952 * @soc: dp soc handle 9953 * 9954 * Return: QDF_STATUS 9955 */ 9956 static QDF_STATUS dp_umac_reset_action_trigger_recovery(struct dp_soc *soc) 9957 { 9958 enum umac_reset_action 
action = UMAC_RESET_ACTION_DO_TRIGGER_RECOVERY; 9959 9960 return dp_umac_reset_notify_action_completion(soc, action); 9961 } 9962 9963 #ifdef WLAN_SUPPORT_PPEDS 9964 /** 9965 * dp_umac_reset_service_handle_n_notify_done() 9966 * Handle Umac pre reset for direct switch 9967 * @soc: dp soc handle 9968 * 9969 * Return: QDF_STATUS 9970 */ 9971 static QDF_STATUS dp_umac_reset_service_handle_n_notify_done(struct dp_soc *soc) 9972 { 9973 if (!soc->arch_ops.txrx_soc_ppeds_enabled_check || 9974 !soc->arch_ops.txrx_soc_ppeds_service_status_update || 9975 !soc->arch_ops.txrx_soc_ppeds_interrupt_stop) 9976 goto non_ppeds; 9977 9978 /* 9979 * Check if ppeds is enabled on SoC. 9980 */ 9981 if (!soc->arch_ops.txrx_soc_ppeds_enabled_check(soc)) 9982 goto non_ppeds; 9983 9984 /* 9985 * Start the UMAC pre reset done service. 9986 */ 9987 soc->arch_ops.txrx_soc_ppeds_service_status_update(soc, true); 9988 9989 dp_register_notify_umac_pre_reset_fw_callback(soc); 9990 9991 soc->arch_ops.txrx_soc_ppeds_interrupt_stop(soc); 9992 9993 dp_soc_ppeds_stop((struct cdp_soc_t *)soc); 9994 9995 /* 9996 * UMAC pre reset service complete 9997 */ 9998 soc->arch_ops.txrx_soc_ppeds_service_status_update(soc, false); 9999 10000 soc->umac_reset_ctx.nbuf_list = NULL; 10001 return QDF_STATUS_SUCCESS; 10002 10003 non_ppeds: 10004 dp_register_notify_umac_pre_reset_fw_callback(soc); 10005 dp_check_n_notify_umac_prereset_done(soc); 10006 soc->umac_reset_ctx.nbuf_list = NULL; 10007 return QDF_STATUS_SUCCESS; 10008 } 10009 10010 static inline void dp_umac_reset_ppeds_txdesc_pool_reset(struct dp_soc *soc, 10011 qdf_nbuf_t *nbuf_list) 10012 { 10013 if (!soc->arch_ops.txrx_soc_ppeds_enabled_check || 10014 !soc->arch_ops.txrx_soc_ppeds_txdesc_pool_reset) 10015 return; 10016 10017 /* 10018 * Deinit of PPEDS Tx desc rings. 
10019 */ 10020 if (soc->arch_ops.txrx_soc_ppeds_enabled_check(soc)) 10021 soc->arch_ops.txrx_soc_ppeds_txdesc_pool_reset(soc, nbuf_list); 10022 } 10023 10024 static inline void dp_umac_reset_ppeds_start(struct dp_soc *soc) 10025 { 10026 if (!soc->arch_ops.txrx_soc_ppeds_enabled_check || 10027 !soc->arch_ops.txrx_soc_ppeds_start || 10028 !soc->arch_ops.txrx_soc_ppeds_interrupt_start) 10029 return; 10030 10031 /* 10032 * Start PPEDS node and enable interrupt. 10033 */ 10034 if (soc->arch_ops.txrx_soc_ppeds_enabled_check(soc)) { 10035 soc->arch_ops.txrx_soc_ppeds_start(soc); 10036 soc->arch_ops.txrx_soc_ppeds_interrupt_start(soc); 10037 } 10038 } 10039 #else 10040 static QDF_STATUS dp_umac_reset_service_handle_n_notify_done(struct dp_soc *soc) 10041 { 10042 dp_register_notify_umac_pre_reset_fw_callback(soc); 10043 dp_check_n_notify_umac_prereset_done(soc); 10044 soc->umac_reset_ctx.nbuf_list = NULL; 10045 return QDF_STATUS_SUCCESS; 10046 } 10047 10048 static inline void dp_umac_reset_ppeds_txdesc_pool_reset(struct dp_soc *soc, 10049 qdf_nbuf_t *nbuf_list) 10050 { 10051 } 10052 10053 static inline void dp_umac_reset_ppeds_start(struct dp_soc *soc) 10054 { 10055 } 10056 #endif 10057 10058 /** 10059 * dp_umac_reset_handle_pre_reset() - Handle Umac prereset interrupt from FW 10060 * @soc: dp soc handle 10061 * 10062 * Return: QDF_STATUS 10063 */ 10064 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc) 10065 { 10066 dp_reset_interrupt_ring_masks(soc); 10067 10068 dp_pause_tx_hardstart(soc); 10069 dp_pause_reo_send_cmd(soc); 10070 dp_umac_reset_service_handle_n_notify_done(soc); 10071 return QDF_STATUS_SUCCESS; 10072 } 10073 10074 /** 10075 * dp_umac_reset_handle_post_reset() - Handle Umac postreset interrupt from FW 10076 * @soc: dp soc handle 10077 * 10078 * Return: QDF_STATUS 10079 */ 10080 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc) 10081 { 10082 if (!soc->umac_reset_ctx.skel_enable) { 10083 qdf_nbuf_t *nbuf_list = 
&soc->umac_reset_ctx.nbuf_list; 10084 10085 dp_set_umac_regs(soc); 10086 10087 dp_reinit_rings(soc); 10088 10089 dp_rx_desc_reuse(soc, nbuf_list); 10090 10091 dp_cleanup_reo_cmd_module(soc); 10092 10093 dp_umac_reset_ppeds_txdesc_pool_reset(soc, nbuf_list); 10094 10095 dp_tx_desc_pool_cleanup(soc, nbuf_list); 10096 10097 dp_reset_tid_q_setup(soc); 10098 } 10099 10100 return dp_umac_reset_notify_action_completion(soc, 10101 UMAC_RESET_ACTION_DO_POST_RESET_START); 10102 } 10103 10104 /** 10105 * dp_umac_reset_handle_post_reset_complete() - Handle Umac postreset_complete 10106 * interrupt from FW 10107 * @soc: dp soc handle 10108 * 10109 * Return: QDF_STATUS 10110 */ 10111 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc) 10112 { 10113 QDF_STATUS status; 10114 qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list; 10115 10116 soc->umac_reset_ctx.nbuf_list = NULL; 10117 10118 dp_resume_reo_send_cmd(soc); 10119 10120 dp_umac_reset_ppeds_start(soc); 10121 10122 dp_restore_interrupt_ring_masks(soc); 10123 10124 dp_resume_tx_hardstart(soc); 10125 10126 status = dp_umac_reset_notify_action_completion(soc, 10127 UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE); 10128 10129 while (nbuf_list) { 10130 qdf_nbuf_t nbuf = nbuf_list->next; 10131 10132 qdf_nbuf_free(nbuf_list); 10133 nbuf_list = nbuf; 10134 } 10135 10136 dp_umac_reset_info("Umac reset done on soc %pK\n trigger start : %u us " 10137 "trigger done : %u us prereset : %u us\n" 10138 "postreset : %u us \n postreset complete: %u us \n", 10139 soc, 10140 soc->umac_reset_ctx.ts.trigger_done - 10141 soc->umac_reset_ctx.ts.trigger_start, 10142 soc->umac_reset_ctx.ts.pre_reset_done - 10143 soc->umac_reset_ctx.ts.pre_reset_start, 10144 soc->umac_reset_ctx.ts.post_reset_done - 10145 soc->umac_reset_ctx.ts.post_reset_start, 10146 soc->umac_reset_ctx.ts.post_reset_complete_done - 10147 soc->umac_reset_ctx.ts.post_reset_complete_start); 10148 10149 return status; 10150 } 10151 #endif 10152 #ifdef 
WLAN_FEATURE_PKT_CAPTURE_V2
/* dp_set_pkt_capture_mode() - store the requested packet capture mode in
 * the soc cfg context; consumed elsewhere by the capture path.
 */
static void
dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->wlan_cfg_ctx->pkt_capture_mode = val;
}
#endif

#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_enable_disable_vdev_tx_delay_stats() - Start/Stop tx delay stats capture
 * @soc_hdl: DP soc handle
 * @vdev_id: vdev id
 * @value: value (non-zero enables capture on the vdev)
 *
 * Return: None
 */
static void
dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id,
				      uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;

	/* Take a reference so the vdev cannot be deleted underneath us */
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return;

	vdev->hw_tx_delay_stats_enabled = value;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
 * @soc_hdl: DP soc handle
 * @vdev_id: vdev id
 *
 * Return: 1 if enabled, 0 if disabled (or if the vdev id is invalid)
 */
static uint8_t
dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	uint8_t ret_val = 0;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return ret_val;

	ret_val = vdev->hw_tx_delay_stats_enabled;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return ret_val;
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/* dp_recovery_vdev_flush_peers() - flush peers on a vdev during recovery;
 * when @mlo_peers_only is true only MLO peers are flushed.
 */
static void
dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
			     uint8_t vdev_id,
			     bool mlo_peers_only)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return;

	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
#endif
#ifdef QCA_GET_TSF_VIA_REG
/**
 * dp_get_tsf_time() - get tsf time
 * @soc_hdl: Datapath soc handle
 * @tsf_id: TSF identifier
 * @mac_id: mac_id
 * @tsf: pointer to update tsf value
 * @tsf_sync_soc_time: pointer to update tsf sync time
 *
 * Return: None.
 */
static inline void
dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
{
	hal_get_tsf_time(((struct dp_soc *)soc_hdl)->hal_soc, tsf_id, mac_id,
			 tsf, tsf_sync_soc_time);
}
#else
/* Stub: TSF-via-register read not supported; outputs are left untouched */
static inline void
dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
{
}
#endif

/**
 * dp_get_tsf2_scratch_reg() - get tsf2 offset from the scratch register
 * @soc_hdl: Datapath soc handle
 * @mac_id: mac_id
 * @value: pointer to update tsf2 offset value
 *
 * Return: None.
 */
static inline void
dp_get_tsf2_scratch_reg(struct cdp_soc_t *soc_hdl, uint8_t mac_id,
			uint64_t *value)
{
	hal_get_tsf2_offset(((struct dp_soc *)soc_hdl)->hal_soc, mac_id, value);
}

/**
 * dp_get_tqm_scratch_reg() - get tqm offset from the scratch register
 * @soc_hdl: Datapath soc handle
 * @value: pointer to update tqm offset value
 *
 * Return: None.
 */
static inline void
dp_get_tqm_scratch_reg(struct cdp_soc_t *soc_hdl, uint64_t *value)
{
	hal_get_tqm_offset(((struct dp_soc *)soc_hdl)->hal_soc, value);
}

/**
 * dp_set_tx_pause() - Pause or resume tx path
 * @soc_hdl: Datapath soc handle
 * @flag: set or clear is_tx_pause
 *
 * Return: None.
 */
static inline
void dp_set_tx_pause(struct cdp_soc_t *soc_hdl, bool flag)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	soc->is_tx_pause = flag;
}

#ifdef DP_TX_PACKET_INSPECT_FOR_ILP
/**
 * dp_evaluate_update_tx_ilp_config() - Evaluate and update DP TX
 *					ILP configuration
 * @soc_hdl: CDP SOC handle
 * @num_msdu_idx_map: Number of HTT msdu index to qtype map in array
 * @msdu_idx_map_arr: Pointer to HTT msdu index to qtype map array
 *
 * This function will check: (a) TX ILP INI configuration,
 * (b) the HTT_MSDUQ_INDEX_CUSTOM_PRIO_1 entry of the map equals
 * HTT_MSDU_QTYPE_USER_SPECIFIED (NOTE(review): the original comment said
 * "index 3 ... HTT_MSDU_QTYPE_LATENCY_TOLERANT", which does not match the
 * check actually performed below),
 * only if both (a) and (b) condition is met, then TX ILP feature is
 * considered to be enabled.
 *
 * Return: Final updated TX ILP enable result in dp_soc,
 *	   true is enabled, false is not
 */
static
bool dp_evaluate_update_tx_ilp_config(struct cdp_soc_t *soc_hdl,
				      uint8_t num_msdu_idx_map,
				      uint8_t *msdu_idx_map_arr)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	bool enable_tx_ilp = false;

	/**
	 * Check INI configuration firstly, if it's disabled,
	 * then keep feature disabled.
	 */
	if (!wlan_cfg_get_tx_ilp_inspect_config(soc->wlan_cfg_ctx)) {
		dp_info("TX ILP INI is disabled already");
		goto update_tx_ilp;
	}

	/* Check if the msdu index to qtype map table is valid */
	if (num_msdu_idx_map != HTT_MSDUQ_MAX_INDEX || !msdu_idx_map_arr) {
		dp_info("Invalid msdu_idx qtype map num: 0x%x, arr_addr %pK",
			num_msdu_idx_map, msdu_idx_map_arr);
		goto update_tx_ilp;
	}

	dp_info("msdu_idx_map_arr idx 0x%x value 0x%x",
		HTT_MSDUQ_INDEX_CUSTOM_PRIO_1,
		msdu_idx_map_arr[HTT_MSDUQ_INDEX_CUSTOM_PRIO_1]);

	/* ILP is enabled only when the custom-prio-1 queue carries the
	 * user-specified qtype.
	 */
	if (HTT_MSDU_QTYPE_USER_SPECIFIED ==
	    msdu_idx_map_arr[HTT_MSDUQ_INDEX_CUSTOM_PRIO_1])
		enable_tx_ilp = true;

update_tx_ilp:
	soc->tx_ilp_enable = enable_tx_ilp;
	dp_info("configure tx ilp enable %d", soc->tx_ilp_enable);

	return soc->tx_ilp_enable;
}
#endif

/* Common CDP ops dispatch table registered with the converged DP layer */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3_wrapper,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_HMWDS_ast_delete = dp_peer_HMWDS_ast_entry_del,
	.txrx_peer_delete = dp_peer_delete_wifi3,
#ifdef DP_RX_UDP_OVER_PEER_ROAM
	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
#endif
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.tx_send = dp_tx_send,
	.tx_send_exc = dp_tx_send_exception,
#endif
	.set_tx_pause = dp_set_tx_pause,
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
#endif
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.notify_asserted_soc = dp_soc_notify_asserted_soc,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.txrx_ppeds_stop = dp_soc_ppeds_stop,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
		dp_get_os_rx_handles_from_vdev_wifi3,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.set_pn_check = dp_set_pn_check_wifi3,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
#endif
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
	.get_peer_id = dp_get_peer_id,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
	.get_wds_ext_peer_osif_handle = dp_wds_ext_get_peer_osif_handle,
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
	.txrx_drain = dp_drain_txrx,
#endif
#if defined(FEATURE_RUNTIME_PM)
	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
#endif
#ifdef WLAN_SYSFS_DP_STATS
	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
#endif
	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
	.txrx_umac_reset_init = dp_soc_umac_reset_init,
	.txrx_get_tsf_time = dp_get_tsf_time,
	.txrx_get_tsf2_offset = dp_get_tsf2_scratch_reg,
	.txrx_get_tqm_offset = dp_get_tqm_scratch_reg,
};

/* Control-path CDP ops (peer auth, WDI events, pdev/vdev/peer params) */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_peer_get_authorize = dp_peer_get_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param_wrapper,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
#endif
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
		dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
	.txrx_set_delta_tsf = dp_set_delta_tsf,
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
	.txrx_get_uplink_delay = dp_get_uplink_delay,
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
#endif
	.txrx_peer_flush_frags = dp_peer_flush_frags,
};

/* Multicast-enhancement (ME) ops; empty unless host TX + IQUE are built */
static struct cdp_me_ops dp_ops_me = {
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
#endif
};

/* Host statistics ops table */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
#if defined(IPA_OFFLOAD) && defined(QCA_ENHANCED_STATS_SUPPORT)
	/* Deliberate duplicate designated initializers: when IPA offload is
	 * enabled these later entries override the three set above
	 * (C99 6.7.8p19 - the last initializer for a member wins).
	 */
	.txrx_get_peer_stats = dp_ipa_txrx_get_peer_stats,
	.txrx_get_vdev_stats  = dp_ipa_txrx_get_vdev_stats,
	.txrx_get_pdev_stats = dp_ipa_txrx_get_pdev_stats,
#endif
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
#endif /* WLAN_TX_PKT_CAPTURE_ENH */
#ifdef HW_TX_DELAY_STATS_ENABLE
	.enable_disable_vdev_tx_delay_stats =
				dp_enable_disable_vdev_tx_delay_stats,
	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
#endif
	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
#ifdef WLAN_CONFIG_TELEMETRY_AGENT
	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
	.txrx_pdev_deter_stats = dp_get_pdev_deter_stats,
	.txrx_peer_deter_stats = dp_get_peer_deter_stats,
	.txrx_update_pdev_chan_util_stats = dp_update_pdev_chan_util_stats,
#endif
	.txrx_get_peer_extd_rate_link_stats =
					dp_get_peer_extd_rate_link_stats,
	.get_pdev_obss_stats = dp_get_obss_stats,
	.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};

#ifdef PEER_FLOW_CONTROL
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
};
#endif

#ifdef WLAN_SUPPORT_MSCS
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
#endif

#ifdef WLAN_SUPPORT_MESH_LATENCY
static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
	.mesh_latency_update_peer_parameter =
		dp_mesh_latency_update_peer_parameter,
};
#endif

#ifdef WLAN_SUPPORT_SCS
static struct cdp_scs_ops dp_ops_scs = {
	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
};
#endif

#ifdef CONFIG_SAWF_DEF_QUEUES
static struct cdp_sawf_ops
dp_ops_sawf = { 10651 .sawf_def_queues_map_req = dp_sawf_def_queues_map_req, 10652 .sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req, 10653 .sawf_def_queues_get_map_report = 10654 dp_sawf_def_queues_get_map_report, 10655 #ifdef CONFIG_SAWF_STATS 10656 .sawf_get_peer_msduq_info = dp_sawf_get_peer_msduq_info, 10657 .txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats, 10658 .txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats, 10659 .sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req, 10660 .sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req, 10661 .txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params, 10662 .txrx_sawf_set_sla_params = dp_sawf_set_sla_params, 10663 .txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params, 10664 .telemetry_get_throughput_stats = dp_sawf_get_tx_stats, 10665 .telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats, 10666 .telemetry_get_drop_stats = dp_sawf_get_drop_stats, 10667 .peer_config_ul = dp_sawf_peer_config_ul, 10668 .swaf_peer_is_sla_configured = dp_swaf_peer_is_sla_configured, 10669 #endif 10670 }; 10671 #endif 10672 10673 #ifdef DP_TX_TRACKING 10674 10675 #define DP_TX_COMP_MAX_LATENCY_MS 60000 10676 /** 10677 * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt 10678 * @tx_desc: tx descriptor 10679 * 10680 * Calculate time latency for tx completion per pkt and trigger self recovery 10681 * when the delay is more than threshold value. 
10682 * 10683 * Return: True if delay is more than threshold 10684 */ 10685 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc) 10686 { 10687 uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick; 10688 qdf_ktime_t current_time = qdf_ktime_real_get(); 10689 qdf_ktime_t timestamp = tx_desc->timestamp; 10690 10691 if (dp_tx_pkt_tracepoints_enabled()) { 10692 if (!timestamp) 10693 return false; 10694 10695 time_latency = qdf_ktime_to_ms(current_time) - 10696 qdf_ktime_to_ms(timestamp); 10697 if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) { 10698 dp_err_rl("enqueued: %llu ms, current : %llu ms", 10699 timestamp, current_time); 10700 return true; 10701 } 10702 } else { 10703 if (!timestamp_tick) 10704 return false; 10705 10706 current_time = qdf_system_ticks(); 10707 time_latency = qdf_system_ticks_to_msecs(current_time - 10708 timestamp_tick); 10709 if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) { 10710 dp_err_rl("enqueued: %u ms, current : %u ms", 10711 qdf_system_ticks_to_msecs(timestamp_tick), 10712 qdf_system_ticks_to_msecs(current_time)); 10713 return true; 10714 } 10715 } 10716 10717 return false; 10718 } 10719 10720 void dp_find_missing_tx_comp(struct dp_soc *soc) 10721 { 10722 uint8_t i; 10723 uint32_t j; 10724 uint32_t num_desc, page_id, offset; 10725 uint16_t num_desc_per_page; 10726 struct dp_tx_desc_s *tx_desc = NULL; 10727 struct dp_tx_desc_pool_s *tx_desc_pool = NULL; 10728 10729 for (i = 0; i < MAX_TXDESC_POOLS; i++) { 10730 tx_desc_pool = &soc->tx_desc[i]; 10731 if (!(tx_desc_pool->pool_size) || 10732 IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) || 10733 !(tx_desc_pool->desc_pages.cacheable_pages)) 10734 continue; 10735 10736 num_desc = tx_desc_pool->pool_size; 10737 num_desc_per_page = 10738 tx_desc_pool->desc_pages.num_element_per_page; 10739 for (j = 0; j < num_desc; j++) { 10740 page_id = j / num_desc_per_page; 10741 offset = j % num_desc_per_page; 10742 10743 if (qdf_unlikely(!(tx_desc_pool-> 10744 
desc_pages.cacheable_pages))) 10745 break; 10746 10747 tx_desc = dp_tx_desc_find(soc, i, page_id, offset); 10748 if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) { 10749 continue; 10750 } else if (tx_desc->magic == 10751 DP_TX_MAGIC_PATTERN_INUSE) { 10752 if (dp_tx_comp_delay_check(tx_desc)) { 10753 dp_err_rl("Tx completion not rcvd for id: %u", 10754 tx_desc->id); 10755 if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) { 10756 tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH; 10757 dp_err_rl("Freed tx_desc %u", 10758 tx_desc->id); 10759 dp_tx_comp_free_buf(soc, 10760 tx_desc, 10761 false); 10762 dp_tx_desc_release(tx_desc, i); 10763 DP_STATS_INC(soc, 10764 tx.tx_comp_force_freed, 1); 10765 } 10766 } 10767 } else { 10768 dp_err_rl("tx desc %u corrupted, flags: 0x%x", 10769 tx_desc->id, tx_desc->flags); 10770 } 10771 } 10772 } 10773 } 10774 #else 10775 inline void dp_find_missing_tx_comp(struct dp_soc *soc) 10776 { 10777 } 10778 #endif 10779 10780 #ifdef FEATURE_RUNTIME_PM 10781 /** 10782 * dp_runtime_suspend() - ensure DP is ready to runtime suspend 10783 * @soc_hdl: Datapath soc handle 10784 * @pdev_id: id of data path pdev handle 10785 * 10786 * DP is ready to runtime suspend if there are no pending TX packets. 
10787 * 10788 * Return: QDF_STATUS 10789 */ 10790 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 10791 { 10792 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 10793 struct dp_pdev *pdev; 10794 int32_t tx_pending; 10795 10796 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 10797 if (!pdev) { 10798 dp_err("pdev is NULL"); 10799 return QDF_STATUS_E_INVAL; 10800 } 10801 10802 /* Abort if there are any pending TX packets */ 10803 tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)); 10804 if (tx_pending) { 10805 dp_info_rl("%pK: Abort suspend due to pending TX packets %d", 10806 soc, tx_pending); 10807 dp_find_missing_tx_comp(soc); 10808 /* perform a force flush if tx is pending */ 10809 soc->arch_ops.dp_update_ring_hptp(soc, true); 10810 qdf_atomic_set(&soc->tx_pending_rtpm, 0); 10811 10812 return QDF_STATUS_E_AGAIN; 10813 } 10814 10815 if (dp_runtime_get_refcount(soc)) { 10816 dp_init_info("refcount: %d", dp_runtime_get_refcount(soc)); 10817 10818 return QDF_STATUS_E_AGAIN; 10819 } 10820 10821 if (soc->intr_mode == DP_INTR_POLL) 10822 qdf_timer_stop(&soc->int_timer); 10823 10824 dp_rx_fst_update_pm_suspend_status(soc, true); 10825 10826 return QDF_STATUS_SUCCESS; 10827 } 10828 10829 #define DP_FLUSH_WAIT_CNT 10 10830 #define DP_RUNTIME_SUSPEND_WAIT_MS 10 10831 /** 10832 * dp_runtime_resume() - ensure DP is ready to runtime resume 10833 * @soc_hdl: Datapath soc handle 10834 * @pdev_id: id of data path pdev handle 10835 * 10836 * Resume DP for runtime PM. 10837 * 10838 * Return: QDF_STATUS 10839 */ 10840 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 10841 { 10842 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 10843 int suspend_wait = 0; 10844 10845 if (soc->intr_mode == DP_INTR_POLL) 10846 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); 10847 10848 /* 10849 * Wait until dp runtime refcount becomes zero or time out, then flush 10850 * pending tx for runtime suspend. 
10851 */ 10852 while (dp_runtime_get_refcount(soc) && 10853 suspend_wait < DP_FLUSH_WAIT_CNT) { 10854 qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS); 10855 suspend_wait++; 10856 } 10857 10858 soc->arch_ops.dp_update_ring_hptp(soc, false); 10859 qdf_atomic_set(&soc->tx_pending_rtpm, 0); 10860 10861 dp_rx_fst_update_pm_suspend_status(soc, false); 10862 10863 return QDF_STATUS_SUCCESS; 10864 } 10865 #endif /* FEATURE_RUNTIME_PM */ 10866 10867 /** 10868 * dp_tx_get_success_ack_stats() - get tx success completion count 10869 * @soc_hdl: Datapath soc handle 10870 * @vdev_id: vdev identifier 10871 * 10872 * Return: tx success ack count 10873 */ 10874 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl, 10875 uint8_t vdev_id) 10876 { 10877 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 10878 struct cdp_vdev_stats *vdev_stats = NULL; 10879 uint32_t tx_success; 10880 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 10881 DP_MOD_ID_CDP); 10882 10883 if (!vdev) { 10884 dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id); 10885 return 0; 10886 } 10887 10888 vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats)); 10889 if (!vdev_stats) { 10890 dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc); 10891 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 10892 return 0; 10893 } 10894 10895 dp_aggregate_vdev_stats(vdev, vdev_stats); 10896 10897 tx_success = vdev_stats->tx.tx_success.num; 10898 qdf_mem_free(vdev_stats); 10899 10900 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 10901 return tx_success; 10902 } 10903 10904 #ifdef WLAN_SUPPORT_DATA_STALL 10905 /** 10906 * dp_register_data_stall_detect_cb() - register data stall callback 10907 * @soc_hdl: Datapath soc handle 10908 * @pdev_id: id of data path pdev handle 10909 * @data_stall_detect_callback: data stall callback function 10910 * 10911 * Return: QDF_STATUS Enumeration 10912 */ 10913 static 10914 QDF_STATUS dp_register_data_stall_detect_cb( 10915 struct cdp_soc_t 
				*soc_hdl, uint8_t pdev_id,
			data_stall_detect_cb data_stall_detect_callback)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}

	pdev->data_stall_detect_callback = data_stall_detect_callback;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_deregister_data_stall_detect_cb() - de-register data stall callback
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @data_stall_detect_callback: data stall callback function (ignored; the
 *	currently registered callback is simply cleared)
 *
 * Return: QDF_STATUS Enumeration
 */
static
QDF_STATUS dp_deregister_data_stall_detect_cb(
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			data_stall_detect_cb data_stall_detect_callback)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}

	/* Clear the callback; future stall events become no-ops */
	pdev->data_stall_detect_callback = NULL;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_post_data_stall_event() - post data stall event
 * @soc_hdl: Datapath soc handle
 * @indicator: Module triggering data stall
 * @data_stall_type: data stall event type
 * @pdev_id: pdev id
 * @vdev_id_bitmap: vdev id bitmap
 * @recovery_type: data stall recovery type
 *
 * Return: None
 */
static void
dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
			      enum data_stall_log_event_indicator indicator,
			      enum data_stall_log_event_type data_stall_type,
			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
			      enum data_stall_log_recovery_type recovery_type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct data_stall_event_info data_stall_info;
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return;
	}

	/* A callback must have been registered via the register op above */
	if (!pdev->data_stall_detect_callback) {
		dp_err("data stall cb not registered!");
		return;
	}

	dp_info("data_stall_type: %x pdev_id: %d",
		data_stall_type, pdev_id);

	data_stall_info.indicator = indicator;
	data_stall_info.data_stall_type = data_stall_type;
	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
	data_stall_info.pdev_id = pdev_id;
	data_stall_info.recovery_type = recovery_type;

	pdev->data_stall_detect_callback(&data_stall_info);
}
#endif /* WLAN_SUPPORT_DATA_STALL */

#ifdef WLAN_FEATURE_STATS_EXT
/**
 * dp_txrx_ext_stats_request() - request dp txrx extended stats request
 * @soc_hdl: soc handle
 * @pdev_id: pdev id
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			  struct cdp_txrx_ext_stats *req)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int i = 0;
	int tcl_ring_full = 0;

	if (!pdev) {
		dp_err("pdev is null");
		return QDF_STATUS_E_INVAL;
	}

	dp_aggregate_pdev_stats(pdev);

	/* Sum ring-full events across all TCL data rings */
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];

	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
	req->tx_msdu_overflow = tcl_ring_full;
	/* Error rate at LMAC */
	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received +
				pdev->stats.err.fw_reported_rxdma_error;
	/* only count error source from RXDMA */
	req->rx_mpdu_error = pdev->stats.err.fw_reported_rxdma_error;

	/* Error rate at above the MAC */
	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
	req->rx_mpdu_missed = pdev->stats.err.reo_error;

	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
		"rx_mpdu_receive = %u, rx_mpdu_delivered = %u, "
		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
		req->tx_msdu_enqueue,
		req->tx_msdu_overflow,
		req->rx_mpdu_received,
		req->rx_mpdu_delivered,
		req->rx_mpdu_missed,
		req->rx_mpdu_error);

	return QDF_STATUS_SUCCESS;
}

#endif /* WLAN_FEATURE_STATS_EXT */

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
 * dp_mark_first_wakeup_packet() - set flag to indicate that
 *    fw is compatible for marking first packet after wow wakeup
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id, uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	pdev->is_first_wakeup_packet = value;
}
#endif

#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
/**
 * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
 * @soc_hdl: Opaque handle to the DP soc object
 * @vdev_id: VDEV identifier
 * @mac: MAC address of the peer
 * @ac: access category mask
 * @tid: TID mask
 * @policy: Flush policy
 *
 * Return: 0 on success, errno on failure
 */
static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_id, uint8_t *mac,
					uint8_t ac, uint32_t tid,
					enum cdp_peer_txq_flush_policy policy)
{
	struct dp_soc *soc;

	if (!soc_hdl) {
		dp_err("soc is null");
		return -EINVAL;
	}
	soc = cdp_soc_t_to_dp_soc(soc_hdl);
	/* Forward the flush configuration to the target-interface layer */
	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
					       mac, ac, tid, policy);
}
#endif

#ifdef CONNECTIVITY_PKTLOG
/**
 * dp_register_packetdump_callback() - registers
 *    tx data packet, tx mgmt. packet and rx data packet
 *    dump callback handler.
 *
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @dp_tx_packetdump_cb: tx packetdump cb
 * @dp_rx_packetdump_cb: rx packetdump cb
 *
 * This function is used to register tx data pkt, tx mgmt.
 * pkt and rx data pkt dump callback
 *
 * Return: None
 *
 */
static inline
void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL!");
		return;
	}

	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
}

/**
 * dp_deregister_packetdump_callback() - deregisters
 *    tx data packet, tx mgmt. packet and rx data packet
 *    dump callback handler
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * This function is used to deregister tx data pkt.,
 * tx mgmt. pkt and rx data pkt.
 * dump callback
 *
 * Return: None
 *
 */
static inline
void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
				       uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL!");
		return;
	}

	/* Clear both callbacks so no further packet dumps are delivered */
	pdev->dp_tx_packetdump_cb = NULL;
	pdev->dp_rx_packetdump_cb = NULL;
}
#endif

#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
 * @soc_hdl: Datapath soc handle
 * @high: whether the bus bw is high or not
 *
 * Return: void
 */
static void
dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	soc->high_throughput = high;
}

/**
 * dp_get_bus_vote_lvl_high() - get bus bandwidth vote to dp
 * @soc_hdl: Datapath soc handle
 *
 * Return: bool
 */
static bool
dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	return soc->high_throughput;
}
#endif

#ifdef DP_PEER_EXTENDED_API
/* Miscellaneous cdp ops exported to the converged driver */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
#endif
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
#endif
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
#endif
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
#endif
#ifdef CONNECTIVITY_PKTLOG
	.register_pktdump_cb = dp_register_packetdump_callback,
	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
#endif
#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
#endif
#ifdef DP_TX_PACKET_INSPECT_FOR_ILP
	.evaluate_update_tx_ilp_cfg = dp_evaluate_update_tx_ilp_config,
#endif
};
#endif

#ifdef DP_FLOW_CTL
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required.
	 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#ifndef WLAN_SOFTUMAC_SUPPORT
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
#endif /*WLAN_SOFTUMAC_SUPPORT */
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
#endif

#ifdef IPA_OFFLOAD
/* IPA offload ops table registered with the converged driver */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
#ifdef QCA_ENHANCED_STATS_SUPPORT
	.ipa_update_peer_rx_stats = dp_ipa_update_peer_rx_stats,
#endif
#ifdef IPA_OPT_WIFI_DP
	.ipa_rx_super_rule_setup = dp_ipa_rx_super_rule_setup,
	.ipa_pcie_link_up = dp_ipa_pcie_link_up,
	.ipa_pcie_link_down = dp_ipa_pcie_link_down,
#endif
#ifdef IPA_WDS_EASYMESH_FEATURE
	.ipa_ast_create = dp_ipa_ast_create,
#endif
	.ipa_get_wdi_version = dp_ipa_get_wdi_version,
};
#endif

#ifdef DP_POWER_SAVE
/**
 * dp_bus_suspend() - drain pending TX and quiesce DP before bus suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */
	int32_t tx_pending;

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			dp_info("TX frames are pending %d, abort suspend",
				tx_pending);
			dp_find_missing_tx_comp(soc);
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	/* Stop monitor reap timer and reap any pending frames in ring */
	dp_monitor_reap_timer_suspend(soc);

	dp_suspend_fse_cache_flush(soc);
	dp_rx_fst_update_pm_suspend_status(soc, true);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_bus_resume() - restore DP datapath state after bus resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	/* Start monitor reap timer */
	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);

	dp_resume_fse_cache_flush(soc);

	soc->arch_ops.dp_update_ring_hptp(soc, false);

	dp_rx_fst_update_pm_suspend_status(soc, false);

	dp_rx_fst_requeue_wq(soc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_process_wow_ack_rsp() - process wow ack response
 * @soc_hdl: datapath soc handle
 * @pdev_id: data path pdev handle id
 *
 * Return: none
 */
static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return;
	}

	/*
	 * As part of wow enable FW disables the mon status ring and in wow ack
	 * response from FW reap mon status ring to make sure no packets pending
	 * in the ring.
	 */
	dp_monitor_reap_timer_suspend(soc);
}

/**
 * dp_process_target_suspend_req() - process target suspend request
 * @soc_hdl: datapath soc handle
 * @pdev_id: data path pdev handle id
 *
 * Return: none
 */
static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return;
	}

	/* Stop monitor reap timer and reap any pending frames in ring */
	dp_monitor_reap_timer_suspend(soc);
}

/* Bus suspend/resume ops registered with the converged driver */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
#endif

#ifdef DP_FLOW_CTL
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
#endif

#ifdef DP_PEER_EXTENDED_API
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};

static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
	.peer_flush_frags = dp_peer_flush_frags,
	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
};
#endif

/**
 * dp_soc_txrx_ops_attach() - attach all cdp op tables to the soc handle
 * @soc: DP SOC handle
 *
 * Return: None
 */
static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
{
	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
	soc->cdp_soc.ops->me_ops = &dp_ops_me;
	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
#ifdef PEER_FLOW_CONTROL
	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
#endif /* PEER_FLOW_CONTROL */
#ifdef DP_PEER_EXTENDED_API
	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
#endif
#ifdef DP_FLOW_CTL
	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
#endif
#ifdef IPA_OFFLOAD
	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
#endif
#ifdef DP_POWER_SAVE
	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
#endif
#ifdef WLAN_SUPPORT_MSCS
	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
#endif
#ifdef CONFIG_SAWF_DEF_QUEUES
	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
#endif
#ifdef WLAN_SUPPORT_SCS
	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
#endif
};

#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
	defined(QCA_WIFI_QCA5332)
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @params: SOC attach params
 *
 * Return: DP SOC handle on success, NULL on failure
 */
struct cdp_soc_t *
dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		    struct cdp_soc_attach_params *params)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, params);

	return dp_soc_to_cdp_soc_t(dp_soc);
}

/**
 * dp_soc_set_def_pdev() - set INVALID_PDEV_ID as the default host pdev
 *    index for every lmac
 * @soc: DP SOC handle
 *
 * Return: None
 */
static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
{
	int lmac_id;

	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
		/* Set default host PDEV ID for lmac_id */
		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
				      INVALID_PDEV_ID, lmac_id);
	}
}

/**
 * dp_get_link_desc_id_start() - get link desc cookie id start offset
 * @arch_id: CDP architecture type
 *
 * Return: link desc cookie id start value for the given architecture
 */
static uint32_t
dp_get_link_desc_id_start(uint16_t arch_id)
{
	switch (arch_id) {
	case CDP_ARCH_TYPE_LI:
	case CDP_ARCH_TYPE_RH:
		return LINK_DESC_ID_START_21_BITS_COOKIE;
	case CDP_ARCH_TYPE_BE:
		return LINK_DESC_ID_START_20_BITS_COOKIE;
	default:
		dp_err("unknown arch_id 0x%x", arch_id);
		QDF_BUG(0);
		/* defensive fallback; QDF_BUG() already flagged the error */
		return LINK_DESC_ID_START_21_BITS_COOKIE;
	}
}

#ifdef DP_TX_PACKET_INSPECT_FOR_ILP
/* TX ILP inspection starts disabled at attach time */
static inline
void dp_soc_init_tx_ilp(struct dp_soc *soc)
{
	soc->tx_ilp_enable = false;
}
#else
static inline
void dp_soc_init_tx_ilp(struct dp_soc *soc)
{
}
#endif

/**
 * dp_soc_attach() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @params: SOC attach params
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	      struct cdp_soc_attach_params *params)
{
	struct dp_soc *soc = NULL;
	uint16_t arch_id;
	struct hif_opaque_softc *hif_handle = params->hif_handle;
	qdf_device_t qdf_osdev = params->qdf_osdev;
	struct ol_if_ops *ol_ops = params->ol_ops;
	uint16_t device_id = params->device_id;

	if (!hif_handle) {
		dp_err("HIF handle is NULL");
		goto fail0;
	}
	arch_id = cdp_get_arch_type_from_devid(device_id);
	/* soc context size is device (arch) specific */
	soc = qdf_mem_common_alloc(dp_get_soc_context_size(device_id));
	if (!soc) {
		dp_err("DP SOC memory allocation failed");
		goto fail0;
	}

	dp_info("soc memory allocated %pK", soc);
	soc->hif_handle = hif_handle;
	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		goto fail1;

	hif_get_cmem_info(soc->hif_handle,
			  &soc->cmem_base,
			  &soc->cmem_total_size);
	soc->cmem_avail_size = soc->cmem_total_size;
	soc->device_id = device_id;
	soc->cdp_soc.ops =
		(struct cdp_ops
		 *)qdf_mem_malloc(sizeof(struct cdp_ops));
	if (!soc->cdp_soc.ops)
		goto fail1;

	dp_soc_txrx_ops_attach(soc);
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
	dp_soc_init_tx_ilp(soc);
	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
			    &soc->rx_mon_pkt_tlv_size);
	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
						       params->mlo_chip_id);
	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
	soc->arch_id = arch_id;
	soc->link_desc_id_start =
		dp_get_link_desc_id_start(soc->arch_id);
	dp_configure_arch_ops(soc);

	/* Reset wbm sg list and flags */
	dp_rx_wbm_sg_list_reset(soc);

	dp_soc_cfg_history_attach(soc);
	dp_soc_tx_hw_desc_history_attach(soc);
	dp_soc_rx_history_attach(soc);
	dp_soc_mon_status_ring_history_attach(soc);
	dp_soc_tx_history_attach(soc);
	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed\n");
		goto fail2;
	}

	soc->arch_ops.soc_cfg_attach(soc);

	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
		dp_err("failed to allocate link desc pool banks");
		goto fail3;
	}

	if (dp_hw_link_desc_ring_alloc(soc)) {
		dp_err("failed to allocate link_desc_ring");
		goto fail4;
	}

	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
								 params))) {
		dp_err("unable to do target specific attach");
		goto fail5;
	}

	if (dp_soc_srng_alloc(soc)) {
		dp_err("failed to allocate soc srng rings");
		goto fail6;
	}

	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
		goto fail7;
	}

	if (!dp_monitor_modularized_enable()) {
		if (dp_mon_soc_attach_wrapper(soc)) {
			dp_err("failed to attach monitor");
			goto fail8;
		}
	}

	if (hal_reo_shared_qaddr_setup((hal_soc_handle_t)soc->hal_soc,
				       &soc->reo_qref)
	    != QDF_STATUS_SUCCESS) {
		dp_err("unable to setup reo shared qaddr");
		goto fail9;
	}

	/* sysfs stats init failure is non-fatal: clean up and continue */
	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
		dp_err("failed to initialize dp stats sysfs file");
		dp_sysfs_deinitialize_stats(soc);
	}

	dp_soc_swlm_attach(soc);
	dp_soc_set_interrupt_mode(soc);
	dp_soc_set_def_pdev(soc);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return soc;
	/* Unwind: each label undoes the step acquired just before its goto */
fail9:
	if (!dp_monitor_modularized_enable())
		dp_mon_soc_detach_wrapper(soc);
fail8:
	dp_soc_tx_desc_sw_pools_free(soc);
fail7:
	dp_soc_srng_free(soc);
fail6:
	soc->arch_ops.txrx_soc_detach(soc);
fail5:
	dp_hw_link_desc_ring_free(soc);
fail4:
	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
fail3:
	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
fail2:
	qdf_mem_free(soc->cdp_soc.ops);
fail1:
	qdf_mem_common_free(soc);
fail0:
	return NULL;
}

/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @cdp_soc: Opaque DP SOC handle from dp_soc_attach_wifi3()
 * @ctrl_psoc: Opaque SOC handle from control plane (unused here)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (unused here)
 * @ol_ops: Offload operations (unused here)
 * @device_id: Device id (unused here)
 *
 * Return: value of the arch-specific txrx_soc_init()
 */
void *dp_soc_init_wifi3(struct cdp_soc_t *cdp_soc,
			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			struct hif_opaque_softc *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;

	return soc->arch_ops.txrx_soc_init(soc, htc_handle, hif_handle);
}

#endif

/**
 * dp_get_pdev_for_mac_id() - map a mac/lmac id to its dp_pdev
 * @soc: DP SOC handle
 * @mac_id: MAC id
 *
 * Return: pdev handle from soc->pdev_list (NULL when out of range)
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if
 (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;

	/* Typically for MCL, as there is only 1 PDEV */
	return soc->pdev_list[0];
}

/**
 * dp_update_num_mac_rings_for_dbs() - clamp MAC ring count when DBS is
 *				       not supported by the target
 * @soc: Datapath soc handle
 * @max_mac_rings: in/out ring count; forced to 1 when DBS is disabled
 *
 * Return: none
 */
void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
				     int *max_mac_rings)
{
	bool dbs_enable = false;

	/* NOTE(review): ol_ops itself is assumed non-NULL here; only the
	 * is_hw_dbs_capable callback pointer is checked — confirm callers
	 * guarantee ol_ops is attached.
	 */
	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_capable((void *)soc->ctrl_psoc);

	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
	dp_info("dbs_enable %d, max_mac_rings %d",
		dbs_enable, *max_mac_rings);
}

qdf_export_symbol(dp_update_num_mac_rings_for_dbs);

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * dp_get_cfr_rcc() - get cfr rcc config
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of objmgr pdev
 *
 * Return: true/false based on cfr mode setting
 */
static
bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return false;
	}

	return pdev->cfr_rcc_mode;
}

/**
 * dp_set_cfr_rcc() - enable/disable cfr rcc config
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of objmgr pdev
 * @enable: Enable/Disable cfr rcc mode
 *
 * Return: none
 */
static
void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	pdev->cfr_rcc_mode = enable;
}

/**
 * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @cfr_rcc_stats: CFR RCC debug statistics buffer
 *
 * Return: none
 */
static inline void
dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}

	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
		     sizeof(struct cdp_cfr_rcc_stats));
}

/**
 * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: none
 */
static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("dp pdev is NULL");
		return;
	}

	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
}
#endif

/**
 * dp_bucket_index() - Return index from array
 *
 * @delay: delay measured
 * @array: array used to index corresponding delay
 * @delay_in_us: flag to indicate whether the delay in ms or us
 *
 * Return: index
 */
static uint8_t
dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
{
	uint8_t i = CDP_DELAY_BUCKET_0;
	uint32_t thr_low, thr_high;

	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
		thr_low = array[i];
		thr_high = array[i + 1];

		/* Threshold tables are in ms; scale when delay is in us */
		if (delay_in_us) {
			thr_low = thr_low * USEC_PER_MSEC;
			thr_high = thr_high * USEC_PER_MSEC;
		}
		if (delay >= thr_low && delay <= thr_high)
			return i;
	}
	/* Delay beyond the last threshold lands in the final bucket */
	return (CDP_DELAY_BUCKET_MAX - 1);
}

#ifdef HW_TX_DELAY_STATS_ENABLE
/*
 * cdp_fw_to_hw_delay_range
 * Fw to hw delay ranges in milliseconds
 */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
#else
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
#endif

/*
 * cdp_sw_enq_delay_range
 * Software enqueue delay ranges in milliseconds
 */
static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};

/*
 * cdp_intfrm_delay_range
 * Interframe delay ranges in milliseconds
 */
static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};

/**
 * dp_fill_delay_buckets() - Fill delay statistics bucket for each
 *			     type of delay
 * @tstats: tid tx stats
 * @rstats: tid rx stats
 * @delay: delay in ms
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 * @delay_in_us: flag to indicate whether the delay in ms or us
 *
 * Return: pointer to cdp_delay_stats structure, or NULL when the
 *	   required stats context for @mode was not supplied
 */
static struct cdp_delay_stats *
dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
		      uint8_t tid, uint8_t mode, uint8_t ring_id,
		      bool delay_in_us)
{
	uint8_t delay_index = 0;
	struct cdp_delay_stats *stats = NULL;

	/*
	 * Update delay stats in proper bucket
	 */
	switch (mode) {
	/* Software Enqueue delay ranges */
	case CDP_DELAY_STATS_SW_ENQ:
		if (!tstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
					      delay_in_us);
		tstats->swq_delay.delay_bucket[delay_index]++;
		stats = &tstats->swq_delay;
		break;

	/* Tx Completion delay ranges */
	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
		if (!tstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
					      delay_in_us);
		tstats->hwtx_delay.delay_bucket[delay_index]++;
		stats = &tstats->hwtx_delay;
		break;

	/* Interframe tx delay ranges */
	case CDP_DELAY_STATS_TX_INTERFRAME:
		if (!tstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
					      delay_in_us);
		tstats->intfrm_delay.delay_bucket[delay_index]++;
		stats = &tstats->intfrm_delay;
		break;

	/* Interframe rx delay ranges */
	case CDP_DELAY_STATS_RX_INTERFRAME:
		if (!rstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
					      delay_in_us);
		rstats->intfrm_delay.delay_bucket[delay_index]++;
		stats = &rstats->intfrm_delay;
		break;

	/* Ring reap to indication to network stack */
	case CDP_DELAY_STATS_REAP_STACK:
		if (!rstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
					      delay_in_us);
		rstats->to_stack_delay.delay_bucket[delay_index]++;
		stats = &rstats->to_stack_delay;
		break;
	default:
		dp_debug("Incorrect delay mode: %d", mode);
	}

	return stats;
}

/**
 * dp_update_delay_stats() - Update min/max/avg delay and bucket histogram
 *			     for the given delay mode
 * @tstats: tid tx stats (used by tx delay modes)
 * @rstats: tid rx stats (used by rx delay modes)
 * @delay: delay measured
 * @tid: tid value
 * @mode: type of delay mode
 * @ring_id: ring number
 * @delay_in_us: flag to indicate whether the delay in ms or us
 *
 * Return: none
 */
void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
			   uint8_t tid, uint8_t mode, uint8_t ring_id,
			   bool delay_in_us)
{
	struct cdp_delay_stats *dstats = NULL;

	/*
	 * Delay ranges are different for different delay modes
	 * Get the correct index to update delay bucket
	 */
	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
				       ring_id, delay_in_us);
	if
(qdf_unlikely(!dstats)) 12025 return; 12026 12027 if (delay != 0) { 12028 /* 12029 * Compute minimum,average and maximum 12030 * delay 12031 */ 12032 if (delay < dstats->min_delay) 12033 dstats->min_delay = delay; 12034 12035 if (delay > dstats->max_delay) 12036 dstats->max_delay = delay; 12037 12038 /* 12039 * Average over delay measured till now 12040 */ 12041 if (!dstats->avg_delay) 12042 dstats->avg_delay = delay; 12043 else 12044 dstats->avg_delay = ((delay + dstats->avg_delay) >> 1); 12045 } 12046 } 12047 12048 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, 12049 u_int8_t newmac[][QDF_MAC_ADDR_SIZE], 12050 u_int16_t mac_cnt, bool limit) 12051 { 12052 struct dp_soc *dp_soc = (struct dp_soc *)soc; 12053 struct dp_vdev *vdev = 12054 dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP); 12055 struct dp_peer *peer; 12056 uint16_t new_mac_cnt = 0; 12057 12058 if (!vdev) 12059 return new_mac_cnt; 12060 12061 if (limit && (vdev->num_peers > mac_cnt)) 12062 return 0; 12063 12064 qdf_spin_lock_bh(&vdev->peer_list_lock); 12065 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 12066 if (peer->bss_peer) 12067 continue; 12068 if (new_mac_cnt < mac_cnt) { 12069 WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw); 12070 new_mac_cnt++; 12071 } 12072 } 12073 qdf_spin_unlock_bh(&vdev->peer_list_lock); 12074 dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP); 12075 return new_mac_cnt; 12076 } 12077 12078 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac) 12079 { 12080 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, 12081 mac, 0, vdev_id, 12082 DP_MOD_ID_CDP); 12083 uint16_t peer_id = HTT_INVALID_PEER; 12084 12085 if (!peer) { 12086 dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc); 12087 return peer_id; 12088 } 12089 12090 peer_id = peer->peer_id; 12091 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 12092 return peer_id; 12093 } 12094 12095 #ifdef QCA_SUPPORT_WDS_EXTENDED 12096 QDF_STATUS 
dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc, 12097 uint8_t vdev_id, 12098 uint8_t *mac, 12099 ol_txrx_rx_fp rx, 12100 ol_osif_peer_handle osif_peer) 12101 { 12102 struct dp_txrx_peer *txrx_peer = NULL; 12103 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, 12104 mac, 0, vdev_id, 12105 DP_MOD_ID_CDP); 12106 QDF_STATUS status = QDF_STATUS_E_INVAL; 12107 12108 if (!peer) { 12109 dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc); 12110 return status; 12111 } 12112 12113 txrx_peer = dp_get_txrx_peer(peer); 12114 if (!txrx_peer) { 12115 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 12116 return status; 12117 } 12118 12119 if (rx) { 12120 if (txrx_peer->osif_rx) { 12121 status = QDF_STATUS_E_ALREADY; 12122 } else { 12123 txrx_peer->osif_rx = rx; 12124 status = QDF_STATUS_SUCCESS; 12125 } 12126 } else { 12127 if (txrx_peer->osif_rx) { 12128 txrx_peer->osif_rx = NULL; 12129 status = QDF_STATUS_SUCCESS; 12130 } else { 12131 status = QDF_STATUS_E_ALREADY; 12132 } 12133 } 12134 12135 txrx_peer->wds_ext.osif_peer = osif_peer; 12136 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 12137 12138 return status; 12139 } 12140 12141 QDF_STATUS dp_wds_ext_get_peer_osif_handle( 12142 ol_txrx_soc_handle soc, 12143 uint8_t vdev_id, 12144 uint8_t *mac, 12145 ol_osif_peer_handle *osif_peer) 12146 { 12147 struct dp_soc *dp_soc = (struct dp_soc *)soc; 12148 struct dp_txrx_peer *txrx_peer = NULL; 12149 struct dp_peer *peer = dp_peer_find_hash_find(dp_soc, 12150 mac, 0, vdev_id, 12151 DP_MOD_ID_CDP); 12152 12153 if (!peer) { 12154 dp_cdp_debug("%pK: Peer is NULL!\n", dp_soc); 12155 return QDF_STATUS_E_INVAL; 12156 } 12157 12158 txrx_peer = dp_get_txrx_peer(peer); 12159 if (!txrx_peer) { 12160 dp_cdp_debug("%pK: TXRX Peer is NULL!\n", dp_soc); 12161 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 12162 return QDF_STATUS_E_INVAL; 12163 } 12164 12165 *osif_peer = txrx_peer->wds_ext.osif_peer; 12166 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 12167 12168 return QDF_STATUS_SUCCESS; 
12169 } 12170 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 12171 12172 /** 12173 * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including 12174 * monitor rings 12175 * @pdev: Datapath pdev handle 12176 * 12177 */ 12178 static void dp_pdev_srng_deinit(struct dp_pdev *pdev) 12179 { 12180 struct dp_soc *soc = pdev->soc; 12181 uint8_t i; 12182 12183 if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) 12184 dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], 12185 RXDMA_BUF, 12186 pdev->lmac_id); 12187 12188 if (!soc->rxdma2sw_rings_not_supported) { 12189 for (i = 0; 12190 i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) { 12191 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, 12192 pdev->pdev_id); 12193 12194 wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id]. 12195 base_vaddr_unaligned, 12196 soc->rxdma_err_dst_ring[lmac_id]. 12197 alloc_size, 12198 soc->ctrl_psoc, 12199 WLAN_MD_DP_SRNG_RXDMA_ERR_DST, 12200 "rxdma_err_dst"); 12201 dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id], 12202 RXDMA_DST, lmac_id); 12203 } 12204 } 12205 12206 12207 } 12208 12209 /** 12210 * dp_pdev_srng_init() - initialize all pdev srng rings including 12211 * monitor rings 12212 * @pdev: Datapath pdev handle 12213 * 12214 * Return: QDF_STATUS_SUCCESS on success 12215 * QDF_STATUS_E_NOMEM on failure 12216 */ 12217 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev) 12218 { 12219 struct dp_soc *soc = pdev->soc; 12220 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 12221 uint32_t i; 12222 12223 soc_cfg_ctx = soc->wlan_cfg_ctx; 12224 12225 if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) { 12226 if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], 12227 RXDMA_BUF, 0, pdev->lmac_id)) { 12228 dp_init_err("%pK: dp_srng_init failed rx refill ring", 12229 soc); 12230 goto fail1; 12231 } 12232 } 12233 12234 /* LMAC RxDMA to SW Rings configuration */ 12235 if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) 12236 /* Only valid for MCL */ 12237 pdev = 
soc->pdev_list[0]; 12238 12239 if (!soc->rxdma2sw_rings_not_supported) { 12240 for (i = 0; 12241 i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) { 12242 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, 12243 pdev->pdev_id); 12244 struct dp_srng *srng = 12245 &soc->rxdma_err_dst_ring[lmac_id]; 12246 12247 if (srng->hal_srng) 12248 continue; 12249 12250 if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) { 12251 dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring", 12252 soc); 12253 goto fail1; 12254 } 12255 wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id]. 12256 base_vaddr_unaligned, 12257 soc->rxdma_err_dst_ring[lmac_id]. 12258 alloc_size, 12259 soc->ctrl_psoc, 12260 WLAN_MD_DP_SRNG_RXDMA_ERR_DST, 12261 "rxdma_err_dst"); 12262 } 12263 } 12264 return QDF_STATUS_SUCCESS; 12265 12266 fail1: 12267 dp_pdev_srng_deinit(pdev); 12268 return QDF_STATUS_E_NOMEM; 12269 } 12270 12271 /** 12272 * dp_pdev_srng_free() - free all pdev srng rings including monitor rings 12273 * @pdev: Datapath pdev handle 12274 * 12275 */ 12276 static void dp_pdev_srng_free(struct dp_pdev *pdev) 12277 { 12278 struct dp_soc *soc = pdev->soc; 12279 uint8_t i; 12280 12281 if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) 12282 dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]); 12283 12284 if (!soc->rxdma2sw_rings_not_supported) { 12285 for (i = 0; 12286 i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) { 12287 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, 12288 pdev->pdev_id); 12289 12290 dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]); 12291 } 12292 } 12293 } 12294 12295 /** 12296 * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including 12297 * monitor rings 12298 * @pdev: Datapath pdev handle 12299 * 12300 * Return: QDF_STATUS_SUCCESS on success 12301 * QDF_STATUS_E_NOMEM on failure 12302 */ 12303 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev) 12304 { 12305 struct dp_soc *soc = pdev->soc; 12306 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 
	uint32_t ring_size;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				  RXDMA_BUF, ring_size, 0)) {
			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/* Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* Skip rings that are already allocated */
			if (srng->base_vaddr_unaligned)
				continue;

			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
fail1:
	dp_pdev_srng_free(pdev);
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_pdev_init() - Initialize txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @htc_handle: HTC handle for host-target interface (unused here)
 * @qdf_osdev: QDF OS device (unused here)
 * @pdev_id: pdev Id
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
			       HTC_HANDLE htc_handle,
			       qdf_device_t qdf_osdev,
			       uint8_t pdev_id)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	void *sojourn_buf;

	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->soc = soc;
	pdev->pdev_id = pdev_id;

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution .i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_evet_attach failed");
		goto fail0;
	}

	if (dp_pdev_srng_init(pdev)) {
		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
		goto fail1;
	}

	/* Initialize descriptors in TCL Rings used by IPA */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
		dp_ipa_hal_tx_init_alt_data_ring(soc);
	}

	/*
	 * Initialize command/credit ring descriptor
	 * Command/CREDIT ring also used for sending DATA cmds
	 */
	dp_tx_init_cmd_credit_ring(soc);

	dp_tx_pdev_init(pdev);

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);

	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
	}

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* Reset the IPA vlan interrupt mask */
	dp_soc_reset_ipa_vlan_intr_mask(soc);

	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;
	pdev->is_lro_hash_configured = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	/* No lmac assigned to any band until monitor config happens */
	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;

	DP_STATS_INIT(pdev);

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);
	dp_pcp_tid_map_setup(pdev);

	/* set the reo destination during initialization */
	dp_pdev_set_default_reo(pdev);

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
					   sizeof(struct cdp_tx_sojourn_stats), 0, 4,
					   TRUE);

	if (!pdev->sojourn_buf) {
		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
		goto fail2;
	}
	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));

	qdf_event_create(&pdev->fw_peer_stats_event);
	qdf_event_create(&pdev->fw_stats_event);
	qdf_event_create(&pdev->fw_obss_stats_event);

	/* Regular tx descriptor budget = total minus special-pool budget */
	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	pdev->num_tx_spl_allowed =
		wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
	pdev->num_reg_tx_allowed =
		pdev->num_tx_allowed - pdev->num_tx_spl_allowed;
	if (dp_rxdma_ring_setup(soc, pdev)) {
		dp_init_err("%pK: RXDMA ring config failed", soc);
		goto fail3;
	}

	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail3;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail4;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
		goto fail4;
	}

	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_pdev_bkp_stats_attach failed"));
		goto fail5;
	}

	if (dp_monitor_pdev_init(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
		goto fail6;
	}

	/* initialize sw rx descriptors */
	dp_rx_pdev_desc_pool_init(pdev);
	/* allocate buffers and replenish the RxDMA ring */
	dp_rx_pdev_buffers_alloc(pdev);

	dp_init_tso_stats(pdev);

	pdev->rx_fast_flag = false;
	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return QDF_STATUS_SUCCESS;
	/* Error unwind: reverse order of the setup sequence above */
fail6:
	dp_pdev_bkp_stats_detach(pdev);
fail5:
	dp_ipa_uc_detach(soc, pdev);
fail4:
	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
fail3:
	dp_rxdma_ring_cleanup(soc, pdev);
	qdf_nbuf_free(pdev->sojourn_buf);
fail2:
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);
	dp_pdev_srng_deinit(pdev);
fail1:
	dp_wdi_event_detach(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_pdev_init_wifi3() - Init txrx pdev
 * @txrx_soc:
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: pdev Id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
				     HTC_HANDLE htc_handle,
				     qdf_device_t qdf_osdev,
				     uint8_t pdev_id)
{
	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
}

#ifdef FEATURE_DIRECT_LINK
/**
 * dp_setup_direct_link_refill_ring() - Allocate, init and register (via HTT)
 *					the direct-link RX refill ring
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: pointer to the ring on success, NULL on any failure (all partial
 *	   setup is rolled back before returning)
 */
struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
						 uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("DP pdev is NULL");
		return NULL;
	}

	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring4,
			  RXDMA_BUF, DIRECT_LINK_REFILL_RING_ENTRIES, false)) {
		dp_err("SRNG alloc failed for rx_refill_buf_ring4");
		return NULL;
	}

	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring4,
			 RXDMA_BUF, DIRECT_LINK_REFILL_RING_IDX, 0)) {
		dp_err("SRNG init failed for rx_refill_buf_ring4");
		dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
		return NULL;
	}

	if (htt_srng_setup(soc->htt_handle, pdev_id,
			   pdev->rx_refill_buf_ring4.hal_srng, RXDMA_BUF)) {
		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF,
			       DIRECT_LINK_REFILL_RING_IDX);
		dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
		return NULL;
	}

	return &pdev->rx_refill_buf_ring4;
}

/**
 * dp_destroy_direct_link_refill_ring() - Deinit and free the direct-link
 *					  RX refill ring
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: none
 */
void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("DP pdev is NULL");
		return;
	}

	/* NOTE(review): ring_num 0 is passed here, while the setup failure
	 * path deinits with DIRECT_LINK_REFILL_RING_IDX — confirm which
	 * ring number is intended.
	 */
	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF, 0);
	dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
}
#endif

#ifdef QCA_MULTIPASS_SUPPORT
/**
 * dp_set_vlan_groupkey() - Map a VLAN id to a group key on a multipass vdev
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev to configure
 * @vlan_id: VLAN id (must be < DP_MAX_VLAN_IDS)
 * @group_key: group key to associate with @vlan_id
 *
 * The vlan map is lazily allocated on the first call for a given vdev.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_MULTIPASS);
	QDF_STATUS status;

	if (!vdev || !vdev->multipass_en) {
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	if (!vdev->iv_vlan_map) {
		uint16_t vlan_map_size = (sizeof(uint16_t)) * DP_MAX_VLAN_IDS;

		vdev->iv_vlan_map = (uint16_t *)qdf_mem_malloc(vlan_map_size);
		if (!vdev->iv_vlan_map) {
			QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "iv_vlan_map");
			status = QDF_STATUS_E_NOMEM;
			goto fail;
		}

		/*
		 * 0 is invalid group key.
		 * Initialize array with invalid group keys.
		 */
		qdf_mem_zero(vdev->iv_vlan_map, vlan_map_size);
	}

	if (vlan_id >= DP_MAX_VLAN_IDS) {
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	vdev->iv_vlan_map[vlan_id] = group_key;
	status = QDF_STATUS_SUCCESS;
fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
	return status;
}

/**
 * dp_tx_vdev_multipass_deinit() - Release multipass resources held by a vdev
 * @vdev: Datapath vdev handle
 *
 * Logs any peers still present in the mpass list, frees the vlan map and
 * destroys the mpass peer mutex.
 *
 * Return: none
 */
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
	TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
		qdf_err("Peers present in mpass list : %d", txrx_peer->peer_id);
	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);

	if (vdev->iv_vlan_map) {
		qdf_mem_free(vdev->iv_vlan_map);
		vdev->iv_vlan_map = NULL;
	}

	qdf_spinlock_destroy(&vdev->mpass_peer_mutex);
}

/**
 * dp_peer_multipass_list_init() - Initialize the multipass peer list and lock
 * @vdev: Datapath vdev handle
 *
 * Return: none
 */
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
	/*
	 * vdev->iv_vlan_map is allocated when the first configuration command
	 * is issued to avoid unnecessary allocation for regular mode VAP.
	 */
	TAILQ_INIT(&vdev->mpass_peer_list);
	qdf_spinlock_create(&vdev->mpass_peer_mutex);
}
#endif /* QCA_MULTIPASS_SUPPORT */