/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include <wlan_utility.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "dp_htt.h"
#ifdef WLAN_SUPPORT_RX_FISA
#include <dp_fisa_rx.h>
#endif
#include "htt_ppdu_stats.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
#include "cfg_ucfg_api.h"
#include "dp_mon_filter.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
#endif
#include "dp_ipa.h"
#include "dp_cal_client_api.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef WLAN_SUPPORT_MSCS
#include "dp_mscs.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif

#ifdef WLAN_FEATURE_STATS_EXT
#define INIT_RX_HW_STATS_LOCK(_soc) \
    qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
#define DEINIT_RX_HW_STATS_LOCK(_soc) \
    qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
#else
#define INIT_RX_HW_STATS_LOCK(_soc)   /* no op */
#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
#endif

#ifdef DP_PEER_EXTENDED_API
#define SET_PEER_REF_CNT_ONE(_peer) \
    qdf_atomic_set(&(_peer)->ref_cnt, 1)
#else
#define SET_PEER_REF_CNT_ONE(_peer)
#endif

/*
 * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
 * If the buffer size exceeds this limit,
 * dp_txrx_get_peer_stats is to be used instead.
 */
QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
                        (sizeof(cdp_peer_stats_param_t) <= 16));

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
 * should also be updated accordingly.
 */
QDF_COMPILE_TIME_ASSERT(num_intr_grps,
                        HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);

/*
 * HIF_EVENT_HIST_MAX should always be a power of 2.
 */
QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
                        (HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated.
 */
QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
                        WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
                        WLAN_CFG_INT_NUM_CONTEXTS);

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
/*
 * dp_config_enh_rx_capture() - API to enable/disable enhanced rx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
    return QDF_STATUS_E_INVAL;
}
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
    return QDF_STATUS_E_INVAL;
}
#endif

static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
static void dp_pdev_srng_free(struct dp_pdev *pdev);
static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);

static void dp_soc_srng_deinit(struct dp_soc *soc);
static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
static void dp_soc_srng_free(struct dp_soc *soc);
static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);

static void dp_soc_cfg_init(struct dp_soc *soc);
static void dp_soc_cfg_attach(struct dp_soc *soc);

static inline
QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
                                HTC_HANDLE htc_handle,
                                qdf_device_t qdf_osdev,
                                uint8_t pdev_id);

static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);

static QDF_STATUS
dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
                   HTC_HANDLE htc_handle,
                   qdf_device_t qdf_osdev,
                   uint8_t pdev_id);

static QDF_STATUS
dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);

static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);

void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
                  struct hif_opaque_softc *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
                                       uint8_t pdev_id,
                                       int force);
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
              struct hif_opaque_softc *hif_handle,
              HTC_HANDLE htc_handle,
              qdf_device_t qdf_osdev,
              struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static inline
QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
                                uint8_t vdev_id,
                                uint8_t *peer_mac_addr);
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
                                       uint8_t vdev_id,
                                       uint8_t *peer_mac, uint32_t bitmap);
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
                                bool unmap_only);
#ifdef ENABLE_VERBOSE_DEBUG
bool is_dp_verbose_debug_enabled;
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
                          uint8_t pdev_id,
                          bool enable,
                          struct cdp_monitor_filter *filter_val);
static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                           bool enable);
static inline void
dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
static inline void
dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
static inline void
dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                         bool enable);
#endif
static inline bool
dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev);
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
                                            enum hal_ring_type ring_type,
                                            int ring_num);

#define DP_INTR_POLL_TIMER_MS	5

#define MON_VDEV_TIMER_INIT	0x1
#define MON_VDEV_TIMER_RUNNING	0x2

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	1000
#define DP_MCS_LENGTH	(6 * MAX_MCS)

#define DP_CURR_FW_STATS_AVAIL		19
#define DP_HTT_DBG_EXT_STATS_MAX	256
#define DP_MAX_SLEEP_TIME		100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT	500
#else
#define SUSPEND_DRAIN_WAIT	3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL	0xb
#define RX_RING_MASK_VAL	0x7
#else
#define TX_RING_MASK_VAL	0xF
#define RX_RING_MASK_VAL	0xF
#endif

#define STR_MAXLEN	64

#define RNG_ERR		"SRNG setup failed for"

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

/* Budget to reap monitor status ring */
#define DP_MON_REAP_BUDGET 1024

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
    0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7,
};

/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000     0
 * 001     1
 * 010     2
 * 011     3
 * 100     4
 * 101     5
 * 110     6
 * 111     7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
    0, 1, 2, 3, 4, 5, 6, 7,
};

/**
 * @brief CPU to tx ring map
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
    {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
    {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
    {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
    {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
    {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
    {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
    STATS_FW = 0,
    STATS_HOST = 1,
    STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 */
enum dp_fw_stats {
    TXRX_FW_STATS_INVALID = -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
    {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
    {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
    /* Last ENUM for HTT FW STATS */
    {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
    {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
    {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
    {TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
    {TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
    {HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
};
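
/*
 * Illustrative note (not code from this driver): each row of
 * dp_stats_mapping_table pairs one firmware stats id (column STATS_FW)
 * with one host stats id (column STATS_HOST); a row carries
 * TXRX_FW_STATS_INVALID or TXRX_HOST_STATS_INVALID when only one side
 * applies. A caller holding a stats index 'idx' would select, e.g.:
 *
 *	int fw_stat = dp_stats_mapping_table[idx][STATS_FW];
 *	int host_stat = dp_stats_mapping_table[idx][STATS_HOST];
 */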

/* MCL specific functions */
#if defined(DP_CON_MON)
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0 because, in interrupt mode (softirq-based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, if packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings;
 * that is done in a separate timer context instead.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
    return 0;
}

/*
 * dp_service_mon_rings() - service monitor rings
 * @soc: soc dp handle
 * @quota: number of ring entries that can be serviced
 *
 * Return: None
 */
static void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota)
{
    int ring = 0, work_done;
    struct dp_pdev *pdev = NULL;

    for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
        pdev = dp_get_pdev_for_lmac_id(soc, ring);
        if (!pdev)
            continue;
        work_done = dp_mon_process(soc, NULL, ring, quota);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  FL("Reaped %d descs from Monitor rings"),
                  work_done);
    }
}

/*
 * dp_mon_reap_timer_handler() - timer to reap monitor rings,
 * required as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return: None
 */
static void dp_mon_reap_timer_handler(void *arg)
{
    struct dp_soc *soc = (struct dp_soc *)arg;

    dp_service_mon_rings(soc, QCA_NAPI_BUDGET);

    qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
    struct dp_pdev *handle =
        dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

    if (!handle) {
        dp_err("pdev handle is NULL");
        return;
    }

    if (handle->pkt_log_init) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: Packet log already initialized", __func__);
        return;
    }

    pktlog_sethandle(&handle->pl_dev, scn);
    pktlog_set_pdev_id(handle->pl_dev, pdev_id);
    pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

    if (pktlogmod_init(scn)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: pktlogmod_init failed", __func__);
        handle->pkt_log_init = false;
    } else {
        handle->pkt_log_init = true;
    }
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
                                   uint8_t pdev_id, void *scn)
{
    dp_pkt_log_init(soc_hdl, pdev_id, scn);
    pktlog_htc_attach();
}

/**
 * dp_pktlogmod_exit() - API to clean up pktlog info
 * @pdev: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;
    struct hif_opaque_softc *scn = soc->hif_handle;

    if (!scn) {
        dp_err("Invalid hif(scn) handle");
        return;
    }

    /* stop mon_reap_timer if it has been started */
    if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
        soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev)))
        qdf_timer_sync_cancel(&soc->mon_reap_timer);

    pktlogmod_exit(scn);
    pdev->pkt_log_init = false;
}
#else
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
                                   uint8_t pdev_id, void *scn)
{
}

static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
#endif

/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
    int i;
    int num_rx_contexts = 0;
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;

    for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
        if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
            num_rx_contexts++;

    return num_rx_contexts;
}

#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
    return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}

/*
 * dp_service_lmac_rings() - timer to reap lmac rings
 * @arg: SoC Handle
 *
 * Return: None
 */
static void dp_service_lmac_rings(void *arg)
{
    struct dp_soc *soc = (struct dp_soc *)arg;
    int ring = 0, i;
    struct dp_pdev *pdev = NULL;
    union dp_rx_desc_list_elem_t *desc_list = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;

    /* Process LMAC interrupts */
    for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
        int mac_for_pdev = ring;
        struct dp_srng *rx_refill_buf_ring;

        pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
        if (!pdev)
            continue;

        rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

        dp_mon_process(soc, NULL, mac_for_pdev,
                       QCA_NAPI_BUDGET);

        for (i = 0;
             i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
            dp_rxdma_err_process(&soc->intr_ctx[i], soc,
                                 mac_for_pdev,
                                 QCA_NAPI_BUDGET);

        if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
                                          mac_for_pdev))
            dp_rx_buffers_replenish(soc, mac_for_pdev,
                                    rx_refill_buf_ring,
                                    &soc->rx_desc_buf[mac_for_pdev],
                                    0, &desc_list, &tail);
    }

    qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#endif

static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                 uint8_t vdev_id,
                                 uint8_t *peer_mac,
                                 uint8_t *mac_addr,
                                 enum cdp_txrx_ast_entry_type type,
                                 uint32_t flags)
{
    int ret = -1;
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
                                                  peer_mac, 0, vdev_id,
                                                  DP_MOD_ID_CDP);

    if (!peer) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "%s: Peer is NULL!\n", __func__);
        return ret;
    }

    status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
                             peer,
                             mac_addr,
                             type,
                             flags);
    if ((status == QDF_STATUS_SUCCESS) ||
        (status == QDF_STATUS_E_ALREADY) ||
        (status == QDF_STATUS_E_AGAIN))
        ret = 0;

    dp_hmwds_ast_add_notify(peer, mac_addr,
                            type, status, false);

    dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

    return ret;
}

static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                    uint8_t vdev_id,
                                    uint8_t *peer_mac,
                                    uint8_t *wds_macaddr,
                                    uint32_t flags)
{
    int status = -1;
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_ast_entry *ast_entry = NULL;
    struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
                                                  peer_mac, 0, vdev_id,
                                                  DP_MOD_ID_CDP);

    if (!peer) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "%s: Peer is NULL!\n", __func__);
        return status;
    }

    qdf_spin_lock_bh(&soc->ast_lock);
    ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
                                                peer->vdev->pdev->pdev_id);

    if (ast_entry) {
        status = dp_peer_update_ast(soc,
                                    peer,
                                    ast_entry, flags);
    }
    qdf_spin_unlock_bh(&soc->ast_lock);

    dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

    return status;
}

/*
 * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
 * @soc: Datapath SOC handle
 * @peer: DP peer
 * @arg: callback argument
 *
 * Return: None
 */
static void
dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
    struct dp_ast_entry *ast_entry = NULL;
    struct dp_ast_entry *tmp_ast_entry;

    DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
        if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
            (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
            dp_peer_del_ast(soc, ast_entry);
    }
}

/*
 * dp_wds_reset_ast_wifi3() - Reset the is_active param for an ast entry
 * @soc_hdl: Datapath SOC handle
 * @wds_macaddr: WDS entry MAC Address
 * @peer_mac_addr: peer MAC Address
 * @vdev_id: id of vdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
                                         uint8_t *wds_macaddr,
                                         uint8_t *peer_mac_addr,
                                         uint8_t vdev_id)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_ast_entry *ast_entry = NULL;
    struct dp_peer *peer;
    struct dp_pdev *pdev;
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                 DP_MOD_ID_CDP);

    if (!vdev)
        return QDF_STATUS_E_FAILURE;

    pdev = vdev->pdev;

    if (peer_mac_addr) {
        peer = dp_peer_find_hash_find(soc, peer_mac_addr,
                                      0, vdev->vdev_id,
                                      DP_MOD_ID_CDP);
        if (!peer) {
            dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
            return QDF_STATUS_E_FAILURE;
        }

        qdf_spin_lock_bh(&soc->ast_lock);
        dp_peer_reset_ast_entries(soc, peer, NULL);
        qdf_spin_unlock_bh(&soc->ast_lock);
        dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
    } else if (wds_macaddr) {
        qdf_spin_lock_bh(&soc->ast_lock);
        ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
                                                    pdev->pdev_id);

        if (ast_entry) {
            if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
                (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
                dp_peer_del_ast(soc, ast_entry);
        }
        qdf_spin_unlock_bh(&soc->ast_lock);
    }

    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
    return QDF_STATUS_SUCCESS;
}

/*
 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast
 * entries
 * @soc_hdl: Datapath SOC handle
 * @vdev_id: id of vdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
                             uint8_t vdev_id)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;

    qdf_spin_lock_bh(&soc->ast_lock);

    dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
                        DP_MOD_ID_CDP);
    qdf_spin_unlock_bh(&soc->ast_lock);

    return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
 * @soc: Datapath SOC
 * @peer: Datapath peer
 * @arg: arg to callback
 *
 * Return: None
 */
static void
dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
    struct dp_ast_entry *ase = NULL;
    struct dp_ast_entry *temp_ase;

    DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
        if ((ase->type == CDP_TXRX_AST_TYPE_STATIC) ||
            (ase->type == CDP_TXRX_AST_TYPE_SELF) ||
            (ase->type == CDP_TXRX_AST_TYPE_STA_BSS))
            continue;
        dp_peer_del_ast(soc, ase);
    }
}

/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
 * @soc_hdl: Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;

    qdf_spin_lock_bh(&soc->ast_lock);

    dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
                        DP_MOD_ID_CDP);

    qdf_spin_unlock_bh(&soc->ast_lock);
}

/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table and
 *                                       return the information of the first
 *                                       ast entry found in the table with
 *                                       the given mac address
 * @soc_hdl: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @ast_entry_info: ast entry information
 *
 * Return: true if an ast entry is found with ast_mac_addr,
 *         false if no ast entry is found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
    (struct cdp_soc_t *soc_hdl,
     uint8_t *ast_mac_addr,
     struct cdp_ast_entry_info *ast_entry_info)
{
    struct dp_ast_entry *ast_entry = NULL;
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_peer *peer = NULL;

    qdf_spin_lock_bh(&soc->ast_lock);

    ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
    if ((!ast_entry) ||
        (ast_entry->delete_in_progress && !ast_entry->callback)) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        return false;
    }

    peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
                                 DP_MOD_ID_AST);
    if (!peer) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        return false;
    }

    ast_entry_info->type = ast_entry->type;
    ast_entry_info->pdev_id = ast_entry->pdev_id;
    ast_entry_info->vdev_id = ast_entry->vdev_id;
    ast_entry_info->peer_id = ast_entry->peer_id;
    qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
                 &peer->mac_addr.raw[0],
                 QDF_MAC_ADDR_SIZE);
    dp_peer_unref_delete(peer, DP_MOD_ID_AST);
    qdf_spin_unlock_bh(&soc->ast_lock);
    return true;
}

/**
 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table and
 *                                          return ast entry information if
 *                                          mac address and pdev_id match
 * @soc_hdl: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @pdev_id: pdev_id
 * @ast_entry_info: ast entry information
 *
 * Return: true if an ast entry is found with ast_mac_addr,
 *         false if no ast entry is found
 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
    (struct cdp_soc_t *soc_hdl,
     uint8_t *ast_mac_addr,
     uint8_t pdev_id,
     struct cdp_ast_entry_info *ast_entry_info)
{
    struct dp_ast_entry *ast_entry;
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_peer *peer = NULL;

    qdf_spin_lock_bh(&soc->ast_lock);

    ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
                                                pdev_id);

    if ((!ast_entry) ||
        (ast_entry->delete_in_progress && !ast_entry->callback)) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        return false;
    }

    peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
                                 DP_MOD_ID_AST);
    if (!peer) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        return false;
    }

    ast_entry_info->type = ast_entry->type;
    ast_entry_info->pdev_id = ast_entry->pdev_id;
    ast_entry_info->vdev_id = ast_entry->vdev_id;
    ast_entry_info->peer_id = ast_entry->peer_id;
    qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
                 &peer->mac_addr.raw[0],
                 QDF_MAC_ADDR_SIZE);
    dp_peer_unref_delete(peer, DP_MOD_ID_AST);
    qdf_spin_unlock_bh(&soc->ast_lock);
    return true;
}

/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash
 *                                  table with the given mac address
 * @soc_handle: data path soc handle
 * @mac_addr: AST entry mac address
 * @callback: callback function to be called on ast delete response from FW
 * @cookie: argument to be passed to callback
 *
 * Return: QDF_STATUS_SUCCESS if an ast entry is found with mac_addr and the
 *         delete is sent,
 *         QDF_STATUS_E_INVAL if no ast entry is found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
                                               uint8_t *mac_addr,
                                               txrx_ast_free_cb callback,
                                               void *cookie)
{
    struct dp_soc *soc = (struct dp_soc *)soc_handle;
    struct dp_ast_entry *ast_entry = NULL;
    txrx_ast_free_cb cb = NULL;
    void *arg = NULL;

    qdf_spin_lock_bh(&soc->ast_lock);
    ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
    if (!ast_entry) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        return QDF_STATUS_E_INVAL;
    }

    if (ast_entry->callback) {
        cb = ast_entry->callback;
        arg = ast_entry->cookie;
    }

    ast_entry->callback = callback;
    ast_entry->cookie = cookie;

    /*
     * If delete_in_progress is set, an AST delete has already been sent
     * to the target and the host is waiting for a response, so do not
     * send the delete again.
     */
    if (!ast_entry->delete_in_progress)
        dp_peer_del_ast(soc, ast_entry);

    qdf_spin_unlock_bh(&soc->ast_lock);
    if (cb) {
        cb(soc->ctrl_psoc,
           dp_soc_to_cdp_soc(soc),
           arg,
           CDP_TXRX_AST_DELETE_IN_PROGRESS);
    }
    return QDF_STATUS_SUCCESS;
}
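
/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * dp_peer_ast_entry_del_by_soc() above (and its pdev variant below)
 * replaces any callback already registered on the entry and immediately
 * invokes the old one with CDP_TXRX_AST_DELETE_IN_PROGRESS, matching
 * the cb(soc->ctrl_psoc, dp_soc_to_cdp_soc(soc), arg, ...) invocation:
 *
 *	static void my_ast_free_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
 *				   struct cdp_soc *soc, void *cookie,
 *				   enum cdp_ast_free_status status)
 *	{
 *		if (status == CDP_TXRX_AST_DELETE_IN_PROGRESS)
 *			return;	// superseded by a newer registration
 *		// release whatever 'cookie' refers to here
 *	}
 */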

/**
 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
 *                                   table if mac address and pdev_id match
 * @soc_handle: data path soc handle
 * @mac_addr: AST entry mac address
 * @pdev_id: pdev id
 * @callback: callback function to be called on ast delete response from FW
 * @cookie: argument to be passed to callback
 *
 * Return: QDF_STATUS_SUCCESS if an ast entry is found with mac_addr and the
 *         delete is sent,
 *         QDF_STATUS_E_INVAL if no ast entry is found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
                                                uint8_t *mac_addr,
                                                uint8_t pdev_id,
                                                txrx_ast_free_cb callback,
                                                void *cookie)
{
    struct dp_soc *soc = (struct dp_soc *)soc_handle;
    struct dp_ast_entry *ast_entry;
    txrx_ast_free_cb cb = NULL;
    void *arg = NULL;

    qdf_spin_lock_bh(&soc->ast_lock);
    ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

    if (!ast_entry) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        return QDF_STATUS_E_INVAL;
    }

    if (ast_entry->callback) {
        cb = ast_entry->callback;
        arg = ast_entry->cookie;
    }

    ast_entry->callback = callback;
    ast_entry->cookie = cookie;

    /*
     * If delete_in_progress is set, an AST delete has already been sent
     * to the target and the host is waiting for a response, so do not
     * send the delete again.
     */
    if (!ast_entry->delete_in_progress)
        dp_peer_del_ast(soc, ast_entry);

    qdf_spin_unlock_bh(&soc->ast_lock);

    if (cb) {
        cb(soc->ctrl_psoc,
           dp_soc_to_cdp_soc(soc),
           arg,
           CDP_TXRX_AST_DELETE_IN_PROGRESS);
    }
    return QDF_STATUS_SUCCESS;
}

/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields
 * correspond to ring numbers. We are finding which interrupt group a ring
 * belongs to.
 *
 * Return: the index in the grp_mask array with the ring number.
 *         -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
{
    int ext_group_num;
    int mask = 1 << ring_num;

    for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
         ext_group_num++) {
        if (mask & grp_mask[ext_group_num])
            return ext_group_num;
    }

    return -QDF_STATUS_E_NOENT;
}
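
/*
 * Worked example (illustrative values, not driver configuration): with
 * grp_mask[] = {0x1, 0x6, 0x8}, querying ring_num 2 computes
 * mask = 1 << 2 = 0x4; group 0 (0x1) and group 2 (0x8) do not have bit 2
 * set, group 1 (0x6) does, so dp_srng_find_ring_in_mask(2, grp_mask)
 * returns 1.
 */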

static int dp_srng_calculate_msi_group(struct dp_soc *soc,
                                       enum hal_ring_type ring_type,
                                       int ring_num)
{
    int *grp_mask;

    switch (ring_type) {
    case WBM2SW_RELEASE:
        /* dp_tx_comp_handler - soc->tx_comp_ring */
        if (ring_num < 3)
            grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

        /* dp_rx_wbm_err_process - soc->rx_rel_ring */
        else if (ring_num == 3) {
            /* sw treats this as a separate ring type */
            grp_mask = &soc->wlan_cfg_ctx->
                int_rx_wbm_rel_ring_mask[0];
            ring_num = 0;
        } else {
            qdf_assert(0);
            return -QDF_STATUS_E_NOENT;
        }
        break;

    case REO_EXCEPTION:
        /* dp_rx_err_process - &soc->reo_exception_ring */
        grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
        break;

    case REO_DST:
        /* dp_rx_process - soc->reo_dest_ring */
        grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
        break;

    case REO_STATUS:
        /* dp_reo_status_ring_handler - soc->reo_status_ring */
        grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
        break;

    /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
    case RXDMA_MONITOR_STATUS:
    /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
    case RXDMA_MONITOR_DST:
        /* dp_mon_process */
        grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
        break;
    case RXDMA_DST:
        /* dp_rxdma_err_process */
        grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
        break;

    case RXDMA_BUF:
        grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
        break;

    case RXDMA_MONITOR_BUF:
        grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
        break;

    case TCL_DATA:
    /* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
    case TCL_CMD_CREDIT:
    case REO_CMD:
    case SW2WBM_RELEASE:
    case WBM_IDLE_LINK:
        /* normally empty SW_TO_HW rings */
        return -QDF_STATUS_E_NOENT;

    case TCL_STATUS:
    case REO_REINJECT:
        /* misc unused rings */
        return -QDF_STATUS_E_NOENT;

    case CE_SRC:
    case CE_DST:
    case CE_DST_STATUS:
        /* CE_rings - currently handled by hif */
    default:
        return -QDF_STATUS_E_NOENT;
    }

    return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
                              *ring_params, int ring_type, int ring_num)
{
    int msi_group_number;
    int msi_data_count;
    int ret;
    uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

    ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
                                      &msi_data_count, &msi_data_start,
                                      &msi_irq_start);

    if (ret)
        return;

    msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
                                                   ring_num);
    if (msi_group_number < 0) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
                  FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
                  ring_type, ring_num);
        ring_params->msi_addr = 0;
        ring_params->msi_data = 0;
        return;
    }

    if (msi_group_number > msi_data_count) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
                  FL("2 msi_groups will share an msi; msi_group_num %d"),
                  msi_group_number);

        QDF_ASSERT(0);
    }

    pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

    ring_params->msi_addr = addr_low;
    ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
    ring_params->msi_data = (msi_group_number % msi_data_count)
        + msi_data_start;
    ring_params->flags |= HAL_SRNG_MSI_INTR;
}
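
/*
 * Worked example (illustrative numbers): if the platform grants the "DP"
 * user 8 MSI vectors starting at msi_data_start = 32, a ring resolved to
 * msi_group_number = 10 wraps as (10 % 8) + 32 = 34, so two interrupt
 * groups end up sharing one MSI vector, which is exactly the case the
 * warning above flags.
 */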

#ifdef FEATURE_AST
/**
 * dp_print_peer_ast_entries() - Dump AST entries of peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer
 * @arg: argument to iterate function
 *
 * Return: void
 */
static void
dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
    struct dp_ast_entry *ase, *tmp_ase;
    uint32_t num_entries = 0;
    char type[CDP_TXRX_AST_TYPE_MAX][10] = {
        "NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
        "DA", "HMWDS_SEC"};

    DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
        DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
                       " peer_mac_addr = "QDF_MAC_ADDR_FMT
                       " peer_id = %u"
                       " type = %s"
                       " next_hop = %d"
                       " is_active = %d"
                       " ast_idx = %d"
                       " ast_hash = %d"
                       " delete_in_progress = %d"
                       " pdev_id = %d"
                       " vdev_id = %d",
                       ++num_entries,
                       QDF_MAC_ADDR_REF(ase->mac_addr.raw),
                       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
                       ase->peer_id,
                       type[ase->type],
                       ase->next_hop,
                       ase->is_active,
                       ase->ast_idx,
                       ase->ast_hash_value,
                       ase->delete_in_progress,
                       ase->pdev_id,
                       ase->vdev_id);
    }
}

/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Return: void
 */
void dp_print_ast_stats(struct dp_soc *soc)
{
    DP_PRINT_STATS("AST Stats:");
    DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
    DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
    DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
    DP_PRINT_STATS(" Entries MAP ERR = %d", soc->stats.ast.map_err);
    DP_PRINT_STATS(" Entries Mismatch ERR = %d",
                   soc->stats.ast.ast_mismatch);

    DP_PRINT_STATS("AST Table:");

    qdf_spin_lock_bh(&soc->ast_lock);

    dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
                        DP_MOD_ID_GENERIC_STATS);

    qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
void dp_print_ast_stats(struct dp_soc *soc)
{
    DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
}
#endif

/**
 * dp_print_peer_info() - Dump peer info
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @arg: argument to iter function
 *
 * Return: void
 */
static void
dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
    DP_PRINT_STATS(" peer_mac_addr = "QDF_MAC_ADDR_FMT
                   " nawds_enabled = %d"
                   " bss_peer = %d"
                   " wds_enabled = %d"
                   " tx_cap_enabled = %d"
                   " rx_cap_enabled = %d"
                   " peer id = %d",
                   QDF_MAC_ADDR_REF(peer->mac_addr.raw),
                   peer->nawds_enabled,
                   peer->bss_peer,
                   peer->wds_enabled,
                   peer->tx_cap_enabled,
                   peer->rx_cap_enabled,
                   peer->peer_id);
}

/**
 * dp_print_peer_table() - Dump all Peer stats
 * @vdev: Datapath Vdev handle
 *
 * Return: void
 */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
    DP_PRINT_STATS("Dumping Peer Table Stats:");
    dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
                         DP_MOD_ID_GENERIC_STATS);
}

#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
                                       struct hal_srng_params *ring_params,
                                       int ring_type, int ring_num,
                                       int num_entries)
{
    if (ring_type == REO_DST) {
        ring_params->intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
        ring_params->intr_batch_cntr_thres_entries =
            wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
    } else if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
        ring_params->intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
        ring_params->intr_batch_cntr_thres_entries =
            wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
    } else {
        ring_params->intr_timer_thres_us =
            soc->wlan_srng_cfg[ring_type].timer_threshold;
        ring_params->intr_batch_cntr_thres_entries =
            soc->wlan_srng_cfg[ring_type].batch_count_threshold;
    }
    ring_params->low_threshold =
        soc->wlan_srng_cfg[ring_type].low_threshold;
    if (ring_params->low_threshold)
        ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
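
/*
 * Note on the mitigation knobs set by both variants (general SRNG
 * behaviour): intr_timer_thres_us bounds how long completed entries can
 * wait before an interrupt fires, intr_batch_cntr_thres_entries fires
 * once that many entries complete, and whichever threshold is crossed
 * first wins. A batch count of 0 disables the counter, leaving the timer
 * (and, where enabled, the low-threshold interrupt) as the only
 * triggers; that is the choice made for the RX buffer rings below.
 */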
#else
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
                                       struct hal_srng_params *ring_params,
                                       int ring_type, int ring_num,
                                       int num_entries)
{
    if (ring_type == REO_DST) {
        ring_params->intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
        ring_params->intr_batch_cntr_thres_entries =
            wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
    } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
        ring_params->intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
        ring_params->intr_batch_cntr_thres_entries =
            wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
    } else {
        ring_params->intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
        ring_params->intr_batch_cntr_thres_entries =
            wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
    }

    /* Enable low threshold interrupts for rx buffer rings (regular and
     * monitor buffer rings).
     * TODO: See if this is required for any other ring
     */
    if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
        (ring_type == RXDMA_MONITOR_STATUS)) {
        /* TODO: Setting low threshold to 1/8th of ring size
         * see if this needs to be configurable
         */
        ring_params->low_threshold = num_entries >> 3;
        ring_params->intr_timer_thres_us =
            wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
        ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
        ring_params->intr_batch_cntr_thres_entries = 0;
    }

    /* During initialisation monitor rings are only filled with
     * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
     * a value less than that. Low threshold value is reconfigured again
     * to 1/8th of the ring size when monitor vap is created.
     */
    if (ring_type == RXDMA_MONITOR_BUF)
        ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

    /* In case of PCI chipsets, we don't have PPDU end interrupts, so the
     * MONITOR STATUS ring is reaped by receiving MSI from srng. Keep the
     * batch threshold at 4 so that an interrupt is received for every
     * 4 entries in the MONITOR_STATUS ring.
     */
    if ((ring_type == RXDMA_MONITOR_STATUS) &&
        (soc->intr_mode == DP_INTR_MSI))
        ring_params->intr_batch_cntr_thres_entries = 4;
}
#endif
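
/*
 * The DP_MEM_PRE_ALLOC variants below all follow the same pattern: first
 * try to take the buffer from a pool pre-allocated at driver load (via
 * the soc->cdp_soc.ol_ops pre-allocation hooks), and fall back to a
 * fresh qdf allocation when no pre-allocated buffer is available. The
 * corresponding free path checks is_mem_prealloc (or the put hook's
 * return status) to return the buffer to the pool instead of freeing it.
 */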

#ifdef DP_MEM_PRE_ALLOC

void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
                           size_t ctxt_size)
{
    void *ctxt_mem;

    if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
        dp_warn("dp_prealloc_get_context null!");
        goto dynamic_alloc;
    }

    ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type);

    if (ctxt_mem)
        goto end;

dynamic_alloc:
    dp_info("Pre-alloc of ctxt failed. Dynamic allocation");
    ctxt_mem = qdf_mem_malloc(ctxt_size);
end:
    return ctxt_mem;
}

void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
                         void *vaddr)
{
    QDF_STATUS status;

    if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
        status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
                                ctxt_type,
                                vaddr);
    } else {
        dp_warn("dp_prealloc_put_context null!");
        status = QDF_STATUS_E_NOSUPPORT;
    }

    if (QDF_IS_STATUS_ERROR(status)) {
        dp_info("Context not pre-allocated");
        qdf_mem_free(vaddr);
    }
}

static inline
void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
                                           struct dp_srng *srng,
                                           uint32_t ring_type)
{
    void *mem;

    qdf_assert(!srng->is_mem_prealloc);

    if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
        dp_warn("dp_prealloc_get_consistent is null!");
        goto qdf;
    }

    mem = soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
        (&srng->alloc_size,
         &srng->base_vaddr_unaligned,
         &srng->base_paddr_unaligned,
         &srng->base_paddr_aligned,
         DP_RING_BASE_ALIGN, ring_type);

    if (mem) {
        srng->is_mem_prealloc = true;
        goto end;
    }
qdf:
    mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
                                           &srng->base_vaddr_unaligned,
                                           &srng->base_paddr_unaligned,
                                           &srng->base_paddr_aligned,
                                           DP_RING_BASE_ALIGN);
end:
    dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
            srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
            srng, ring_type, srng->alloc_size, srng->num_entries);
    return mem;
}

static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
                                               struct dp_srng *srng)
{
    if (srng->is_mem_prealloc) {
        if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
            dp_warn("dp_prealloc_put_consistent is null!");
            QDF_BUG(0);
            return;
        }
        soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
            (srng->alloc_size,
             srng->base_vaddr_unaligned,
             srng->base_paddr_unaligned);
    } else {
        qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
                                srng->alloc_size,
                                srng->base_vaddr_unaligned,
                                srng->base_paddr_unaligned, 0);
    }
}

void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
                                   enum dp_desc_type desc_type,
                                   struct qdf_mem_multi_page_t *pages,
                                   size_t element_size,
                                   uint16_t element_num,
                                   qdf_dma_context_t memctxt,
                                   bool cacheable)
{
    if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
        dp_warn("dp_get_multi_pages is null!");
        goto qdf;
    }

    pages->num_pages = 0;
    pages->is_mem_prealloc = 0;
    soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
                                            element_size,
                                            element_num,
                                            pages,
                                            cacheable);
    if (pages->num_pages)
        goto end;

qdf:
    qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
                              element_num, memctxt, cacheable);
end:
    dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
            pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
            desc_type, (int)element_size, element_num, cacheable);
}

void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
                                  enum dp_desc_type desc_type,
                                  struct qdf_mem_multi_page_t *pages,
                                  qdf_dma_context_t memctxt,
                                  bool cacheable)
{
    if (pages->is_mem_prealloc) {
        if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
            dp_warn("dp_put_multi_pages is null!");
            QDF_BUG(0);
            return;
        }

        soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
        qdf_mem_zero(pages, sizeof(*pages));
    } else {
        qdf_mem_multi_pages_free(soc->osdev, pages,
                                 memctxt, cacheable);
    }
}

#else

static inline
void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
                                           struct dp_srng *srng,
                                           uint32_t ring_type)
{
    return qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
                                            &srng->base_vaddr_unaligned,
                                            &srng->base_paddr_unaligned,
                                            &srng->base_paddr_aligned,
                                            DP_RING_BASE_ALIGN);
}

static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
                                               struct dp_srng *srng)
{
    qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
                            srng->alloc_size,
                            srng->base_vaddr_unaligned,
                            srng->base_paddr_unaligned, 0);
}

#endif /* DP_MEM_PRE_ALLOC */

/*
 * dp_srng_free() - Free SRNG memory
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 *
 * Return: None
 */
static void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
{
    if (srng->alloc_size && srng->base_vaddr_unaligned) {
        if (!srng->cached) {
            dp_srng_mem_free_consistent(soc, srng);
        } else {
            qdf_mem_free(srng->base_vaddr_unaligned);
        }
        srng->alloc_size = 0;
        srng->base_vaddr_unaligned = NULL;
    }
    srng->hal_srng = NULL;
}

/*
 * dp_srng_init() - Initialize SRNG
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 * @ring_type: Ring Type
 * @ring_num: Ring number
 * @mac_id: mac_id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
                               int ring_type, int ring_num, int mac_id)
{
    hal_soc_handle_t hal_soc = soc->hal_soc;
    struct hal_srng_params ring_params;

    if (srng->hal_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Ring type: %d, num:%d is already initialized"),
                  ring_type, ring_num);
        return QDF_STATUS_SUCCESS;
    }

    /* memset the srng ring to zero */
    qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

    qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
    ring_params.ring_base_paddr = srng->base_paddr_aligned;
    ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

    ring_params.num_entries = srng->num_entries;

    dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
            ring_type, ring_num,
            (void *)ring_params.ring_base_vaddr,
            (void *)ring_params.ring_base_paddr,
            ring_params.num_entries);

    if (soc->intr_mode == DP_INTR_MSI) {
        dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
        dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
                         ring_type, ring_num);
    } else {
        ring_params.msi_data = 0;
        ring_params.msi_addr = 0;
        dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
                         ring_type, ring_num);
    }

    dp_srng_configure_interrupt_thresholds(soc, &ring_params,
                                           ring_type, ring_num,
                                           srng->num_entries);

    if (srng->cached)
        ring_params.flags |= HAL_SRNG_CACHED_DESC;

    srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
                                    mac_id, &ring_params);

    if (!srng->hal_srng) {
        dp_srng_free(soc, srng);
        return QDF_STATUS_E_FAILURE;
    }

    return QDF_STATUS_SUCCESS;
}

/*
 * dp_srng_alloc() - Allocate memory for SRNG
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 * @ring_type: Ring Type
 * @num_entries: Number of entries
 * @cached: cached flag variable
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
                                int ring_type, uint32_t num_entries,
                                bool cached)
{
    hal_soc_handle_t hal_soc = soc->hal_soc;
    uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
    uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

    if (srng->base_vaddr_unaligned) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Ring type: %d, is already allocated"), ring_type);
        return QDF_STATUS_SUCCESS;
    }

    num_entries = (num_entries > max_entries) ? max_entries : num_entries;
    srng->hal_srng = NULL;
    srng->alloc_size = num_entries * entry_size;
    srng->num_entries = num_entries;
    srng->cached = cached;

    if (!cached) {
        srng->base_vaddr_aligned =
            dp_srng_aligned_mem_alloc_consistent(soc,
                                                 srng,
                                                 ring_type);
    } else {
        srng->base_vaddr_aligned = qdf_aligned_malloc(
            &srng->alloc_size,
            &srng->base_vaddr_unaligned,
            &srng->base_paddr_unaligned,
            &srng->base_paddr_aligned,
            DP_RING_BASE_ALIGN);
    }

    if (!srng->base_vaddr_aligned)
        return QDF_STATUS_E_NOMEM;

    return QDF_STATUS_SUCCESS;
}
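
/*
 * Sizing example (illustrative numbers): with a HAL entry size of 32
 * bytes and a requested num_entries of 2048, alloc_size works out to
 * 2048 * 32 = 64 KiB, plus whatever alignment slack the aligned
 * allocators account for via &srng->alloc_size; a request above
 * hal_srng_max_entries() is clamped before the size is computed.
 */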

/*
 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
 * @soc: DP SOC handle
 * @srng: source ring structure
 * @ring_type: type of ring
 * @ring_num: ring number
 *
 * Return: None
 */
static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
                           int ring_type, int ring_num)
{
    if (!srng->hal_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Ring type: %d, num:%d not setup"),
                  ring_type, ring_num);
        return;
    }

    hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
    srng->hal_srng = NULL;
}

/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
                         hal_ring_handle_t hal_ring_hdl)
{
    hal_soc_handle_t hal_soc = dp_soc->hal_soc;
    uint32_t hp, tp;
    uint8_t ring_id;

    if (!int_ctx)
        return hal_srng_access_start(hal_soc, hal_ring_hdl);

    hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
    ring_id = hal_srng_ring_id_get(hal_ring_hdl);

    hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
                     ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);

    return hal_srng_access_start(hal_soc, hal_ring_hdl);
}

void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
                        hal_ring_handle_t hal_ring_hdl)
{
    hal_soc_handle_t hal_soc = dp_soc->hal_soc;
    uint32_t hp, tp;
    uint8_t ring_id;

    if (!int_ctx)
        return hal_srng_access_end(hal_soc, hal_ring_hdl);

    hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
    ring_id = hal_srng_ring_id_get(hal_ring_hdl);

    hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
                     ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);

    return hal_srng_access_end(hal_soc, hal_ring_hdl);
}

static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
                                              uint8_t hist_group_id)
{
    hif_record_event(dp_soc->hif_handle, hist_group_id,
                     0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}

static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
                                             uint8_t hist_group_id)
{
    hif_record_event(dp_soc->hif_handle, hist_group_id,
                     0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
#else

static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
                                              uint8_t hist_group_id)
{
}

static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
                                             uint8_t hist_group_id)
{
}

#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * dp_should_timer_irq_yield() - Decide if the bottom half should yield
 * @soc: DP soc handle
 * @work_done: work done in softirq context
 * @start_time: start time for the softirq
 *
 * Return: enum with yield code
 */
static enum timer_yield_status
dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
                          uint64_t start_time)
{
    uint64_t cur_time = qdf_get_log_timestamp();

    if (!work_done)
        return DP_TIMER_WORK_DONE;

    if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
        return DP_TIMER_TIME_EXHAUST;

    return DP_TIMER_NO_YIELD;
}
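
/*
 * In other words (summary of the checks above): a poll loop keeps
 * running only while it is still finding work and has not exceeded
 * DP_MAX_TIMER_EXEC_TIME_TICKS of elapsed time. An idle pass yields with
 * DP_TIMER_WORK_DONE, an overlong pass with DP_TIMER_TIME_EXHAUST, and
 * callers such as dp_mon_vdev_timer() below reschedule sooner after an
 * exhaustion verdict than after a clean completion.
 */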
else 1943 rx_refill_buf_ring = 1944 &soc->rx_refill_buf_ring[pdev->lmac_id]; 1945 1946 intr_stats->num_host2rxdma_ring_masks++; 1947 DP_STATS_INC(pdev, replenish.low_thresh_intrs, 1948 1); 1949 dp_rx_buffers_replenish(soc, mac_for_pdev, 1950 rx_refill_buf_ring, 1951 &soc->rx_desc_buf[mac_for_pdev], 1952 0, &desc_list, &tail); 1953 } 1954 } 1955 1956 budget_done: 1957 return total_budget - budget; 1958 } 1959 1960 /* 1961 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts 1962 * @dp_ctx: DP SOC handle 1963 * @budget: Number of frames/descriptors that can be processed in one shot 1964 * 1965 * Return: remaining budget/quota for the soc device 1966 */ 1967 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget) 1968 { 1969 struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx; 1970 struct dp_intr_stats *intr_stats = &int_ctx->intr_stats; 1971 struct dp_soc *soc = int_ctx->soc; 1972 int ring = 0; 1973 uint32_t work_done = 0; 1974 int budget = dp_budget; 1975 uint8_t tx_mask = int_ctx->tx_ring_mask; 1976 uint8_t rx_mask = int_ctx->rx_ring_mask; 1977 uint8_t rx_err_mask = int_ctx->rx_err_ring_mask; 1978 uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask; 1979 uint8_t reo_status_mask = int_ctx->reo_status_ring_mask; 1980 uint32_t remaining_quota = dp_budget; 1981 1982 dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n", 1983 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask, 1984 reo_status_mask, 1985 int_ctx->rx_mon_ring_mask, 1986 int_ctx->host2rxdma_ring_mask, 1987 int_ctx->rxdma2host_ring_mask); 1988 1989 /* Process Tx completion interrupts first to return back buffers */ 1990 while (tx_mask) { 1991 if (tx_mask & 0x1) { 1992 work_done = dp_tx_comp_handler(int_ctx, 1993 soc, 1994 soc->tx_comp_ring[ring].hal_srng, 1995 ring, remaining_quota); 1996 1997 if (work_done) { 1998 intr_stats->num_tx_ring_masks[ring]++; 1999 dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d", 2000 tx_mask, ring, budget, 2001 work_done); 2002 } 2003 2004 budget -= work_done; 2005 if (budget <= 0) 2006 goto budget_done; 2007 2008 remaining_quota = budget; 2009 } 2010 tx_mask = tx_mask >> 1; 2011 ring++; 2012 } 2013 2014 /* Process REO Exception ring interrupt */ 2015 if (rx_err_mask) { 2016 work_done = dp_rx_err_process(int_ctx, soc, 2017 soc->reo_exception_ring.hal_srng, 2018 remaining_quota); 2019 2020 if (work_done) { 2021 intr_stats->num_rx_err_ring_masks++; 2022 dp_verbose_debug("REO Exception Ring: work_done %d budget %d", 2023 work_done, budget); 2024 } 2025 2026 budget -= work_done; 2027 if (budget <= 0) { 2028 goto budget_done; 2029 } 2030 remaining_quota = budget; 2031 } 2032 2033 /* Process Rx WBM release ring interrupt */ 2034 if (rx_wbm_rel_mask) { 2035 work_done = dp_rx_wbm_err_process(int_ctx, soc, 2036 soc->rx_rel_ring.hal_srng, 2037 remaining_quota); 2038 2039 if (work_done) { 2040 intr_stats->num_rx_wbm_rel_ring_masks++; 2041 dp_verbose_debug("WBM Release Ring: work_done %d budget %d", 2042 work_done, budget); 2043 } 2044 2045 budget -= work_done; 2046 if (budget <= 0) { 2047 goto budget_done; 2048 } 2049 remaining_quota = budget; 2050 } 2051 2052 /* Process Rx interrupts */ 2053 if (rx_mask) { 2054 for (ring = 0; ring < soc->num_reo_dest_rings; ring++) { 2055 if (!(rx_mask & (1 << ring))) 2056 continue; 2057 work_done = dp_rx_process(int_ctx, 2058 soc->reo_dest_ring[ring].hal_srng, 2059 ring, 2060 remaining_quota); 2061 if (work_done) { 2062 intr_stats->num_rx_ring_masks[ring]++; 2063 
dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d", 2064 rx_mask, ring, 2065 work_done, budget); 2066 budget -= work_done; 2067 if (budget <= 0) 2068 goto budget_done; 2069 remaining_quota = budget; 2070 } 2071 } 2072 } 2073 2074 if (reo_status_mask) { 2075 if (dp_reo_status_ring_handler(int_ctx, soc)) 2076 int_ctx->intr_stats.num_reo_status_ring_masks++; 2077 } 2078 2079 if (qdf_unlikely(!(soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING))) { 2080 work_done = dp_process_lmac_rings(int_ctx, remaining_quota); 2081 if (work_done) { 2082 budget -= work_done; 2083 if (budget <= 0) 2084 goto budget_done; 2085 remaining_quota = budget; 2086 } 2087 } 2088 2089 qdf_lro_flush(int_ctx->lro_ctx); 2090 intr_stats->num_masks++; 2091 2092 budget_done: 2093 return dp_budget - budget; 2094 } 2095 2096 /* dp_mon_vdev_timer()- timer poll for interrupts 2097 * 2098 * @arg: SoC Handle 2099 * 2100 * Return: 2101 * 2102 */ 2103 static void dp_mon_vdev_timer(void *arg) 2104 { 2105 struct dp_soc *soc = (struct dp_soc *)arg; 2106 struct dp_pdev *pdev = soc->pdev_list[0]; 2107 enum timer_yield_status yield = DP_TIMER_NO_YIELD; 2108 uint32_t work_done = 0, total_work_done = 0; 2109 int budget = 0xffff; 2110 uint32_t remaining_quota = budget; 2111 uint64_t start_time; 2112 uint32_t lmac_id = DP_MON_INVALID_LMAC_ID; 2113 uint32_t lmac_iter; 2114 int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); 2115 2116 if (!qdf_atomic_read(&soc->cmn_init_done)) 2117 return; 2118 2119 if (pdev->mon_chan_band != REG_BAND_UNKNOWN) 2120 lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band]; 2121 2122 start_time = qdf_get_log_timestamp(); 2123 dp_is_hw_dbs_enable(soc, &max_mac_rings); 2124 2125 while (yield == DP_TIMER_NO_YIELD) { 2126 for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) { 2127 if (lmac_iter == lmac_id) 2128 work_done = dp_mon_process( 2129 soc, NULL, 2130 lmac_iter, remaining_quota); 2131 else 2132 work_done = 2133 dp_mon_drop_packets_for_mac(pdev, 2134 lmac_iter, 2135 remaining_quota); 2136 if (work_done) { 2137 budget -= work_done; 2138 if (budget <= 0) { 2139 yield = DP_TIMER_WORK_EXHAUST; 2140 goto budget_done; 2141 } 2142 remaining_quota = budget; 2143 total_work_done += work_done; 2144 } 2145 } 2146 2147 yield = dp_should_timer_irq_yield(soc, total_work_done, 2148 start_time); 2149 total_work_done = 0; 2150 } 2151 2152 budget_done: 2153 if (yield == DP_TIMER_WORK_EXHAUST || 2154 yield == DP_TIMER_TIME_EXHAUST) 2155 qdf_timer_mod(&soc->mon_vdev_timer, 1); 2156 else 2157 qdf_timer_mod(&soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS); 2158 } 2159 2160 /* dp_interrupt_timer()- timer poll for interrupts 2161 * 2162 * @arg: SoC Handle 2163 * 2164 * Return: 2165 * 2166 */ 2167 static void dp_interrupt_timer(void *arg) 2168 { 2169 struct dp_soc *soc = (struct dp_soc *) arg; 2170 struct dp_pdev *pdev = soc->pdev_list[0]; 2171 enum timer_yield_status yield = DP_TIMER_NO_YIELD; 2172 uint32_t work_done = 0, total_work_done = 0; 2173 int budget = 0xffff, i; 2174 uint32_t remaining_quota = budget; 2175 uint64_t start_time; 2176 uint32_t lmac_id = DP_MON_INVALID_LMAC_ID; 2177 uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); 2178 uint32_t lmac_iter; 2179 int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); 2180 2181 /* 2182 * this logic makes all data path interfacing rings (UMAC/LMAC) 2183 * and Monitor rings polling mode when NSS offload is disabled 2184 */ 2185 if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) && 2186 
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				dp_service_srngs(&soc->intr_ctx[i], 0xffff);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (pdev->mon_chan_band != REG_BAND_UNKNOWN) {
		lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_mon_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				work_done = dp_mon_drop_packets_for_mac(pdev,
							lmac_iter,
							remaining_quota);
			if (work_done) {
				budget -= work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	if (intr_ctx->rx_mon_ring_mask)
		return true;

	return false;
}
#else
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	return false;
}
#endif

/*
 * dp_soc_attach_poll() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
 *
 * Return: 0 for success, nonzero for failure.
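 *
 * Illustrative sketch (not driver code) of how a handler consumes such
 * a mask, mirroring the bit-walk in dp_service_srngs(); service_ring()
 * is a hypothetical helper:
 *
 *	while (tx_mask) {
 *		if (tx_mask & 0x1)
 *			service_ring(ring);
 *		tx_mask >>= 1;
 *		ring++;
 *	}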
2274 */ 2275 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc) 2276 { 2277 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 2278 int i; 2279 int lmac_id = 0; 2280 2281 qdf_mem_set(&soc->mon_intr_id_lmac_map, 2282 sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID); 2283 soc->intr_mode = DP_INTR_POLL; 2284 2285 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { 2286 soc->intr_ctx[i].dp_intr_id = i; 2287 soc->intr_ctx[i].tx_ring_mask = 2288 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i); 2289 soc->intr_ctx[i].rx_ring_mask = 2290 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i); 2291 soc->intr_ctx[i].rx_mon_ring_mask = 2292 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i); 2293 soc->intr_ctx[i].rx_err_ring_mask = 2294 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i); 2295 soc->intr_ctx[i].rx_wbm_rel_ring_mask = 2296 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i); 2297 soc->intr_ctx[i].reo_status_ring_mask = 2298 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i); 2299 soc->intr_ctx[i].rxdma2host_ring_mask = 2300 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i); 2301 soc->intr_ctx[i].soc = soc; 2302 soc->intr_ctx[i].lro_ctx = qdf_lro_init(); 2303 2304 if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) { 2305 hif_event_history_init(soc->hif_handle, i); 2306 soc->mon_intr_id_lmac_map[lmac_id] = i; 2307 lmac_id++; 2308 } 2309 } 2310 2311 qdf_timer_init(soc->osdev, &soc->int_timer, 2312 dp_interrupt_timer, (void *)soc, 2313 QDF_TIMER_TYPE_WAKE_APPS); 2314 2315 return QDF_STATUS_SUCCESS; 2316 } 2317 2318 /** 2319 * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc 2320 * soc: DP soc handle 2321 * 2322 * Set the appropriate interrupt mode flag in the soc 2323 */ 2324 static void dp_soc_set_interrupt_mode(struct dp_soc *soc) 2325 { 2326 uint32_t msi_base_data, msi_vector_start; 2327 int msi_vector_count, ret; 2328 2329 soc->intr_mode = DP_INTR_INTEGRATED; 2330 2331 if (!(soc->wlan_cfg_ctx->napi_enabled) || 2332 (soc->cdp_soc.ol_ops->get_con_mode && 2333 soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) { 2334 soc->intr_mode = DP_INTR_POLL; 2335 } else { 2336 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP", 2337 &msi_vector_count, 2338 &msi_base_data, 2339 &msi_vector_start); 2340 if (ret) 2341 return; 2342 2343 soc->intr_mode = DP_INTR_MSI; 2344 } 2345 } 2346 2347 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc); 2348 #if defined(DP_INTR_POLL_BOTH) 2349 /* 2350 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts 2351 * @txrx_soc: DP SOC handle 2352 * 2353 * Call the appropriate attach function based on the mode of operation. 2354 * This is a WAR for enabling monitor mode. 2355 * 2356 * Return: 0 for success. nonzero for failure. 
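 *
 * In this DP_INTR_POLL_BOTH build the wrapper picks polling when NAPI
 * is disabled or the driver runs in QDF_GLOBAL_MONITOR_MODE, and the
 * regular interrupt attach path otherwise (see the body below).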
2357 */ 2358 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc) 2359 { 2360 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 2361 2362 if (!(soc->wlan_cfg_ctx->napi_enabled) || 2363 (soc->cdp_soc.ol_ops->get_con_mode && 2364 soc->cdp_soc.ol_ops->get_con_mode() == 2365 QDF_GLOBAL_MONITOR_MODE)) { 2366 dp_info("Poll mode"); 2367 return dp_soc_attach_poll(txrx_soc); 2368 } else { 2369 dp_info("Interrupt mode"); 2370 return dp_soc_interrupt_attach(txrx_soc); 2371 } 2372 } 2373 #else 2374 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED 2375 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc) 2376 { 2377 return dp_soc_attach_poll(txrx_soc); 2378 } 2379 #else 2380 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc) 2381 { 2382 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 2383 2384 if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx)) 2385 return dp_soc_attach_poll(txrx_soc); 2386 else 2387 return dp_soc_interrupt_attach(txrx_soc); 2388 } 2389 #endif 2390 #endif 2391 2392 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc, 2393 int intr_ctx_num, int *irq_id_map, int *num_irq_r) 2394 { 2395 int j; 2396 int num_irq = 0; 2397 2398 int tx_mask = 2399 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); 2400 int rx_mask = 2401 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); 2402 int rx_mon_mask = 2403 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); 2404 int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask( 2405 soc->wlan_cfg_ctx, intr_ctx_num); 2406 int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask( 2407 soc->wlan_cfg_ctx, intr_ctx_num); 2408 int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask( 2409 soc->wlan_cfg_ctx, intr_ctx_num); 2410 int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask( 2411 soc->wlan_cfg_ctx, intr_ctx_num); 2412 int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask( 2413 soc->wlan_cfg_ctx, intr_ctx_num); 2414 int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask( 2415 soc->wlan_cfg_ctx, intr_ctx_num); 2416 2417 soc->intr_mode = DP_INTR_INTEGRATED; 2418 2419 for (j = 0; j < HIF_MAX_GRP_IRQ; j++) { 2420 2421 if (tx_mask & (1 << j)) { 2422 irq_id_map[num_irq++] = 2423 (wbm2host_tx_completions_ring1 - j); 2424 } 2425 2426 if (rx_mask & (1 << j)) { 2427 irq_id_map[num_irq++] = 2428 (reo2host_destination_ring1 - j); 2429 } 2430 2431 if (rxdma2host_ring_mask & (1 << j)) { 2432 irq_id_map[num_irq++] = 2433 rxdma2host_destination_ring_mac1 - j; 2434 } 2435 2436 if (host2rxdma_ring_mask & (1 << j)) { 2437 irq_id_map[num_irq++] = 2438 host2rxdma_host_buf_ring_mac1 - j; 2439 } 2440 2441 if (host2rxdma_mon_ring_mask & (1 << j)) { 2442 irq_id_map[num_irq++] = 2443 host2rxdma_monitor_ring1 - j; 2444 } 2445 2446 if (rx_mon_mask & (1 << j)) { 2447 irq_id_map[num_irq++] = 2448 ppdu_end_interrupts_mac1 - j; 2449 irq_id_map[num_irq++] = 2450 rxdma2host_monitor_status_ring_mac1 - j; 2451 irq_id_map[num_irq++] = 2452 rxdma2host_monitor_destination_mac1 - j; 2453 } 2454 2455 if (rx_wbm_rel_ring_mask & (1 << j)) 2456 irq_id_map[num_irq++] = wbm2host_rx_release; 2457 2458 if (rx_err_ring_mask & (1 << j)) 2459 irq_id_map[num_irq++] = reo2host_exception; 2460 2461 if (reo_status_ring_mask & (1 << j)) 2462 irq_id_map[num_irq++] = reo2host_status; 2463 2464 } 2465 *num_irq_r = num_irq; 2466 } 2467 2468 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc, 2469 int intr_ctx_num, int *irq_id_map, int 
*num_irq_r, int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask |
	    rxdma2host_ring_mask | host2rxdma_ring_mask |
	    host2rxdma_mon_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}

static void dp_soc_interrupt_map_calculate(struct dp_soc *soc,
					   int intr_ctx_num,
					   int *irq_id_map, int *num_irq)
{
	int msi_vector_count, ret;
	uint32_t msi_base_data, msi_vector_start;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_vector_count,
					  &msi_base_data,
					  &msi_vector_start);
	if (ret)
		dp_soc_interrupt_map_calculate_integrated(soc,
				intr_ctx_num, irq_id_map, num_irq);
	else
		dp_soc_interrupt_map_calculate_msi(soc,
				intr_ctx_num, irq_id_map, num_irq,
				msi_vector_count, msi_vector_start);
}

/*
 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
 *
 * Return: 0 for success, nonzero for failure.
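 *
 * For example (illustrative): in integrated mode a context whose
 * tx_ring_mask has bit 1 set contributes IRQ id
 * (wbm2host_tx_completions_ring1 - 1) to its irq_id_map, per
 * dp_soc_interrupt_map_calculate_integrated() above, while in MSI mode
 * the entire context collapses onto a single pld_get_msi_irq() vector.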
2535 */ 2536 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc) 2537 { 2538 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 2539 2540 int i = 0; 2541 int num_irq = 0; 2542 2543 qdf_mem_set(&soc->mon_intr_id_lmac_map, 2544 sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID); 2545 2546 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { 2547 int ret = 0; 2548 2549 /* Map of IRQ ids registered with one interrupt context */ 2550 int irq_id_map[HIF_MAX_GRP_IRQ]; 2551 2552 int tx_mask = 2553 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i); 2554 int rx_mask = 2555 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i); 2556 int rx_mon_mask = 2557 dp_soc_get_mon_mask_for_interrupt_mode(soc, i); 2558 int rx_err_ring_mask = 2559 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i); 2560 int rx_wbm_rel_ring_mask = 2561 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i); 2562 int reo_status_ring_mask = 2563 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i); 2564 int rxdma2host_ring_mask = 2565 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i); 2566 int host2rxdma_ring_mask = 2567 wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i); 2568 int host2rxdma_mon_ring_mask = 2569 wlan_cfg_get_host2rxdma_mon_ring_mask( 2570 soc->wlan_cfg_ctx, i); 2571 2572 soc->intr_ctx[i].dp_intr_id = i; 2573 soc->intr_ctx[i].tx_ring_mask = tx_mask; 2574 soc->intr_ctx[i].rx_ring_mask = rx_mask; 2575 soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask; 2576 soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask; 2577 soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask; 2578 soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask; 2579 soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask; 2580 soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask; 2581 soc->intr_ctx[i].host2rxdma_mon_ring_mask = 2582 host2rxdma_mon_ring_mask; 2583 2584 soc->intr_ctx[i].soc = soc; 2585 2586 num_irq = 0; 2587 2588 dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0], 2589 &num_irq); 2590 2591 ret = hif_register_ext_group(soc->hif_handle, 2592 num_irq, irq_id_map, dp_service_srngs, 2593 &soc->intr_ctx[i], "dp_intr", 2594 HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT); 2595 2596 if (ret) { 2597 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2598 FL("failed, ret = %d"), ret); 2599 2600 return QDF_STATUS_E_FAILURE; 2601 } 2602 2603 hif_event_history_init(soc->hif_handle, i); 2604 soc->intr_ctx[i].lro_ctx = qdf_lro_init(); 2605 } 2606 2607 hif_configure_ext_group_interrupts(soc->hif_handle); 2608 2609 return QDF_STATUS_SUCCESS; 2610 } 2611 2612 /* 2613 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts 2614 * @txrx_soc: DP SOC handle 2615 * 2616 * Return: none 2617 */ 2618 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc) 2619 { 2620 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 2621 int i; 2622 2623 if (soc->intr_mode == DP_INTR_POLL) { 2624 qdf_timer_free(&soc->int_timer); 2625 } else { 2626 hif_deregister_exec_group(soc->hif_handle, "dp_intr"); 2627 } 2628 2629 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { 2630 soc->intr_ctx[i].tx_ring_mask = 0; 2631 soc->intr_ctx[i].rx_ring_mask = 0; 2632 soc->intr_ctx[i].rx_mon_ring_mask = 0; 2633 soc->intr_ctx[i].rx_err_ring_mask = 0; 2634 soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0; 2635 soc->intr_ctx[i].reo_status_ring_mask = 0; 2636 soc->intr_ctx[i].rxdma2host_ring_mask = 0; 2637 soc->intr_ctx[i].host2rxdma_ring_mask = 0; 2638 
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;

		hif_event_history_deinit(soc->hif_handle, i);
		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
	}

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map),
		    DP_MON_INVALID_LMAC_ID);
}

#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4

/*
 * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Return: none
 */
void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
{
	struct qdf_mem_multi_page_t *pages;

	if (mac_id != WLAN_INVALID_PDEV_ID)
		pages = &soc->mon_link_desc_pages[mac_id];
	else
		pages = &soc->link_desc_pages;

	if (pages->dma_pages) {
		wlan_minidump_remove((void *)
				     pages->dma_pages->page_v_addr_start);
		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
					     pages, 0, false);
	}
}

/*
 * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Allocates memory pages for link descriptors; the page size is 4K for
 * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages
 * are allocated for regular RX/TX, and if there is a proper mac_id,
 * link descriptors are allocated for RX monitor mode.
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Failure
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t *total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_entries;
	struct qdf_mem_multi_page_t *pages;
	struct dp_srng *dp_srng;
	uint8_t minidump_str[MINIDUMP_STR_SIZE];

	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors are not included here
	 * because (as REO queue extension descriptors) they are expected
	 * to be allocated contiguously with REO queue descriptors.
	 */
	if (mac_id != WLAN_INVALID_PDEV_ID) {
		pages = &soc->mon_link_desc_pages[mac_id];
		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
		num_entries = dp_srng->alloc_size /
			hal_srng_get_entrysize(soc->hal_soc,
					       RXDMA_MONITOR_DESC);
		total_link_descs = &soc->total_mon_link_descs[mac_id];
		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
			      MINIDUMP_STR_SIZE);
	} else {
		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

		num_mpdu_queue_descs = num_mpdu_link_descs /
			num_mpdu_links_per_queue_desc;

		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
			num_msdus_per_link_desc;

		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
			num_tx_msdu_link_descs + num_rx_msdu_link_descs;

		pages = &soc->link_desc_pages;
		total_link_descs = &soc->total_link_descs;
		qdf_str_lcopy(minidump_str, "link_desc_bank",
			      MINIDUMP_STR_SIZE);
	}

	/* Round up to power of 2 */
	*total_link_descs = 1;
	while (*total_link_descs < num_entries)
		*total_link_descs <<= 1;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("total_link_descs: %u, link_desc_size: %d"),
		  *total_link_descs, link_desc_size);
	total_mem_size = *total_link_descs * link_desc_size;
	total_mem_size += link_desc_align;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("total_mem_size: %d"), total_mem_size);

	dp_set_max_page_size(pages, max_alloc_size);
	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
				      pages,
				      link_desc_size,
				      *total_link_descs,
				      0, false);
	if (!pages->num_pages) {
		dp_err("Multi page alloc fail for hw link desc pool");
		return QDF_STATUS_E_FAULT;
	}

	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
			  pages->num_pages * pages->page_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
			  "hw_link_desc_bank");

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_hw_link_desc_ring_free() - Free h/w link desc rings
 * @soc: DP SOC handle
 *
 * Return: none
 */
static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
{
	uint32_t i;
	uint32_t size = soc->wbm_idle_scatter_buf_size;
	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
	qdf_dma_addr_t paddr;

	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
			if (vaddr) {
				qdf_mem_free_consistent(soc->osdev,
							soc->osdev->dev,
							size,
							vaddr,
							paddr,
							0);
				vaddr = NULL;
			}
		}
	} else {
		wlan_minidump_remove(vaddr);
		dp_srng_free(soc, &soc->wbm_idle_link_ring);
	}
}

/*
 * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
 * @soc: DP SOC handle
 *
 * Allocate memory for the WBM_IDLE_LINK srng ring if the total size of
 * the link descriptors is less than the max alloc size; otherwise
 * allocate memory for wbm_idle_scatter_buffer.
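 *
 * Worked example (numbers are illustrative, not from any target): with
 * an 8-byte WBM_IDLE_LINK entry and total_link_descs = 32768,
 * total_mem_size is 256 KB; if max_alloc_size is at least that, the
 * single ring path is taken, otherwise the idle list is spread across
 * up to MAX_IDLE_SCATTER_BUFS scatter buffers sized by
 * hal_idle_list_scatter_buf_size().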
2822 * 2823 * Return: QDF_STATUS_SUCCESS: success 2824 * QDF_STATUS_E_NO_MEM: No memory (Failure) 2825 */ 2826 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc) 2827 { 2828 uint32_t entry_size, i; 2829 uint32_t total_mem_size; 2830 qdf_dma_addr_t *baseaddr = NULL; 2831 struct dp_srng *dp_srng; 2832 uint32_t ring_type; 2833 uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx); 2834 uint32_t tlds; 2835 2836 ring_type = WBM_IDLE_LINK; 2837 dp_srng = &soc->wbm_idle_link_ring; 2838 tlds = soc->total_link_descs; 2839 2840 entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type); 2841 total_mem_size = entry_size * tlds; 2842 2843 if (total_mem_size <= max_alloc_size) { 2844 if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) { 2845 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2846 FL("Link desc idle ring setup failed")); 2847 goto fail; 2848 } 2849 2850 wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned, 2851 soc->wbm_idle_link_ring.alloc_size, 2852 soc->ctrl_psoc, 2853 WLAN_MD_DP_SRNG_WBM_IDLE_LINK, 2854 "wbm_idle_link_ring"); 2855 } else { 2856 uint32_t num_scatter_bufs; 2857 uint32_t num_entries_per_buf; 2858 uint32_t buf_size = 0; 2859 2860 soc->wbm_idle_scatter_buf_size = 2861 hal_idle_list_scatter_buf_size(soc->hal_soc); 2862 num_entries_per_buf = hal_idle_scatter_buf_num_entries( 2863 soc->hal_soc, soc->wbm_idle_scatter_buf_size); 2864 num_scatter_bufs = hal_idle_list_num_scatter_bufs( 2865 soc->hal_soc, total_mem_size, 2866 soc->wbm_idle_scatter_buf_size); 2867 2868 if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) { 2869 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2870 FL("scatter bufs size out of bounds")); 2871 goto fail; 2872 } 2873 2874 for (i = 0; i < num_scatter_bufs; i++) { 2875 baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i]; 2876 buf_size = soc->wbm_idle_scatter_buf_size; 2877 soc->wbm_idle_scatter_buf_base_vaddr[i] = 2878 qdf_mem_alloc_consistent(soc->osdev, 2879 soc->osdev->dev, 2880 buf_size, 2881 baseaddr); 2882 2883 if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) { 2884 QDF_TRACE(QDF_MODULE_ID_DP, 2885 QDF_TRACE_LEVEL_ERROR, 2886 FL("Scatter lst memory alloc fail")); 2887 goto fail; 2888 } 2889 } 2890 soc->num_scatter_bufs = num_scatter_bufs; 2891 } 2892 return QDF_STATUS_SUCCESS; 2893 2894 fail: 2895 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) { 2896 void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i]; 2897 qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i]; 2898 2899 if (vaddr) { 2900 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, 2901 soc->wbm_idle_scatter_buf_size, 2902 vaddr, 2903 paddr, 0); 2904 vaddr = NULL; 2905 } 2906 } 2907 return QDF_STATUS_E_NOMEM; 2908 } 2909 2910 /* 2911 * dp_hw_link_desc_ring_init() - Initialize hw link desc rings 2912 * @soc: DP SOC handle 2913 * 2914 * Return: QDF_STATUS_SUCCESS: success 2915 * QDF_STATUS_E_FAILURE: failure 2916 */ 2917 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc) 2918 { 2919 struct dp_srng *dp_srng = &soc->wbm_idle_link_ring; 2920 2921 if (dp_srng->base_vaddr_unaligned) { 2922 if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0)) 2923 return QDF_STATUS_E_FAILURE; 2924 } 2925 return QDF_STATUS_SUCCESS; 2926 } 2927 2928 /* 2929 * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings 2930 * @soc: DP SOC handle 2931 * 2932 * Return: None 2933 */ 2934 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc) 2935 { 2936 dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0); 2937 } 2938 2939 /* 2940 * 
 * dp_link_desc_ring_replenish() - Replenish hw link desc rings
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Return: None
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
{
	uint32_t cookie = 0;
	uint32_t page_idx = 0;
	struct qdf_mem_multi_page_t *pages;
	struct qdf_mem_dma_page_t *dma_pages;
	uint32_t offset = 0;
	uint32_t count = 0;
	void *desc_srng;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	uint32_t total_link_descs;
	uint32_t scatter_buf_num;
	uint32_t num_entries_per_buf = 0;
	uint32_t rem_entries;
	uint32_t num_descs_per_page;
	uint32_t num_scatter_bufs = 0;
	uint8_t *scatter_buf_ptr;
	void *desc;

	num_scatter_bufs = soc->num_scatter_bufs;

	if (mac_id == WLAN_INVALID_PDEV_ID) {
		pages = &soc->link_desc_pages;
		total_link_descs = soc->total_link_descs;
		desc_srng = soc->wbm_idle_link_ring.hal_srng;
	} else {
		pages = &soc->mon_link_desc_pages[mac_id];
		total_link_descs = soc->total_mon_link_descs[mac_id];
		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
	}

	dma_pages = pages->dma_pages;
	do {
		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
			     pages->page_size);
		page_idx++;
	} while (page_idx < pages->num_pages);

	if (desc_srng) {
		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
		page_idx = 0;
		count = 0;
		offset = 0;
		pages = &soc->link_desc_pages;
		while ((desc = hal_srng_src_get_next(soc->hal_soc,
						     desc_srng)) &&
		       (count < total_link_descs)) {
			page_idx = count / pages->num_element_per_page;
			offset = count % pages->num_element_per_page;
			cookie = LINK_DESC_COOKIE(count, page_idx);

			hal_set_link_desc_addr(desc, cookie,
					       dma_pages[page_idx].page_p_addr
					       + (offset * link_desc_size));
			count++;
		}
		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
	} else {
		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);

		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;
		pages = &soc->link_desc_pages;
		page_idx = 0;
		count = 0;
		offset = 0;
		num_descs_per_page = pages->num_element_per_page;

		while (count < total_link_descs) {
			page_idx = count / num_descs_per_page;
			offset = count % num_descs_per_page;
			cookie = LINK_DESC_COOKIE(count, page_idx);
			hal_set_link_desc_addr((void *)scatter_buf_ptr,
					       cookie,
					       dma_pages[page_idx].page_p_addr +
					       (offset * link_desc_size));
			rem_entries--;
			if (rem_entries) {
				scatter_buf_ptr += link_desc_size;
			} else {
				rem_entries = num_entries_per_buf;
				scatter_buf_num++;
				if (scatter_buf_num >= num_scatter_bufs)
					break;
				scatter_buf_ptr = (uint8_t *)
					(soc->wbm_idle_scatter_buf_base_vaddr[
					 scatter_buf_num]);
			}
			count++;
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num - 1])),
			total_link_descs);
	}
}

#ifdef IPA_OFFLOAD
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 1023
#define REO_DST_RING_SIZE_QCN9000 2048
#else
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */

#else

#define REO_DST_RING_SIZE_QCA6290 1024
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 2048
#define REO_DST_RING_SIZE_QCN9000 2048
#else
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

#ifndef FEATURE_WDS
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif

/*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handle
 *
 * This API resets the default cpu ring map
 */
static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
{
	uint8_t i;
	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		switch (nss_config) {
		case dp_nss_cfg_first_radio:
			/*
			 * Setting Tx ring map for one nss offloaded radio
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_second_radio:
			/*
			 * Setting Tx ring for two nss offloaded radios
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_dbdc:
			/*
			 * Setting Tx ring map for 2 nss offloaded radios
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_dbtc:
			/*
			 * Setting Tx ring map for 3 nss offloaded radios
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
			break;

		default:
			dp_err("tx_ring_map failed due to invalid nss cfg");
			break;
		}
	}
}

/*
 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc: DP soc handle
 * @ring_type: ring type
 * @ring_num: ring number
 *
 * Return: 0 or 1
 */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num)
{
	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
	uint8_t status = 0;

	switch (ring_type) {
	case WBM2SW_RELEASE:
	case REO_DST:
	case RXDMA_BUF:
	case REO_EXCEPTION:
		status = ((nss_config) & (1 << ring_num));
		break;
	default:
		break;
	}

	return status;
}

/*
 * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
 *					   unused WMAC hw rings
 * @soc: DP Soc handle
 * @mac_num: wmac num
 *
 * Return: void
 */
static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
						int mac_num)
{
	int *grp_mask = NULL;
	int group_number;

	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
					  group_number, 0x0);

	grp_mask =
&soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0]; 3182 group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask); 3183 wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx, 3184 group_number, 0x0); 3185 3186 grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0]; 3187 group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask); 3188 wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx, 3189 group_number, 0x0); 3190 3191 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0]; 3192 group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask); 3193 wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx, 3194 group_number, 0x0); 3195 } 3196 3197 /* 3198 * dp_soc_reset_intr_mask() - reset interrupt mask 3199 * @dp_soc - DP Soc handle 3200 * 3201 * Return: Return void 3202 */ 3203 static void dp_soc_reset_intr_mask(struct dp_soc *soc) 3204 { 3205 uint8_t j; 3206 int *grp_mask = NULL; 3207 int group_number, mask, num_ring; 3208 3209 /* number of tx ring */ 3210 num_ring = soc->num_tcl_data_rings; 3211 3212 /* 3213 * group mask for tx completion ring. 3214 */ 3215 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0]; 3216 3217 /* loop and reset the mask for only offloaded ring */ 3218 for (j = 0; j < num_ring; j++) { 3219 if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) { 3220 continue; 3221 } 3222 3223 /* 3224 * Group number corresponding to tx offloaded ring. 3225 */ 3226 group_number = dp_srng_find_ring_in_mask(j, grp_mask); 3227 if (group_number < 0) { 3228 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 3229 FL("ring not part of any group; ring_type: %d,ring_num %d"), 3230 WBM2SW_RELEASE, j); 3231 return; 3232 } 3233 3234 /* reset the tx mask for offloaded ring */ 3235 mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number); 3236 mask &= (~(1 << j)); 3237 3238 /* 3239 * reset the interrupt mask for offloaded ring. 3240 */ 3241 wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask); 3242 } 3243 3244 /* number of rx rings */ 3245 num_ring = soc->num_reo_dest_rings; 3246 3247 /* 3248 * group mask for reo destination ring. 3249 */ 3250 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0]; 3251 3252 /* loop and reset the mask for only offloaded ring */ 3253 for (j = 0; j < num_ring; j++) { 3254 if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) { 3255 continue; 3256 } 3257 3258 /* 3259 * Group number corresponding to rx offloaded ring. 3260 */ 3261 group_number = dp_srng_find_ring_in_mask(j, grp_mask); 3262 if (group_number < 0) { 3263 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 3264 FL("ring not part of any group; ring_type: %d,ring_num %d"), 3265 REO_DST, j); 3266 return; 3267 } 3268 3269 /* set the interrupt mask for offloaded ring */ 3270 mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number); 3271 mask &= (~(1 << j)); 3272 3273 /* 3274 * set the interrupt mask to zero for rx offloaded radio. 3275 */ 3276 wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask); 3277 } 3278 3279 /* 3280 * group mask for Rx buffer refill ring 3281 */ 3282 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0]; 3283 3284 /* loop and reset the mask for only offloaded ring */ 3285 for (j = 0; j < MAX_PDEV_CNT; j++) { 3286 int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j); 3287 3288 if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) { 3289 continue; 3290 } 3291 3292 /* 3293 * Group number corresponding to rx offloaded ring. 
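	 * For the Rx refill ring the lookup key is lmac_id (derived above
	 * via wlan_cfg_get_hw_mac_idx()) rather than the pdev index j,
	 * since the host2rxdma group masks are keyed by lmac.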
3294 */ 3295 group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask); 3296 if (group_number < 0) { 3297 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 3298 FL("ring not part of any group; ring_type: %d,ring_num %d"), 3299 REO_DST, lmac_id); 3300 return; 3301 } 3302 3303 /* set the interrupt mask for offloaded ring */ 3304 mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, 3305 group_number); 3306 mask &= (~(1 << lmac_id)); 3307 3308 /* 3309 * set the interrupt mask to zero for rx offloaded radio. 3310 */ 3311 wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx, 3312 group_number, mask); 3313 } 3314 3315 grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0]; 3316 3317 for (j = 0; j < num_ring; j++) { 3318 if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) { 3319 continue; 3320 } 3321 3322 /* 3323 * Group number corresponding to rx err ring. 3324 */ 3325 group_number = dp_srng_find_ring_in_mask(j, grp_mask); 3326 if (group_number < 0) { 3327 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 3328 FL("ring not part of any group; ring_type: %d,ring_num %d"), 3329 REO_EXCEPTION, j); 3330 return; 3331 } 3332 3333 wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx, 3334 group_number, 0); 3335 } 3336 } 3337 3338 #ifdef IPA_OFFLOAD 3339 /** 3340 * dp_reo_remap_config() - configure reo remap register value based 3341 * nss configuration. 3342 * based on offload_radio value below remap configuration 3343 * get applied. 3344 * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4) 3345 * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4) 3346 * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4) 3347 * 3 - both Radios handled by NSS (remap not required) 3348 * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3) 3349 * 3350 * @remap1: output parameter indicates reo remap 1 register value 3351 * @remap2: output parameter indicates reo remap 2 register value 3352 * Return: bool type, true if remap is configured else false. 3353 */ 3354 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2) 3355 { 3356 uint32_t ring[4] = {REO_REMAP_SW1, REO_REMAP_SW2, 3357 REO_REMAP_SW3}; 3358 hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring, 3359 3, remap1, remap2); 3360 dp_debug("remap1 %x remap2 %x", *remap1, *remap2); 3361 3362 return true; 3363 } 3364 3365 /** 3366 * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA 3367 * 3368 * @tx_ring_num: Tx ring number 3369 * @tx_ipa_ring_sz: Return param only updated for IPA. 3370 * 3371 * Return: None 3372 */ 3373 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz) 3374 { 3375 if (tx_ring_num == IPA_TCL_DATA_RING_IDX) 3376 *tx_ipa_ring_sz = WLAN_CFG_IPA_TX_RING_SIZE; 3377 } 3378 3379 /** 3380 * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA 3381 * 3382 * @tx_comp_ring_num: Tx comp ring number 3383 * @tx_comp_ipa_ring_sz: Return param only updated for IPA. 
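 *
 * Usage sketch (mirrors dp_alloc_tx_ring_pair_by_index() later in this
 * file): the caller seeds the size from config and lets this helper
 * override it only for the IPA TCL data ring:
 *
 *	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
 *	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size);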
3384 * 3385 * Return: None 3386 */ 3387 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num, 3388 int *tx_comp_ipa_ring_sz) 3389 { 3390 if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX) 3391 *tx_comp_ipa_ring_sz = WLAN_CFG_IPA_TX_COMP_RING_SIZE; 3392 } 3393 #else 3394 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring) 3395 { 3396 uint8_t num = 0; 3397 3398 switch (value) { 3399 case 0xF: 3400 num = 4; 3401 ring[0] = REO_REMAP_SW1; 3402 ring[1] = REO_REMAP_SW2; 3403 ring[2] = REO_REMAP_SW3; 3404 ring[3] = REO_REMAP_SW4; 3405 break; 3406 case 0xE: 3407 num = 3; 3408 ring[0] = REO_REMAP_SW2; 3409 ring[1] = REO_REMAP_SW3; 3410 ring[2] = REO_REMAP_SW4; 3411 break; 3412 case 0xD: 3413 num = 3; 3414 ring[0] = REO_REMAP_SW1; 3415 ring[1] = REO_REMAP_SW3; 3416 ring[2] = REO_REMAP_SW4; 3417 break; 3418 case 0xC: 3419 num = 2; 3420 ring[0] = REO_REMAP_SW3; 3421 ring[1] = REO_REMAP_SW4; 3422 break; 3423 case 0xB: 3424 num = 3; 3425 ring[0] = REO_REMAP_SW1; 3426 ring[1] = REO_REMAP_SW2; 3427 ring[2] = REO_REMAP_SW4; 3428 break; 3429 case 0xA: 3430 num = 2; 3431 ring[0] = REO_REMAP_SW2; 3432 ring[1] = REO_REMAP_SW4; 3433 break; 3434 case 0x9: 3435 num = 2; 3436 ring[0] = REO_REMAP_SW1; 3437 ring[1] = REO_REMAP_SW4; 3438 break; 3439 case 0x8: 3440 num = 1; 3441 ring[0] = REO_REMAP_SW4; 3442 break; 3443 case 0x7: 3444 num = 3; 3445 ring[0] = REO_REMAP_SW1; 3446 ring[1] = REO_REMAP_SW2; 3447 ring[2] = REO_REMAP_SW3; 3448 break; 3449 case 0x6: 3450 num = 2; 3451 ring[0] = REO_REMAP_SW2; 3452 ring[1] = REO_REMAP_SW3; 3453 break; 3454 case 0x5: 3455 num = 2; 3456 ring[0] = REO_REMAP_SW1; 3457 ring[1] = REO_REMAP_SW3; 3458 break; 3459 case 0x4: 3460 num = 1; 3461 ring[0] = REO_REMAP_SW3; 3462 break; 3463 case 0x3: 3464 num = 2; 3465 ring[0] = REO_REMAP_SW1; 3466 ring[1] = REO_REMAP_SW2; 3467 break; 3468 case 0x2: 3469 num = 1; 3470 ring[0] = REO_REMAP_SW2; 3471 break; 3472 case 0x1: 3473 num = 1; 3474 ring[0] = REO_REMAP_SW1; 3475 break; 3476 } 3477 return num; 3478 } 3479 3480 static bool dp_reo_remap_config(struct dp_soc *soc, 3481 uint32_t *remap1, 3482 uint32_t *remap2) 3483 { 3484 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx); 3485 uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx); 3486 uint8_t target_type, num; 3487 uint32_t ring[4]; 3488 uint32_t value; 3489 3490 target_type = hal_get_target_type(soc->hal_soc); 3491 3492 switch (offload_radio) { 3493 case dp_nss_cfg_default: 3494 value = reo_config & 0xF; 3495 num = dp_reo_ring_selection(value, ring); 3496 hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring, 3497 num, remap1, remap2); 3498 3499 break; 3500 case dp_nss_cfg_first_radio: 3501 value = reo_config & 0xE; 3502 num = dp_reo_ring_selection(value, ring); 3503 hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring, 3504 num, remap1, remap2); 3505 3506 break; 3507 case dp_nss_cfg_second_radio: 3508 value = reo_config & 0xD; 3509 num = dp_reo_ring_selection(value, ring); 3510 hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring, 3511 num, remap1, remap2); 3512 3513 break; 3514 case dp_nss_cfg_dbdc: 3515 case dp_nss_cfg_dbtc: 3516 /* return false if both or all are offloaded to NSS */ 3517 return false; 3518 } 3519 3520 dp_debug("remap1 %x remap2 %x offload_radio %u", 3521 *remap1, *remap2, offload_radio); 3522 return true; 3523 } 3524 3525 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz) 3526 { 3527 } 3528 3529 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num, 3530 int *tx_comp_ipa_ring_sz) 3531 { 3532 } 
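
/*
 * Note (summary of the non-IPA path above): dp_reo_ring_selection()
 * decodes the 4-bit reo_config into the SW destination rings, e.g.
 * 0xF selects SW1-SW4 and 0x8 selects SW4 alone, while
 * dp_reo_remap_config() returns false (no remap) when both or all
 * radios are offloaded to NSS.
 */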
3533 #endif /* IPA_OFFLOAD */ 3534 3535 /* 3536 * dp_reo_frag_dst_set() - configure reo register to set the 3537 * fragment destination ring 3538 * @soc : Datapath soc 3539 * @frag_dst_ring : output parameter to set fragment destination ring 3540 * 3541 * Based on offload_radio below fragment destination rings is selected 3542 * 0 - TCL 3543 * 1 - SW1 3544 * 2 - SW2 3545 * 3 - SW3 3546 * 4 - SW4 3547 * 5 - Release 3548 * 6 - FW 3549 * 7 - alternate select 3550 * 3551 * return: void 3552 */ 3553 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring) 3554 { 3555 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx); 3556 3557 switch (offload_radio) { 3558 case dp_nss_cfg_default: 3559 *frag_dst_ring = REO_REMAP_TCL; 3560 break; 3561 case dp_nss_cfg_first_radio: 3562 /* 3563 * This configuration is valid for single band radio which 3564 * is also NSS offload. 3565 */ 3566 case dp_nss_cfg_dbdc: 3567 case dp_nss_cfg_dbtc: 3568 *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT; 3569 break; 3570 default: 3571 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3572 FL("dp_reo_frag_dst_set invalid offload radio config")); 3573 break; 3574 } 3575 } 3576 3577 #ifdef ENABLE_VERBOSE_DEBUG 3578 static void dp_enable_verbose_debug(struct dp_soc *soc) 3579 { 3580 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 3581 3582 soc_cfg_ctx = soc->wlan_cfg_ctx; 3583 3584 if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask) 3585 is_dp_verbose_debug_enabled = true; 3586 3587 if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask) 3588 hal_set_verbose_debug(true); 3589 else 3590 hal_set_verbose_debug(false); 3591 } 3592 #else 3593 static void dp_enable_verbose_debug(struct dp_soc *soc) 3594 { 3595 } 3596 #endif 3597 3598 #ifdef WLAN_FEATURE_STATS_EXT 3599 static inline void dp_create_ext_stats_event(struct dp_soc *soc) 3600 { 3601 qdf_event_create(&soc->rx_hw_stats_event); 3602 } 3603 #else 3604 static inline void dp_create_ext_stats_event(struct dp_soc *soc) 3605 { 3606 } 3607 #endif 3608 3609 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index) 3610 { 3611 wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned); 3612 dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA, index); 3613 3614 wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned); 3615 dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, index); 3616 } 3617 3618 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc, 3619 uint8_t index) 3620 { 3621 if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA, index, 0)) { 3622 dp_err("dp_srng_init failed for tcl_data_ring"); 3623 goto fail1; 3624 } 3625 wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned, 3626 soc->tcl_data_ring[index].alloc_size, 3627 soc->ctrl_psoc, 3628 WLAN_MD_DP_SRNG_TCL_DATA, 3629 "tcl_data_ring"); 3630 3631 if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, 3632 index, 0)) { 3633 dp_err("dp_srng_init failed for tx_comp_ring"); 3634 goto fail1; 3635 } 3636 wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned, 3637 soc->tx_comp_ring[index].alloc_size, 3638 soc->ctrl_psoc, 3639 WLAN_MD_DP_SRNG_TX_COMP, 3640 "tcl_comp_ring"); 3641 3642 return QDF_STATUS_SUCCESS; 3643 3644 fail1: 3645 return QDF_STATUS_E_FAILURE; 3646 } 3647 3648 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index) 3649 { 3650 dp_srng_free(soc, &soc->tcl_data_ring[index]); 3651 dp_srng_free(soc, &soc->tx_comp_ring[index]); 3652 } 3653 3654 static QDF_STATUS 
dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc, 3655 uint8_t index) 3656 { 3657 int tx_ring_size; 3658 int tx_comp_ring_size; 3659 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; 3660 int cached = 0; 3661 3662 tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx); 3663 dp_ipa_get_tx_ring_size(index, &tx_ring_size); 3664 3665 if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA, 3666 tx_ring_size, cached)) { 3667 dp_err("dp_srng_alloc failed for tcl_data_ring"); 3668 goto fail1; 3669 } 3670 3671 tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); 3672 dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size); 3673 /* Enable cached TCL desc if NSS offload is disabled */ 3674 if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) 3675 cached = WLAN_CFG_DST_RING_CACHED_DESC; 3676 3677 if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, 3678 tx_comp_ring_size, cached)) { 3679 dp_err("dp_srng_alloc failed for tx_comp_ring"); 3680 goto fail1; 3681 } 3682 3683 return QDF_STATUS_SUCCESS; 3684 3685 fail1: 3686 return QDF_STATUS_E_FAILURE; 3687 } 3688 3689 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev) 3690 { 3691 struct cdp_lro_hash_config lro_hash; 3692 QDF_STATUS status; 3693 3694 if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) && 3695 !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) && 3696 !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 3697 dp_err("LRO, GRO and RX hash disabled"); 3698 return QDF_STATUS_E_FAILURE; 3699 } 3700 3701 qdf_mem_zero(&lro_hash, sizeof(lro_hash)); 3702 3703 if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) || 3704 wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) { 3705 lro_hash.lro_enable = 1; 3706 lro_hash.tcp_flag = QDF_TCPHDR_ACK; 3707 lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN | 3708 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG | 3709 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR; 3710 } 3711 3712 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4, 3713 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) * 3714 LRO_IPV4_SEED_ARR_SZ)); 3715 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6, 3716 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) * 3717 LRO_IPV6_SEED_ARR_SZ)); 3718 3719 qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config); 3720 3721 if (!soc->cdp_soc.ol_ops->lro_hash_config) { 3722 QDF_BUG(0); 3723 dp_err("lro_hash_config not configured"); 3724 return QDF_STATUS_E_FAILURE; 3725 } 3726 3727 status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc, 3728 pdev->pdev_id, 3729 &lro_hash); 3730 if (!QDF_IS_STATUS_SUCCESS(status)) { 3731 dp_err("failed to send lro_hash_config to FW %u", status); 3732 return status; 3733 } 3734 3735 dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x", 3736 lro_hash.lro_enable, lro_hash.tcp_flag, 3737 lro_hash.tcp_flag_mask); 3738 3739 dp_info("toeplitz_hash_ipv4:"); 3740 qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3741 lro_hash.toeplitz_hash_ipv4, 3742 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) * 3743 LRO_IPV4_SEED_ARR_SZ)); 3744 3745 dp_info("toeplitz_hash_ipv6:"); 3746 qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3747 lro_hash.toeplitz_hash_ipv6, 3748 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) * 3749 LRO_IPV6_SEED_ARR_SZ)); 3750 3751 return status; 3752 } 3753 3754 /* 3755 * dp_rxdma_ring_setup() - configure the RX DMA rings 3756 * @soc: data path SoC handle 3757 * @pdev: Physical device handle 3758 * 3759 * Return: 0 - success, > 0 - failure 3760 */ 3761 #ifdef QCA_HOST2FW_RXBUF_RING 3762 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct 
dp_pdev *pdev) 3763 { 3764 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; 3765 int max_mac_rings; 3766 int i; 3767 int ring_size; 3768 3769 pdev_cfg_ctx = pdev->wlan_cfg_ctx; 3770 max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx); 3771 ring_size = wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx); 3772 3773 for (i = 0; i < max_mac_rings; i++) { 3774 dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i); 3775 if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i], 3776 RXDMA_BUF, ring_size, 0)) { 3777 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3778 FL("failed rx mac ring setup")); 3779 return QDF_STATUS_E_FAILURE; 3780 } 3781 3782 if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i], 3783 RXDMA_BUF, 1, i)) { 3784 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3785 FL("failed rx mac ring setup")); 3786 3787 dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]); 3788 return QDF_STATUS_E_FAILURE; 3789 } 3790 } 3791 return QDF_STATUS_SUCCESS; 3792 } 3793 #else 3794 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev) 3795 { 3796 return QDF_STATUS_SUCCESS; 3797 } 3798 #endif 3799 3800 /** 3801 * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps 3802 * @pdev - DP_PDEV handle 3803 * 3804 * Return: void 3805 */ 3806 static inline void 3807 dp_dscp_tid_map_setup(struct dp_pdev *pdev) 3808 { 3809 uint8_t map_id; 3810 struct dp_soc *soc = pdev->soc; 3811 3812 if (!soc) 3813 return; 3814 3815 for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) { 3816 qdf_mem_copy(pdev->dscp_tid_map[map_id], 3817 default_dscp_tid_map, 3818 sizeof(default_dscp_tid_map)); 3819 } 3820 3821 for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) { 3822 hal_tx_set_dscp_tid_map(soc->hal_soc, 3823 default_dscp_tid_map, 3824 map_id); 3825 } 3826 } 3827 3828 /** 3829 * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps 3830 * @pdev - DP_PDEV handle 3831 * 3832 * Return: void 3833 */ 3834 static inline void 3835 dp_pcp_tid_map_setup(struct dp_pdev *pdev) 3836 { 3837 struct dp_soc *soc = pdev->soc; 3838 3839 if (!soc) 3840 return; 3841 3842 qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map, 3843 sizeof(default_pcp_tid_map)); 3844 hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map); 3845 } 3846 3847 #ifdef IPA_OFFLOAD 3848 /** 3849 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring 3850 * @soc: data path instance 3851 * @pdev: core txrx pdev context 3852 * 3853 * Return: QDF_STATUS_SUCCESS: success 3854 * QDF_STATUS_E_RESOURCES: Error return 3855 */ 3856 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc, 3857 struct dp_pdev *pdev) 3858 { 3859 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 3860 int entries; 3861 3862 soc_cfg_ctx = soc->wlan_cfg_ctx; 3863 entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx); 3864 3865 /* Setup second Rx refill buffer ring */ 3866 if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 3867 entries, 0)) { 3868 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3869 FL("dp_srng_alloc failed second rx refill ring")); 3870 return QDF_STATUS_E_FAILURE; 3871 } 3872 3873 if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 3874 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) { 3875 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3876 FL("dp_srng_init failed second rx refill ring")); 3877 return QDF_STATUS_E_FAILURE; 3878 } 3879 3880 return QDF_STATUS_SUCCESS; 3881 } 3882 3883 /** 3884 * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring 3885 * @soc: data path instance 3886 * @pdev: core 

/**
 * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: void
 */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
	dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
}

#else
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
}
#endif

#if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_rings_deinit() - Deinitialize monitor rings
 * @pdev: DP pdev handle
 *
 */
static void dp_mon_rings_deinit(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	struct dp_soc *soc = pdev->soc;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
			       RXDMA_MONITOR_STATUS, 0);

		if (!soc->wlan_cfg_ctx->rxdma1_enable)
			continue;

		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
			       RXDMA_MONITOR_BUF, 0);
		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
			       RXDMA_MONITOR_DST, 0);
		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
			       RXDMA_MONITOR_DESC, 0);
	}
}

/**
 * dp_mon_rings_free() - free monitor rings
 * @pdev: Datapath pdev handle
 *
 */
static void dp_mon_rings_free(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	struct dp_soc *soc = pdev->soc;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);

		if (!soc->wlan_cfg_ctx->rxdma1_enable)
			continue;

		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
	}
}
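
/*
 * In all of the monitor-ring helpers here, the status ring exists for
 * every LMAC, while the buf/dst/desc rings are only used when
 * rxdma1_enable is set; hence the early `continue` after the status
 * ring is handled in the loops above and below.
 */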

/**
 * dp_mon_rings_init() - Initialize monitor srng rings
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static
QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_status_ring"));
			goto fail1;
		}

		if (!soc->wlan_cfg_ctx->rxdma1_enable)
			continue;

		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_buf_ring"));
			goto fail1;
		}

		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
				 RXDMA_MONITOR_DST, 0, lmac_id)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_dst_ring"));
			goto fail1;
		}

		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_desc_ring"));
			goto fail1;
		}
	}
	return QDF_STATUS_SUCCESS;

fail1:
	dp_mon_rings_deinit(pdev);
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int mac_id = 0;
	int entries;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int lmac_id =
			dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);

		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
				  RXDMA_MONITOR_STATUS, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_status_ring"));
			goto fail1;
		}

		if (!soc->wlan_cfg_ctx->rxdma1_enable)
			continue;

		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
				  RXDMA_MONITOR_BUF, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_buf_ring"));
			goto fail1;
		}

		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
				  RXDMA_MONITOR_DST, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_dst_ring"));
			goto fail1;
		}

		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
				  RXDMA_MONITOR_DESC, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_desc_ring"));
			goto fail1;
		}
	}
	return QDF_STATUS_SUCCESS;

fail1:
	dp_mon_rings_free(pdev);
	return QDF_STATUS_E_NOMEM;
}
#else
static void dp_mon_rings_free(struct dp_pdev *pdev)
{
}

static void dp_mon_rings_deinit(struct dp_pdev *pdev)
{
}

static
QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef ATH_SUPPORT_EXT_STAT
/**
 * dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
 * @soc: Datapath SOC
 * @peer: Datapath peer
 * @arg: argument to iter function
 */
static void
dp_peer_cal_clients_stats_update(struct dp_soc *soc,
				 struct dp_peer *peer,
				 void *arg)
{
	dp_cal_client_update_peer_stats(&peer->stats);
}
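
/*
 * The iterator used below (dp_pdev_iterate_peer) is expected to take a
 * peer reference under DP_MOD_ID_CDP around each callback invocation,
 * so dp_peer_cal_clients_stats_update() can safely touch peer->stats
 * without additional locking here.
 */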

/**
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 */
void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;

	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
			     DP_MOD_ID_CDP);
}
#else
void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
}
#endif

/*
 * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *         QDF_STATUS_E_NOMEM: Error
 */
static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);

	if (!pdev->ppdu_tlv_buf) {
		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_soc_rx_history_attach() - Attach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function allocates the memory for recording the rx ring, rx error
 * ring and the reinject ring entries. There is no error returned in case
 * of allocation failure since the record function checks if the history is
 * initialized or not. We do not want to fail the driver load in case of
 * failure to allocate memory for debug history.
 *
 * Returns: None
 */
static void dp_soc_rx_history_attach(struct dp_soc *soc)
{
	int i;
	uint32_t rx_ring_hist_size;
	uint32_t rx_err_ring_hist_size;
	uint32_t rx_reinject_hist_size;

	/* All rx_ring_history entries share one type; size from entry 0 */
	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
	rx_err_ring_hist_size = sizeof(*soc->rx_err_ring_history);
	rx_reinject_hist_size = sizeof(*soc->rx_reinject_ring_history);

	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
		soc->rx_ring_history[i] = qdf_mem_malloc(rx_ring_hist_size);
		if (soc->rx_ring_history[i])
			qdf_atomic_init(&soc->rx_ring_history[i]->index);
	}

	soc->rx_err_ring_history = qdf_mem_malloc(rx_err_ring_hist_size);
	if (soc->rx_err_ring_history)
		qdf_atomic_init(&soc->rx_err_ring_history->index);

	soc->rx_reinject_ring_history = qdf_mem_malloc(rx_reinject_hist_size);
	if (soc->rx_reinject_ring_history)
		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
}

static void dp_soc_rx_history_detach(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
		qdf_mem_free(soc->rx_ring_history[i]);

	qdf_mem_free(soc->rx_err_ring_history);
	qdf_mem_free(soc->rx_reinject_ring_history);
}

#else
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
#endif
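
/*
 * pdev lifecycle reminder: dp_pdev_attach_wifi3() below only allocates
 * memory (context, cfg, srng and descriptor pools); ring/HW
 * initialization happens later in dp_pdev_init_wifi3(), and the
 * mirror-image teardown is dp_pdev_deinit_wifi3() followed by
 * dp_pdev_detach_wifi3().
 */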
failed")); 4243 goto fail0; 4244 } 4245 wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc, 4246 WLAN_MD_DP_PDEV, "dp_pdev"); 4247 4248 soc_cfg_ctx = soc->wlan_cfg_ctx; 4249 pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc); 4250 4251 if (!pdev->wlan_cfg_ctx) { 4252 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 4253 FL("pdev cfg_attach failed")); 4254 goto fail1; 4255 } 4256 4257 /* 4258 * set nss pdev config based on soc config 4259 */ 4260 nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx); 4261 wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx, 4262 (nss_cfg & (1 << pdev_id))); 4263 4264 pdev->soc = soc; 4265 pdev->pdev_id = pdev_id; 4266 soc->pdev_list[pdev_id] = pdev; 4267 4268 pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id); 4269 soc->pdev_count++; 4270 4271 /* Allocate memory for pdev srng rings */ 4272 if (dp_pdev_srng_alloc(pdev)) { 4273 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 4274 FL("dp_pdev_srng_alloc failed")); 4275 goto fail2; 4276 } 4277 4278 /* Rx specific init */ 4279 if (dp_rx_pdev_desc_pool_alloc(pdev)) { 4280 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 4281 FL("dp_rx_pdev_attach failed")); 4282 goto fail3; 4283 } 4284 4285 /* Rx monitor mode specific init */ 4286 if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) { 4287 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 4288 "dp_rx_pdev_mon_attach failed"); 4289 goto fail4; 4290 } 4291 4292 return QDF_STATUS_SUCCESS; 4293 fail4: 4294 dp_rx_pdev_desc_pool_free(pdev); 4295 fail3: 4296 dp_pdev_srng_free(pdev); 4297 fail2: 4298 wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx); 4299 fail1: 4300 qdf_mem_free(pdev); 4301 fail0: 4302 return QDF_STATUS_E_FAILURE; 4303 } 4304 4305 /* 4306 * dp_rxdma_ring_cleanup() - configure the RX DMA rings 4307 * @soc: data path SoC handle 4308 * @pdev: Physical device handle 4309 * 4310 * Return: void 4311 */ 4312 #ifdef QCA_HOST2FW_RXBUF_RING 4313 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev) 4314 { 4315 int i; 4316 4317 for (i = 0; i < MAX_RX_MAC_RINGS; i++) { 4318 dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1); 4319 dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]); 4320 } 4321 4322 if (soc->reap_timer_init) { 4323 qdf_timer_free(&soc->mon_reap_timer); 4324 soc->reap_timer_init = 0; 4325 } 4326 } 4327 #else 4328 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev) 4329 { 4330 if (soc->lmac_timer_init) { 4331 qdf_timer_stop(&soc->lmac_reap_timer); 4332 qdf_timer_free(&soc->lmac_reap_timer); 4333 soc->lmac_timer_init = 0; 4334 } 4335 } 4336 #endif 4337 4338 /* 4339 * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients) 4340 * @pdev: device object 4341 * 4342 * Return: void 4343 */ 4344 static void dp_neighbour_peers_detach(struct dp_pdev *pdev) 4345 { 4346 struct dp_neighbour_peer *peer = NULL; 4347 struct dp_neighbour_peer *temp_peer = NULL; 4348 4349 TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list, 4350 neighbour_peer_list_elem, temp_peer) { 4351 /* delete this peer from the list */ 4352 TAILQ_REMOVE(&pdev->neighbour_peers_list, 4353 peer, neighbour_peer_list_elem); 4354 qdf_mem_free(peer); 4355 } 4356 4357 qdf_spinlock_destroy(&pdev->neighbour_peer_mutex); 4358 } 4359 4360 /** 4361 * dp_htt_ppdu_stats_detach() - detach stats resources 4362 * @pdev: Datapath PDEV handle 4363 * 4364 * Return: void 4365 */ 4366 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev) 4367 { 4368 struct ppdu_info *ppdu_info, *ppdu_info_next; 4369 4370 TAILQ_FOREACH_SAFE(ppdu_info, 

/**
 * dp_htt_ppdu_stats_detach() - detach stats resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
	struct ppdu_info *ppdu_info, *ppdu_info_next;

	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		pdev->list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->sched_comp_ppdu_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&pdev->sched_comp_ppdu_list,
			     ppdu_info, ppdu_info_list_elem);
		pdev->sched_comp_list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	if (pdev->ppdu_tlv_buf)
		qdf_mem_free(pdev->ppdu_tlv_buf);
}

#ifdef WLAN_DP_PENDING_MEM_FLUSH
/**
 * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
 * @pdev: Datapath PDEV handle
 *
 * This is the last chance to flush all pending dp vdevs/peers; peer/vdev
 * leak cases, such as Non-SSR with missing peer unmap commands, are
 * covered here.
 *
 * Return: None
 */
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;

	while (true) {
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->delete.pending)
				break;
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);

		/*
		 * vdev will be freed when all peers get cleanup,
		 * dp_delete_pending_vdev will remove vdev from vdev_list
		 * in pdev.
		 */
		if (vdev)
			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
		else
			break;
	}
}
#else
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
}
#endif

/**
 * dp_pdev_deinit() - Deinit txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Return: None
 */
static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	qdf_nbuf_t curr_nbuf, next_nbuf;

	if (pdev->pdev_deinit)
		return;

	dp_tx_me_exit(pdev);
	dp_rx_fst_detach(pdev->soc, pdev);
	dp_rx_pdev_mon_buffers_free(pdev);
	dp_rx_pdev_buffers_free(pdev);
	dp_rx_pdev_mon_desc_pool_deinit(pdev);
	dp_rx_pdev_desc_pool_deinit(pdev);
	dp_htt_ppdu_stats_detach(pdev);
	dp_tx_ppdu_stats_detach(pdev);
	qdf_event_destroy(&pdev->fw_peer_stats_event);
	dp_cal_client_detach(&pdev->cal_client_ctx);
	if (pdev->sojourn_buf)
		qdf_nbuf_free(pdev->sojourn_buf);

	dp_pdev_flush_pending_vdevs(pdev);
	dp_tx_desc_flush(pdev, NULL, true);
	dp_pktlogmod_exit(pdev);
	dp_neighbour_peers_detach(pdev);

	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	if (pdev->invalid_peer)
		qdf_mem_free(pdev->invalid_peer);

	if (pdev->filter)
		dp_mon_filter_dealloc(pdev);

	dp_pdev_srng_deinit(pdev);

	dp_ipa_uc_detach(pdev->soc, pdev);
	dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev);
	dp_rxdma_ring_cleanup(pdev->soc, pdev);

	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}
	pdev->invalid_peer_head_msdu = NULL;
	pdev->invalid_peer_tail_msdu = NULL;
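
	/*
	 * Detach the WDI event table last, so that no event subscriber
	 * can be invoked against a partially torn-down pdev.
	 */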
	dp_wdi_event_detach(pdev);
	pdev->pdev_deinit = 1;
}

/**
 * dp_pdev_deinit_wifi3() - Deinit txrx pdev
 * @psoc: Datapath psoc handle
 * @pdev_id: Id of datapath PDEV handle
 * @force: Force deinit
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
		     int force)
{
	struct dp_pdev *txrx_pdev;

	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
						       pdev_id);

	if (!txrx_pdev)
		return QDF_STATUS_E_FAILURE;

	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
 * @txrx_pdev: Datapath PDEV handle
 *
 * Return: None
 */
static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;

	dp_tx_capture_debugfs_init(pdev);

	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Failed to initialize pdev HTT stats debugfs");
	}
}

/*
 * dp_pdev_post_attach_wifi3() - post attach for txrx pdev
 * @soc: Datapath soc handle
 * @pdev_id: pdev id of pdev
 *
 * Return: QDF_STATUS
 */
static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
				     uint8_t pdev_id)
{
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	dp_pdev_post_attach((struct cdp_pdev *)pdev);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_pdev_detach() - Complete rest of pdev detach
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Return: None
 */
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;

	dp_pdev_htt_stats_dbgfs_deinit(pdev);
	dp_rx_pdev_mon_desc_pool_free(pdev);
	dp_rx_pdev_desc_pool_free(pdev);
	dp_pdev_srng_free(pdev);

	soc->pdev_count--;
	soc->pdev_list[pdev->pdev_id] = NULL;

	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	wlan_minidump_remove(pdev);
	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
}

/*
 * dp_pdev_detach_wifi3() - detach txrx pdev
 * @psoc: Datapath soc handle
 * @pdev_id: pdev id of pdev
 * @force: Force detach
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
				       int force)
{
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
						  pdev_id);

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	dp_pdev_detach((struct cdp_pdev *)pdev, force);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
 * @soc: DP SOC handle
 */
static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;
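
	/*
	 * Drain the deferred freelist: each node still owns the DMA
	 * mapping and host memory of its REO hardware queue descriptor,
	 * so unmap and free both before releasing the node itself.
	 */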
	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
				     (qdf_list_node_t **)&desc) ==
	       QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    rx_tid->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}

/*
 * dp_soc_reset_txrx_ring_map() - reset tx ring map
 * @soc: DP SOC handle
 *
 */
static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
{
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
		soc->tx_ring_map[i] = 0;
}

/*
 * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
 * @soc: DP SOC handle
 *
 */
static void dp_soc_print_inactive_objects(struct dp_soc *soc)
{
	struct dp_peer *peer = NULL;
	struct dp_peer *tmp_peer = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_vdev *tmp_vdev = NULL;
	int i = 0;
	uint32_t count;

	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
	    TAILQ_EMPTY(&soc->inactive_vdev_list))
		return;

	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
			   inactive_list_elem, tmp_peer) {
		for (i = 0; i < DP_MOD_ID_MAX; i++) {
			count = qdf_atomic_read(&peer->mod_refs[i]);
			if (count)
				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
					       peer, i, count);
		}
	}

	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
			   inactive_list_elem, tmp_vdev) {
		for (i = 0; i < DP_MOD_ID_MAX; i++) {
			count = qdf_atomic_read(&vdev->mod_refs[i]);
			if (count)
				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
					       vdev, i, count);
		}
	}
	QDF_BUG(0);
}

/**
 * dp_soc_deinit() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Return: None
 */
static void dp_soc_deinit(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct htt_soc *htt_soc = soc->htt_handle;

	qdf_atomic_set(&soc->cmn_init_done, 0);

	/* free peer tables & AST tables allocated during peer_map_attach */
	if (soc->peer_map_attach_success) {
		dp_peer_find_detach(soc);
		soc->peer_map_attach_success = FALSE;
	}

	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	qdf_spinlock_destroy(&soc->htt_stats.lock);

	dp_soc_reset_txrx_ring_map(soc);

	dp_reo_desc_freelist_destroy(soc);

	DEINIT_RX_HW_STATS_LOCK(soc);

	qdf_spinlock_destroy(&soc->ast_lock);

	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	dp_soc_wds_detach(soc);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	qdf_spinlock_destroy(&soc->vdev_map_lock);

	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);

	dp_soc_tx_desc_sw_pools_deinit(soc);

	dp_soc_srng_deinit(soc);

	dp_hw_link_desc_ring_deinit(soc);

	dp_soc_print_inactive_objects(soc);
	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);

	htt_soc_htc_dealloc(soc->htt_handle);

	htt_soc_detach(htt_soc);

	/* Free wbm sg list and reset flags in down path */
	dp_rx_wbm_sg_list_deinit(soc);

	wlan_minidump_remove(soc);
}

/**
 * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Return: None
 */
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
{
	dp_soc_deinit(txrx_soc);
}

/*
 * dp_soc_detach() - Detach rest of txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Return: None
 */
static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	dp_soc_swlm_detach(soc);
	dp_soc_tx_desc_sw_pools_free(soc);
	dp_soc_srng_free(soc);
	dp_hw_link_desc_ring_free(soc);
	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
	dp_soc_rx_history_detach(soc);
	if (soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
		qdf_timer_free(&soc->mon_vdev_timer);
		soc->mon_vdev_timer_state = 0;
	}

	qdf_mem_free(soc);
}

/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Return: None
 */
static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
{
	dp_soc_detach(txrx_soc);
}

#if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
 * @soc: soc handle
 * @pdev: physical device handle
 * @mac_id: ring number
 * @mac_for_pdev: mac_id
 *
 * Return: non-zero for failure, zero for success
 */
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_buf_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_BUF);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_dst_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DST);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_desc_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DESC);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon desc ring");
			return status;
		}
	} else {
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}
	}

	return status;
}
#else
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 * @soc: data path SoC handle
 *
 * This function is used to configure the MAC rings.
 * On MCL, the host provides buffers in the Host2FW ring; FW refills
 * (copies) the buffers to the ring and updates ring_idx in a register.
 *
 * Return: zero on success, non-zero on failure
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			htt_srng_setup(soc->htt_handle, 0,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					       pdev->rx_refill_buf_ring2
					       .hal_srng,
					       RXDMA_BUF);

			if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(
						(void *)soc->ctrl_psoc);
			}

			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("DBS enabled max_mac_rings %d"),
					  max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("DBS disabled, max_mac_rings %d"),
					  max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  FL("pdev_id %d max_mac_rings %d"),
				  pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
					dp_get_lmac_id_for_pdev_id(soc,
								   mac_id,
								   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					       pdev->rx_mac_buf_ring[mac_id]
					       .hal_srng,
					       RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					       soc->rxdma_err_dst_ring[lmac_id]
					       .hal_srng,
					       RXDMA_DST);

				/* Configure monitor mode rings */
				status = dp_mon_htt_srng_setup(soc, pdev,
							       lmac_id,
							       mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}
			}
		}
	}
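
	/*
	 * Note on the loop above: mac_for_pdev is the target's MAC
	 * numbering for this pdev, while lmac_id indexes the LMAC rings
	 * kept in soc context; on DBS-capable chips the two mappings
	 * can differ, which is why both are looked up per iteration.
	 */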

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
		       dp_mon_reap_timer_handler, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
	qdf_timer_init(soc->osdev, &soc->mon_vdev_timer,
		       dp_mon_vdev_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	soc->mon_vdev_timer_state |= MON_VDEV_TIMER_INIT;
	return status;
}
#else
/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rx_refill_buf_ring[lmac_id].hal_srng,
			       RXDMA_BUF);
#ifndef DISABLE_MON_CONFIG
		if (soc->wlan_cfg_ctx->rxdma1_enable) {
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_mon_buf_ring[lmac_id].hal_srng,
				       RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_mon_dst_ring[lmac_id].hal_srng,
				       RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_mon_desc_ring[lmac_id].hal_srng,
				       RXDMA_MONITOR_DESC);
		}
		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rxdma_mon_status_ring[lmac_id].hal_srng,
			       RXDMA_MONITOR_STATUS);
#endif
		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
			       RXDMA_DST);
	}

	/* Configure LMAC rings in Polled mode */
	if (soc->lmac_polled_mode) {
		/*
		 * Timer to reap lmac rings.
		 */
		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
			       dp_service_lmac_rings, (void *)soc,
			       QDF_TIMER_TYPE_WAKE_APPS);
		soc->lmac_timer_init = 1;
		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
	}
	return status;
}
#endif

#ifdef NO_RX_PKT_HDR_TLV
static QDF_STATUS
dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 0;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
	/* Not subscribing rx_pkt_header */
	htt_tlv_filter.rx_header_offset = 0;
	htt_tlv_filter.rx_mpdu_start_offset =
		hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_end_offset =
		hal_rx_mpdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_start_offset =
		hal_rx_msdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
		hal_rx_msdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_attn_offset =
		hal_rx_attn_offset_get(soc->hal_soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    soc->rx_refill_buf_ring[lmac_id].
					    hal_srng,
					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					    &htt_tlv_filter);
		}
	}
	return status;
}
#else
static QDF_STATUS
dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif
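
/*
 * With NO_RX_PKT_HDR_TLV, the packet header TLV is not subscribed
 * (packet_header = 0, rx_header_offset stays 0) and the remaining TLV
 * offsets are taken from the HAL, so the target writes a compact TLV
 * layout into each RX buffer.
 */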

/*
 * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
 * @soc: data path SoC handle
 *
 * This function is used to configure the FSE HW block in RX OLE on a
 * per pdev basis. Here, we will be programming parameters related to
 * the Flow Search Table.
 *
 * Return: zero on success, non-zero on failure
 */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
static QDF_STATUS
dp_rx_target_fst_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		/* Flow search is not enabled if NSS offload is enabled */
		if (pdev &&
		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
			if (status != QDF_STATUS_SUCCESS)
				break;
		}
	}
	return status;
}
#elif defined(WLAN_SUPPORT_RX_FISA)
/**
 * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
 * @soc: SoC handle
 *
 * Return: Success
 */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	/* Check if it is enabled in the INI */
	if (!soc->fisa_enable) {
		dp_err("RX FISA feature is disabled");
		return QDF_STATUS_E_NOSUPPORT;
	}

	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
}

#define FISA_MAX_TIMEOUT 0xffffffff
#define FISA_DISABLE_TIMEOUT 0
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	struct dp_htt_rx_fisa_cfg fisa_config;

	fisa_config.pdev_id = 0;
	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;

	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
}
#else /* !WLAN_SUPPORT_RX_FISA */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_SUPPORT_RX_FISA */

#ifndef WLAN_SUPPORT_RX_FISA
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}
#endif /* !WLAN_SUPPORT_RX_FISA */

#ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
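
/*
 * Target bring-up below follows a fixed order: RXDMA ring config, then
 * RX ring selection config, then (optionally) FST and FISA
 * configuration; a failure at any step aborts the sequence.
 */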

/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Opaque Datapath SOC handle
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS
dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_soc_attach_target(soc->htt_handle);

	status = dp_rxdma_ring_config(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup messages to target");
		return status;
	}

	status = dp_rxdma_ring_sel_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt ring config message to target");
		return status;
	}

	status = dp_rx_target_fst_config(soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to send htt fst setup config message to target");
		return status;
	}

	if (status == QDF_STATUS_SUCCESS) {
		status = dp_rx_fisa_config(soc);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt FISA config message to target");
			return status;
		}
	}

	DP_STATS_INIT(soc);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_FULL_MON
static inline QDF_STATUS
dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
{
	struct dp_soc *soc = pdev->soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!soc->full_mon_mode)
		return QDF_STATUS_SUCCESS;

	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
				  pdev->pdev_id,
				  val)) != QDF_STATUS_SUCCESS) {
		status = QDF_STATUS_E_FAILURE;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
 * @soc: SoC handle
 * @vdev: vdev handle
 * @vdev_id: vdev_id
 *
 * Return: None
 */
static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
				   struct dp_vdev *vdev,
				   uint8_t vdev_id)
{
	QDF_ASSERT(vdev_id < MAX_VDEV_CNT);

	qdf_spin_lock_bh(&soc->vdev_map_lock);

	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
	    QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "unable to get vdev reference at MAP vdev %pK vdev_id %u",
			  vdev, vdev_id);
		qdf_spin_unlock_bh(&soc->vdev_map_lock);
		return;
	}

	if (!soc->vdev_id_map[vdev_id])
		soc->vdev_id_map[vdev_id] = vdev;
	else
		QDF_ASSERT(0);

	qdf_spin_unlock_bh(&soc->vdev_map_lock);
}

/*
 * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
 * @soc: SoC handle
 * @vdev: vdev handle
 *
 * Return: None
 */
static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
				      struct dp_vdev *vdev)
{
	qdf_spin_lock_bh(&soc->vdev_map_lock);
	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);

	soc->vdev_id_map[vdev->vdev_id] = NULL;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->vdev_map_lock);
}
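
/*
 * Reference semantics of the vdev_id map: the table holds a
 * DP_MOD_ID_CONFIG reference for as long as the vdev is mapped, taken
 * in dp_vdev_id_map_tbl_add() and dropped in
 * dp_vdev_id_map_tbl_remove().
 */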

/*
 * dp_vdev_pdev_list_add() - add vdev into pdev's list
 * @soc: soc handle
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: none
 */
static void dp_vdev_pdev_list_add(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  struct dp_vdev *vdev)
{
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
	    QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "unable to get vdev reference at MAP vdev %pK",
			  vdev);
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
		return;
	}
	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/*
 * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
 * @soc: SoC handle
 * @pdev: pdev handle
 * @vdev: VDEV handle
 *
 * Return: none
 */
static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     struct dp_vdev *vdev)
{
	uint8_t found = 0;
	struct dp_vdev *tmpvdev = NULL;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
		if (tmpvdev == vdev) {
			found = 1;
			break;
		}
	}

	if (found) {
		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "vdev:%pK not found in pdev:%pK vdevlist:%pK",
			  vdev, pdev, &pdev->vdev_list);
		QDF_ASSERT(0);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @cdp_soc: Datapath SOC handle
 * @pdev_id: PDEV ID
 * @vdev_mac_addr: MAC address of the virtual interface
 * @vdev_id: VDEV Id
 * @op_mode: VDEV operating mode
 * @subtype: VDEV operating subtype
 *
 * Return: status
 */
static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t pdev_id,
				       uint8_t *vdev_mac_addr,
				       uint8_t vdev_id,
				       enum wlan_op_mode op_mode,
				       enum wlan_op_subtype subtype)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
	int i = 0;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
		qdf_mem_free(vdev);
		goto fail0;
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
			  WLAN_MD_DP_VDEV, "dp_vdev");

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->subtype = subtype;
	vdev->osdev = soc->osdev;

	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
	vdev->multipass_en = false;
	qdf_atomic_init(&vdev->ref_cnt);
	for (i = 0; i < DP_MOD_ID_MAX; i++)
		qdf_atomic_init(&vdev->mod_refs[i]);

	/* Take one reference for create */
	qdf_atomic_inc(&vdev->ref_cnt);
	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
	vdev->num_peers = 0;
#ifdef notyet
	vdev->filters_num = 0;
#endif
	vdev->lmac_id = pdev->lmac_id;

	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
		     QDF_MAC_ADDR_SIZE);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	qdf_spinlock_create(&vdev->peer_list_lock);
	TAILQ_INIT(&vdev->peer_list);
	dp_peer_multipass_list_init(vdev);

	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	} else if (soc->intr_mode == DP_INTR_MSI &&
		   wlan_op_mode_monitor == vdev->opmode &&
		   soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
		qdf_timer_mod(&soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
		soc->mon_vdev_timer_state |= MON_VDEV_TIMER_RUNNING;
	}

	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);

	if (wlan_op_mode_monitor == vdev->opmode) {
		pdev->monitor_vdev = vdev;
		return QDF_STATUS_SUCCESS;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->igmp_mcast_enhanc_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
	vdev->prev_tx_enq_tstamp = 0;
	vdev->prev_rx_deliver_tstamp = 0;
	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;

	dp_vdev_pdev_list_add(soc, pdev, vdev);
	pdev->vdev_count++;

	if (wlan_op_mode_sta != vdev->opmode)
		vdev->ap_bridge_enabled = true;
	else
		vdev->ap_bridge_enabled = false;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: wlan_cfg_ap_bridge_enabled %d",
		  __func__, vdev->ap_bridge_enabled);

	dp_tx_vdev_attach(vdev);

	if (pdev->vdev_count == 1)
		dp_lro_hash_setup(soc, pdev);

	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	DP_STATS_INIT(vdev);

	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
				     vdev->mac_addr.raw);
	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_vdev_register_tx_handler() - Register Tx handler
 * @vdev: struct dp_vdev *
 * @soc: struct dp_soc *
 * @txrx_ops: struct ol_txrx_ops *
 */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
	/* Enable vdev_id check only for ap, if flag is enabled */
	if (vdev->mesh_vdev)
		txrx_ops->tx.tx = dp_tx_send_mesh;
	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
		 (vdev->opmode == wlan_op_mode_ap))
		txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
	else
		txrx_ops->tx.tx = dp_tx_send;

	/* Avoid check in regular exception Path */
	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
	    (vdev->opmode == wlan_op_mode_ap))
		txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
	else
		txrx_ops->tx.tx_exception = dp_tx_send_exception;

	dp_alert("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
		 wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
		 vdev->opmode, vdev->vdev_id);
}
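
/*
 * Summary of the selection above: mesh vdevs always use
 * dp_tx_send_mesh(); AP vdevs use the vdev-id-checking variants when
 * the per-packet check is enabled in cfg; everything else falls back
 * to dp_tx_send() / dp_tx_send_exception().
 */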

/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
 */
static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t vdev_id,
					 ol_osif_vdev_handle osif_vdev,
					 struct ol_txrx_ops *txrx_ops)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev->osif_vdev = osif_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
	vdev->osif_get_key = txrx_ops->get_key;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
	vdev->tx_comp = txrx_ops->tx.tx_comp;
	vdev->stats_cb = txrx_ops->rx.stats_rx;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;

	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  "DP Vdev Register success");

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_delete() - delete DP peer
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
static void
dp_peer_delete(struct dp_soc *soc,
	       struct dp_peer *peer,
	       void *arg)
{
	if (!peer->valid)
		return;

	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
			     peer->vdev->vdev_id,
			     peer->mac_addr.raw, 0);
}

/**
 * dp_vdev_flush_peers() - Forcibly flush peers of vdev
 * @vdev_handle: Datapath VDEV handle
 * @unmap_only: Flag to indicate "only unmap"
 *
 * Return: void
 */
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *peer;
	uint32_t i = 0;

	if (!unmap_only)
		dp_vdev_iterate_peer(vdev, dp_peer_delete, NULL,
				     DP_MOD_ID_CDP);

	for (i = 0; i < soc->max_peers; i++) {
		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);

		if (!peer)
			continue;

		if (peer->vdev != vdev) {
			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
			continue;
		}

		dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmap",
			QDF_MAC_ADDR_REF(peer->mac_addr.raw));

		dp_rx_peer_unmap_handler(soc, i,
					 vdev->vdev_id,
					 peer->mac_addr.raw, 0,
					 DP_PEER_WDS_COUNT_INVALID);
		SET_PEER_REF_CNT_ONE(peer);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}
}

/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @cdp_soc: Datapath soc handle
 * @vdev_id: VDEV Id
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t vdev_id,
				       ol_txrx_vdev_delete_cb callback,
				       void *cb_context)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;
	struct dp_peer *vap_self_peer = NULL;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
							DP_MOD_ID_CONFIG);
	if (vap_self_peer) {
		qdf_spin_lock_bh(&soc->ast_lock);
		if (vap_self_peer->self_ast_entry) {
			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
			vap_self_peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
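
		/*
		 * The self-peer's AST entry is gone; now delete the
		 * self-peer itself through the regular peer-delete path
		 * and drop the reference taken above.
		 */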
		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
				     vap_self_peer->mac_addr.raw, 0);
		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
	}

	/*
	 * If Target is hung, flush all peers before detaching vdev
	 * this will free all references held due to missing
	 * unmap commands from Target
	 */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);

	dp_rx_vdev_detach(vdev);
	/*
	 * move it after dp_rx_vdev_detach(),
	 * as the call back done in dp_rx_vdev_detach()
	 * still need to get vdev pointer by vdev_id.
	 */
	dp_vdev_id_map_tbl_remove(soc, vdev);

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	if (!soc->hw_nac_monitor_support) {
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			QDF_ASSERT(peer->vdev != vdev);
		}
	} else {
		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
				   neighbour_peer_list_elem, temp_peer) {
			if (peer->vdev == vdev) {
				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
					     neighbour_peer_list_elem);
				qdf_mem_free(peer);
			}
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	if (vdev->vdev_dp_ext_handle) {
		qdf_mem_free(vdev->vdev_dp_ext_handle);
		vdev->vdev_dp_ext_handle = NULL;
	}
	/* indicate that the vdev needs to be deleted */
	vdev->delete.pending = 1;
	vdev->delete.callback = callback;
	vdev->delete.context = cb_context;

	if (vdev->opmode != wlan_op_mode_monitor)
		dp_vdev_pdev_list_remove(soc, pdev, vdev);

	/* release reference taken above for find */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	/* release reference taken at dp_vdev_create */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);

	return QDF_STATUS_SUCCESS;
}

static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;
	struct dp_soc *soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
		      inactive_list_elem) {
		/* reuse bss peer only when vdev matches */
		if (peer->bss_peer && (peer->vdev == vdev) &&
		    qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			/* increment ref count for cdp_peer_create */
			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
			    QDF_STATUS_SUCCESS) {
				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
					     inactive_list_elem);
				qdf_spin_unlock_bh
					(&soc->inactive_peer_list_lock);
				return peer;
			}
		}
	}

	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
	return NULL;
}
#ifdef FEATURE_AST
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       struct dp_pdev *pdev,
					       uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);
	if (soc->ast_override_support)
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
							    pdev->pdev_id);
	else
		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif

#ifdef PEER_CACHE_RX_PKTS
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
}
#else
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
}
#endif

/*
 * dp_peer_create_wifi3() - attach txrx peer
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @peer_mac_addr: Peer MAC address
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;
	int i;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct cdp_peer_cookie peer_cookie;
	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
	struct dp_vdev *vdev = NULL;

	if (!peer_mac_addr)
		return QDF_STATUS_E_FAILURE;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;
	soc = pdev->soc;

	/*
	 * If a peer entry with the given MAC address already exists,
	 * reuse the peer and reset its state.
	 */
	peer = dp_peer_can_reuse(vdev, peer_mac_addr);

	if (peer) {
		dp_peer_vdev_list_add(soc, vdev, peer);

		dp_peer_find_hash_add(soc, peer);
		qdf_atomic_init(&peer->is_default_route_set);
		dp_peer_cleanup(vdev, peer);

		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_delete_ast_entries(soc, peer);
		qdf_spin_unlock_bh(&soc->ast_lock);

		if ((vdev->opmode == wlan_op_mode_sta) &&
		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
				 QDF_MAC_ADDR_SIZE)) {
			ast_type = CDP_TXRX_AST_TYPE_SELF;
		}
		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

		peer->valid = 1;
		dp_local_peer_id_alloc(pdev, peer);

		qdf_spinlock_create(&peer->peer_info_lock);
		dp_peer_rx_bufq_resources_init(peer);

		DP_STATS_INIT(peer);
		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);

		/*
		 * In tx monitor mode, a filter may have been set for the peer
		 * while it was unassociated; once the peer associates, the
		 * tx_cap_enabled flag must be updated to honour the peer
		 * filter.
		 */
		dp_peer_tx_capture_filter_check(pdev, peer);

		dp_set_peer_isolation(peer, false);

		dp_wds_ext_peer_init(peer);

		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);

		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_SUCCESS;
	} else {
		/*
		 * When a STA roams from an RPTR AP to the ROOT AP and vice
		 * versa, we need to remove the AST entry which was earlier
		 * added as a WDS entry.
		 * If an AST entry exists, but no peer entry exists with the
		 * given MAC address, we can deduce that it is a WDS entry.
		 */
		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
	}

#ifdef notyet
	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
						   soc->mempool_ol_ath_peer);
#else
	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
#endif
	if (!peer) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE; /* failure */
	}

	/* log the allocation only after the NULL check */
	wlan_minidump_log(peer,
			  sizeof(*peer),
			  soc->ctrl_psoc,
			  WLAN_MD_DP_PEER, "dp_peer");

	qdf_mem_zero(peer, sizeof(struct dp_peer));

	TAILQ_INIT(&peer->ast_entry_list);

	/* store provided params */
	peer->vdev = vdev;
	/* get the vdev reference for the new peer */
	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);

	if ((vdev->opmode == wlan_op_mode_sta) &&
	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
			 QDF_MAC_ADDR_SIZE)) {
		ast_type = CDP_TXRX_AST_TYPE_SELF;
	}
	qdf_spinlock_create(&peer->peer_state_lock);
	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
	qdf_spinlock_create(&peer->peer_info_lock);
	dp_wds_ext_peer_init(peer);

	dp_peer_rx_bufq_resources_init(peer);

	qdf_mem_copy(
		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);

	/* initialize the peer_id */
	peer->peer_id = HTT_INVALID_PEER;

	/* reset the ast index to flowid table */
	dp_peer_reset_flowq_map(peer);

	qdf_atomic_init(&peer->ref_cnt);

	for (i = 0; i < DP_MOD_ID_MAX; i++)
		qdf_atomic_init(&peer->mod_refs[i]);

	/* keep one reference for attach */
	qdf_atomic_inc(&peer->ref_cnt);
	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);

	dp_peer_vdev_list_add(soc, vdev, peer);

	/* TODO: See if hash based search is required */
	dp_peer_find_hash_add(soc, peer);

	/* Initialize the peer state */
	peer->state = OL_TXRX_PEER_STATE_DISC;

	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		qdf_atomic_read(&peer->ref_cnt));
	/*
	 * For every peer MAP message, search and set bss_peer if applicable
	 */
	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
			QDF_MAC_ADDR_SIZE) == 0 &&
	    (wlan_op_mode_sta != vdev->opmode)) {
		dp_info("vdev bss_peer!!");
		peer->bss_peer = 1;
	}

	if (wlan_op_mode_sta == vdev->opmode &&
	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
			QDF_MAC_ADDR_SIZE) == 0) {
		peer->sta_self_peer = 1;
	}

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	peer->valid = 1;
	dp_local_peer_id_alloc(pdev, peer);
	DP_STATS_INIT(peer);
	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);

	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	peer_cookie.ctx = NULL;
	peer_cookie.pdev_id = pdev->pdev_id;
	peer_cookie.cookie = pdev->next_peer_cookie++;
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
			     (void *)&peer_cookie,
			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
#endif
	if (soc->rdkstats_enabled) {
		if (!peer_cookie.ctx) {
			pdev->next_peer_cookie--;
			qdf_err("Failed to initialize peer rate stats");
		} else {
			peer->rdkstats_ctx = (struct cdp_peer_rate_stats_ctx *)
						peer_cookie.ctx;
		}
	}

	/*
	 * Allocate the peer extended stats context. Fall through in
	 * case of failure, as it is not an implicit requirement to have
	 * this object for regular statistics updates.
	 */
	if (dp_peer_ext_stats_ctx_alloc(soc, peer) !=
	    QDF_STATUS_SUCCESS)
		dp_warn("peer ext_stats ctx alloc failed");

	/*
	 * In tx monitor mode, a filter may have been set for the peer while
	 * it was unassociated; once the peer associates, the tx_cap_enabled
	 * flag must be updated to honour the peer filter.
	 */
	dp_peer_tx_capture_filter_check(pdev, peer);

	dp_set_peer_isolation(peer, false);

	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
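/*
 * Lifecycle sketch (illustrative; error handling omitted and the handles
 * are assumed valid): the control path drives a peer through the handlers
 * defined in this file in this order:
 *
 *	dp_peer_create_wifi3(soc_hdl, vdev_id, peer_mac);
 *	dp_peer_setup_wifi3(soc_hdl, vdev_id, peer_mac);
 *	...
 *	dp_peer_delete_wifi3(soc_hdl, vdev_id, peer_mac, 0);
 */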
/*
 * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
 * @vdev: Datapath VDEV handle
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 *
 * Return: None
 */
static
void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
				  enum cdp_host_reo_dest_ring *reo_dest,
				  bool *hash_based)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;

	pdev = vdev->pdev;
	soc = pdev->soc;
	/*
	 * hash-based steering is disabled for radios which are offloaded
	 * to NSS
	 */
	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);

	/*
	 * The line below ensures the proper reo_dest ring is chosen
	 * for cases where the toeplitz hash cannot be generated
	 * (ex: non TCP/UDP)
	 */
	*reo_dest = pdev->reo_dest;
}

#ifdef IPA_OFFLOAD
/**
 * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
 * @vdev: Virtual device
 *
 * Return: true if the vdev is of subtype P2P
 *	   false if the vdev is of any other subtype
 */
static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
{
	if (vdev->subtype == wlan_op_subtype_p2p_device ||
	    vdev->subtype == wlan_op_subtype_p2p_cli ||
	    vdev->subtype == wlan_op_subtype_p2p_go)
		return true;

	return false;
}
6191 * 6192 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring 6193 * value enum value is from 1 - 4. 6194 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1 6195 */ 6196 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { 6197 if (vdev->opmode == wlan_op_mode_ap) { 6198 *reo_dest = IPA_REO_DEST_RING_IDX + 1; 6199 *hash_based = 0; 6200 } else if (vdev->opmode == wlan_op_mode_sta && 6201 dp_ipa_is_mdm_platform()) { 6202 *reo_dest = IPA_REO_DEST_RING_IDX + 1; 6203 } 6204 } 6205 } 6206 6207 #else 6208 6209 /* 6210 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer 6211 * @vdev: Datapath VDEV handle 6212 * @reo_dest: pointer to default reo_dest ring for vdev to be populated 6213 * @hash_based: pointer to hash value (enabled/disabled) to be populated 6214 * 6215 * Use system config values for hash based steering. 6216 * Return: None 6217 */ 6218 6219 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev, 6220 enum cdp_host_reo_dest_ring *reo_dest, 6221 bool *hash_based) 6222 { 6223 dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based); 6224 } 6225 #endif /* IPA_OFFLOAD */ 6226 6227 /* 6228 * dp_peer_setup_wifi3() - initialize the peer 6229 * @soc_hdl: soc handle object 6230 * @vdev_id : vdev_id of vdev object 6231 * @peer_mac: Peer's mac address 6232 * 6233 * Return: QDF_STATUS 6234 */ 6235 static QDF_STATUS 6236 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 6237 uint8_t *peer_mac) 6238 { 6239 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 6240 struct dp_pdev *pdev; 6241 bool hash_based = 0; 6242 enum cdp_host_reo_dest_ring reo_dest; 6243 QDF_STATUS status = QDF_STATUS_SUCCESS; 6244 struct dp_vdev *vdev = NULL; 6245 struct dp_peer *peer = 6246 dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id, 6247 DP_MOD_ID_CDP); 6248 enum wlan_op_mode vdev_opmode; 6249 6250 if (!peer) 6251 return QDF_STATUS_E_FAILURE; 6252 6253 vdev = peer->vdev; 6254 if (!vdev) { 6255 status = QDF_STATUS_E_FAILURE; 6256 goto fail; 6257 } 6258 6259 /* save vdev related member in case vdev freed */ 6260 vdev_opmode = vdev->opmode; 6261 pdev = vdev->pdev; 6262 dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based); 6263 6264 dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u", 6265 pdev->pdev_id, vdev->vdev_id, 6266 vdev->opmode, hash_based, reo_dest); 6267 6268 /* 6269 * There are corner cases where the AD1 = AD2 = "VAPs address" 6270 * i.e both the devices have same MAC address. In these 6271 * cases we want such pkts to be processed in NULL Q handler 6272 * which is REO2TCL ring. for this reason we should 6273 * not setup reo_queues and default route for bss_peer. 
6274 */ 6275 dp_peer_tx_init(pdev, peer); 6276 if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) { 6277 status = QDF_STATUS_E_FAILURE; 6278 goto fail; 6279 } 6280 6281 if (soc->cdp_soc.ol_ops->peer_set_default_routing) { 6282 /* TODO: Check the destination ring number to be passed to FW */ 6283 soc->cdp_soc.ol_ops->peer_set_default_routing( 6284 soc->ctrl_psoc, 6285 peer->vdev->pdev->pdev_id, 6286 peer->mac_addr.raw, 6287 peer->vdev->vdev_id, hash_based, reo_dest); 6288 } 6289 6290 qdf_atomic_set(&peer->is_default_route_set, 1); 6291 6292 if (vdev_opmode != wlan_op_mode_monitor) 6293 dp_peer_rx_init(pdev, peer); 6294 6295 dp_peer_ppdu_delayed_ba_init(peer); 6296 6297 fail: 6298 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 6299 return status; 6300 } 6301 6302 /* 6303 * dp_cp_peer_del_resp_handler - Handle the peer delete response 6304 * @soc_hdl: Datapath SOC handle 6305 * @vdev_id: id of virtual device object 6306 * @mac_addr: Mac address of the peer 6307 * 6308 * Return: QDF_STATUS 6309 */ 6310 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl, 6311 uint8_t vdev_id, 6312 uint8_t *mac_addr) 6313 { 6314 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 6315 struct dp_ast_entry *ast_entry = NULL; 6316 txrx_ast_free_cb cb = NULL; 6317 void *cookie; 6318 6319 qdf_spin_lock_bh(&soc->ast_lock); 6320 6321 ast_entry = 6322 dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, 6323 vdev_id); 6324 6325 /* in case of qwrap we have multiple BSS peers 6326 * with same mac address 6327 * 6328 * AST entry for this mac address will be created 6329 * only for one peer hence it will be NULL here 6330 */ 6331 if ((!ast_entry || !ast_entry->delete_in_progress) || 6332 (ast_entry->peer_id != HTT_INVALID_PEER)) { 6333 qdf_spin_unlock_bh(&soc->ast_lock); 6334 return QDF_STATUS_E_FAILURE; 6335 } 6336 6337 if (ast_entry->is_mapped) 6338 soc->ast_table[ast_entry->ast_idx] = NULL; 6339 6340 DP_STATS_INC(soc, ast.deleted, 1); 6341 dp_peer_ast_hash_remove(soc, ast_entry); 6342 6343 cb = ast_entry->callback; 6344 cookie = ast_entry->cookie; 6345 ast_entry->callback = NULL; 6346 ast_entry->cookie = NULL; 6347 6348 soc->num_ast_entries--; 6349 qdf_spin_unlock_bh(&soc->ast_lock); 6350 6351 if (cb) { 6352 cb(soc->ctrl_psoc, 6353 dp_soc_to_cdp_soc(soc), 6354 cookie, 6355 CDP_TXRX_AST_DELETED); 6356 } 6357 qdf_mem_free(ast_entry); 6358 6359 return QDF_STATUS_SUCCESS; 6360 } 6361 6362 /* 6363 * dp_set_ba_aging_timeout() - set ba aging timeout per AC 6364 * @txrx_soc: cdp soc handle 6365 * @ac: Access category 6366 * @value: timeout value in millisec 6367 * 6368 * Return: void 6369 */ 6370 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc, 6371 uint8_t ac, uint32_t value) 6372 { 6373 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 6374 6375 hal_set_ba_aging_timeout(soc->hal_soc, ac, value); 6376 } 6377 6378 /* 6379 * dp_get_ba_aging_timeout() - get ba aging timeout per AC 6380 * @txrx_soc: cdp soc handle 6381 * @ac: access category 6382 * @value: timeout value in millisec 6383 * 6384 * Return: void 6385 */ 6386 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc, 6387 uint8_t ac, uint32_t *value) 6388 { 6389 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 6390 6391 hal_get_ba_aging_timeout(soc->hal_soc, ac, value); 6392 } 6393 6394 /* 6395 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev 6396 * @txrx_soc: cdp soc handle 6397 * @pdev_id: id of physical device object 6398 * @val: reo destination ring index (1 - 4) 6399 * 6400 * Return: QDF_STATUS 6401 */ 6402 static 
/*
 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
 * @txrx_soc: cdp soc handle
 * @pdev_id: id of physical device object
 * @val: reo destination ring index (1 - 4)
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
		     enum cdp_host_reo_dest_ring val)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
						   pdev_id);

	if (pdev) {
		pdev->reo_dest = val;
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_FAILURE;
}

/*
 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
 * @txrx_soc: cdp soc handle
 * @pdev_id: id of physical device object
 *
 * Return: reo destination ring index
 */
static enum cdp_host_reo_dest_ring
dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
						   pdev_id);

	if (pdev)
		return pdev->reo_dest;
	else
		return cdp_host_reo_dest_ring_unknown;
}

#ifdef ATH_SUPPORT_NAC
/*
 * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
 * @pdev: Datapath PDEV handle
 * @val: value to be set
 *
 * Return: 0 on success
 */
static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
				     bool val)
{
	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->filter_neighbour_peers = val;
	return 0;
}
#else
static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
				     bool val)
{
	return 0;
}
#endif /* ATH_SUPPORT_NAC */

#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
/*
 * dp_update_filter_neighbour_peers() - set the neighbour peer (NAC client)
 *					address for smart mesh filtering
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of virtual device object
 * @cmd: Add/Del command
 * @macaddr: nac client mac address
 *
 * Return: 1 on success, 0 on failure
 */
static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev || !macaddr)
		goto fail0;

	pdev = vdev->pdev;

	if (!pdev)
		goto fail0;

	/* Store the address of the NAC (neighbour peer), which will be
	 * checked against the TA of received packets.
	 */
6494 */ 6495 if (cmd == DP_NAC_PARAM_ADD) { 6496 peer = (struct dp_neighbour_peer *) qdf_mem_malloc( 6497 sizeof(*peer)); 6498 6499 if (!peer) { 6500 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 6501 FL("DP neighbour peer node memory allocation failed")); 6502 goto fail0; 6503 } 6504 6505 qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0], 6506 macaddr, QDF_MAC_ADDR_SIZE); 6507 peer->vdev = vdev; 6508 6509 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); 6510 6511 /* add this neighbour peer into the list */ 6512 TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer, 6513 neighbour_peer_list_elem); 6514 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); 6515 6516 /* first neighbour */ 6517 if (!pdev->neighbour_peers_added) { 6518 QDF_STATUS status = QDF_STATUS_SUCCESS; 6519 6520 pdev->neighbour_peers_added = true; 6521 dp_mon_filter_setup_smart_monitor(pdev); 6522 status = dp_mon_filter_update(pdev); 6523 if (status != QDF_STATUS_SUCCESS) { 6524 QDF_TRACE(QDF_MODULE_ID_DP, 6525 QDF_TRACE_LEVEL_ERROR, 6526 FL("smart mon filter setup failed")); 6527 dp_mon_filter_reset_smart_monitor(pdev); 6528 pdev->neighbour_peers_added = false; 6529 } 6530 } 6531 6532 } else if (cmd == DP_NAC_PARAM_DEL) { 6533 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); 6534 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, 6535 neighbour_peer_list_elem) { 6536 if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], 6537 macaddr, QDF_MAC_ADDR_SIZE)) { 6538 /* delete this peer from the list */ 6539 TAILQ_REMOVE(&pdev->neighbour_peers_list, 6540 peer, neighbour_peer_list_elem); 6541 qdf_mem_free(peer); 6542 break; 6543 } 6544 } 6545 /* last neighbour deleted */ 6546 if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) { 6547 QDF_STATUS status = QDF_STATUS_SUCCESS; 6548 6549 pdev->neighbour_peers_added = false; 6550 dp_mon_filter_reset_smart_monitor(pdev); 6551 status = dp_mon_filter_update(pdev); 6552 if (status != QDF_STATUS_SUCCESS) { 6553 QDF_TRACE(QDF_MODULE_ID_DP, 6554 QDF_TRACE_LEVEL_ERROR, 6555 FL("smart mon filter clear failed")); 6556 } 6557 6558 } 6559 6560 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); 6561 } 6562 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 6563 return 1; 6564 6565 fail0: 6566 if (vdev) 6567 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 6568 return 0; 6569 } 6570 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ 6571 6572 #ifdef WLAN_SUPPORT_MSCS 6573 /* 6574 * dp_record_mscs_params - MSCS parameters sent by the STA in 6575 * the MSCS Request to the AP. The AP makes a note of these 6576 * parameters while comparing the MSDUs sent by the STA, to 6577 * send the downlink traffic with correct User priority. 
#ifdef WLAN_SUPPORT_MSCS
/*
 * dp_record_mscs_params() - record the MSCS parameters sent by the STA in
 * the MSCS Request to the AP. The AP makes a note of these parameters
 * while classifying the MSDUs sent by the STA, so as to send the downlink
 * traffic with the correct user priority.
 * @soc_hdl: Datapath soc handle
 * @peer_mac: STA Mac address
 * @vdev_id: ID of the vdev handle
 * @mscs_params: Structure having MSCS parameters obtained from handshake
 * @active: Flag to set MSCS active/inactive
 *
 * Return: QDF_STATUS - Success/Invalid
 */
static QDF_STATUS
dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
		      bool active)
{
	struct dp_peer *peer;
	QDF_STATUS status = QDF_STATUS_E_INVAL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);

	if (!peer) {
		dp_err("Peer is NULL!");
		goto fail;
	}
	if (!active) {
		dp_info("MSCS Procedure is terminated");
		peer->mscs_active = active;
		goto fail;
	}

	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
		/* Populate the entries inside the IPv4 database first */
		peer->mscs_ipv4_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		peer->mscs_ipv4_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		peer->mscs_ipv4_parameter.classifier_mask =
			mscs_params->classifier_mask;

		/* Populate the entries inside the IPv6 database */
		peer->mscs_ipv6_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		peer->mscs_ipv6_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		peer->mscs_ipv6_parameter.classifier_mask =
			mscs_params->classifier_mask;
		peer->mscs_active = 1;
		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
			"\tUser priority limit = %x\tClassifier mask = %x",
			QDF_MAC_ADDR_REF(peer_mac),
			mscs_params->classifier_type,
			peer->mscs_ipv4_parameter.user_priority_bitmap,
			peer->mscs_ipv4_parameter.user_priority_limit,
			peer->mscs_ipv4_parameter.classifier_mask);
	}

	status = QDF_STATUS_SUCCESS;
fail:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif
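/*
 * Usage sketch (illustrative; params and sta_mac are hypothetical
 * locals): a type-4 TCLAS request copies one set of user-priority
 * parameters into both the IPv4 and IPv6 databases, so later lookups
 * are IP-version agnostic:
 *
 *	params.classifier_type = IEEE80211_TCLAS_MASK_CLA_TYPE_4;
 *	dp_record_mscs_params(soc_hdl, sta_mac, vdev_id, &params, true);
 */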
/*
 * dp_get_sec_type() - Get the security type
 * @soc: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * Return: Security type
 */
static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
			   uint8_t *peer_mac, uint8_t sec_idx)
{
	int sec_type = 0;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return sec_type;
	}

	sec_type = peer->security[sec_idx].sec_type;

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return sec_type;
}

/*
 * dp_peer_authorize() - authorize txrx peer
 * @soc_hdl: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @authorize: authorize flag
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		  uint8_t *peer_mac, uint32_t authorize)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
						      0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
	} else {
		peer->authorize = authorize ? 1 : 0;
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return status;
}

static void dp_flush_monitor_rings(struct dp_soc *soc)
{
	struct dp_pdev *pdev = soc->pdev_list[0];
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t lmac_id;
	uint32_t hp, tp;
	uint8_t dp_intr_id;
	int budget;
	void *mon_dst_srng;

	/* Reset the monitor filters before reaping the ring */
	qdf_spin_lock_bh(&pdev->mon_lock);
	dp_mon_filter_reset_mon_mode(pdev);
	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS)
		dp_info("failed to reset monitor filters");
	qdf_spin_unlock_bh(&pdev->mon_lock);

	if (pdev->mon_chan_band == REG_BAND_UNKNOWN)
		return;

	lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
	if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID))
		return;

	dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id);

	/* reap the full ring */
	budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx);

	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
	dp_info("Before reap: Monitor DST ring HP %u TP %u", hp, tp);

	dp_mon_process(soc, &soc->intr_ctx[dp_intr_id], lmac_id, budget);

	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
	dp_info("After reap: Monitor DST ring HP %u TP %u", hp, tp);
}

/**
 * dp_vdev_unref_delete() - check and process vdev delete
 * @soc: DP specific soc pointer
 * @vdev: DP specific vdev pointer
 * @mod_id: module id
 */
void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
			  enum dp_mod_id mod_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;
	uint8_t vdev_id = vdev->vdev_id;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_vdev *tmp_vdev = NULL;
	uint8_t found = 0;

	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);

	/* Return if this is not the last reference */
	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
		return;

	/*
	 * delete.pending should be set, as the last reference must be
	 * released only after cdp_vdev_detach() has been called.
	 *
	 * If this assert is hit, there is a ref count issue.
	 */
	QDF_ASSERT(vdev->delete.pending);

	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT") - its last peer is done",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));

	if (wlan_op_mode_monitor == vdev->opmode) {
		if (soc->intr_mode == DP_INTR_POLL) {
			qdf_timer_sync_cancel(&soc->int_timer);
			dp_flush_monitor_rings(soc);
		} else if (soc->intr_mode == DP_INTR_MSI &&
			   soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING) {
			qdf_timer_sync_cancel(&soc->mon_vdev_timer);
			dp_flush_monitor_rings(soc);
			soc->mon_vdev_timer_state &= ~MON_VDEV_TIMER_RUNNING;
		}
		pdev->monitor_vdev = NULL;
		goto free_vdev;
	}
	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);

free_vdev:
	qdf_spinlock_destroy(&vdev->peer_list_lock);

	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
		      inactive_list_elem) {
		if (tmp_vdev == vdev) {
			found = 1;
			break;
		}
	}
	/* delete this vdev from the inactive list */
	if (found)
		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
			     inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	wlan_minidump_remove(vdev);
	qdf_mem_free(vdev);
	vdev = NULL;

	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}
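/*
 * Usage sketch (illustrative): module references pin the vdev object and
 * the memory is freed only when the base ref_cnt drops to zero with
 * delete.pending set. The canonical get/put pairing used throughout this
 * file is:
 *
 *	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *	if (vdev) {
 *		... use the vdev ...
 *		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 *	}
 */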
6852 */ 6853 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) { 6854 peer_id = peer->peer_id; 6855 6856 /* 6857 * Make sure that the reference to the peer in 6858 * peer object map is removed 6859 */ 6860 QDF_ASSERT(peer_id == HTT_INVALID_PEER); 6861 6862 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 6863 "Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer, 6864 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 6865 6866 /* 6867 * Deallocate the extended stats contenxt 6868 */ 6869 dp_peer_ext_stats_ctx_dealloc(soc, peer); 6870 6871 /* send peer destroy event to upper layer */ 6872 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, 6873 QDF_MAC_ADDR_SIZE); 6874 peer_cookie.ctx = NULL; 6875 peer_cookie.ctx = (struct cdp_stats_cookie *) 6876 peer->rdkstats_ctx; 6877 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 6878 dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY, 6879 soc, 6880 (void *)&peer_cookie, 6881 peer->peer_id, 6882 WDI_NO_VAL, 6883 pdev->pdev_id); 6884 #endif 6885 peer->rdkstats_ctx = NULL; 6886 wlan_minidump_remove(peer); 6887 6888 qdf_spin_lock_bh(&soc->inactive_peer_list_lock); 6889 TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list, 6890 inactive_list_elem) { 6891 if (tmp_peer == peer) { 6892 found = 1; 6893 break; 6894 } 6895 } 6896 if (found) 6897 TAILQ_REMOVE(&soc->inactive_peer_list, peer, 6898 inactive_list_elem); 6899 /* delete this peer from the list */ 6900 qdf_spin_unlock_bh(&soc->inactive_peer_list_lock); 6901 DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list)); 6902 dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED); 6903 6904 /* cleanup the peer data */ 6905 dp_peer_cleanup(vdev, peer); 6906 for (tid = 0; tid < DP_MAX_TIDS; tid++) 6907 qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock); 6908 6909 qdf_spinlock_destroy(&peer->peer_state_lock); 6910 qdf_mem_free(peer); 6911 6912 /* 6913 * Decrement ref count taken at peer create 6914 */ 6915 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD); 6916 } 6917 } 6918 6919 #ifdef PEER_CACHE_RX_PKTS 6920 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer) 6921 { 6922 qdf_list_destroy(&peer->bufq_info.cached_bufq); 6923 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock); 6924 } 6925 #else 6926 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer) 6927 { 6928 } 6929 #endif 6930 6931 /* 6932 * dp_peer_detach_wifi3() – Detach txrx peer 6933 * @soc_hdl: soc handle 6934 * @vdev_id: id of dp handle 6935 * @peer_mac: mac of datapath PEER handle 6936 * @bitmap: bitmap indicating special handling of request. 
6937 * 6938 */ 6939 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl, 6940 uint8_t vdev_id, 6941 uint8_t *peer_mac, uint32_t bitmap) 6942 { 6943 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 6944 struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 6945 0, vdev_id, 6946 DP_MOD_ID_CDP); 6947 struct dp_vdev *vdev = NULL; 6948 6949 /* Peer can be null for monitor vap mac address */ 6950 if (!peer) { 6951 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 6952 "%s: Invalid peer\n", __func__); 6953 return QDF_STATUS_E_FAILURE; 6954 } 6955 6956 if (!peer->valid) { 6957 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 6958 dp_err("Invalid peer: "QDF_MAC_ADDR_FMT, 6959 QDF_MAC_ADDR_REF(peer_mac)); 6960 return QDF_STATUS_E_ALREADY; 6961 } 6962 6963 vdev = peer->vdev; 6964 6965 if (!vdev) 6966 return QDF_STATUS_E_FAILURE; 6967 peer->valid = 0; 6968 6969 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, 6970 FL("peer %pK ("QDF_MAC_ADDR_FMT")"), peer, 6971 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 6972 6973 dp_local_peer_id_free(peer->vdev->pdev, peer); 6974 6975 /* Drop all rx packets before deleting peer */ 6976 dp_clear_peer_internal(soc, peer); 6977 6978 dp_peer_rx_bufq_resources_deinit(peer); 6979 6980 qdf_spinlock_destroy(&peer->peer_info_lock); 6981 dp_peer_multipass_list_remove(peer); 6982 6983 /* remove the reference to the peer from the hash table */ 6984 dp_peer_find_hash_remove(soc, peer); 6985 6986 dp_peer_vdev_list_remove(soc, vdev, peer); 6987 6988 qdf_spin_lock_bh(&soc->inactive_peer_list_lock); 6989 TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer, 6990 inactive_list_elem); 6991 qdf_spin_unlock_bh(&soc->inactive_peer_list_lock); 6992 6993 /* 6994 * Remove the reference added during peer_attach. 6995 * The peer will still be left allocated until the 6996 * PEER_UNMAP message arrives to remove the other 6997 * reference, added by the PEER_MAP message. 6998 */ 6999 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 7000 /* 7001 * Remove the reference taken above 7002 */ 7003 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 7004 7005 return QDF_STATUS_SUCCESS; 7006 } 7007 7008 /* 7009 * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer 7010 * @soc_hdl: Datapath soc handle 7011 * @vdev_id: virtual interface id 7012 * 7013 * Return: MAC address on success, NULL on failure. 
7014 * 7015 */ 7016 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl, 7017 uint8_t vdev_id) 7018 { 7019 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 7020 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 7021 DP_MOD_ID_CDP); 7022 uint8_t *mac = NULL; 7023 7024 if (!vdev) 7025 return NULL; 7026 7027 mac = vdev->mac_addr.raw; 7028 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 7029 7030 return mac; 7031 } 7032 7033 /* 7034 * dp_vdev_set_wds() - Enable per packet stats 7035 * @soc: DP soc handle 7036 * @vdev_id: id of DP VDEV handle 7037 * @val: value 7038 * 7039 * Return: none 7040 */ 7041 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 7042 uint32_t val) 7043 { 7044 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 7045 struct dp_vdev *vdev = 7046 dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id, 7047 DP_MOD_ID_CDP); 7048 7049 if (!vdev) 7050 return QDF_STATUS_E_FAILURE; 7051 7052 vdev->wds_enabled = val; 7053 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 7054 7055 return QDF_STATUS_SUCCESS; 7056 } 7057 7058 /* 7059 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode 7060 * @soc_hdl: datapath soc handle 7061 * @pdev_id: physical device instance id 7062 * 7063 * Return: virtual interface id 7064 */ 7065 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl, 7066 uint8_t pdev_id) 7067 { 7068 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 7069 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 7070 7071 if (qdf_unlikely(!pdev)) 7072 return -EINVAL; 7073 7074 return pdev->monitor_vdev->vdev_id; 7075 } 7076 7077 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id) 7078 { 7079 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 7080 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 7081 DP_MOD_ID_CDP); 7082 int opmode; 7083 7084 if (!vdev) { 7085 dp_err("vdev for id %d is NULL", vdev_id); 7086 return -EINVAL; 7087 } 7088 opmode = vdev->opmode; 7089 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 7090 7091 return opmode; 7092 } 7093 7094 /** 7095 * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev 7096 * @soc_hdl: ol_txrx_soc_handle handle 7097 * @vdev_id: vdev id for which os rx handles are needed 7098 * @stack_fn_p: pointer to stack function pointer 7099 * @osif_handle_p: pointer to ol_osif_vdev_handle 7100 * 7101 * Return: void 7102 */ 7103 static 7104 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl, 7105 uint8_t vdev_id, 7106 ol_txrx_rx_fp *stack_fn_p, 7107 ol_osif_vdev_handle *osif_vdev_p) 7108 { 7109 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 7110 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 7111 DP_MOD_ID_CDP); 7112 7113 if (!vdev) 7114 return; 7115 7116 *stack_fn_p = vdev->osif_rx_stack; 7117 *osif_vdev_p = vdev->osif_vdev; 7118 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 7119 } 7120 7121 /** 7122 * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev 7123 * @soc_hdl: datapath soc handle 7124 * @vdev_id: virtual device/interface id 7125 * 7126 * Return: Handle to control pdev 7127 */ 7128 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3( 7129 struct cdp_soc_t *soc_hdl, 7130 uint8_t vdev_id) 7131 { 7132 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 7133 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 7134 DP_MOD_ID_CDP); 7135 struct dp_pdev *pdev; 7136 7137 if (!vdev) 7138 return NULL; 7139 7140 pdev = vdev->pdev; 7141 dp_vdev_unref_delete(soc, vdev, 
/**
 * dp_get_ctrl_pdev_from_vdev_wifi3() - Get the control pdev of a vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual device/interface id
 *
 * Return: Handle to control pdev
 */
static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
						struct cdp_soc_t *soc_hdl,
						uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;

	if (!vdev)
		return NULL;

	pdev = vdev->pdev;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
}

/**
 * dp_monitor_mode_ring_config() - Send the tlv config to fw for the monitor
 *				   buffer ring based on target
 * @soc: soc handle
 * @mac_for_pdev: WIN- pdev_id, MCL- mac id
 * @pdev: physical device handle
 * @ring_num: mac id
 * @htt_tlv_filter: tlv filter
 *
 * Return: zero on success, non-zero on failure
 */
static inline
QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
				       struct dp_pdev *pdev, uint8_t ring_num,
				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
{
	QDF_STATUS status;

	if (soc->wlan_cfg_ctx->rxdma1_enable)
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     soc->rxdma_mon_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_MONITOR_BUF,
					     RX_MONITOR_BUFFER_SIZE,
					     &htt_tlv_filter);
	else
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     pdev->rx_mac_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					     &htt_tlv_filter);

	return status;
}

static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
	pdev->mcopy_mode = M_COPY_DISABLED;
	pdev->monitor_configured = false;
	pdev->monitor_vdev = NULL;
}

/**
 * dp_reset_monitor_mode() - Disable monitor mode
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath PDEV handle
 * @special_monitor: Flag to denote if it is smart monitor mode
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id,
				 uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	qdf_spin_lock_bh(&pdev->mon_lock);

	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
	pdev->monitor_vdev = NULL;
	pdev->monitor_configured = false;

	/*
	 * Lite monitor mode, smart monitor mode and monitor
	 * mode use these APIs to reset the filters and disable the mode
	 */
	if (pdev->mcopy_mode) {
#if defined(FEATURE_PERPKT_INFO)
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* FEATURE_PERPKT_INFO */
	} else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
		dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
	} else {
		dp_mon_filter_reset_mon_mode(pdev);
	}

	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to reset monitor filters"));
	}

	qdf_spin_unlock_bh(&pdev->mon_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_tx_pending() - read pending tx
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: outstanding tx
 */
static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	return qdf_atomic_read(&pdev->num_tx_outstanding);
}
/**
 * dp_get_peer_mac_from_peer_id() - get peer mac
 * @soc: CDP SoC handle
 * @peer_id: Peer ID
 * @peer_mac: MAC addr of PEER
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
					       uint32_t peer_id,
					       uint8_t *peer_mac)
{
	struct dp_peer *peer;

	if (soc && peer_mac) {
		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
					     (uint16_t)peer_id,
					     DP_MOD_ID_CDP);
		if (peer) {
			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
			return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}
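/*
 * Usage sketch (illustrative): translating an HTT peer id carried in a
 * firmware event back to its MAC address:
 *
 *	uint8_t mac[QDF_MAC_ADDR_SIZE];
 *
 *	if (dp_get_peer_mac_from_peer_id(soc, peer_id, mac) ==
 *	    QDF_STATUS_SUCCESS)
 *		... mac[] now holds the peer's address ...
 */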
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @special_monitor: Flag to denote if it is smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc_hdl,
					   uint8_t vdev_id,
					   uint8_t special_monitor)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	uint32_t mac_id;
	uint32_t mac_for_pdev;
	struct dp_pdev *pdev;
	uint32_t num_entries;
	struct dp_srng *mon_buf_ring;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;
	pdev->monitor_vdev = vdev;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
		  pdev, pdev->pdev_id, pdev->soc, vdev);

	/*
	 * Do not configure the monitor buf ring and filter for smart and
	 * lite monitor modes:
	 * for smart monitor, the filters are added along with the first NAC;
	 * for lite monitor, the required configuration is done through
	 * dp_set_pdev_param.
	 */
	if (special_monitor) {
		status = QDF_STATUS_SUCCESS;
		goto fail;
	}

	/* Check if the current pdev's monitor_vdev exists */
	if (pdev->monitor_configured) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "monitor vap already created vdev=%pK\n", vdev);
		status = QDF_STATUS_E_RESOURCES;
		goto fail;
	}

	pdev->monitor_configured = true;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
							  pdev->pdev_id);
		dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
						 FALSE);
		/*
		 * Configure a low interrupt threshold when monitor mode is
		 * configured.
		 */
		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
		if (mon_buf_ring->hal_srng) {
			num_entries = mon_buf_ring->num_entries;
			hal_set_low_threshold(mon_buf_ring->hal_srng,
					      num_entries >> 3);
			htt_srng_setup(pdev->soc->htt_handle,
				       pdev->pdev_id,
				       mon_buf_ring->hal_srng,
				       RXDMA_MONITOR_BUF);
		}
	}

	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);

	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set up monitor filters"));
		dp_mon_filter_reset_mon_mode(pdev);
		pdev->monitor_configured = false;
		pdev->monitor_vdev = NULL;
	}

fail:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @soc_hdl: soc handle
 * @pdev_id: id of Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				   struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time
	 */
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	vdev = pdev->monitor_vdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		  pdev, pdev_id, soc, vdev);

	/* Check if the current pdev's monitor_vdev exists */
	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update the filter mode and type in the pdev structure */
	pdev->mon_filter_mode = filter_val->mode;
	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	pdev->fp_data_filter = filter_val->fp_data;
	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	pdev->mo_data_filter = filter_val->mo_data;

	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set filter for advance mon mode"));
		dp_mon_filter_reset_mon_mode(pdev);
	}

	return status;
}

/**
 * dp_deliver_tx_mgmt() - Deliver a mgmt frame for tx capture
 * @cdp_soc: data path soc handle
 * @pdev_id: pdev_id
 * @nbuf: Management frame buffer
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	dp_deliver_mgmt_frm(pdev, nbuf);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_mon_set_bsscolor() - sets the bsscolor for tx capture
 * @pdev: Datapath PDEV handle
 * @bsscolor: new bsscolor
 */
static void
dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
	pdev->rx_mon_recv_status.bsscolor = bsscolor;
}

/**
 * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: true if the ucast filter flag is set
 */
static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
		return true;

	return false;
}

/**
 * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: true if the mcast filter flag is set
 */
static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
		return true;

	return false;
}

/**
 * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: true if the non-data filter flags are set
 */
static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
			return true;
		}
	}

	return false;
}

#ifdef MESH_MODE_SUPPORT
static
void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("val %d"), val);
	vdev->mesh_vdev = val;
	if (val)
		vdev->skip_sw_tid_classification |=
			DP_TX_MESH_ENABLED;
	else
		vdev->skip_sw_tid_classification &=
			~DP_TX_MESH_ENABLED;
}

/*
 * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
static
void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("val %d"), val);
	vdev->mesh_rx_filter = val;
}
#endif

/*
 * dp_vdev_set_hlos_tid_override() - to set the hlos tid override
 * @vdev: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
static
void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("val %d"), val);
	if (val)
		vdev->skip_sw_tid_classification |=
			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
	else
		vdev->skip_sw_tid_classification &=
			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
}
/*
 * dp_vdev_get_hlos_tid_override() - to get the hlos tid override flag
 * @vdev_hdl: virtual device object
 *
 * Return: 1 if this flag is set
 */
static
uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	return !!(vdev->skip_sw_tid_classification &
		  DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
					       int8_t vdev_id,
					       bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return;

	dp_info("enable %d vdev_id %d", enable, vdev_id);
	vdev->peer_protocol_count_track = enable;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
						   int8_t vdev_id,
						   int drop_mask)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return;

	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
	vdev->peer_protocol_count_dropmask = drop_mask;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
						  int8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	int peer_protocol_count_track;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return 0;

	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
		vdev_id);
	peer_protocol_count_track =
		vdev->peer_protocol_count_track;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return peer_protocol_count_track;
}

static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
					       int8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	int peer_protocol_count_dropmask;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return 0;

	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
		vdev_id);
	peer_protocol_count_dropmask =
		vdev->peer_protocol_count_dropmask;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return peer_protocol_count_dropmask;
}
#endif
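/*
 * Usage sketch (illustrative; only available when the build defines
 * VDEV_PEER_PROTOCOL_COUNT): enable per-peer protocol counting on a
 * vdev and read the setting back:
 *
 *	dp_enable_vdev_peer_protocol_count(soc_hdl, vdev_id, true);
 *	if (dp_is_vdev_peer_protocol_count_enabled(soc_hdl, vdev_id))
 *		... per-protocol counters are being tracked ...
 */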
bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
{
	uint8_t pdev_count;

	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
		if (soc->pdev_list[pdev_count] &&
		    soc->pdev_list[pdev_count] == data)
			return true;
	}
	return false;
}

/**
 * dp_rx_bar_stats_cb() - BAR received stats callback
 * @soc: SOC handle
 * @cb_ctxt: Call back context
 * @reo_status: Reo status
 *
 * Return: void
 */
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			union hal_reo_status *reo_status)
{
	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (!dp_check_pdev_exists(soc, pdev)) {
		dp_err_rl("pdev doesn't exist");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_PRINT_STATS("REO stats failure %d",
			       queue_status->header.status);
		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
		return;
	}

	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
}

/**
 * dp_aggregate_vdev_stats() - Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: aggregate stats structure to be populated
 *
 * Return: void
 */
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
			     struct cdp_vdev_stats *vdev_stats)
{
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	soc = vdev->pdev->soc;

	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));

	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     vdev_stats, vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
}

void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats =
		qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to allocate vdev stats");
		return;
	}

	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));

	if (pdev->mcopy_mode)
		DP_UPDATE_STATS(pdev, pdev->invalid_peer);

	soc = pdev->soc;
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}
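/*
 * Usage sketch (illustrative): dp_pdev_getstats() below is the typical
 * consumer; a caller holding a pdev refreshes the consolidated counters
 * and then reads them:
 *
 *	dp_aggregate_pdev_stats(pdev);
 *	tx_failed = pdev->stats.tx.tx_failed;
 */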
    qdf_mem_free(vdev_stats);

    return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_getstats() - get pdev packet level stats
 * @pdev_handle: Datapath PDEV handle
 * @stats: cdp network device stats structure
 *
 * Return: none
 */
static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
                 struct cdp_dev_stats *stats)
{
    struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

    dp_aggregate_pdev_stats(pdev);

    stats->tx_packets = pdev->stats.tx_i.rcvd.num;
    stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;

    stats->tx_errors = pdev->stats.tx.tx_failed +
        pdev->stats.tx_i.dropped.dropped_pkt.num;
    stats->tx_dropped = stats->tx_errors;

    stats->rx_packets = pdev->stats.rx.unicast.num +
        pdev->stats.rx.multicast.num +
        pdev->stats.rx.bcast.num;
    stats->rx_bytes = pdev->stats.rx.unicast.bytes +
        pdev->stats.rx.multicast.bytes +
        pdev->stats.rx.bcast.bytes;
    stats->rx_errors = pdev->stats.err.desc_alloc_fail +
        pdev->stats.err.ip_csum_err +
        pdev->stats.err.tcp_udp_csum_err +
        pdev->stats.rx.err.mic_err +
        pdev->stats.rx.err.decrypt_err +
        pdev->stats.err.rxdma_error +
        pdev->stats.err.reo_error;
    stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
        pdev->stats.dropped.mec +
        pdev->stats.dropped.mesh_filter +
        pdev->stats.dropped.wifi_parse +
        pdev->stats.dropped.mon_rx_drop +
        pdev->stats.dropped.mon_radiotap_update_err;
}

/**
 * dp_get_device_stats() - get interface level packet stats
 * @soc_hdl: soc handle
 * @id: vdev_id or pdev_id based on type
 * @stats: cdp network device stats structure
 * @type: device type pdev/vdev
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
                      struct cdp_dev_stats *stats,
                      uint8_t type)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
    QDF_STATUS status = QDF_STATUS_E_FAILURE;
    struct dp_vdev *vdev;

    switch (type) {
    case UPDATE_VDEV_STATS:
        vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);

        if (vdev) {
            status = dp_vdev_getstats((struct cdp_vdev *)vdev,
                          stats);
            dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
        }
        return status;
    case UPDATE_PDEV_STATS:
        {
            struct dp_pdev *pdev =
                dp_get_pdev_from_soc_pdev_id_wifi3(
                        (struct dp_soc *)soc,
                        id);
            if (pdev) {
                dp_pdev_getstats((struct cdp_pdev *)pdev,
                         stats);
                return QDF_STATUS_SUCCESS;
            }
        }
        break;
    default:
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
              "apstats cannot be updated for this input "
              "type %d", type);
        break;
    }

    return QDF_STATUS_E_FAILURE;
}

const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
{
    switch (ring_type) {
    case REO_DST:
        return "Reo_dst";
    case REO_EXCEPTION:
        return "Reo_exception";
    case REO_CMD:
        return "Reo_cmd";
    case REO_REINJECT:
        return "Reo_reinject";
    case REO_STATUS:
        return "Reo_status";
    case WBM2SW_RELEASE:
        return "wbm2sw_release";
    case TCL_DATA:
        return "tcl_data";
    case TCL_CMD_CREDIT:
        return "tcl_cmd_credit";
    case TCL_STATUS:
        return "tcl_status";
    case SW2WBM_RELEASE:
        return "sw2wbm_release";
    case RXDMA_BUF:
        return "Rxdma_buf";
    case RXDMA_DST:
        return "Rxdma_dst";
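    /*
     * The strings returned here feed SRNG log lines; a hypothetical
     * caller would do:
     *    dp_err("%s setup failed",
     *           dp_srng_get_str_from_hal_ring_type(RXDMA_MONITOR_BUF));
     * which prints "Rxdma_monitor_buf setup failed".
     */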
    case RXDMA_MONITOR_BUF:
        return "Rxdma_monitor_buf";
    case RXDMA_MONITOR_DESC:
        return "Rxdma_monitor_desc";
    case RXDMA_MONITOR_STATUS:
        return "Rxdma_monitor_status";
    default:
        dp_err("Invalid ring type");
        break;
    }
    return "Invalid";
}

/*
 * dp_print_napi_stats(): NAPI stats
 * @soc: soc handle
 *
 * Return: void
 */
void dp_print_napi_stats(struct dp_soc *soc)
{
    hif_print_napi_stats(soc->hif_handle);
}

#ifdef QCA_PEER_EXT_STATS
/**
 * dp_txrx_host_peer_ext_stats_clr() - Reinitialize the txrx peer ext stats
 * @peer: DP peer handle
 *
 * Return: none
 */
static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
{
    if (peer->pext_stats)
        qdf_mem_zero(peer->pext_stats, sizeof(*peer->pext_stats));
}
#else
static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
{
}
#endif

/**
 * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: none
 */
static inline void
dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
                struct dp_peer *peer,
                void *arg)
{
    struct dp_rx_tid *rx_tid;
    uint8_t tid;

    for (tid = 0; tid < DP_MAX_TIDS; tid++) {
        rx_tid = &peer->rx_tid[tid];
        DP_STATS_CLR(rx_tid);
    }

    DP_STATS_CLR(peer);

    dp_txrx_host_peer_ext_stats_clr(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
    dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
                 &peer->stats, peer->peer_id,
                 UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
#endif
}

/**
 * dp_txrx_host_stats_clr() - Reinitialize the txrx stats
 * @vdev: DP_VDEV handle
 * @soc: DP_SOC handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
{
    if (!vdev || !vdev->pdev)
        return QDF_STATUS_E_FAILURE;

    /*
     * If NSS offload is enabled, send a message to the NSS FW to clear
     * the stats. Once the NSS FW has cleared its statistics, then clear
     * the host statistics.
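     *
     * Sketch of the resulting order, assuming an NSS callback is
     * registered (see the code below):
     *    ol_ops->nss_stats_clr(ctrl_psoc, vdev_id);   <- NSS FW first
     *    DP_STATS_CLR(pdev/soc/vdev);                 <- host second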
8035 */ 8036 if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { 8037 if (soc->cdp_soc.ol_ops->nss_stats_clr) 8038 soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc, 8039 vdev->vdev_id); 8040 } 8041 8042 DP_STATS_CLR(vdev->pdev); 8043 DP_STATS_CLR(vdev->pdev->soc); 8044 DP_STATS_CLR(vdev); 8045 8046 hif_clear_napi_stats(vdev->pdev->soc->hif_handle); 8047 8048 dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL, 8049 DP_MOD_ID_GENERIC_STATS); 8050 8051 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 8052 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc, 8053 &vdev->stats, vdev->vdev_id, 8054 UPDATE_VDEV_STATS, vdev->pdev->pdev_id); 8055 #endif 8056 return QDF_STATUS_SUCCESS; 8057 } 8058 8059 /* 8060 * dp_get_host_peer_stats()- function to print peer stats 8061 * @soc: dp_soc handle 8062 * @mac_addr: mac address of the peer 8063 * 8064 * Return: QDF_STATUS 8065 */ 8066 static QDF_STATUS 8067 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr) 8068 { 8069 struct dp_peer *peer = NULL; 8070 8071 if (!mac_addr) { 8072 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 8073 "%s: NULL peer mac addr\n", __func__); 8074 return QDF_STATUS_E_FAILURE; 8075 } 8076 8077 peer = dp_peer_find_hash_find((struct dp_soc *)soc, 8078 mac_addr, 0, 8079 DP_VDEV_ALL, 8080 DP_MOD_ID_CDP); 8081 if (!peer) { 8082 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 8083 "%s: Invalid peer\n", __func__); 8084 return QDF_STATUS_E_FAILURE; 8085 } 8086 8087 dp_print_peer_stats(peer); 8088 dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL); 8089 8090 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 8091 8092 return QDF_STATUS_SUCCESS; 8093 } 8094 8095 /** 8096 * dp_txrx_stats_help() - Helper function for Txrx_Stats 8097 * 8098 * Return: None 8099 */ 8100 static void dp_txrx_stats_help(void) 8101 { 8102 dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>"); 8103 dp_info("stats_option:"); 8104 dp_info(" 1 -- HTT Tx Statistics"); 8105 dp_info(" 2 -- HTT Rx Statistics"); 8106 dp_info(" 3 -- HTT Tx HW Queue Statistics"); 8107 dp_info(" 4 -- HTT Tx HW Sched Statistics"); 8108 dp_info(" 5 -- HTT Error Statistics"); 8109 dp_info(" 6 -- HTT TQM Statistics"); 8110 dp_info(" 7 -- HTT TQM CMDQ Statistics"); 8111 dp_info(" 8 -- HTT TX_DE_CMN Statistics"); 8112 dp_info(" 9 -- HTT Tx Rate Statistics"); 8113 dp_info(" 10 -- HTT Rx Rate Statistics"); 8114 dp_info(" 11 -- HTT Peer Statistics"); 8115 dp_info(" 12 -- HTT Tx SelfGen Statistics"); 8116 dp_info(" 13 -- HTT Tx MU HWQ Statistics"); 8117 dp_info(" 14 -- HTT RING_IF_INFO Statistics"); 8118 dp_info(" 15 -- HTT SRNG Statistics"); 8119 dp_info(" 16 -- HTT SFM Info Statistics"); 8120 dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics"); 8121 dp_info(" 18 -- HTT Peer List Details"); 8122 dp_info(" 20 -- Clear Host Statistics"); 8123 dp_info(" 21 -- Host Rx Rate Statistics"); 8124 dp_info(" 22 -- Host Tx Rate Statistics"); 8125 dp_info(" 23 -- Host Tx Statistics"); 8126 dp_info(" 24 -- Host Rx Statistics"); 8127 dp_info(" 25 -- Host AST Statistics"); 8128 dp_info(" 26 -- Host SRNG PTR Statistics"); 8129 dp_info(" 27 -- Host Mon Statistics"); 8130 dp_info(" 28 -- Host REO Queue Statistics"); 8131 dp_info(" 29 -- Host Soc cfg param Statistics"); 8132 dp_info(" 30 -- Host pdev cfg param Statistics"); 8133 dp_info(" 31 -- Host FISA stats"); 8134 dp_info(" 32 -- Host Register Work stats"); 8135 } 8136 8137 /** 8138 * dp_print_host_stats()- Function to print the stats aggregated at host 8139 * @vdev_handle: DP_VDEV handle 8140 * @req: 
host stats type
 * @soc: dp soc handle
 *
 * Return: 0 on success, print error message in case of failure
 */
static int
dp_print_host_stats(struct dp_vdev *vdev,
            struct cdp_txrx_stats_req *req,
            struct dp_soc *soc)
{
    struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
    enum cdp_host_txrx_stats type =
            dp_stats_mapping_table[req->stats][STATS_HOST];

    dp_aggregate_pdev_stats(pdev);

    switch (type) {
    case TXRX_CLEAR_STATS:
        dp_txrx_host_stats_clr(vdev, soc);
        break;
    case TXRX_RX_RATE_STATS:
        dp_print_rx_rates(vdev);
        break;
    case TXRX_TX_RATE_STATS:
        dp_print_tx_rates(vdev);
        break;
    case TXRX_TX_HOST_STATS:
        dp_print_pdev_tx_stats(pdev);
        dp_print_soc_tx_stats(pdev->soc);
        break;
    case TXRX_RX_HOST_STATS:
        dp_print_pdev_rx_stats(pdev);
        dp_print_soc_rx_stats(pdev->soc);
        break;
    case TXRX_AST_STATS:
        dp_print_ast_stats(pdev->soc);
        dp_print_peer_table(vdev);
        break;
    case TXRX_SRNG_PTR_STATS:
        dp_print_ring_stats(pdev);
        break;
    case TXRX_RX_MON_STATS:
        dp_print_pdev_rx_mon_stats(pdev);
        break;
    case TXRX_REO_QUEUE_STATS:
        dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
                       req->peer_addr);
        break;
    case TXRX_SOC_CFG_PARAMS:
        dp_print_soc_cfg_params(pdev->soc);
        break;
    case TXRX_PDEV_CFG_PARAMS:
        dp_print_pdev_cfg_params(pdev);
        break;
    case TXRX_NAPI_STATS:
        dp_print_napi_stats(pdev->soc);
        break;
    case TXRX_SOC_INTERRUPT_STATS:
        dp_print_soc_interrupt_stats(pdev->soc);
        break;
    case TXRX_SOC_FSE_STATS:
        dp_rx_dump_fisa_table(pdev->soc);
        break;
    case TXRX_HAL_REG_WRITE_STATS:
        hal_dump_reg_write_stats(pdev->soc->hal_soc);
        hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
        break;
    default:
        dp_info("Wrong Input For TxRx Host Stats");
        dp_txrx_stats_help();
        break;
    }
    return 0;
}

/*
 * is_ppdu_txrx_capture_enabled() - check pktlog and debug sniffer modes
 * @pdev: dp pdev handle.
 *
 * Return: true if PPDU pktlog, tx sniffer and M-copy mode are all
 *         disabled (i.e. no PPDU tx/rx capture consumer is active),
 *         false otherwise.
 */
static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
{
    if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
        !pdev->mcopy_mode)
        return true;
    else
        return false;
}

/*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
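 *
 * Example (hypothetical caller): enabling BPR while PPDU pktlog is
 * already active routes the firmware config through
 * DP_PPDU_STATS_CFG_BPR_PKTLOG instead of plain DP_PPDU_STATS_CFG_BPR;
 * see the CDP_BPR_ENABLE case below.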
8237 */ 8238 static QDF_STATUS 8239 dp_set_bpr_enable(struct dp_pdev *pdev, int val) 8240 { 8241 switch (val) { 8242 case CDP_BPR_DISABLE: 8243 pdev->bpr_enable = CDP_BPR_DISABLE; 8244 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en && 8245 !pdev->tx_sniffer_enable && !pdev->mcopy_mode) { 8246 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); 8247 } else if (pdev->enhanced_stats_en && 8248 !pdev->tx_sniffer_enable && !pdev->mcopy_mode && 8249 !pdev->pktlog_ppdu_stats) { 8250 dp_h2t_cfg_stats_msg_send(pdev, 8251 DP_PPDU_STATS_CFG_ENH_STATS, 8252 pdev->pdev_id); 8253 } 8254 break; 8255 case CDP_BPR_ENABLE: 8256 pdev->bpr_enable = CDP_BPR_ENABLE; 8257 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && 8258 !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) { 8259 dp_h2t_cfg_stats_msg_send(pdev, 8260 DP_PPDU_STATS_CFG_BPR, 8261 pdev->pdev_id); 8262 } else if (pdev->enhanced_stats_en && 8263 !pdev->tx_sniffer_enable && !pdev->mcopy_mode && 8264 !pdev->pktlog_ppdu_stats) { 8265 dp_h2t_cfg_stats_msg_send(pdev, 8266 DP_PPDU_STATS_CFG_BPR_ENH, 8267 pdev->pdev_id); 8268 } else if (pdev->pktlog_ppdu_stats) { 8269 dp_h2t_cfg_stats_msg_send(pdev, 8270 DP_PPDU_STATS_CFG_BPR_PKTLOG, 8271 pdev->pdev_id); 8272 } 8273 break; 8274 default: 8275 break; 8276 } 8277 8278 return QDF_STATUS_SUCCESS; 8279 } 8280 8281 /* 8282 * dp_pdev_tid_stats_ingress_inc 8283 * @pdev: pdev handle 8284 * @val: increase in value 8285 * 8286 * Return: void 8287 */ 8288 static void 8289 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val) 8290 { 8291 pdev->stats.tid_stats.ingress_stack += val; 8292 } 8293 8294 /* 8295 * dp_pdev_tid_stats_osif_drop 8296 * @pdev: pdev handle 8297 * @val: increase in value 8298 * 8299 * Return: void 8300 */ 8301 static void 8302 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val) 8303 { 8304 pdev->stats.tid_stats.osif_drop += val; 8305 } 8306 8307 /* 8308 * dp_config_debug_sniffer()- API to enable/disable debug sniffer 8309 * @pdev: DP_PDEV handle 8310 * @val: user provided value 8311 * 8312 * Return: 0 for success. nonzero for failure. 8313 */ 8314 static QDF_STATUS 8315 dp_config_debug_sniffer(struct dp_pdev *pdev, int val) 8316 { 8317 QDF_STATUS status = QDF_STATUS_SUCCESS; 8318 8319 /* 8320 * Note: The mirror copy mode cannot co-exist with any other 8321 * monitor modes. Hence disabling the filter for this mode will 8322 * reset the monitor destination ring filters. 8323 */ 8324 if (pdev->mcopy_mode) { 8325 #ifdef FEATURE_PERPKT_INFO 8326 dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE); 8327 dp_pdev_disable_mcopy_code(pdev); 8328 dp_mon_filter_reset_mcopy_mode(pdev); 8329 status = dp_mon_filter_update(pdev); 8330 if (status != QDF_STATUS_SUCCESS) { 8331 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 8332 FL("Failed to reset AM copy mode filters")); 8333 } 8334 #endif /* FEATURE_PERPKT_INFO */ 8335 } 8336 switch (val) { 8337 case 0: 8338 pdev->tx_sniffer_enable = 0; 8339 pdev->monitor_configured = false; 8340 8341 /* 8342 * We don't need to reset the Rx monitor status ring or call 8343 * the API dp_ppdu_ring_reset() if all debug sniffer mode is 8344 * disabled. The Rx monitor status ring will be disabled when 8345 * the last mode using the monitor status ring get disabled. 
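         *
         * e.g. with enhanced stats still enabled and BPR off, the
         * branch below keeps the status ring alive and only drops the
         * PPDU stats config back to DP_PPDU_STATS_CFG_ENH_STATS.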
         */
        if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
            !pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
        } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev,
                          DP_PPDU_STATS_CFG_ENH_STATS,
                          pdev->pdev_id);
        } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev,
                          DP_PPDU_STATS_CFG_BPR_ENH,
                          pdev->pdev_id);
        } else {
            dp_h2t_cfg_stats_msg_send(pdev,
                          DP_PPDU_STATS_CFG_BPR,
                          pdev->pdev_id);
        }
        break;

    case 1:
        pdev->tx_sniffer_enable = 1;
        pdev->monitor_configured = false;

        if (!pdev->pktlog_ppdu_stats)
            dp_h2t_cfg_stats_msg_send(pdev,
                          DP_PPDU_STATS_CFG_SNIFFER,
                          pdev->pdev_id);
        break;
    case 2:
    case 4:
        if (pdev->monitor_vdev) {
            status = QDF_STATUS_E_RESOURCES;
            break;
        }

#ifdef FEATURE_PERPKT_INFO
        pdev->mcopy_mode = val;
        pdev->tx_sniffer_enable = 0;
        pdev->monitor_configured = true;

        /*
         * Setup the M copy mode filter.
         */
        dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
        dp_mon_filter_setup_mcopy_mode(pdev);
        status = dp_mon_filter_update(pdev);
        if (status != QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Failed to set M_copy mode filters"));
            dp_mon_filter_reset_mcopy_mode(pdev);
            dp_pdev_disable_mcopy_code(pdev);
            return status;
        }

        if (!pdev->pktlog_ppdu_stats)
            dp_h2t_cfg_stats_msg_send(pdev,
                          DP_PPDU_STATS_CFG_SNIFFER,
                          pdev->pdev_id);
#endif /* FEATURE_PERPKT_INFO */
        break;

    default:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "Invalid value");
        break;
    }
    return status;
}

#ifdef FEATURE_PERPKT_INFO
/*
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
    struct dp_pdev *pdev = NULL;
    QDF_STATUS status = QDF_STATUS_SUCCESS;

    pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                          pdev_id);

    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    if (pdev->enhanced_stats_en == 0)
        dp_cal_client_timer_start(pdev->cal_client_ctx);

    pdev->enhanced_stats_en = 1;

    dp_mon_filter_setup_enhanced_stats(pdev);
    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("Failed to set enhanced mode filters"));
        dp_mon_filter_reset_enhanced_stats(pdev);
        dp_cal_client_timer_stop(pdev->cal_client_ctx);
        pdev->enhanced_stats_en = 0;
        return QDF_STATUS_E_FAILURE;
    }

    if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
        dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
                      pdev->pdev_id);
    } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
        dp_h2t_cfg_stats_msg_send(pdev,
                      DP_PPDU_STATS_CFG_BPR_ENH,
                      pdev->pdev_id);
    }

    return QDF_STATUS_SUCCESS;
}

/*
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
 * @soc: the soc handle
 * @pdev_id: pdev_id of pdev
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                           pdev_id);

    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    if (pdev->enhanced_stats_en == 1)
        dp_cal_client_timer_stop(pdev->cal_client_ctx);

    pdev->enhanced_stats_en = 0;

    if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
        dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
    } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
        dp_h2t_cfg_stats_msg_send(pdev,
                      DP_PPDU_STATS_CFG_BPR,
                      pdev->pdev_id);
    }

    dp_mon_filter_reset_enhanced_stats(pdev);
    if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("Failed to reset enhanced mode filters"));
    }

    return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_PERPKT_INFO */

/*
 * dp_get_fw_peer_stats() - function to print peer stats
 * @soc: soc handle
 * @pdev_id: id of the pdev handle
 * @mac_addr: mac address of the peer
 * @cap: Type of htt stats requested
 * @is_wait: if set, wait on completion from firmware response
 *
 * Currently supports only MAC-ID based requests:
 *    1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
 *    2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
 *    3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
             uint8_t *mac_addr,
             uint32_t cap, uint32_t is_wait)
{
    int i;
    uint32_t config_param0 = 0;
    uint32_t config_param1 = 0;
    uint32_t config_param2 = 0;
    uint32_t config_param3 = 0;
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                           pdev_id);

    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
    config_param0 |= (1 << (cap + 1));

    for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
        config_param1 |= (1 << i);
    }

    config_param2 |= (mac_addr[0] & 0x000000ff);
    config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
    config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
    config_param2 |= ((mac_addr[3] << 24) & 0xff000000);

    config_param3 |= (mac_addr[4] & 0x000000ff);
    config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);

    if (is_wait) {
        qdf_event_reset(&pdev->fw_peer_stats_event);
        dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
                      config_param0, config_param1,
                      config_param2, config_param3,
                      0, DBG_STATS_COOKIE_DP_STATS, 0);
        qdf_wait_single_event(&pdev->fw_peer_stats_event,
                      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
    } else {
        dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
                      config_param0, config_param1,
                      config_param2, config_param3,
                      0, DBG_STATS_COOKIE_DEFAULT, 0);
    }

    return QDF_STATUS_SUCCESS;
}

/* This struct definition will be removed from here
 * once it gets added in the FW headers.
 */
struct httstats_cmd_req {
    uint32_t config_param0;
    uint32_t config_param1;
    uint32_t config_param2;
    uint32_t config_param3;
    int cookie;
    u_int8_t stats_id;
};

/*
 * dp_get_htt_stats() - function to process the HTT stats request
 * @soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @data:
pointer to request data 8581 * @data_len: length for request data 8582 * 8583 * return: QDF_STATUS 8584 */ 8585 static QDF_STATUS 8586 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data, 8587 uint32_t data_len) 8588 { 8589 struct httstats_cmd_req *req = (struct httstats_cmd_req *)data; 8590 struct dp_pdev *pdev = 8591 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 8592 pdev_id); 8593 8594 if (!pdev) 8595 return QDF_STATUS_E_FAILURE; 8596 8597 QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req)); 8598 dp_h2t_ext_stats_msg_send(pdev, req->stats_id, 8599 req->config_param0, req->config_param1, 8600 req->config_param2, req->config_param3, 8601 req->cookie, DBG_STATS_COOKIE_DEFAULT, 0); 8602 8603 return QDF_STATUS_SUCCESS; 8604 } 8605 8606 /** 8607 * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev 8608 * @pdev: DP_PDEV handle 8609 * @prio: tidmap priority value passed by the user 8610 * 8611 * Return: QDF_STATUS_SUCCESS on success 8612 */ 8613 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev, 8614 uint8_t prio) 8615 { 8616 struct dp_soc *soc = pdev->soc; 8617 8618 soc->tidmap_prty = prio; 8619 8620 hal_tx_set_tidmap_prty(soc->hal_soc, prio); 8621 return QDF_STATUS_SUCCESS; 8622 } 8623 8624 /* 8625 * dp_get_peer_param: function to get parameters in peer 8626 * @cdp_soc: DP soc handle 8627 * @vdev_id: id of vdev handle 8628 * @peer_mac: peer mac address 8629 * @param: parameter type to be set 8630 * @val : address of buffer 8631 * 8632 * Return: val 8633 */ 8634 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8635 uint8_t *peer_mac, 8636 enum cdp_peer_param_type param, 8637 cdp_config_param_type *val) 8638 { 8639 return QDF_STATUS_SUCCESS; 8640 } 8641 8642 #ifdef WLAN_ATF_ENABLE 8643 static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value) 8644 { 8645 if (!pdev) { 8646 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 8647 "Invalid pdev"); 8648 return; 8649 } 8650 8651 pdev->dp_atf_stats_enable = value; 8652 } 8653 #else 8654 static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value) 8655 { 8656 } 8657 #endif 8658 8659 /* 8660 * dp_set_peer_param: function to set parameters in peer 8661 * @cdp_soc: DP soc handle 8662 * @vdev_id: id of vdev handle 8663 * @peer_mac: peer mac address 8664 * @param: parameter type to be set 8665 * @val: value of parameter to be set 8666 * 8667 * Return: 0 for success. nonzero for failure. 
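 *
 * Example (hypothetical caller):
 *    cdp_config_param_type val = {0};
 *
 *    val.cdp_peer_param_nawds = 1;
 *    dp_set_peer_param(cdp_soc, vdev_id, peer_mac, CDP_CONFIG_NAWDS, val);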
8668 */ 8669 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8670 uint8_t *peer_mac, 8671 enum cdp_peer_param_type param, 8672 cdp_config_param_type val) 8673 { 8674 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, 8675 peer_mac, 0, vdev_id, 8676 DP_MOD_ID_CDP); 8677 8678 if (!peer) 8679 return QDF_STATUS_E_FAILURE; 8680 8681 switch (param) { 8682 case CDP_CONFIG_NAWDS: 8683 peer->nawds_enabled = val.cdp_peer_param_nawds; 8684 break; 8685 case CDP_CONFIG_NAC: 8686 peer->nac = !!(val.cdp_peer_param_nac); 8687 break; 8688 case CDP_CONFIG_ISOLATION: 8689 dp_set_peer_isolation(peer, val.cdp_peer_param_isolation); 8690 break; 8691 case CDP_CONFIG_IN_TWT: 8692 peer->in_twt = !!(val.cdp_peer_param_in_twt); 8693 break; 8694 default: 8695 break; 8696 } 8697 8698 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 8699 8700 return QDF_STATUS_SUCCESS; 8701 } 8702 8703 /* 8704 * dp_get_pdev_param: function to get parameters from pdev 8705 * @cdp_soc: DP soc handle 8706 * @pdev_id: id of pdev handle 8707 * @param: parameter type to be get 8708 * @value : buffer for value 8709 * 8710 * Return: status 8711 */ 8712 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, 8713 enum cdp_pdev_param_type param, 8714 cdp_config_param_type *val) 8715 { 8716 struct cdp_pdev *pdev = (struct cdp_pdev *) 8717 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 8718 pdev_id); 8719 if (!pdev) 8720 return QDF_STATUS_E_FAILURE; 8721 8722 switch (param) { 8723 case CDP_CONFIG_VOW: 8724 val->cdp_pdev_param_cfg_vow = 8725 ((struct dp_pdev *)pdev)->delay_stats_flag; 8726 break; 8727 case CDP_TX_PENDING: 8728 val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev); 8729 break; 8730 case CDP_FILTER_MCAST_DATA: 8731 val->cdp_pdev_param_fltr_mcast = 8732 dp_pdev_get_filter_mcast_data(pdev); 8733 break; 8734 case CDP_FILTER_NO_DATA: 8735 val->cdp_pdev_param_fltr_none = 8736 dp_pdev_get_filter_non_data(pdev); 8737 break; 8738 case CDP_FILTER_UCAST_DATA: 8739 val->cdp_pdev_param_fltr_ucast = 8740 dp_pdev_get_filter_ucast_data(pdev); 8741 break; 8742 default: 8743 return QDF_STATUS_E_FAILURE; 8744 } 8745 8746 return QDF_STATUS_SUCCESS; 8747 } 8748 8749 /* 8750 * dp_set_pdev_param: function to set parameters in pdev 8751 * @cdp_soc: DP soc handle 8752 * @pdev_id: id of pdev handle 8753 * @param: parameter type to be set 8754 * @val: value of parameter to be set 8755 * 8756 * Return: 0 for success. nonzero for failure. 
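 *
 * Example (hypothetical caller):
 *    cdp_config_param_type val = {0};
 *
 *    val.cdp_pdev_param_monitor_chan = 36;
 *    dp_set_pdev_param(cdp_soc, pdev_id, CDP_MONITOR_CHANNEL, val);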
8757 */ 8758 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, 8759 enum cdp_pdev_param_type param, 8760 cdp_config_param_type val) 8761 { 8762 int target_type; 8763 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 8764 struct dp_pdev *pdev = 8765 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 8766 pdev_id); 8767 if (!pdev) 8768 return QDF_STATUS_E_FAILURE; 8769 8770 target_type = hal_get_target_type(soc->hal_soc); 8771 switch (target_type) { 8772 case TARGET_TYPE_QCA6750: 8773 pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_5G_LMAC_ID; 8774 pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID; 8775 pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID; 8776 break; 8777 default: 8778 pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_2G_LMAC_ID; 8779 pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID; 8780 pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID; 8781 break; 8782 } 8783 8784 switch (param) { 8785 case CDP_CONFIG_TX_CAPTURE: 8786 return dp_config_debug_sniffer(pdev, 8787 val.cdp_pdev_param_tx_capture); 8788 case CDP_CONFIG_DEBUG_SNIFFER: 8789 return dp_config_debug_sniffer(pdev, 8790 val.cdp_pdev_param_dbg_snf); 8791 case CDP_CONFIG_BPR_ENABLE: 8792 return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable); 8793 case CDP_CONFIG_PRIMARY_RADIO: 8794 pdev->is_primary = val.cdp_pdev_param_primary_radio; 8795 break; 8796 case CDP_CONFIG_CAPTURE_LATENCY: 8797 pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy; 8798 break; 8799 case CDP_INGRESS_STATS: 8800 dp_pdev_tid_stats_ingress_inc(pdev, 8801 val.cdp_pdev_param_ingrs_stats); 8802 break; 8803 case CDP_OSIF_DROP: 8804 dp_pdev_tid_stats_osif_drop(pdev, 8805 val.cdp_pdev_param_osif_drop); 8806 break; 8807 case CDP_CONFIG_ENH_RX_CAPTURE: 8808 return dp_config_enh_rx_capture(pdev, 8809 val.cdp_pdev_param_en_rx_cap); 8810 case CDP_CONFIG_ENH_TX_CAPTURE: 8811 return dp_config_enh_tx_capture(pdev, 8812 val.cdp_pdev_param_en_tx_cap); 8813 case CDP_CONFIG_HMMC_TID_OVERRIDE: 8814 pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd; 8815 break; 8816 case CDP_CONFIG_HMMC_TID_VALUE: 8817 pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid; 8818 break; 8819 case CDP_CHAN_NOISE_FLOOR: 8820 pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr; 8821 break; 8822 case CDP_TIDMAP_PRTY: 8823 dp_set_pdev_tidmap_prty_wifi3(pdev, 8824 val.cdp_pdev_param_tidmap_prty); 8825 break; 8826 case CDP_FILTER_NEIGH_PEERS: 8827 dp_set_filter_neigh_peers(pdev, 8828 val.cdp_pdev_param_fltr_neigh_peers); 8829 break; 8830 case CDP_MONITOR_CHANNEL: 8831 pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan; 8832 break; 8833 case CDP_MONITOR_FREQUENCY: 8834 pdev->mon_chan_freq = val.cdp_pdev_param_mon_freq; 8835 pdev->mon_chan_band = 8836 wlan_reg_freq_to_band(pdev->mon_chan_freq); 8837 break; 8838 case CDP_CONFIG_BSS_COLOR: 8839 dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color); 8840 break; 8841 case CDP_SET_ATF_STATS_ENABLE: 8842 dp_set_atf_stats_enable(pdev, 8843 val.cdp_pdev_param_atf_stats_enable); 8844 break; 8845 default: 8846 return QDF_STATUS_E_INVAL; 8847 } 8848 return QDF_STATUS_SUCCESS; 8849 } 8850 8851 #ifdef QCA_PEER_EXT_STATS 8852 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc, 8853 qdf_nbuf_t nbuf) 8854 { 8855 struct dp_peer *peer = NULL; 8856 uint16_t peer_id, ring_id; 8857 uint8_t tid = qdf_nbuf_get_tid_val(nbuf); 8858 struct cdp_peer_ext_stats *pext_stats = NULL; 8859 8860 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 8861 if 
(peer_id > soc->max_peers) 8862 return; 8863 8864 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP); 8865 if (qdf_unlikely(!peer)) 8866 return; 8867 8868 if (qdf_likely(peer->pext_stats)) { 8869 pext_stats = peer->pext_stats; 8870 ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); 8871 dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id], 8872 nbuf); 8873 } 8874 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 8875 } 8876 #else 8877 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc, 8878 qdf_nbuf_t nbuf) 8879 { 8880 } 8881 #endif 8882 8883 /* 8884 * dp_calculate_delay_stats: function to get rx delay stats 8885 * @cdp_soc: DP soc handle 8886 * @vdev_id: id of DP vdev handle 8887 * @nbuf: skb 8888 * 8889 * Return: QDF_STATUS 8890 */ 8891 static QDF_STATUS 8892 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8893 qdf_nbuf_t nbuf) 8894 { 8895 struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc); 8896 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 8897 DP_MOD_ID_CDP); 8898 8899 if (!vdev) 8900 return QDF_STATUS_SUCCESS; 8901 8902 if (vdev->pdev->delay_stats_flag) 8903 dp_rx_compute_delay(vdev, nbuf); 8904 else 8905 dp_rx_update_peer_delay_stats(soc, nbuf); 8906 8907 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 8908 return QDF_STATUS_SUCCESS; 8909 } 8910 8911 /* 8912 * dp_get_vdev_param: function to get parameters from vdev 8913 * @cdp_soc : DP soc handle 8914 * @vdev_id: id of DP vdev handle 8915 * @param: parameter type to get value 8916 * @val: buffer address 8917 * 8918 * return: status 8919 */ 8920 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8921 enum cdp_vdev_param_type param, 8922 cdp_config_param_type *val) 8923 { 8924 struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc); 8925 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 8926 DP_MOD_ID_CDP); 8927 8928 if (!vdev) 8929 return QDF_STATUS_E_FAILURE; 8930 8931 switch (param) { 8932 case CDP_ENABLE_WDS: 8933 val->cdp_vdev_param_wds = vdev->wds_enabled; 8934 break; 8935 case CDP_ENABLE_MEC: 8936 val->cdp_vdev_param_mec = vdev->mec_enabled; 8937 break; 8938 case CDP_ENABLE_DA_WAR: 8939 val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled; 8940 break; 8941 case CDP_ENABLE_IGMP_MCAST_EN: 8942 val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en; 8943 break; 8944 case CDP_ENABLE_MCAST_EN: 8945 val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en; 8946 break; 8947 case CDP_ENABLE_HLOS_TID_OVERRIDE: 8948 val->cdp_vdev_param_hlos_tid_override = 8949 dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev); 8950 break; 8951 default: 8952 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 8953 "param value %d is wrong\n", 8954 param); 8955 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 8956 return QDF_STATUS_E_FAILURE; 8957 } 8958 8959 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 8960 return QDF_STATUS_SUCCESS; 8961 } 8962 8963 /* 8964 * dp_set_vdev_param: function to set parameters in vdev 8965 * @cdp_soc : DP soc handle 8966 * @vdev_id: id of DP vdev handle 8967 * @param: parameter type to get value 8968 * @val: value 8969 * 8970 * return: QDF_STATUS 8971 */ 8972 static QDF_STATUS 8973 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8974 enum cdp_vdev_param_type param, cdp_config_param_type val) 8975 { 8976 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc; 8977 struct dp_vdev *vdev = 8978 dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP); 8979 uint32_t var = 0; 8980 8981 if (!vdev) 8982 return QDF_STATUS_E_FAILURE; 
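    /*
     * Illustration of the aging-timer semantics handled below (values
     * are only examples): val.cdp_vdev_param_aging_tmr == 0 stops the
     * soc AST aging timer, while a non-zero value different from the
     * current one re-arms it with that period; see the
     * CDP_CFG_WDS_AGING_TIMER case.
     */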
    switch (param) {
    case CDP_ENABLE_WDS:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "wds_enable %d for vdev(%pK) id(%d)\n",
              val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
        vdev->wds_enabled = val.cdp_vdev_param_wds;
        break;
    case CDP_ENABLE_MEC:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "mec_enable %d for vdev(%pK) id(%d)\n",
              val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
        vdev->mec_enabled = val.cdp_vdev_param_mec;
        break;
    case CDP_ENABLE_DA_WAR:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "da_war_enable %d for vdev(%pK) id(%d)\n",
              val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
        vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
        dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
                          vdev->pdev->soc));
        break;
    case CDP_ENABLE_NAWDS:
        vdev->nawds_enabled = val.cdp_vdev_param_nawds;
        break;
    case CDP_ENABLE_MCAST_EN:
        vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
        break;
    case CDP_ENABLE_IGMP_MCAST_EN:
        vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
        break;
    case CDP_ENABLE_PROXYSTA:
        vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
        break;
    case CDP_UPDATE_TDLS_FLAGS:
        vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
        break;
    case CDP_CFG_WDS_AGING_TIMER:
        var = val.cdp_vdev_param_aging_tmr;
        if (!var)
            qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
        else if (var != vdev->wds_aging_timer_val)
            qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);

        vdev->wds_aging_timer_val = var;
        break;
    case CDP_ENABLE_AP_BRIDGE:
        if (wlan_op_mode_sta != vdev->opmode)
            vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
        else
            vdev->ap_bridge_enabled = false;
        break;
    case CDP_ENABLE_CIPHER:
        vdev->sec_type = val.cdp_vdev_param_cipher_en;
        break;
    case CDP_ENABLE_QWRAP_ISOLATION:
        vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
        break;
    case CDP_UPDATE_MULTIPASS:
        vdev->multipass_en = val.cdp_vdev_param_update_multipass;
        break;
    case CDP_TX_ENCAP_TYPE:
        vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
        break;
    case CDP_RX_DECAP_TYPE:
        vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
        break;
    case CDP_TID_VDEV_PRTY:
        vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
        break;
    case CDP_TIDMAP_TBL_ID:
        vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
        break;
#ifdef MESH_MODE_SUPPORT
    case CDP_MESH_RX_FILTER:
        dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
                       val.cdp_vdev_param_mesh_rx_filter);
        break;
    case CDP_MESH_MODE:
        dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
                      val.cdp_vdev_param_mesh_mode);
        break;
#endif
    case CDP_ENABLE_CSUM:
        dp_info("vdev_id %d enable checksum %d", vdev_id,
            val.cdp_enable_tx_checksum);
        vdev->csum_enabled = val.cdp_enable_tx_checksum;
        break;
    case CDP_ENABLE_HLOS_TID_OVERRIDE:
        dp_info("vdev_id %d enable hlos tid override %d", vdev_id,
            val.cdp_vdev_param_hlos_tid_override);
        dp_vdev_set_hlos_tid_override(vdev,
                val.cdp_vdev_param_hlos_tid_override);
        break;
#ifdef QCA_SUPPORT_WDS_EXTENDED
    case CDP_CFG_WDS_EXT:
        vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
        break;
#endif
    default:
        break;
    }

    dp_tx_vdev_update_search_flags((struct
dp_vdev *)vdev); 9087 dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP); 9088 9089 return QDF_STATUS_SUCCESS; 9090 } 9091 9092 /* 9093 * dp_set_psoc_param: function to set parameters in psoc 9094 * @cdp_soc : DP soc handle 9095 * @param: parameter type to be set 9096 * @val: value of parameter to be set 9097 * 9098 * return: QDF_STATUS 9099 */ 9100 static QDF_STATUS 9101 dp_set_psoc_param(struct cdp_soc_t *cdp_soc, 9102 enum cdp_psoc_param_type param, cdp_config_param_type val) 9103 { 9104 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 9105 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx; 9106 9107 switch (param) { 9108 case CDP_ENABLE_RATE_STATS: 9109 soc->rdkstats_enabled = val.cdp_psoc_param_en_rate_stats; 9110 break; 9111 case CDP_SET_NSS_CFG: 9112 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, 9113 val.cdp_psoc_param_en_nss_cfg); 9114 /* 9115 * TODO: masked out based on the per offloaded radio 9116 */ 9117 switch (val.cdp_psoc_param_en_nss_cfg) { 9118 case dp_nss_cfg_default: 9119 break; 9120 case dp_nss_cfg_first_radio: 9121 /* 9122 * This configuration is valid for single band radio which 9123 * is also NSS offload. 9124 */ 9125 case dp_nss_cfg_dbdc: 9126 case dp_nss_cfg_dbtc: 9127 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0); 9128 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0); 9129 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0); 9130 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0); 9131 break; 9132 default: 9133 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 9134 "Invalid offload config %d", 9135 val.cdp_psoc_param_en_nss_cfg); 9136 } 9137 9138 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 9139 FL("nss-wifi<0> nss config is enabled")); 9140 break; 9141 case CDP_SET_PREFERRED_HW_MODE: 9142 soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode; 9143 break; 9144 default: 9145 break; 9146 } 9147 9148 return QDF_STATUS_SUCCESS; 9149 } 9150 9151 /* 9152 * dp_get_psoc_param: function to get parameters in soc 9153 * @cdp_soc : DP soc handle 9154 * @param: parameter type to be set 9155 * @val: address of buffer 9156 * 9157 * return: status 9158 */ 9159 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc, 9160 enum cdp_psoc_param_type param, 9161 cdp_config_param_type *val) 9162 { 9163 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 9164 9165 if (!soc) 9166 return QDF_STATUS_E_FAILURE; 9167 9168 switch (param) { 9169 case CDP_CFG_PEER_EXT_STATS: 9170 val->cdp_psoc_param_pext_stats = 9171 wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx); 9172 break; 9173 default: 9174 dp_warn("Invalid param"); 9175 break; 9176 } 9177 9178 return QDF_STATUS_SUCCESS; 9179 } 9180 9181 /** 9182 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer 9183 * @soc: DP_SOC handle 9184 * @pdev_id: id of DP_PDEV handle 9185 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode 9186 * @is_tx_pkt_cap_enable: enable/disable/delete/print 9187 * Tx packet capture in monitor mode 9188 * @peer_mac: MAC address for which the above need to be enabled/disabled 9189 * 9190 * Return: Success if Rx & Tx capture is enabled for peer, false otherwise 9191 */ 9192 QDF_STATUS 9193 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, 9194 uint8_t pdev_id, 9195 bool is_rx_pkt_cap_enable, 9196 uint8_t is_tx_pkt_cap_enable, 9197 uint8_t *peer_mac) 9198 { 9199 struct dp_peer *peer; 9200 QDF_STATUS status; 9201 struct dp_pdev *pdev = 9202 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 9203 pdev_id); 9204 if (!pdev) 9205 return QDF_STATUS_E_FAILURE; 9206 
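    /*
     * Note: status is assigned by both capture updates below, so only
     * the result of the Rx-capture call is returned; a failure from the
     * Tx-capture call is silently overwritten.
     */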
    peer = dp_peer_find_hash_find((struct dp_soc *)soc,
                      peer_mac, 0, DP_VDEV_ALL,
                      DP_MOD_ID_CDP);
    if (!peer)
        return QDF_STATUS_E_FAILURE;

    /* we need to set tx pkt capture for non associated peer */
    status = dp_peer_set_tx_capture_enabled(pdev, peer,
                        is_tx_pkt_cap_enable,
                        peer_mac);

    status = dp_peer_set_rx_capture_enabled(pdev, peer,
                        is_rx_pkt_cap_enable,
                        peer_mac);
    dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

    return status;
}

/*
 * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
 * @cdp_soc: CDP soc handle
 * @vdev_id: id of DP_VDEV handle
 * @map_id: ID of map that needs to be updated
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
                         uint8_t vdev_id,
                         uint8_t map_id)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                             DP_MOD_ID_CDP);
    if (vdev) {
        vdev->dscp_tid_map_id = map_id;
        /* Update flag for transmit tid classification */
        if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
            vdev->skip_sw_tid_classification |=
                DP_TX_HW_DSCP_TID_MAP_VALID;
        else
            vdev->skip_sw_tid_classification &=
                ~DP_TX_HW_DSCP_TID_MAP_VALID;
        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
        return QDF_STATUS_SUCCESS;
    }

    return QDF_STATUS_E_FAILURE;
}

#ifdef DP_RATETABLE_SUPPORT
static int dp_txrx_get_ratekbps(int preamb, int mcs,
                int htflag, int gintval)
{
    uint32_t rix;
    uint16_t ratecode;

    return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
                   (uint8_t)preamb, 1, &rix, &ratecode);
}
#else
static int dp_txrx_get_ratekbps(int preamb, int mcs,
                int htflag, int gintval)
{
    return 0;
}
#endif

/* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
 * @soc: DP soc handle
 * @pdev_id: id of DP pdev handle
 * @pdev_stats: buffer to copy to
 *
 * Return: status success/failure
 */
static QDF_STATUS
dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
               struct cdp_pdev_stats *pdev_stats)
{
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                           pdev_id);
    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    dp_aggregate_pdev_stats(pdev);

    qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
    return QDF_STATUS_SUCCESS;
}

/* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure
 *
 * Return: void
 */
static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
                     void *buf)
{
    struct cdp_tx_ingress_stats *host_stats = NULL;

    if (!buf) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "Invalid host stats buf");
        return;
    }
    host_stats = (struct cdp_tx_ingress_stats *)buf;

    DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
             host_stats->mcast_en.mcast_pkt.num,
             host_stats->mcast_en.mcast_pkt.bytes);
    DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
             host_stats->mcast_en.dropped_map_error);
    DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
             host_stats->mcast_en.dropped_self_mac);
    DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
host_stats->mcast_en.dropped_send_fail); 9325 DP_STATS_INC(vdev, tx_i.mcast_en.ucast, 9326 host_stats->mcast_en.ucast); 9327 DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 9328 host_stats->mcast_en.fail_seg_alloc); 9329 DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 9330 host_stats->mcast_en.clone_fail); 9331 } 9332 9333 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP 9334 * @vdev: DP vdev handle 9335 * @buf: buffer containing specific stats structure 9336 * 9337 * Returns: void 9338 */ 9339 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev, 9340 void *buf) 9341 { 9342 struct cdp_tx_ingress_stats *host_stats = NULL; 9343 9344 if (!buf) { 9345 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 9346 "Invalid host stats buf"); 9347 return; 9348 } 9349 host_stats = (struct cdp_tx_ingress_stats *)buf; 9350 9351 DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd, 9352 host_stats->igmp_mcast_en.igmp_rcvd); 9353 DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted, 9354 host_stats->igmp_mcast_en.igmp_ucast_converted); 9355 } 9356 9357 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP 9358 * @soc: DP soc handle 9359 * @vdev_id: id of DP vdev handle 9360 * @buf: buffer containing specific stats structure 9361 * @stats_id: stats type 9362 * 9363 * Returns: QDF_STATUS 9364 */ 9365 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl, 9366 uint8_t vdev_id, 9367 void *buf, 9368 uint16_t stats_id) 9369 { 9370 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 9371 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 9372 DP_MOD_ID_CDP); 9373 9374 if (!vdev) { 9375 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 9376 "Invalid vdev handle"); 9377 return QDF_STATUS_E_FAILURE; 9378 } 9379 9380 switch (stats_id) { 9381 case DP_VDEV_STATS_PKT_CNT_ONLY: 9382 break; 9383 case DP_VDEV_STATS_TX_ME: 9384 dp_txrx_update_vdev_me_stats(vdev, buf); 9385 dp_txrx_update_vdev_igmp_me_stats(vdev, buf); 9386 break; 9387 default: 9388 qdf_info("Invalid stats_id %d", stats_id); 9389 break; 9390 } 9391 9392 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 9393 return QDF_STATUS_SUCCESS; 9394 } 9395 9396 /* dp_txrx_get_peer_stats - will return cdp_peer_stats 9397 * @soc: soc handle 9398 * @vdev_id: id of vdev handle 9399 * @peer_mac: mac of DP_PEER handle 9400 * @peer_stats: buffer to copy to 9401 * return : status success/failure 9402 */ 9403 static QDF_STATUS 9404 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id, 9405 uint8_t *peer_mac, struct cdp_peer_stats *peer_stats) 9406 { 9407 QDF_STATUS status = QDF_STATUS_SUCCESS; 9408 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, 9409 peer_mac, 0, vdev_id, 9410 DP_MOD_ID_CDP); 9411 9412 if (!peer) 9413 return QDF_STATUS_E_FAILURE; 9414 9415 qdf_mem_copy(peer_stats, &peer->stats, 9416 sizeof(struct cdp_peer_stats)); 9417 9418 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 9419 9420 return status; 9421 } 9422 9423 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats 9424 * @param soc - soc handle 9425 * @param vdev_id - vdev_id of vdev object 9426 * @param peer_mac - mac address of the peer 9427 * @param type - enum of required stats 9428 * @param buf - buffer to hold the value 9429 * return : status success/failure 9430 */ 9431 static QDF_STATUS 9432 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id, 9433 uint8_t *peer_mac, enum cdp_peer_stats_type type, 9434 cdp_peer_stats_param_t *buf) 9435 { 9436 QDF_STATUS ret = 
QDF_STATUS_SUCCESS; 9437 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, 9438 peer_mac, 0, vdev_id, 9439 DP_MOD_ID_CDP); 9440 9441 if (!peer) { 9442 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 9443 "Invalid Peer for Mac "QDF_MAC_ADDR_FMT, 9444 QDF_MAC_ADDR_REF(peer_mac)); 9445 return QDF_STATUS_E_FAILURE; 9446 } else if (type < cdp_peer_stats_max) { 9447 switch (type) { 9448 case cdp_peer_tx_ucast: 9449 buf->tx_ucast = peer->stats.tx.ucast; 9450 break; 9451 case cdp_peer_tx_mcast: 9452 buf->tx_mcast = peer->stats.tx.mcast; 9453 break; 9454 case cdp_peer_tx_rate: 9455 buf->tx_rate = peer->stats.tx.tx_rate; 9456 break; 9457 case cdp_peer_tx_last_tx_rate: 9458 buf->last_tx_rate = peer->stats.tx.last_tx_rate; 9459 break; 9460 case cdp_peer_tx_inactive_time: 9461 buf->tx_inactive_time = peer->stats.tx.inactive_time; 9462 break; 9463 case cdp_peer_tx_ratecode: 9464 buf->tx_ratecode = peer->stats.tx.tx_ratecode; 9465 break; 9466 case cdp_peer_tx_flags: 9467 buf->tx_flags = peer->stats.tx.tx_flags; 9468 break; 9469 case cdp_peer_tx_power: 9470 buf->tx_power = peer->stats.tx.tx_power; 9471 break; 9472 case cdp_peer_rx_rate: 9473 buf->rx_rate = peer->stats.rx.rx_rate; 9474 break; 9475 case cdp_peer_rx_last_rx_rate: 9476 buf->last_rx_rate = peer->stats.rx.last_rx_rate; 9477 break; 9478 case cdp_peer_rx_ratecode: 9479 buf->rx_ratecode = peer->stats.rx.rx_ratecode; 9480 break; 9481 case cdp_peer_rx_ucast: 9482 buf->rx_ucast = peer->stats.rx.unicast; 9483 break; 9484 case cdp_peer_rx_flags: 9485 buf->rx_flags = peer->stats.rx.rx_flags; 9486 break; 9487 case cdp_peer_rx_avg_rssi: 9488 buf->rx_avg_rssi = peer->stats.rx.avg_rssi; 9489 break; 9490 default: 9491 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 9492 "Invalid value"); 9493 ret = QDF_STATUS_E_FAILURE; 9494 break; 9495 } 9496 } else { 9497 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 9498 "Invalid value"); 9499 ret = QDF_STATUS_E_FAILURE; 9500 } 9501 9502 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 9503 9504 return ret; 9505 } 9506 9507 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer 9508 * @soc: soc handle 9509 * @vdev_id: id of vdev handle 9510 * @peer_mac: mac of DP_PEER handle 9511 * 9512 * return : QDF_STATUS 9513 */ 9514 static QDF_STATUS 9515 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id, 9516 uint8_t *peer_mac) 9517 { 9518 QDF_STATUS status = QDF_STATUS_SUCCESS; 9519 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, 9520 peer_mac, 0, vdev_id, 9521 DP_MOD_ID_CDP); 9522 9523 if (!peer) 9524 return QDF_STATUS_E_FAILURE; 9525 9526 qdf_mem_zero(&peer->stats, sizeof(peer->stats)); 9527 9528 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 9529 9530 return status; 9531 } 9532 9533 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats 9534 * @vdev_handle: DP_VDEV handle 9535 * @buf: buffer for vdev stats 9536 * 9537 * return : int 9538 */ 9539 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 9540 void *buf, bool is_aggregate) 9541 { 9542 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 9543 struct cdp_vdev_stats *vdev_stats; 9544 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 9545 DP_MOD_ID_CDP); 9546 9547 if (!vdev) 9548 return 1; 9549 9550 vdev_stats = (struct cdp_vdev_stats *)buf; 9551 9552 if (is_aggregate) { 9553 dp_aggregate_vdev_stats(vdev, buf); 9554 } else { 9555 qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats)); 9556 } 9557 9558 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 9559 return 0; 
9560 } 9561 9562 /* 9563 * dp_get_total_per(): get total per 9564 * @soc: DP soc handle 9565 * @pdev_id: id of DP_PDEV handle 9566 * 9567 * Return: % error rate using retries per packet and success packets 9568 */ 9569 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id) 9570 { 9571 struct dp_pdev *pdev = 9572 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 9573 pdev_id); 9574 9575 if (!pdev) 9576 return 0; 9577 9578 dp_aggregate_pdev_stats(pdev); 9579 if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0) 9580 return 0; 9581 return ((pdev->stats.tx.retries * 100) / 9582 ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries))); 9583 } 9584 9585 /* 9586 * dp_txrx_stats_publish(): publish pdev stats into a buffer 9587 * @soc: DP soc handle 9588 * @pdev_id: id of DP_PDEV handle 9589 * @buf: to hold pdev_stats 9590 * 9591 * Return: int 9592 */ 9593 static int 9594 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id, 9595 struct cdp_stats_extd *buf) 9596 { 9597 struct cdp_txrx_stats_req req = {0,}; 9598 struct dp_pdev *pdev = 9599 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 9600 pdev_id); 9601 9602 if (!pdev) 9603 return TXRX_STATS_LEVEL_OFF; 9604 9605 dp_aggregate_pdev_stats(pdev); 9606 req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX; 9607 req.cookie_val = DBG_STATS_COOKIE_DP_STATS; 9608 dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0, 9609 req.param1, req.param2, req.param3, 0, 9610 req.cookie_val, 0); 9611 9612 msleep(DP_MAX_SLEEP_TIME); 9613 9614 req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX; 9615 req.cookie_val = DBG_STATS_COOKIE_DP_STATS; 9616 dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0, 9617 req.param1, req.param2, req.param3, 0, 9618 req.cookie_val, 0); 9619 9620 msleep(DP_MAX_SLEEP_TIME); 9621 qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats)); 9622 9623 return TXRX_STATS_LEVEL; 9624 } 9625 9626 /** 9627 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev 9628 * @soc: soc handle 9629 * @pdev_id: id of DP_PDEV handle 9630 * @map_id: ID of map that needs to be updated 9631 * @tos: index value in map 9632 * @tid: tid value passed by the user 9633 * 9634 * Return: QDF_STATUS 9635 */ 9636 static QDF_STATUS 9637 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle, 9638 uint8_t pdev_id, 9639 uint8_t map_id, 9640 uint8_t tos, uint8_t tid) 9641 { 9642 uint8_t dscp; 9643 struct dp_soc *soc = (struct dp_soc *)soc_handle; 9644 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 9645 9646 if (!pdev) 9647 return QDF_STATUS_E_FAILURE; 9648 9649 dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; 9650 pdev->dscp_tid_map[map_id][dscp] = tid; 9651 9652 if (map_id < soc->num_hw_dscp_tid_map) 9653 hal_tx_update_dscp_tid(soc->hal_soc, tid, 9654 map_id, dscp); 9655 else 9656 return QDF_STATUS_E_FAILURE; 9657 9658 return QDF_STATUS_SUCCESS; 9659 } 9660 9661 /** 9662 * dp_fw_stats_process(): Process TxRX FW stats request 9663 * @vdev_handle: DP VDEV handle 9664 * @req: stats request 9665 * 9666 * return: int 9667 */ 9668 static int dp_fw_stats_process(struct dp_vdev *vdev, 9669 struct cdp_txrx_stats_req *req) 9670 { 9671 struct dp_pdev *pdev = NULL; 9672 uint32_t stats = req->stats; 9673 uint8_t mac_id = req->mac_id; 9674 9675 if (!vdev) { 9676 DP_TRACE(NONE, "VDEV not found"); 9677 return 1; 9678 } 9679 pdev = vdev->pdev; 9680 9681 /* 9682 * For HTT_DBG_EXT_STATS_RESET command, FW need to config 9683 * from param0 to param3 according to below rule: 9684 * 9685 * 
PARAM: 9686 * - config_param0 : start_offset (stats type) 9687 * - config_param1 : stats bmask from start offset 9688 * - config_param2 : stats bmask from start offset + 32 9689 * - config_param3 : stats bmask from start offset + 64 9690 */ 9691 if (req->stats == CDP_TXRX_STATS_0) { 9692 req->param0 = HTT_DBG_EXT_STATS_PDEV_TX; 9693 req->param1 = 0xFFFFFFFF; 9694 req->param2 = 0xFFFFFFFF; 9695 req->param3 = 0xFFFFFFFF; 9696 } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) { 9697 req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id); 9698 } 9699 9700 if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) { 9701 return dp_h2t_ext_stats_msg_send(pdev, 9702 HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, 9703 req->param0, req->param1, req->param2, 9704 req->param3, 0, DBG_STATS_COOKIE_DEFAULT, 9705 mac_id); 9706 } else { 9707 return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0, 9708 req->param1, req->param2, req->param3, 9709 0, DBG_STATS_COOKIE_DEFAULT, mac_id); 9710 } 9711 } 9712 9713 /** 9714 * dp_txrx_stats_request - function to map to firmware and host stats 9715 * @soc: soc handle 9716 * @vdev_id: virtual device ID 9717 * @req: stats request 9718 * 9719 * Return: QDF_STATUS 9720 */ 9721 static 9722 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle, 9723 uint8_t vdev_id, 9724 struct cdp_txrx_stats_req *req) 9725 { 9726 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle); 9727 int host_stats; 9728 int fw_stats; 9729 enum cdp_stats stats; 9730 int num_stats; 9731 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 9732 DP_MOD_ID_CDP); 9733 QDF_STATUS status = QDF_STATUS_E_INVAL; 9734 9735 if (!vdev || !req) { 9736 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 9737 "Invalid vdev/req instance"); 9738 status = QDF_STATUS_E_INVAL; 9739 goto fail0; 9740 } 9741 9742 if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) { 9743 dp_err("Invalid mac id request"); 9744 status = QDF_STATUS_E_INVAL; 9745 goto fail0; 9746 } 9747 9748 stats = req->stats; 9749 if (stats >= CDP_TXRX_MAX_STATS) { 9750 status = QDF_STATUS_E_INVAL; 9751 goto fail0; 9752 } 9753 9754 /* 9755 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available 9756 * has to be updated if new FW HTT stats added 9757 */ 9758 if (stats > CDP_TXRX_STATS_HTT_MAX) 9759 stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX; 9760 9761 num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table); 9762 9763 if (stats >= num_stats) { 9764 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 9765 "%s: Invalid stats option: %d", __func__, stats); 9766 status = QDF_STATUS_E_INVAL; 9767 goto fail0; 9768 } 9769 9770 req->stats = stats; 9771 fw_stats = dp_stats_mapping_table[stats][STATS_FW]; 9772 host_stats = dp_stats_mapping_table[stats][STATS_HOST]; 9773 9774 dp_info("stats: %u fw_stats_type: %d host_stats: %d", 9775 stats, fw_stats, host_stats); 9776 9777 if (fw_stats != TXRX_FW_STATS_INVALID) { 9778 /* update request with FW stats type */ 9779 req->stats = fw_stats; 9780 status = dp_fw_stats_process(vdev, req); 9781 } else if ((host_stats != TXRX_HOST_STATS_INVALID) && 9782 (host_stats <= TXRX_HOST_STATS_MAX)) 9783 status = dp_print_host_stats(vdev, req, soc); 9784 else 9785 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 9786 "Wrong Input for TxRx Stats"); 9787 fail0: 9788 if (vdev) 9789 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 9790 return status; 9791 } 9792 9793 /* 9794 * dp_txrx_dump_stats() - Dump statistics 9795 * @value - Statistics option 9796 */ 9797 static QDF_STATUS dp_txrx_dump_stats(struct 
/**
 * dp_txrx_dump_stats() - dump DP statistics
 * @psoc: CDP soc handle
 * @value: statistics option
 * @level: verbosity level for the dump
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
				     enum qdf_stats_verbosity_level level)
{
	struct dp_soc *soc = (struct dp_soc *)psoc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: soc is NULL", __func__);
		return QDF_STATUS_E_INVAL;
	}

	switch (value) {
	case CDP_TXRX_PATH_STATS:
		dp_txrx_path_stats(soc);
		dp_print_soc_interrupt_stats(soc);
		hal_dump_reg_write_stats(soc->hal_soc);
		break;

	case CDP_RX_RING_STATS:
		dp_print_per_ring_stats(soc);
		break;

	case CDP_TXRX_TSO_STATS:
		dp_print_tso_stats(soc, level);
		break;

	case CDP_DUMP_TX_FLOW_POOL_INFO:
		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
		break;

	case CDP_DP_NAPI_STATS:
		dp_print_napi_stats(soc);
		break;

	case CDP_TXRX_DESC_STATS:
		/* TODO: NOT IMPLEMENTED */
		break;

	case CDP_DP_RX_FISA_STATS:
		dp_rx_dump_fisa_stats(soc);
		break;

	case CDP_DP_SWLM_STATS:
		dp_print_swlm_stats(soc);
		break;

	default:
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
}

/**
 * dp_txrx_clear_dump_stats() - clear dumped stats
 * @soc_hdl: soc handle
 * @pdev_id: id of the pdev handle
 * @value: stats option
 *
 * Return: 0 - Success, non-zero - failure
 */
static
QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				    uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!soc) {
		dp_err("soc is NULL");
		return QDF_STATUS_E_INVAL;
	}

	switch (value) {
	case CDP_TXRX_TSO_STATS:
		dp_txrx_clear_tso_stats(soc);
		break;

	default:
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - store datapath flow control
 * config parameters
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				       struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
					params->tx_flow_stop_queue_threshold;
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
					params->tx_flow_start_queue_offset;
}
#else
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				       struct cdp_config_params *params)
{
}
#endif
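
/*
 * Example (illustrative sketch): with QCA_LL_TX_FLOW_CONTROL_V2 these two
 * ini-derived values drive TX queue pause/resume; the exact interpretation
 * lives in the flow-pool code and the numbers below are hypothetical.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tx_flow_stop_queue_threshold = 15;
 *	params.tx_flow_start_queue_offset = 10;
 *	dp_update_flow_control_parameters(soc, &params);
 */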
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
#define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024

/* Max packet limit for RX REAP Loop (dp_rx_process) */
#define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024

static
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
				params->tx_comp_loop_pkt_limit;

	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
	else
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;

	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
				params->rx_reap_loop_pkt_limit;

	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
	else
		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;

	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
				params->rx_hp_oos_update_limit;

	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
}
#else
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */

/**
 * dp_update_config_parameters() - store datapath config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
 *
 * Return: status
 */
static
QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
				       struct cdp_config_params *params)
{
	struct dp_soc *soc = (struct dp_soc *)psoc;

	if (!(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid handle", __func__);
		return QDF_STATUS_E_INVAL;
	}

	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
				params->p2p_tcp_udp_checksumoffload;
	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
				params->nan_tcp_udp_checksumoffload;
	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
				params->tcp_udp_checksumoffload;
	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;

	dp_update_rx_soft_irq_limit_params(soc, params);
	dp_update_flow_control_parameters(soc, params);

	return QDF_STATUS_SUCCESS;
}

static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};

/**
 * dp_txrx_data_tx_cb_set() - set the callback for non-standard tx
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual interface id
 * @callback: callback function
 * @ctxt: callback context
 *
 * Return: void
 */
static void
dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return;

	vdev->tx_non_std_data_callback.func = callback;
	vdev->tx_non_std_data_callback.ctxt = ctxt;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
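
/*
 * Example (illustrative sketch): an upper layer registers a completion
 * hook for non-standard TX frames; my_tx_complete_cb and its context are
 * hypothetical.
 *
 *	dp_txrx_data_tx_cb_set(soc_hdl, vdev_id, my_tx_complete_cb, my_ctxt);
 *	...
 *	dp_txrx_data_tx_cb_set(soc_hdl, vdev_id, NULL, NULL);	// unregister
 */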
/**
 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
 * @soc: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 *
 * Return: opaque pointer to dp txrx handle
 */
static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	if (qdf_unlikely(!pdev))
		return NULL;

	return pdev->dp_txrx_handle;
}

/**
 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
 * @soc: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static void
dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
			   void *dp_txrx_hdl)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return;

	pdev->dp_txrx_handle = dp_txrx_hdl;
}

/**
 * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 *
 * Return: opaque pointer to dp txrx handle
 */
static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
				       uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	void *dp_ext_handle;

	if (!vdev)
		return NULL;
	dp_ext_handle = vdev->vdev_dp_ext_handle;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return dp_ext_handle;
}

/**
 * dp_vdev_set_dp_ext_handle() - allocate and set the advanced dp handle
 * in vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 * @size: size of the advanced dp handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
			  uint16_t size)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	void *dp_ext_handle;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	dp_ext_handle = qdf_mem_malloc(size);

	if (!dp_ext_handle) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	vdev->vdev_dp_ext_handle = dp_ext_handle;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
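
/*
 * Example (illustrative sketch): a component reserves per-vdev private
 * storage once, then fetches it later; struct my_vdev_ctx is hypothetical.
 *
 *	if (dp_vdev_set_dp_ext_handle(soc_hdl, vdev_id,
 *				      sizeof(struct my_vdev_ctx)) ==
 *	    QDF_STATUS_SUCCESS) {
 *		struct my_vdev_ctx *ctx =
 *			dp_vdev_get_dp_ext_handle(soc_hdl, vdev_id);
 *		...
 *	}
 */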
/**
 * dp_vdev_inform_ll_conn() - inform vdev to add/delete a latency-critical
 * connection for this vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev ID
 * @action: Add/Delete action
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       enum vdev_ll_conn_actions action)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_err("LL connection action for invalid vdev %d", vdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	switch (action) {
	case CDP_VDEV_LL_CONN_ADD:
		vdev->num_latency_critical_conn++;
		break;

	case CDP_VDEV_LL_CONN_DEL:
		vdev->num_latency_critical_conn--;
		break;

	default:
		dp_err("LL connection action invalid %d", action);
		break;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_soc_set_swlm_enable() - enable/disable SWLM if initialized
 * @soc_hdl: CDP soc handle
 * @value: enable/disable value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
					 uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	if (!soc->swlm.is_init) {
		dp_err("SWLM is not initialized");
		return QDF_STATUS_E_FAILURE;
	}

	soc->swlm.is_enabled = !!value;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_is_swlm_enabled() - check whether SWLM is enabled
 * @soc_hdl: CDP soc handle
 *
 * Return: 1 if SWLM is enabled, 0 otherwise
 */
static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	return soc->swlm.is_enabled;
}
#endif
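
/*
 * Example (illustrative sketch): SWLM can only be toggled after its init
 * has run, so callers should treat a failure here as "feature absent".
 *
 *	if (dp_soc_set_swlm_enable(soc_hdl, 1) == QDF_STATUS_SUCCESS)
 *		qdf_assert(dp_soc_is_swlm_enabled(soc_hdl));
 */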
10210 * 10211 * Return: none 10212 */ 10213 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl) 10214 { 10215 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 10216 hal_soc_handle_t hal_soc = soc->hal_soc; 10217 uint32_t hp, tp, i; 10218 10219 dp_info("SRNG HP-TP data:"); 10220 for (i = 0; i < soc->num_tcl_data_rings; i++) { 10221 hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng, 10222 &hp, &tp); 10223 dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp); 10224 10225 hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng, 10226 &hp, &tp); 10227 dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp); 10228 } 10229 10230 for (i = 0; i < soc->num_reo_dest_rings; i++) { 10231 hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng, 10232 &hp, &tp); 10233 dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp); 10234 } 10235 10236 hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &hp, &tp); 10237 dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp); 10238 10239 hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &hp, &tp); 10240 dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp); 10241 10242 hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &hp, &tp); 10243 dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp); 10244 } 10245 10246 /** 10247 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc 10248 * @soc_handle: datapath soc handle 10249 * 10250 * Return: opaque pointer to external dp (non-core DP) 10251 */ 10252 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle) 10253 { 10254 struct dp_soc *soc = (struct dp_soc *)soc_handle; 10255 10256 return soc->external_txrx_handle; 10257 } 10258 10259 /** 10260 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc 10261 * @soc_handle: datapath soc handle 10262 * @txrx_handle: opaque pointer to external dp (non-core DP) 10263 * 10264 * Return: void 10265 */ 10266 static void 10267 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle) 10268 { 10269 struct dp_soc *soc = (struct dp_soc *)soc_handle; 10270 10271 soc->external_txrx_handle = txrx_handle; 10272 } 10273 10274 /** 10275 * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping 10276 * @soc_hdl: datapath soc handle 10277 * @pdev_id: id of the datapath pdev handle 10278 * @lmac_id: lmac id 10279 * 10280 * Return: QDF_STATUS 10281 */ 10282 static QDF_STATUS 10283 dp_soc_map_pdev_to_lmac 10284 (struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 10285 uint32_t lmac_id) 10286 { 10287 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 10288 10289 wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx, 10290 pdev_id, 10291 lmac_id); 10292 10293 /*Set host PDEV ID for lmac_id*/ 10294 wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx, 10295 pdev_id, 10296 lmac_id); 10297 10298 return QDF_STATUS_SUCCESS; 10299 } 10300 10301 /** 10302 * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping 10303 * @soc_hdl: datapath soc handle 10304 * @pdev_id: id of the datapath pdev handle 10305 * @lmac_id: lmac id 10306 * 10307 * In the event of a dynamic mode change, update the pdev to lmac mapping 10308 * 10309 * Return: QDF_STATUS 10310 */ 10311 static QDF_STATUS 10312 dp_soc_handle_pdev_mode_change 10313 (struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 10314 uint32_t lmac_id) 10315 { 10316 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 10317 struct dp_vdev *vdev = NULL; 10318 uint8_t hw_pdev_id, mac_id; 10319 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, 10320 pdev_id); 10321 int nss_config = 
/**
 * dp_soc_handle_pdev_mode_change() - update pdev to lmac mapping
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the datapath pdev handle
 * @lmac_id: lmac id
 *
 * In the event of a dynamic mode change, update the pdev to lmac mapping.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_soc_handle_pdev_mode_change(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       uint32_t lmac_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev = NULL;
	uint8_t hw_pdev_id, mac_id;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
								  pdev_id);
	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_FAILURE;

	pdev->lmac_id = lmac_id;
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
	dp_info("mode change %d %d", pdev->pdev_id, pdev->lmac_id);

	/* Set host PDEV ID for lmac_id */
	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
			      pdev->pdev_id,
			      lmac_id);

	hw_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(soc,
						       pdev->pdev_id);

	/*
	 * When NSS offload is enabled, send the pdev_id->lmac_id
	 * and pdev_id to hw_pdev_id mappings to NSS FW
	 */
	if (nss_config) {
		mac_id = pdev->lmac_id;
		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
			soc->cdp_soc.ol_ops->
				pdev_update_lmac_n_target_pdev_id(
				soc->ctrl_psoc,
				&pdev_id, &mac_id, &hw_pdev_id);
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
						hw_pdev_id);
		vdev->lmac_id = pdev->lmac_id;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_set_pdev_status_down() - set pdev down/up status
 * @soc: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @is_pdev_down: pdev down/up status
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
			    bool is_pdev_down)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	pdev->is_pdev_down = is_pdev_down;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_cfg_capabilities() - get dp capabilities
 * @soc_handle: datapath soc handle
 * @dp_caps: enum for dp capabilities
 *
 * Return: bool to determine if dp caps is enabled
 */
static bool
dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
			enum cdp_capabilities dp_caps)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
}

#ifdef FEATURE_AST
static QDF_STATUS
dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);

	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_delete_ast_entries(soc, peer);
	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
/**
 * dp_vdev_get_neighbour_rssi() - retrieve the RSSI stored for the
 * configured NAC
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @mac_addr: neighbour mac
 * @rssi: rssi value
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
					     uint8_t vdev_id,
					     char *mac_addr,
					     uint8_t *rssi)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (!vdev)
		return status;

	pdev = vdev->pdev;
	*rssi = 0;
	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
			*rssi = peer->rssi;
			status = QDF_STATUS_SUCCESS;
			break;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

static QDF_STATUS
dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
		       uint8_t vdev_id,
		       enum cdp_nac_param_cmd cmd, char *bssid,
		       char *client_macaddr,
		       uint8_t chan_num)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = (struct dp_pdev *)vdev->pdev;
	pdev->nac_rssi_filtering = 1;
	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */

	if (cmd == CDP_NAC_PARAM_ADD) {
		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
						 DP_NAC_PARAM_ADD,
						 (uint8_t *)client_macaddr);
	} else if (cmd == CDP_NAC_PARAM_DEL) {
		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
						 DP_NAC_PARAM_DEL,
						 (uint8_t *)client_macaddr);
	}

	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
			(soc->ctrl_psoc, pdev->pdev_id,
			 vdev->vdev_id, cmd, bssid, client_macaddr);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_enable_peer_based_pktlog() - set flag for peer based filtering
 * for pktlog
 * @soc: cdp_soc handle
 * @pdev_id: id of dp pdev handle
 * @mac_addr: peer mac address
 * @enb_dsb: enable or disable peer based filtering
 *
 * Return: QDF_STATUS
 */
static int
dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
			    uint8_t *mac_addr, uint8_t enb_dsb)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);

	if (!peer) {
		dp_err("Invalid Peer");
		return QDF_STATUS_E_FAILURE;
	}

	peer->peer_based_pktlog_filter = enb_dsb;
	pdev->dp_peer_based_pktlog = enb_dsb;

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
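
/*
 * Example (illustrative sketch): restrict pktlog capture to one peer by
 * MAC address; the address below is hypothetical.
 *
 *	uint8_t mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x03, 0x7f, 0x01, 0x02, 0x03};
 *
 *	dp_enable_peer_based_pktlog(soc, 0, mac, 1);	// enable filtering
 *	...
 *	dp_enable_peer_based_pktlog(soc, 0, mac, 0);	// disable filtering
 */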
#ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged
 * for the given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
 * @soc: cdp_soc handle
 * @pdev_id: id of cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * Return: none
 */
static inline void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
				   uint16_t protocol_type)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_update_pdev_rx_protocol_tag() - add/remove a protocol tag that should
 * be applied to the desired protocol type packets
 * @soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates what protocol types
 * are enabled for tagging. zero indicates disable feature, non-zero
 * indicates enable feature
 * @protocol_type: new protocol type for which the tag is being added
 * @tag: user configured tag for the new protocol
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_update_pdev_rx_protocol_tag(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint32_t enable_rx_protocol_tag,
			       uint16_t protocol_type,
			       uint16_t tag)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_set_rx_flow_tag() - add/delete a flow
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow tuple that is to be added to/deleted from the flow
 * search table
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
		   struct cdp_rx_flow_info *flow_info)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for
 * the given flow 5-tuple
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow 5-tuple for which stats should be displayed
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
			  struct cdp_rx_flow_info *flow_info)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
					   uint32_t max_peers,
					   uint32_t max_ast_index,
					   bool peer_map_unmap_v2)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	soc->max_peers = max_peers;

	qdf_print("%s max_peers %u, max_ast_index: %u\n",
		  __func__, max_peers, max_ast_index);
	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);

	if (dp_peer_find_attach(soc))
		return QDF_STATUS_E_FAILURE;

	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
	soc->peer_map_attach_success = TRUE;

	return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_soc_set_param(struct cdp_soc_t *soc_hdl,
				   enum cdp_soc_param_t param,
				   uint32_t value)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	switch (param) {
	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
		soc->num_msdu_exception_desc = value;
		dp_info("num_msdu_exception_desc %u", value);
		break;
	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
			soc->fst_in_cmem = !!value;
		dp_info("FW supports CMEM FSE %u", value);
		break;
	default:
		dp_info("not handled param %d ", param);
		break;
	}

	return QDF_STATUS_SUCCESS;
}

static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
				      void *stats_ctx)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_peer_flush_rate_stats_req() - flush peer rate stats
 * @soc: Datapath SOC handle
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
static void
dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
			     void *arg)
{
	if (peer->bss_peer)
		return;

	dp_wdi_event_handler(
		WDI_EVENT_FLUSH_RATE_STATS_REQ,
		soc, peer->rdkstats_ctx,
		peer->peer_id,
		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
}

/**
 * dp_flush_rate_stats_req() - flush peer rate stats in pdev
 * @soc_hdl: Datapath SOC handle
 * @pdev_id: pdev_id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
			     DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static void *dp_peer_get_rdkstats_ctx(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id,
				      uint8_t *mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer;
	void *rdkstats_ctx = NULL;

	if (mac_addr) {
		peer = dp_peer_find_hash_find(soc, mac_addr,
					      0, vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer)
			return NULL;

		rdkstats_ctx = peer->rdkstats_ctx;

		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return rdkstats_ctx;
}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
					   uint8_t pdev_id,
					   void *buf)
{
	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
			     (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev_id);
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return soc->rate_stats_ctx;
}
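
/*
 * Example (illustrative sketch): flush cached per-peer rate stats to WDI
 * subscribers for pdev 0, then look up one peer's stats context by MAC;
 * peer_mac is hypothetical.
 *
 *	dp_flush_rate_stats_req(soc_hdl, 0);
 *	ctx = dp_peer_get_rdkstats_ctx(soc_hdl, vdev_id, peer_mac);
 */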
/*
 * dp_get_cfg() - get dp cfg
 * @soc: cdp soc handle
 * @cfg: cfg enum
 *
 * Return: cfg value
 */
static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc;
	uint32_t value = 0;

	switch (cfg) {
	case cfg_dp_enable_data_stall:
		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
		break;
	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
		break;
	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
		break;
	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
		break;
	case cfg_dp_disable_legacy_mode_csum_offload:
		value = dpsoc->wlan_cfg_ctx->
					legacy_mode_checksumoffload_disable;
		break;
	case cfg_dp_tso_enable:
		value = dpsoc->wlan_cfg_ctx->tso_enabled;
		break;
	case cfg_dp_lro_enable:
		value = dpsoc->wlan_cfg_ctx->lro_enabled;
		break;
	case cfg_dp_gro_enable:
		value = dpsoc->wlan_cfg_ctx->gro_enabled;
		break;
	case cfg_dp_sg_enable:
		value = dpsoc->wlan_cfg_ctx->sg_enabled;
		break;
	case cfg_dp_tx_flow_start_queue_offset:
		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
		break;
	case cfg_dp_tx_flow_stop_queue_threshold:
		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
		break;
	case cfg_dp_disable_intra_bss_fwd:
		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
		break;
	case cfg_dp_pktlog_buffer_size:
		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
		break;
	case cfg_dp_wow_check_rx_pending:
		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
		break;
	default:
		value = 0;
	}

	return value;
}
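
/*
 * Example (illustrative sketch): upper layers read ini-derived datapath
 * settings through this single getter instead of touching wlan_cfg
 * directly.
 *
 *	if (dp_get_cfg((struct cdp_soc_t *)soc, cfg_dp_gro_enable))
 *		... enable GRO handling in the OS shim ...
 */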
#ifdef PEER_FLOW_CONTROL
/**
 * dp_tx_flow_ctrl_configure_pdev() - configure flow control params
 * @soc_handle: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @param: ol ath params
 * @value: value of the flag
 * @buff: buffer to be passed
 *
 * This mirrors the legacy implementation, where a single function both
 * displays stats and updates pdev params.
 *
 * Return: 0 for success. nonzero for failure.
 */
static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
					       uint8_t pdev_id,
					       enum _dp_param_t param,
					       uint32_t value, void *buff)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (qdf_unlikely(!pdev))
		return 1;

	soc = pdev->soc;
	if (!soc)
		return 1;

	switch (param) {
#ifdef QCA_ENH_V3_STATS_SUPPORT
	case DP_PARAM_VIDEO_DELAY_STATS_FC:
		if (value)
			pdev->delay_stats_flag = true;
		else
			pdev->delay_stats_flag = false;
		break;
	case DP_PARAM_VIDEO_STATS_FC:
		qdf_print("------- TID Stats ------\n");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------\n");
		dp_pdev_print_delay_stats(pdev);
		break;
#endif
	case DP_PARAM_TOTAL_Q_SIZE:
		{
			uint32_t tx_min, tx_max;

			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

			if (!buff) {
				if ((value >= tx_min) && (value <= tx_max)) {
					pdev->num_tx_allowed = value;
				} else {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_INFO,
						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
						  tx_min, tx_max);
					break;
				}
			} else {
				*(int *)buff = pdev->num_tx_allowed;
			}
		}
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: not handled param %d ", __func__, param);
		break;
	}

	return 0;
}
#endif

/**
 * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
 * @psoc: dp soc handle
 * @pdev_id: id of DP_PDEV handle
 * @pcp: pcp value
 * @tid: tid value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
						uint8_t pdev_id,
						uint8_t pcp, uint8_t tid)
{
	struct dp_soc *soc = (struct dp_soc *)psoc;

	soc->pcp_tid_map[pcp] = tid;

	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP_VDEV handle
 * @pcp: pcp value
 * @tid: tid value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
						uint8_t vdev_id,
						uint8_t pcp, uint8_t tid)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev->pcp_tid_map[pcp] = tid;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
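
/*
 * Example (illustrative sketch): 802.1p PCP values 0..7 map to TIDs; this
 * maps PCP 6 to TID 6 soc-wide and then overrides it to TID 5 for one
 * hypothetical vdev.
 *
 *	dp_set_pdev_pcp_tid_map_wifi3(psoc, 0, 6, 6);
 *	dp_set_vdev_pcp_tid_map_wifi3(soc_hdl, vdev_id, 6, 5);
 */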
#ifdef QCA_SUPPORT_FULL_MON
static inline QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
			uint8_t val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->full_mon_mode = val;
	qdf_alert("Configure full monitor mode val: %d ", val);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
			uint8_t val)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	/* TODO: get APIs for dscp-tid need to be added */
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.tx_send = dp_tx_send,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
		dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_rdkstats_ctx = dp_peer_get_rdkstats_ctx,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
	.tx_send_exc = dp_tx_send_exception,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.get_wds_ext_peer_id = dp_wds_ext_get_peer_id,
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
#endif /* QCA_SUPPORT_WDS_EXTENDED */
};

static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask =
		dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
		dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /* QCA_MULTIPASS_SUPPORT */
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	.txrx_update_peer_pkt_capture_params =
		dp_peer_update_pkt_capture_params,
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
};

static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};

static struct cdp_mon_ops dp_ops_mon = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = dp_config_full_mon_mode,
};

static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
#ifdef FEATURE_PERPKT_INFO
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
#endif /* FEATURE_PERPKT_INFO */
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};

#ifdef PEER_FLOW_CONTROL
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_cfr_filter = dp_cfr_filter,
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
};
#endif

#ifdef WLAN_SUPPORT_MSCS
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
#endif
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * DP is ready to runtime suspend if there are no pending TX packets.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Abort suspend due to pending TX packets"));
		return QDF_STATUS_E_AGAIN;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_flush_ring_hptp() - update the ring's shadow HP/TP registers on
 * runtime resume
 * @soc: DP soc context
 * @hal_srng: srng whose flush event is to be serviced
 *
 * Return: None
 */
static
void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	if (hal_srng && hal_srng_get_clear_event(hal_srng,
						 HAL_SRNG_FLUSH_EVENT)) {
		/* Acquire the lock */
		hal_srng_access_start(soc->hal_soc, hal_srng);

		hal_srng_access_end(soc->hal_soc, hal_srng);

		hal_srng_set_flush_last_ts(hal_srng);
	}
}

/**
 * dp_runtime_resume() - ensure DP is ready to runtime resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Resume DP for runtime PM.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	int i;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
	}

	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);

	return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_RUNTIME_PM */

/**
 * dp_tx_get_success_ack_stats() - get tx success completion count
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev identifier
 *
 * Return: tx success ack count
 */
static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct cdp_vdev_stats *vdev_stats = NULL;
	uint32_t tx_success;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid vdev id %d"), vdev_id);
		return 0;
	}

	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to get alloc vdev stats");
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return 0;
	}

	dp_aggregate_vdev_stats(vdev, vdev_stats);

	tx_success = vdev_stats->tx.tx_success.num;
	qdf_mem_free(vdev_stats);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return tx_success;
}
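
/*
 * Example (illustrative sketch): sampling the ack count around a traffic
 * burst gives a cheap per-vdev delivery indicator.
 *
 *	uint32_t before = dp_tx_get_success_ack_stats(soc_hdl, vdev_id);
 *	... run traffic ...
 *	uint32_t acked = dp_tx_get_success_ack_stats(soc_hdl, vdev_id) - before;
 */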
#ifdef WLAN_SUPPORT_DATA_STALL
/**
 * dp_register_data_stall_detect_cb() - register data stall callback
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static
QDF_STATUS dp_register_data_stall_detect_cb(
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			data_stall_detect_cb data_stall_detect_callback)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}

	pdev->data_stall_detect_callback = data_stall_detect_callback;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_deregister_data_stall_detect_cb() - de-register data stall callback
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static
QDF_STATUS dp_deregister_data_stall_detect_cb(
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			data_stall_detect_cb data_stall_detect_callback)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}

	pdev->data_stall_detect_callback = NULL;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_post_data_stall_event() - post data stall event
 * @soc_hdl: Datapath soc handle
 * @indicator: Module triggering data stall
 * @data_stall_type: data stall event type
 * @pdev_id: pdev id
 * @vdev_id_bitmap: vdev id bitmap
 * @recovery_type: data stall recovery type
 *
 * Return: None
 */
static void
dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
			      enum data_stall_log_event_indicator indicator,
			      enum data_stall_log_event_type data_stall_type,
			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
			      enum data_stall_log_recovery_type recovery_type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct data_stall_event_info data_stall_info;
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return;
	}

	if (!pdev->data_stall_detect_callback) {
		dp_err("data stall cb not registered!");
		return;
	}

	dp_info("data_stall_type: %x pdev_id: %d",
		data_stall_type, pdev_id);

	data_stall_info.indicator = indicator;
	data_stall_info.data_stall_type = data_stall_type;
	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
	data_stall_info.pdev_id = pdev_id;
	data_stall_info.recovery_type = recovery_type;

	pdev->data_stall_detect_callback(&data_stall_info);
}
#endif /* WLAN_SUPPORT_DATA_STALL */
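
/*
 * Example (illustrative sketch): a detector registers its callback once,
 * after which any module can report a stall; my_stall_cb and the event
 * arguments are hypothetical.
 *
 *	dp_register_data_stall_detect_cb(soc_hdl, 0, my_stall_cb);
 *	dp_txrx_post_data_stall_event(soc_hdl, indicator, stall_type,
 *				      0, 1 << vdev_id, recovery_type);
 */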
#ifdef WLAN_FEATURE_STATS_EXT
/* rx hw stats event wait timeout in ms */
#define DP_REO_STATUS_STATS_TIMEOUT 1500
/**
 * dp_txrx_ext_stats_request() - request dp txrx extended stats
 * @soc_hdl: soc handle
 * @pdev_id: pdev id
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			  struct cdp_txrx_ext_stats *req)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev is null");
		return QDF_STATUS_E_INVAL;
	}

	dp_aggregate_pdev_stats(pdev);

	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
	/* only count error source from RXDMA */
	req->rx_mpdu_error = pdev->stats.err.rxdma_error;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_hw_stats_cb() - request rx hw stats response callback
 * @soc: soc handle
 * @cb_ctxt: callback context
 * @reo_status: reo command response status
 *
 * Return: None
 */
static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			      union hal_reo_status *reo_status)
{
	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
	bool is_query_timeout;

	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	is_query_timeout = rx_hw_stats->is_query_timeout;
	/* free the cb_ctxt if all pending tid stats query is received */
	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
		if (!is_query_timeout) {
			qdf_event_set(&soc->rx_hw_stats_event);
			soc->is_last_stats_ctx_init = false;
		}

		qdf_mem_free(rx_hw_stats);
	}

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_info("REO stats failure %d",
			queue_status->header.status);
		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
		return;
	}

	if (!is_query_timeout) {
		soc->ext_stats.rx_mpdu_received +=
					queue_status->mpdu_frms_cnt;
		soc->ext_stats.rx_mpdu_missed +=
					queue_status->late_recv_mpdu_cnt;
	}
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
}
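
/*
 * Note (illustrative): one REO status query is posted per TID, so
 * pending_tid_stats_cnt is seeded with the number of queries sent and each
 * callback invocation decrements it; the final decrement both signals the
 * waiter and frees the shared cb_ctxt, which guarantees a single free even
 * when responses race with the requester's timeout path.
 */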
/**
 * dp_request_rx_hw_stats() - request rx hardware stats
 * @soc_hdl: soc handle
 * @vdev_id: vdev id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_peer *peer = NULL;
	QDF_STATUS status;
	struct dp_req_rx_hw_stats_t *rx_hw_stats;
	int rx_stats_sent_cnt = 0;
	uint32_t last_rx_mpdu_received;
	uint32_t last_rx_mpdu_missed;

	if (!vdev) {
		dp_err("vdev is null for vdev_id: %u", vdev_id);
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);

	if (!peer) {
		dp_err("Peer is NULL");
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));

	if (!rx_hw_stats) {
		dp_err("malloc failed for hw stats structure");
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	qdf_event_reset(&soc->rx_hw_stats_event);
	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	/* save the last soc cumulative stats and reset it to 0 */
	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
	soc->ext_stats.rx_mpdu_received = 0;
	soc->ext_stats.rx_mpdu_missed = 0;

	rx_stats_sent_cnt =
		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
	if (!rx_stats_sent_cnt) {
		dp_err("no tid stats sent successfully");
		qdf_mem_free(rx_hw_stats);
		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
		status = QDF_STATUS_E_INVAL;
		goto out;
	}
	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
		       rx_stats_sent_cnt);
	rx_hw_stats->is_query_timeout = false;
	soc->is_last_stats_ctx_init = true;
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);

	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
				       DP_REO_STATUS_STATS_TIMEOUT);

	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	if (status != QDF_STATUS_SUCCESS) {
		dp_info("rx hw stats event timeout");
		if (soc->is_last_stats_ctx_init)
			rx_hw_stats->is_query_timeout = true;
		/*
		 * If query timeout happened, use the last saved stats
		 * for this time query.
		 */
		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
	}
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);

out:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return status;
}
#endif /* WLAN_FEATURE_STATS_EXT */

#ifdef DP_PEER_EXTENDED_API
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
};
#endif
*/ 11646 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 11647 .flow_pool_map_handler = dp_tx_flow_pool_map, 11648 .flow_pool_unmap_handler = dp_tx_flow_pool_unmap, 11649 .register_pause_cb = dp_txrx_register_pause_cb, 11650 .dump_flow_pool_info = dp_tx_dump_flow_pool_info, 11651 .tx_desc_thresh_reached = dp_tx_desc_thresh_reached, 11652 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 11653 }; 11654 11655 static struct cdp_lflowctl_ops dp_ops_l_flowctl = { 11656 /* WIFI 3.0 DP NOT IMPLEMENTED YET */ 11657 }; 11658 #endif 11659 11660 #ifdef IPA_OFFLOAD 11661 static struct cdp_ipa_ops dp_ops_ipa = { 11662 .ipa_get_resource = dp_ipa_get_resource, 11663 .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr, 11664 .ipa_op_response = dp_ipa_op_response, 11665 .ipa_register_op_cb = dp_ipa_register_op_cb, 11666 .ipa_deregister_op_cb = dp_ipa_deregister_op_cb, 11667 .ipa_get_stat = dp_ipa_get_stat, 11668 .ipa_tx_data_frame = dp_tx_send_ipa_data_frame, 11669 .ipa_enable_autonomy = dp_ipa_enable_autonomy, 11670 .ipa_disable_autonomy = dp_ipa_disable_autonomy, 11671 .ipa_setup = dp_ipa_setup, 11672 .ipa_cleanup = dp_ipa_cleanup, 11673 .ipa_setup_iface = dp_ipa_setup_iface, 11674 .ipa_cleanup_iface = dp_ipa_cleanup_iface, 11675 .ipa_enable_pipes = dp_ipa_enable_pipes, 11676 .ipa_disable_pipes = dp_ipa_disable_pipes, 11677 .ipa_set_perf_level = dp_ipa_set_perf_level, 11678 .ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd, 11679 .ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping, 11680 .ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping 11681 }; 11682 #endif 11683 11684 #ifdef DP_POWER_SAVE 11685 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 11686 { 11687 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 11688 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 11689 int timeout = SUSPEND_DRAIN_WAIT; 11690 int drain_wait_delay = 50; /* 50 ms */ 11691 11692 if (qdf_unlikely(!pdev)) { 11693 dp_err("pdev is NULL"); 11694 return QDF_STATUS_E_INVAL; 11695 } 11696 11697 /* Abort if there are any pending TX packets */ 11698 while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) { 11699 qdf_sleep(drain_wait_delay); 11700 if (timeout <= 0) { 11701 dp_err("TX frames are pending, abort suspend"); 11702 return QDF_STATUS_E_TIMEOUT; 11703 } 11704 timeout = timeout - drain_wait_delay; 11705 } 11706 11707 if (soc->intr_mode == DP_INTR_POLL) 11708 qdf_timer_stop(&soc->int_timer); 11709 11710 /* Stop monitor reap timer and reap any pending frames in ring */ 11711 if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) || 11712 dp_is_enable_reap_timer_non_pkt(pdev)) && 11713 soc->reap_timer_init) { 11714 qdf_timer_sync_cancel(&soc->mon_reap_timer); 11715 dp_service_mon_rings(soc, DP_MON_REAP_BUDGET); 11716 } 11717 11718 return QDF_STATUS_SUCCESS; 11719 } 11720 11721 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 11722 { 11723 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 11724 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 11725 11726 if (qdf_unlikely(!pdev)) { 11727 dp_err("pdev is NULL"); 11728 return QDF_STATUS_E_INVAL; 11729 } 11730 11731 if (soc->intr_mode == DP_INTR_POLL) 11732 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); 11733 11734 /* Start monitor reap timer */ 11735 if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) || 11736 dp_is_enable_reap_timer_non_pkt(pdev)) && 11737 soc->reap_timer_init) 11738 qdf_timer_mod(&soc->mon_reap_timer, 11739 DP_INTR_POLL_TIMER_MS); 11740 11741 return QDF_STATUS_SUCCESS; 11742 } 
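
/*
 * Usage sketch (illustrative only, not part of this file): the
 * platform PM layer reaches the two handlers above through the bus
 * ops table registered below in dp_ops_bus, typically via the cdp
 * wrappers, roughly:
 *
 *	if (cdp_bus_suspend(cdp_soc, pdev_id) != QDF_STATUS_SUCCESS)
 *		return -EBUSY;	// TX drain timed out, abort suspend
 *	...suspend the bus...
 *	cdp_bus_resume(cdp_soc, pdev_id);
 *
 * cdp_bus_suspend()/cdp_bus_resume() here are assumed to be the cdp
 * dispatchers that invoke soc->ops->bus_ops->bus_suspend/bus_resume.
 */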
/**
 * dp_process_wow_ack_rsp() - process wow ack response
 * @soc_hdl: datapath soc handle
 * @pdev_id: data path pdev handle id
 *
 * Return: none
 */
static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return;
	}

	/*
	 * As part of wow enable, the FW disables the mon status ring;
	 * when the wow ack response arrives from the FW, reap the mon
	 * status ring to make sure no packets are left pending in it.
	 */
	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
	    soc->reap_timer_init) {
		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
	}
}

/**
 * dp_process_target_suspend_req() - process target suspend request
 * @soc_hdl: datapath soc handle
 * @pdev_id: data path pdev handle id
 *
 * Return: none
 */
static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return;
	}

	/* Stop monitor reap timer and reap any pending frames in ring */
	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
	    soc->reap_timer_init) {
		qdf_timer_sync_cancel(&soc->mon_reap_timer);
		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
	}
}

static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
#endif

#ifdef DP_FLOW_CTL
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
#endif

#ifdef DP_PEER_EXTENDED_API
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};

static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
};
#endif

static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef PEER_FLOW_CONTROL
	.pflow_ops = &dp_ops_pflow,
#endif /* PEER_FLOW_CONTROL */
#ifdef DP_PEER_EXTENDED_API 11853 .misc_ops = &dp_ops_misc, 11854 .ocb_ops = &dp_ops_ocb, 11855 .peer_ops = &dp_ops_peer, 11856 .mob_stats_ops = &dp_ops_mob_stats, 11857 #endif 11858 #ifdef DP_FLOW_CTL 11859 .cfg_ops = &dp_ops_cfg, 11860 .flowctl_ops = &dp_ops_flowctl, 11861 .l_flowctl_ops = &dp_ops_l_flowctl, 11862 .throttle_ops = &dp_ops_throttle, 11863 #endif 11864 #ifdef IPA_OFFLOAD 11865 .ipa_ops = &dp_ops_ipa, 11866 #endif 11867 #ifdef DP_POWER_SAVE 11868 .bus_ops = &dp_ops_bus, 11869 #endif 11870 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) 11871 .cfr_ops = &dp_ops_cfr, 11872 #endif 11873 #ifdef WLAN_SUPPORT_MSCS 11874 .mscs_ops = &dp_ops_mscs, 11875 #endif 11876 }; 11877 11878 /* 11879 * dp_soc_set_txrx_ring_map() 11880 * @dp_soc: DP handler for soc 11881 * 11882 * Return: Void 11883 */ 11884 void dp_soc_set_txrx_ring_map(struct dp_soc *soc) 11885 { 11886 uint32_t i; 11887 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) { 11888 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i]; 11889 } 11890 } 11891 11892 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \ 11893 defined(QCA_WIFI_QCA5018) 11894 /** 11895 * dp_soc_attach_wifi3() - Attach txrx SOC 11896 * @ctrl_psoc: Opaque SOC handle from control plane 11897 * @htc_handle: Opaque HTC handle 11898 * @hif_handle: Opaque HIF handle 11899 * @qdf_osdev: QDF device 11900 * @ol_ops: Offload Operations 11901 * @device_id: Device ID 11902 * 11903 * Return: DP SOC handle on success, NULL on failure 11904 */ 11905 struct cdp_soc_t * 11906 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 11907 struct hif_opaque_softc *hif_handle, 11908 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, 11909 struct ol_if_ops *ol_ops, uint16_t device_id) 11910 { 11911 struct dp_soc *dp_soc = NULL; 11912 11913 dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev, 11914 ol_ops, device_id); 11915 return dp_soc_to_cdp_soc_t(dp_soc); 11916 } 11917 11918 static inline void dp_soc_set_def_pdev(struct dp_soc *soc) 11919 { 11920 int lmac_id; 11921 11922 for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) { 11923 /*Set default host PDEV ID for lmac_id*/ 11924 wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx, 11925 INVALID_PDEV_ID, lmac_id); 11926 } 11927 } 11928 11929 /** 11930 * dp_soc_attach() - Attach txrx SOC 11931 * @ctrl_psoc: Opaque SOC handle from control plane 11932 * @hif_handle: Opaque HIF handle 11933 * @htc_handle: Opaque HTC handle 11934 * @qdf_osdev: QDF device 11935 * @ol_ops: Offload Operations 11936 * @device_id: Device ID 11937 * 11938 * Return: DP SOC handle on success, NULL on failure 11939 */ 11940 static struct dp_soc * 11941 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 11942 struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle, 11943 qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops, 11944 uint16_t device_id) 11945 { 11946 int int_ctx; 11947 struct dp_soc *soc = NULL; 11948 11949 if (!hif_handle) { 11950 dp_err("HIF handle is NULL"); 11951 goto fail0; 11952 } 11953 11954 soc = qdf_mem_malloc(sizeof(*soc)); 11955 if (!soc) { 11956 dp_err("DP SOC memory allocation failed"); 11957 goto fail0; 11958 } 11959 11960 soc->hif_handle = hif_handle; 11961 soc->hal_soc = hif_get_hal_handle(soc->hif_handle); 11962 if (!soc->hal_soc) 11963 goto fail1; 11964 11965 int_ctx = 0; 11966 soc->device_id = device_id; 11967 soc->cdp_soc.ops = &dp_txrx_ops; 11968 soc->cdp_soc.ol_ops = ol_ops; 11969 soc->ctrl_psoc = ctrl_psoc; 11970 soc->osdev = qdf_osdev; 11971 soc->num_hw_dscp_tid_map = 
HAL_MAX_HW_DSCP_TID_MAPS; 11972 11973 /* Reset wbm sg list and flags */ 11974 dp_rx_wbm_sg_list_reset(soc); 11975 11976 dp_soc_rx_history_attach(soc); 11977 wlan_set_srng_cfg(&soc->wlan_srng_cfg); 11978 soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc); 11979 if (!soc->wlan_cfg_ctx) { 11980 dp_err("wlan_cfg_ctx failed\n"); 11981 goto fail1; 11982 } 11983 11984 dp_soc_cfg_attach(soc); 11985 11986 if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) { 11987 dp_err("failed to allocate link desc pool banks"); 11988 goto fail2; 11989 } 11990 11991 if (dp_hw_link_desc_ring_alloc(soc)) { 11992 dp_err("failed to allocate link_desc_ring"); 11993 goto fail3; 11994 } 11995 11996 if (dp_soc_srng_alloc(soc)) { 11997 dp_err("failed to allocate soc srng rings"); 11998 goto fail4; 11999 } 12000 12001 if (dp_soc_tx_desc_sw_pools_alloc(soc)) { 12002 dp_err("dp_soc_tx_desc_sw_pools_alloc failed"); 12003 goto fail5; 12004 } 12005 12006 dp_soc_swlm_attach(soc); 12007 dp_soc_set_interrupt_mode(soc); 12008 dp_soc_set_def_pdev(soc); 12009 12010 dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u", 12011 qdf_dma_mem_stats_read(), 12012 qdf_heap_mem_stats_read(), 12013 qdf_skb_total_mem_stats_read()); 12014 12015 return soc; 12016 fail5: 12017 dp_soc_srng_free(soc); 12018 fail4: 12019 dp_hw_link_desc_ring_free(soc); 12020 fail3: 12021 dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID); 12022 fail2: 12023 wlan_cfg_soc_detach(soc->wlan_cfg_ctx); 12024 fail1: 12025 qdf_mem_free(soc); 12026 fail0: 12027 return NULL; 12028 } 12029 12030 /** 12031 * dp_soc_init() - Initialize txrx SOC 12032 * @dp_soc: Opaque DP SOC handle 12033 * @htc_handle: Opaque HTC handle 12034 * @hif_handle: Opaque HIF handle 12035 * 12036 * Return: DP SOC handle on success, NULL on failure 12037 */ 12038 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, 12039 struct hif_opaque_softc *hif_handle) 12040 { 12041 struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle; 12042 bool is_monitor_mode = false; 12043 struct hal_reo_params reo_params; 12044 uint8_t i; 12045 12046 wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc, 12047 WLAN_MD_DP_SOC, "dp_soc"); 12048 12049 htt_soc = htt_soc_attach(soc, htc_handle); 12050 if (!htt_soc) 12051 goto fail0; 12052 12053 soc->htt_handle = htt_soc; 12054 12055 if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS) 12056 goto fail1; 12057 12058 htt_set_htc_handle(htt_soc, htc_handle); 12059 soc->hif_handle = hif_handle; 12060 12061 soc->hal_soc = hif_get_hal_handle(soc->hif_handle); 12062 if (!soc->hal_soc) 12063 goto fail2; 12064 12065 dp_soc_cfg_init(soc); 12066 12067 /* Reset/Initialize wbm sg list and flags */ 12068 dp_rx_wbm_sg_list_reset(soc); 12069 12070 /* Note: Any SRNG ring initialization should happen only after 12071 * Interrupt mode is set and followed by filling up the 12072 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER. 
 */
	dp_soc_set_interrupt_mode(soc);
	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() ==
	    QDF_GLOBAL_MONITOR_MODE)
		is_monitor_mode = true;

	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, soc->intr_mode,
				     is_monitor_mode);

	/* initialize WBM_IDLE_LINK ring */
	if (dp_hw_link_desc_ring_init(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_hw_link_desc_ring_init failed"));
		goto fail3;
	}

	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);

	if (dp_soc_srng_init(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_soc_srng_init failed"));
		goto fail4;
	}

	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
			       htt_get_htc_handle(htt_soc),
			       soc->hal_soc, soc->osdev) == NULL)
		goto fail5;

	/* Initialize descriptors in TCL Rings */
	for (i = 0; i < soc->num_tcl_data_rings; i++) {
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[i].hal_srng);
	}

	if (dp_soc_tx_desc_sw_pools_init(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_tx_soc_attach failed"));
		goto fail6;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
	qdf_spinlock_create(&soc->vdev_map_lock);
	qdf_atomic_init(&soc->num_tx_outstanding);
	qdf_atomic_init(&soc->num_tx_exception);
	soc->num_tx_allowed =
		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
								CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL)
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
							    CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	/*
	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
	 * and IPQ5018; WMAC2 is not present on these platforms.
	 */
	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
	    soc->disable_mac2_intr)
		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);

	/*
	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018;
	 * WMAC1 is not present on this platform. 
12150 */ 12151 if (soc->disable_mac1_intr) 12152 dp_soc_disable_unused_mac_intr_mask(soc, 0x1); 12153 12154 /* Setup HW REO */ 12155 qdf_mem_zero(&reo_params, sizeof(reo_params)); 12156 12157 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 12158 /* 12159 * Reo ring remap is not required if both radios 12160 * are offloaded to NSS 12161 */ 12162 if (dp_reo_remap_config(soc, 12163 &reo_params.remap1, 12164 &reo_params.remap2)) 12165 reo_params.rx_hash_enabled = true; 12166 else 12167 reo_params.rx_hash_enabled = false; 12168 } 12169 12170 /* setup the global rx defrag waitlist */ 12171 TAILQ_INIT(&soc->rx.defrag.waitlist); 12172 soc->rx.defrag.timeout_ms = 12173 wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx); 12174 soc->rx.defrag.next_flush_ms = 0; 12175 soc->rx.flags.defrag_timeout_check = 12176 wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx); 12177 qdf_spinlock_create(&soc->rx.defrag.defrag_lock); 12178 12179 /* 12180 * set the fragment destination ring 12181 */ 12182 dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring); 12183 12184 if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) 12185 reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE; 12186 12187 hal_reo_setup(soc->hal_soc, &reo_params); 12188 12189 hal_reo_set_err_dst_remap(soc->hal_soc); 12190 12191 qdf_atomic_set(&soc->cmn_init_done, 1); 12192 12193 dp_soc_wds_attach(soc); 12194 12195 qdf_nbuf_queue_init(&soc->htt_stats.msg); 12196 12197 qdf_spinlock_create(&soc->ast_lock); 12198 12199 qdf_spinlock_create(&soc->reo_desc_freelist_lock); 12200 qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE); 12201 INIT_RX_HW_STATS_LOCK(soc); 12202 12203 /* fill the tx/rx cpu ring map*/ 12204 dp_soc_set_txrx_ring_map(soc); 12205 12206 TAILQ_INIT(&soc->inactive_peer_list); 12207 qdf_spinlock_create(&soc->inactive_peer_list_lock); 12208 TAILQ_INIT(&soc->inactive_vdev_list); 12209 qdf_spinlock_create(&soc->inactive_vdev_list_lock); 12210 qdf_spinlock_create(&soc->htt_stats.lock); 12211 /* initialize work queue for stats processing */ 12212 qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc); 12213 12214 dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u", 12215 qdf_dma_mem_stats_read(), 12216 qdf_heap_mem_stats_read(), 12217 qdf_skb_total_mem_stats_read()); 12218 12219 return soc; 12220 fail6: 12221 htt_soc_htc_dealloc(soc->htt_handle); 12222 fail5: 12223 dp_soc_srng_deinit(soc); 12224 fail4: 12225 dp_hw_link_desc_ring_deinit(soc); 12226 fail3: 12227 dp_hw_link_desc_ring_free(soc); 12228 fail2: 12229 htt_htc_pkt_pool_free(htt_soc); 12230 fail1: 12231 htt_soc_detach(htt_soc); 12232 fail0: 12233 return NULL; 12234 } 12235 12236 /** 12237 * dp_soc_init_wifi3() - Initialize txrx SOC 12238 * @soc: Opaque DP SOC handle 12239 * @ctrl_psoc: Opaque SOC handle from control plane(Unused) 12240 * @hif_handle: Opaque HIF handle 12241 * @htc_handle: Opaque HTC handle 12242 * @qdf_osdev: QDF device (Unused) 12243 * @ol_ops: Offload Operations (Unused) 12244 * @device_id: Device ID (Unused) 12245 * 12246 * Return: DP SOC handle on success, NULL on failure 12247 */ 12248 void *dp_soc_init_wifi3(struct cdp_soc_t *soc, 12249 struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 12250 struct hif_opaque_softc *hif_handle, 12251 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, 12252 struct ol_if_ops *ol_ops, uint16_t device_id) 12253 { 12254 return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle); 12255 } 12256 12257 #endif 12258 12259 /* 12260 * dp_get_pdev_for_mac_id() - Return pdev for mac_id 12261 * 12262 * @soc: handle to DP soc 12263 
* @mac_id: MAC id
 *
 * Return: pdev corresponding to MAC
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;

	/* Typically for MCL, as there is only 1 PDEV */
	return soc->pdev_list[0];
}

/*
 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
 * @soc: DP SoC context
 * @max_mac_rings: Number of MAC rings
 *
 * Return: None
 */
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings)
{
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);

	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
}

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/*
 * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @enable: Enable/Disable CFR
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: none
 */
static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
			  uint8_t pdev_id,
			  bool enable,
			  struct cdp_monitor_filter *filter_val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings;
	uint8_t mac_id = 0;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	if (pdev->monitor_vdev) {
		dp_info("No action is needed since monitor mode is enabled\n");
		return;
	}
	soc = pdev->soc;
	pdev->cfr_rcc_mode = false;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	dp_debug("Max_mac_rings %d", max_mac_rings);
	dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode);

	if (enable) {
		pdev->cfr_rcc_mode = true;

		htt_tlv_filter.ppdu_start = 1;
		htt_tlv_filter.ppdu_end = 1;
		htt_tlv_filter.ppdu_end_user_stats = 1;
		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
		htt_tlv_filter.ppdu_end_status_done = 1;
		htt_tlv_filter.mpdu_start = 1;
		htt_tlv_filter.offset_valid = false;

		htt_tlv_filter.enable_fp =
			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
		htt_tlv_filter.enable_md = 0;
		htt_tlv_filter.enable_mo =
			(filter_val->mode & MON_FILTER_OTHER) ? 
1 : 0; 12347 htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt; 12348 htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl; 12349 htt_tlv_filter.fp_data_filter = filter_val->fp_data; 12350 htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt; 12351 htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl; 12352 htt_tlv_filter.mo_data_filter = filter_val->mo_data; 12353 } 12354 12355 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { 12356 int mac_for_pdev = 12357 dp_get_mac_id_for_pdev(mac_id, 12358 pdev->pdev_id); 12359 12360 htt_h2t_rx_ring_cfg(soc->htt_handle, 12361 mac_for_pdev, 12362 soc->rxdma_mon_status_ring[mac_id] 12363 .hal_srng, 12364 RXDMA_MONITOR_STATUS, 12365 RX_MON_STATUS_BUF_SIZE, 12366 &htt_tlv_filter); 12367 } 12368 } 12369 12370 /** 12371 * dp_get_cfr_rcc() - get cfr rcc config 12372 * @soc_hdl: Datapath soc handle 12373 * @pdev_id: id of objmgr pdev 12374 * 12375 * Return: true/false based on cfr mode setting 12376 */ 12377 static 12378 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 12379 { 12380 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 12381 struct dp_pdev *pdev = NULL; 12382 12383 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 12384 if (!pdev) { 12385 dp_err("pdev is NULL"); 12386 return false; 12387 } 12388 12389 return pdev->cfr_rcc_mode; 12390 } 12391 12392 /** 12393 * dp_set_cfr_rcc() - enable/disable cfr rcc config 12394 * @soc_hdl: Datapath soc handle 12395 * @pdev_id: id of objmgr pdev 12396 * @enable: Enable/Disable cfr rcc mode 12397 * 12398 * Return: none 12399 */ 12400 static 12401 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable) 12402 { 12403 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 12404 struct dp_pdev *pdev = NULL; 12405 12406 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 12407 if (!pdev) { 12408 dp_err("pdev is NULL"); 12409 return; 12410 } 12411 12412 pdev->cfr_rcc_mode = enable; 12413 } 12414 12415 /* 12416 * dp_get_cfr_dbg_stats - Get the debug statistics for CFR 12417 * @soc_hdl: Datapath soc handle 12418 * @pdev_id: id of data path pdev handle 12419 * @cfr_rcc_stats: CFR RCC debug statistics buffer 12420 * 12421 * Return: none 12422 */ 12423 static inline void 12424 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 12425 struct cdp_cfr_rcc_stats *cfr_rcc_stats) 12426 { 12427 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 12428 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 12429 12430 if (!pdev) { 12431 dp_err("Invalid pdev"); 12432 return; 12433 } 12434 12435 qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc, 12436 sizeof(struct cdp_cfr_rcc_stats)); 12437 } 12438 12439 /* 12440 * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR 12441 * @soc_hdl: Datapath soc handle 12442 * @pdev_id: id of data path pdev handle 12443 * 12444 * Return: none 12445 */ 12446 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, 12447 uint8_t pdev_id) 12448 { 12449 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 12450 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 12451 12452 if (!pdev) { 12453 dp_err("dp pdev is NULL"); 12454 return; 12455 } 12456 12457 qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc)); 12458 } 12459 12460 /* 12461 * dp_enable_mon_reap_timer() - enable/disable reap timer 12462 * @soc_hdl: Datapath soc handle 12463 * @pdev_id: id of objmgr pdev 12464 * @enable: Enable/Disable reap timer of monitor status ring 12465 * 12466 * Return: none 12467 */ 12468 static void 12469 
dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			 bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	pdev->enable_reap_timer_non_pkt = enable;
	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
		dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode);
		return;
	}

	if (!soc->reap_timer_init) {
		dp_err("reap timer not init");
		return;
	}

	if (enable)
		qdf_timer_mod(&soc->mon_reap_timer,
			      DP_INTR_POLL_TIMER_MS);
	else
		qdf_timer_sync_cancel(&soc->mon_reap_timer);
}
#endif

/*
 * dp_is_enable_reap_timer_non_pkt() - check if the mon reap timer is
 * enabled by a non-pktlog client
 * @pdev: pointer to dp pdev
 *
 * Return: true if mon reap timer is enabled by non-pkt log
 */
static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
{
	if (!pdev) {
		dp_err("null pdev");
		return false;
	}

	return pdev->enable_reap_timer_non_pkt;
}

/*
 * dp_set_pktlog_wifi3() - enable/disable packet log for the given
 * WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: true to enable pktlog for the event, false to disable
 *
 * Return: 0 on success
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	soc = pdev->soc;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				dp_mon_filter_setup_rx_pkt_log_full(pdev);
				if (dp_mon_filter_update(pdev) !=
						QDF_STATUS_SUCCESS) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("Pktlog full filters set failed"));
					dp_mon_filter_reset_rx_pkt_log_full(pdev);
					pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
					return 0;
				}

				if (soc->reap_timer_init &&
				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				/*
				 * Set the packet log lite mode filter.
				 */
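				/*
				 * Same staged-commit pattern as the full-mode
				 * path above: stage the lite-mode filter with
				 * dp_mon_filter_setup_rx_pkt_log_lite(),
				 * commit it with dp_mon_filter_update(), and
				 * on failure reset the staged filter and
				 * leave pktlog disabled.
				 */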
				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
				if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("Pktlog lite filters set failed"));
					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
					pdev->rx_pktlog_mode =
						DP_RX_PKTLOG_DISABLED;
					return 0;
				}

				if (soc->reap_timer_init &&
				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_mod(&soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
				dp_mon_filter_reset_rx_pkt_log_full(pdev);
				if (dp_mon_filter_update(pdev) !=
						QDF_STATUS_SUCCESS) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("Pktlog filters reset failed"));
					return 0;
				}

				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
				if (dp_mon_filter_update(pdev) !=
						QDF_STATUS_SUCCESS) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("Pktlog filters reset failed"));
					return 0;
				}

				if (soc->reap_timer_init &&
				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			/*
			 * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the
			 * FW, the value 0 is passed; once proper macros are
			 * defined in the htt header file, they will be used
			 * here instead.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								       pdev->pdev_id);

				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}

			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
#endif

/**
 * dp_bucket_index() - Return index from array
 *
 * @delay: delay measured
 * @array: array used to index corresponding delay
 *
 * Return: index
 */
static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
{
	uint8_t i = CDP_DELAY_BUCKET_0;

	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
		if (delay >= array[i] && delay <= array[i + 1])
			return i;
	}

	return (CDP_DELAY_BUCKET_MAX - 1);
}
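
/*
 * Worked example (illustrative): with the cdp_sw_enq_delay table
 * {0, 1, 2, ..., 12} defined below, dp_bucket_index(7, array) returns
 * 6, since 7 falls within [array[6], array[7]] = [6, 7]; a boundary
 * value matches the lower of its two adjacent buckets because the
 * scan stops at the first interval containing the delay. Any delay
 * beyond the last boundary lands in the overflow bucket
 * CDP_DELAY_BUCKET_MAX - 1.
 */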

/**
 * dp_fill_delay_buckets() - Fill delay statistics bucket for each
 *				type of delay
 *
 * @pdev: pdev handle
 * @delay: delay in ms
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 * Return: pointer to cdp_delay_stats structure
 */
static struct cdp_delay_stats *
dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
		      uint8_t tid, uint8_t mode, uint8_t ring_id)
{
	uint8_t delay_index = 0;
	struct cdp_tid_tx_stats *tstats =
		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
	struct cdp_tid_rx_stats *rstats =
		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
	/*
	 * cdp_fw_to_hw_delay_range
	 * Fw to hw delay ranges in milliseconds
	 */
	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};

	/*
	 * cdp_sw_enq_delay_range
	 * Software enqueue delay ranges in milliseconds
	 */
	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};

	/*
	 * cdp_intfrm_delay_range
	 * Interframe delay ranges in milliseconds
	 */
	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};

	/*
	 * Update delay stats in proper bucket
	 */
	switch (mode) {
	/* Software Enqueue delay ranges */
	case CDP_DELAY_STATS_SW_ENQ:
		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
		tstats->swq_delay.delay_bucket[delay_index]++;
		return &tstats->swq_delay;

	/* Tx Completion delay ranges */
	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
		tstats->hwtx_delay.delay_bucket[delay_index]++;
		return &tstats->hwtx_delay;

	/* Interframe tx delay ranges */
	case CDP_DELAY_STATS_TX_INTERFRAME:
		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
		tstats->intfrm_delay.delay_bucket[delay_index]++;
		return &tstats->intfrm_delay;

	/* Interframe rx delay ranges */
	case CDP_DELAY_STATS_RX_INTERFRAME:
		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
		rstats->intfrm_delay.delay_bucket[delay_index]++;
		return &rstats->intfrm_delay;

	/* Ring reap to indication to network stack */
	case CDP_DELAY_STATS_REAP_STACK:
		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
		rstats->to_stack_delay.delay_bucket[delay_index]++;
		return &rstats->to_stack_delay;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Incorrect delay mode: %d", __func__, mode);
	}

	return NULL;
}

/**
 * dp_update_delay_stats() - Update delay statistics in structure
 *				and fill min, max and avg delay
 *
 * @pdev: pdev handle
 * @delay: delay in ms
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 * Return: none
 */
void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
			   uint8_t tid, uint8_t mode, uint8_t ring_id)
{
	struct cdp_delay_stats *dstats = NULL;

	/*
	 * Delay ranges are different for different delay modes
	 * Get the correct index to update delay bucket
	 */
	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
	if (qdf_unlikely(!dstats))
		return;

	if (delay != 0) {
		/*
		 * Compute minimum, average and maximum
		 * delay
		 */
		if (delay < dstats->min_delay)
			dstats->min_delay = delay;

		if (delay > dstats->max_delay)
			dstats->max_delay = delay;

		/*
		 * Average over delay measured till now
		 */
		if (!dstats->avg_delay)
			dstats->avg_delay = delay;
		else
			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
	}
}

/**
 * dp_get_peer_mac_list(): function to get peer mac list of vdev
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @newmac: Table of the clients' MAC addresses
 * @mac_cnt: No. of MACs required
 * @limit: Limit the number of clients
 *
 * Return: number of clients
 */
uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
			      u_int16_t mac_cnt, bool limit)
{
	struct dp_soc *dp_soc = (struct dp_soc *)soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
	struct dp_peer *peer;
	uint16_t new_mac_cnt = 0;

	if (!vdev)
		return new_mac_cnt;

	if (limit && (vdev->num_peers > mac_cnt)) {
		/* release the vdev reference taken above before bailing out */
		dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
		return 0;
	}

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;
		if (new_mac_cnt < mac_cnt) {
			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
			new_mac_cnt++;
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
	return new_mac_cnt;
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
				uint8_t vdev_id,
				uint8_t *mac)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      mac, 0, vdev_id,
						      DP_MOD_ID_CDP);
	uint16_t peer_id = HTT_INVALID_PEER;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return peer_id;
	}

	peer_id = peer->peer_id;
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return peer_id;
}

QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
				  uint8_t vdev_id,
				  uint8_t *mac,
				  ol_txrx_rx_fp rx,
				  ol_osif_peer_handle osif_peer)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      mac, 0, vdev_id,
						      DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_E_INVAL;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return status;
	}

	if (rx) {
		if (peer->osif_rx) {
			status = QDF_STATUS_E_ALREADY;
		} else {
			peer->osif_rx = rx;
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		if (peer->osif_rx) {
			peer->osif_rx = NULL;
			status = QDF_STATUS_SUCCESS;
		} else {
			status = QDF_STATUS_E_ALREADY;
		}
	}

	peer->wds_ext.osif_peer = osif_peer;
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

/**
 * dp_pdev_srng_deinit() - de-initialize all pdev srng rings including
 *			   monitor rings
 * @pdev: Datapath pdev handle
 *
 */
static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint8_t i;

	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF,
		       pdev->lmac_id);

	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);

	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);

		wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned);
		dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
			       RXDMA_DST, lmac_id);
	}

	dp_mon_rings_deinit(pdev);
}

/**
 * 
dp_pdev_srng_init() - initialize all pdev srng rings including 12972 * monitor rings 12973 * @pdev: Datapath pdev handle 12974 * 12975 * return: QDF_STATUS_SUCCESS on success 12976 * QDF_STATUS_E_NOMEM on failure 12977 */ 12978 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev) 12979 { 12980 struct dp_soc *soc = pdev->soc; 12981 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 12982 uint32_t i; 12983 12984 soc_cfg_ctx = soc->wlan_cfg_ctx; 12985 12986 if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], 12987 RXDMA_BUF, 0, pdev->lmac_id)) { 12988 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 12989 FL("dp_srng_init failed rx refill ring")); 12990 goto fail1; 12991 } 12992 12993 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { 12994 if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX)) 12995 goto fail1; 12996 } 12997 12998 if (dp_mon_rings_init(soc, pdev)) { 12999 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13000 FL("MONITOR rings setup failed")); 13001 goto fail1; 13002 } 13003 13004 /* LMAC RxDMA to SW Rings configuration */ 13005 if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) 13006 /* Only valid for MCL */ 13007 pdev = soc->pdev_list[0]; 13008 13009 for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { 13010 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id); 13011 struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id]; 13012 13013 if (srng->hal_srng) 13014 continue; 13015 13016 if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) { 13017 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13018 FL(RNG_ERR "rxdma_err_dst_ring")); 13019 goto fail1; 13020 } 13021 wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned, 13022 soc->rxdma_err_dst_ring[lmac_id].alloc_size, 13023 soc->ctrl_psoc, 13024 WLAN_MD_DP_SRNG_RXDMA_ERR_DST, 13025 "rxdma_err_dst"); 13026 } 13027 return QDF_STATUS_SUCCESS; 13028 13029 fail1: 13030 dp_pdev_srng_deinit(pdev); 13031 return QDF_STATUS_E_NOMEM; 13032 } 13033 13034 /** 13035 * dp_pdev_srng_free() - free all pdev srng rings including monitor rings 13036 * pdev: Datapath pdev handle 13037 * 13038 */ 13039 static void dp_pdev_srng_free(struct dp_pdev *pdev) 13040 { 13041 struct dp_soc *soc = pdev->soc; 13042 uint8_t i; 13043 13044 dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]); 13045 dp_mon_rings_free(pdev); 13046 13047 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 13048 dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX); 13049 13050 for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { 13051 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id); 13052 13053 dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]); 13054 } 13055 } 13056 13057 /** 13058 * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including 13059 * monitor rings 13060 * pdev: Datapath pdev handle 13061 * 13062 * return: QDF_STATUS_SUCCESS on success 13063 * QDF_STATUS_E_NOMEM on failure 13064 */ 13065 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev) 13066 { 13067 struct dp_soc *soc = pdev->soc; 13068 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 13069 uint32_t ring_size; 13070 uint32_t i; 13071 13072 soc_cfg_ctx = soc->wlan_cfg_ctx; 13073 13074 ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx); 13075 if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], 13076 RXDMA_BUF, ring_size, 0)) { 13077 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13078 FL("dp_srng_alloc failed rx refill ring")); 13079 goto fail1; 13080 } 13081 13082 if (dp_mon_rings_alloc(soc, pdev)) { 13083 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13084 FL("MONITOR rings setup failed")); 13085 goto fail1; 13086 } 13087 13088 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { 13089 if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX)) 13090 goto fail1; 13091 } 13092 13093 ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx); 13094 /* LMAC RxDMA to SW Rings configuration */ 13095 if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) 13096 /* Only valid for MCL */ 13097 pdev = soc->pdev_list[0]; 13098 13099 for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { 13100 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id); 13101 struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id]; 13102 13103 if (srng->base_vaddr_unaligned) 13104 continue; 13105 13106 if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) { 13107 QDF_TRACE(QDF_MODULE_ID_DP, 13108 QDF_TRACE_LEVEL_ERROR, 13109 FL(RNG_ERR "rxdma_err_dst_ring")); 13110 goto fail1; 13111 } 13112 } 13113 13114 return QDF_STATUS_SUCCESS; 13115 fail1: 13116 dp_pdev_srng_free(pdev); 13117 return QDF_STATUS_E_NOMEM; 13118 } 13119 13120 /** 13121 * dp_soc_srng_deinit() - de-initialize soc srng rings 13122 * @soc: Datapath soc handle 13123 * 13124 */ 13125 static void dp_soc_srng_deinit(struct dp_soc *soc) 13126 { 13127 uint32_t i; 13128 /* Free the ring memories */ 13129 /* Common rings */ 13130 wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned); 13131 dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0); 13132 13133 /* Tx data rings */ 13134 for (i = 0; i < soc->num_tcl_data_rings; i++) 13135 dp_deinit_tx_pair_by_index(soc, i); 13136 13137 /* TCL command and status rings */ 13138 if (soc->init_tcl_cmd_cred_ring) { 13139 wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned); 13140 dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring, 13141 TCL_CMD_CREDIT, 0); 13142 } 13143 13144 wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned); 13145 dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0); 13146 13147 for (i = 0; i < soc->num_reo_dest_rings; i++) { 13148 /* TODO: Get number of rings and ring sizes 13149 * from wlan_cfg 13150 */ 13151 wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned); 13152 dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i); 13153 } 13154 13155 /* REO reinjection ring */ 13156 wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned); 13157 dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0); 13158 13159 /* Rx release ring */ 13160 wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned); 13161 dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0); 13162 13163 /* Rx exception ring */ 13164 /* TODO: Better to store ring_type and ring_num in 13165 * dp_srng during setup 13166 */ 13167 wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned); 13168 dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0); 13169 13170 /* REO command and status rings */ 13171 wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned); 13172 dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0); 13173 wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned); 13174 dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0); 13175 } 13176 13177 /** 13178 * dp_soc_srng_init() - Initialize soc level srng rings 13179 * @soc: Datapath soc handle 13180 * 13181 * return: QDF_STATUS_SUCCESS on success 13182 * QDF_STATUS_E_FAILURE on failure 13183 */ 13184 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc) 13185 { 
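	/*
	 * The rings initialized below were allocated earlier in
	 * dp_soc_srng_alloc(); this function only attaches them to HAL
	 * and registers each one with the minidump facility. Any
	 * failure unwinds through dp_soc_srng_deinit() via fail1.
	 */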
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint8_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	dp_enable_verbose_debug(soc);

	/* WBM descriptor release ring */
	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_init failed for wbm_desc_rel_ring"));
		goto fail1;
	}

	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
			  soc->wbm_desc_rel_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
			  "wbm_desc_rel_ring");

	if (soc->init_tcl_cmd_cred_ring) {
		/* TCL command and status rings */
		if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
				 TCL_CMD_CREDIT, 0, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("dp_srng_init failed for tcl_cmd_ring"));
			goto fail1;
		}

		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				  soc->tcl_cmd_credit_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_TCL_CMD,
				  "tcl_cmd_credit_ring");
	}

	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_init failed for tcl_status_ring"));
		goto fail1;
	}

	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
			  soc->tcl_status_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TCL_STATUS,
			  "tcl_status_ring");

	/* REO reinjection ring */
	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_init failed for reo_reinject_ring"));
		goto fail1;
	}

	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
			  soc->reo_reinject_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO_REINJECT,
			  "reo_reinject_ring");

	/* Rx release ring */
	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_init failed for rx_rel_ring"));
		goto fail1;
	}

	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
			  soc->rx_rel_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_RX_REL,
			  "reo_release_ring");

	/* Rx exception ring */
	if (dp_srng_init(soc, &soc->reo_exception_ring,
			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_init failed - reo_exception"));
		goto fail1;
	}

	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
			  soc->reo_exception_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
			  "reo_exception_ring");

	/* REO command and status rings */
	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_init failed for reo_cmd_ring"));
		goto fail1;
	}

	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
			  soc->reo_cmd_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO_CMD,
			  "reo_cmd_ring");

	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
	TAILQ_INIT(&soc->rx.reo_cmd_list);
	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_init failed for reo_status_ring"));
		goto fail1;
	}

	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
			  soc->reo_status_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO_STATUS,
			  "reo_status_ring");

	for (i = 0; i < soc->num_tcl_data_rings; i++) {
		if (dp_init_tx_ring_pair_by_index(soc, i))
			goto fail1;
	}

	dp_create_ext_stats_event(soc);

	for (i = 0; i < soc->num_reo_dest_rings; i++) {
		/* Initialize REO destination ring */
		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("dp_srng_init failed for reo_dest_ring"));
			goto fail1;
		}

		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
				  soc->reo_dest_ring[i].alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_REO_DEST,
				  "reo_dest_ring");
	}

	return QDF_STATUS_SUCCESS;
fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
	dp_soc_srng_deinit(soc);
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_soc_srng_free() - free soc level srng rings
 * @soc: Datapath soc handle
 *
 */
static void dp_soc_srng_free(struct dp_soc *soc)
{
	uint32_t i;

	dp_srng_free(soc, &soc->wbm_desc_rel_ring);

	for (i = 0; i < soc->num_tcl_data_rings; i++)
		dp_free_tx_ring_pair_by_index(soc, i);

	if (soc->init_tcl_cmd_cred_ring)
		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);

	dp_srng_free(soc, &soc->tcl_status_ring);

	for (i = 0; i < soc->num_reo_dest_rings; i++)
		dp_srng_free(soc, &soc->reo_dest_ring[i]);

	dp_srng_free(soc, &soc->reo_reinject_ring);
	dp_srng_free(soc, &soc->rx_rel_ring);

	dp_srng_free(soc, &soc->reo_exception_ring);

	dp_srng_free(soc, &soc->reo_cmd_ring);
	dp_srng_free(soc, &soc->reo_status_ring);
}

/**
 * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
 * @soc: Datapath soc handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
{
	uint32_t entries;
	uint32_t i;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	/* sw2wbm link descriptor release ring */
	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
			  entries, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_alloc failed for wbm_desc_rel_ring"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
	/* TCL command and status rings */
	if (soc->init_tcl_cmd_cred_ring) {
		if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
				  TCL_CMD_CREDIT, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("dp_srng_alloc failed for tcl_cmd_ring"));
			goto fail1;
		}
	}

	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
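	/*
	 * TCL status ring (and every ring below): entry counts come
	 * from the soc cfg context; this function only allocates the
	 * backing memory, while HAL initialization happens later in
	 * dp_soc_srng_init().
	 */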
13404 if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries, 13405 0)) { 13406 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13407 FL("dp_srng_alloc failed for tcl_status_ring")); 13408 goto fail1; 13409 } 13410 13411 /* REO reinjection ring */ 13412 entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx); 13413 if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT, 13414 entries, 0)) { 13415 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13416 FL("dp_srng_alloc failed for reo_reinject_ring")); 13417 goto fail1; 13418 } 13419 13420 /* Rx release ring */ 13421 entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx); 13422 if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 13423 entries, 0)) { 13424 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13425 FL("dp_srng_alloc failed for rx_rel_ring")); 13426 goto fail1; 13427 } 13428 13429 /* Rx exception ring */ 13430 entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx); 13431 if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION, 13432 entries, 0)) { 13433 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13434 FL("dp_srng_alloc failed - reo_exception")); 13435 goto fail1; 13436 } 13437 13438 /* REO command and status rings */ 13439 entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx); 13440 if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) { 13441 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13442 FL("dp_srng_alloc failed for reo_cmd_ring")); 13443 goto fail1; 13444 } 13445 13446 entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx); 13447 if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS, 13448 entries, 0)) { 13449 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13450 FL("dp_srng_alloc failed for reo_status_ring")); 13451 goto fail1; 13452 } 13453 13454 tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); 13455 tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx); 13456 reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx); 13457 13458 /* Disable cached desc if NSS offload is enabled */ 13459 if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) 13460 cached = 0; 13461 13462 for (i = 0; i < soc->num_tcl_data_rings; i++) { 13463 if (dp_alloc_tx_ring_pair_by_index(soc, i)) 13464 goto fail1; 13465 } 13466 13467 for (i = 0; i < soc->num_reo_dest_rings; i++) { 13468 /* Setup REO destination ring */ 13469 if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST, 13470 reo_dst_ring_size, cached)) { 13471 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 13472 FL("dp_srng_alloc failed for reo_dest_ring")); 13473 goto fail1; 13474 } 13475 } 13476 13477 return QDF_STATUS_SUCCESS; 13478 13479 fail1: 13480 dp_soc_srng_free(soc); 13481 return QDF_STATUS_E_NOMEM; 13482 } 13483 13484 /** 13485 * dp_soc_cfg_init() - initialize target specific configuration 13486 * during dp_soc_init 13487 * @soc: dp soc handle 13488 */ 13489 static void dp_soc_cfg_init(struct dp_soc *soc) 13490 { 13491 int target_type; 13492 13493 target_type = hal_get_target_type(soc->hal_soc); 13494 switch (target_type) { 13495 case TARGET_TYPE_QCA6290: 13496 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, 13497 REO_DST_RING_SIZE_QCA6290); 13498 soc->ast_override_support = 1; 13499 soc->da_war_enabled = false; 13500 break; 13501 case TARGET_TYPE_QCA6390: 13502 case TARGET_TYPE_QCA6490: 13503 case TARGET_TYPE_QCA6750: 13504 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, 13505 REO_DST_RING_SIZE_QCA6290); 13506 wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, 

/**
 * dp_soc_cfg_init() - initialize target specific configuration
 *		       during dp_soc_init
 * @soc: dp soc handle
 */
static void dp_soc_cfg_init(struct dp_soc *soc)
{
	int target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		break;
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		if (soc->cdp_soc.ol_ops->get_con_mode &&
		    soc->cdp_soc.ol_ops->get_con_mode() ==
		    QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->da_war_enabled = true;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		soc->da_war_enabled = false;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCN9000);
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		soc->lmac_polled_mode = 0;
		soc->wbm_release_desc_rx_sg_support = 1;
		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
			dp_config_full_mon_mode((struct cdp_soc_t *)soc, 1);
		break;
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN9100:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
		soc->disable_mac1_intr = 1;
		soc->disable_mac2_intr = 1;
		soc->wbm_release_desc_rx_sg_support = 1;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
}
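
/*
 * A minimal sketch (assumption, not in the driver): supporting a new target
 * in dp_soc_cfg_init() amounts to one more case label before "default".
 * A hypothetical TARGET_TYPE_FOO reusing the QCA8074 REO sizing would be:
 *
 *	case TARGET_TYPE_FOO:
 *		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
 *					       REO_DST_RING_SIZE_QCA8074);
 *		soc->ast_override_support = 1;
 *		break;
 *
 * Unknown targets deliberately hit qdf_assert_always(0) so a misconfigured
 * build fails loudly instead of running with wrong ring sizes.
 */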

/**
 * dp_soc_cfg_attach() - set target specific configuration in
 *			 dp soc cfg.
 * @soc: dp soc handle
 */
static void dp_soc_cfg_attach(struct dp_soc *soc)
{
	int target_type;
	int nss_cfg = 0;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		break;
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCN9100:
	case TARGET_TYPE_QCA5018:
		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCN9000);
		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);

	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);

	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
		soc->init_tcl_cmd_cred_ring = false;
		soc->num_tcl_data_rings =
			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
		soc->num_reo_dest_rings =
			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
	} else {
		soc->init_tcl_cmd_cred_ring = true;
		soc->num_tcl_data_rings =
			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
	}
}

static inline void dp_pdev_set_default_reo(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	switch (pdev->pdev_id) {
	case 0:
		pdev->reo_dest =
			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
		break;

	case 1:
		pdev->reo_dest =
			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
		break;

	case 2:
		pdev->reo_dest =
			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid pdev_id %d for reo selection",
			  pdev->pdev_id);
		break;
	}
}
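
/*
 * Illustrative note: dp_pdev_set_default_reo() gives each radio its own
 * default REO2SW destination so rx traffic from different pdevs lands on
 * different host rings. A hedged sketch of overriding that default after
 * init (assuming pdev->reo_dest holds a cdp_host_reo_dest_ring value, as
 * the wlan_cfg_radio*_default_reo_get() accessors suggest):
 *
 *	// hypothetical: steer this radio's rx to REO destination ring 2
 *	pdev->reo_dest = cdp_host_reo_dest_ring_2;
 */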

static inline QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
				      HTC_HANDLE htc_handle,
				      qdf_device_t qdf_osdev,
				      uint8_t pdev_id)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	void *sojourn_buf;
	QDF_STATUS ret;

	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->soc = soc;
	pdev->pdev_id = pdev_id;

	pdev->filter = dp_mon_filter_alloc(pdev);
	if (!pdev->filter) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Memory allocation failed for monitor filters"));
		ret = QDF_STATUS_E_NOMEM;
		goto fail0;
	}

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution, i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_event_attach failed");
		goto fail1;
	}

	if (dp_pdev_srng_init(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to initialize pdev srng rings"));
		goto fail2;
	}

	/* Initialize descriptors in TCL Rings used by IPA */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);

	/*
	 * Initialize command/credit ring descriptor
	 * Command/CREDIT ring also used for sending DATA cmds
	 */
	if (soc->init_tcl_cmd_cred_ring)
		hal_tx_init_cmd_credit_ring(soc->hal_soc,
					    soc->tcl_cmd_credit_ring.hal_srng);

	dp_tx_pdev_init(pdev);

	/*
	 * Allocate memory for the invalid peer, which is used to account
	 * for rx frames from peers that are not (or no longer) known
	 * to the host.
	 */
	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
	if (!pdev->invalid_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid peer memory allocation failed"));
		goto fail3;
	}

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);

	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
	}

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
	TAILQ_INIT(&pdev->neighbour_peers_list);
	pdev->neighbour_peers_added = false;
	pdev->monitor_configured = false;
	pdev->mon_chan_band = REG_BAND_UNKNOWN;
	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;

	DP_STATS_INIT(pdev);

	/* Monitor filter init */
	pdev->mon_filter_mode = MON_FILTER_ALL;
	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
	pdev->fp_data_filter = FILTER_DATA_ALL;
	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
	pdev->mo_data_filter = FILTER_DATA_ALL;
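
	/*
	 * Explanatory note (not from the original source): the fp_* fields
	 * above configure the filter-pass (FP) category and the mo_* fields
	 * the monitor-other (MO) category of the rx packet filter. Starting
	 * from the FILTER_*_ALL defaults means the monitor path begins fully
	 * permissive and is narrowed later via the dp_mon_filter_* APIs.
	 */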

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);
	dp_pcp_tid_map_setup(pdev);

	/* set the reo destination during initialization */
	dp_pdev_set_default_reo(pdev);

	/*
	 * initialize ppdu tlv list
	 */
	TAILQ_INIT(&pdev->ppdu_info_list);
	TAILQ_INIT(&pdev->sched_comp_ppdu_list);
	pdev->tlv_count = 0;
	pdev->list_depth = 0;

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
					   sizeof(struct cdp_tx_sojourn_stats),
					   0, 4, TRUE);
	if (!pdev->sojourn_buf) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate sojourn buf"));
		goto fail4;
	}
	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));

	/* initialize cal client timer */
	dp_cal_client_attach(&pdev->cal_client_ctx,
			     dp_pdev_to_cdp_pdev(pdev),
			     pdev->soc->osdev,
			     &dp_iterate_update_peer_list);
	qdf_event_create(&pdev->fw_peer_stats_event);

	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
		goto fail5;

	if (dp_rxdma_ring_setup(soc, pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("RXDMA ring config failed"));
		goto fail6;
	}

	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail7;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail8;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_ipa_uc_attach failed"));
		goto fail8;
	}

	ret = dp_rx_fst_attach(soc, pdev);
	if ((ret != QDF_STATUS_SUCCESS) &&
	    (ret != QDF_STATUS_E_NOSUPPORT)) {
		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
			  "RX Flow Search Table attach failed: pdev %d err %d",
			  pdev_id, ret);
		goto fail9;
	}

	/* initialize sw rx descriptors */
	dp_rx_pdev_desc_pool_init(pdev);
	/* initialize sw monitor rx descriptors */
	dp_rx_pdev_mon_desc_pool_init(pdev);
	/* allocate buffers and replenish the RxDMA ring */
	dp_rx_pdev_buffers_alloc(pdev);
	/* allocate buffers and replenish the monitor RxDMA ring */
	dp_rx_pdev_mon_buffers_alloc(pdev);

	dp_init_tso_stats(pdev);
	dp_tx_ppdu_stats_attach(pdev);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return QDF_STATUS_SUCCESS;

fail9:
	dp_ipa_uc_detach(soc, pdev);
fail8:
	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
fail7:
	dp_rxdma_ring_cleanup(soc, pdev);
fail6:
	dp_htt_ppdu_stats_detach(pdev);
fail5:
	qdf_nbuf_free(pdev->sojourn_buf);
fail4:
	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);
	qdf_mem_free(pdev->invalid_peer);
fail3:
	dp_pdev_srng_deinit(pdev);
fail2:
	dp_wdi_event_detach(pdev);
fail1:
	dp_mon_filter_dealloc(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}
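
/*
 * Sketch of the error-unwind convention used in dp_pdev_init() above
 * (commentary, not driver code): a failure at step N jumps to label failN,
 * which undoes step N-1 and then falls through the remaining labels, so
 * cleanup always runs in exact reverse order of initialization. A
 * hypothetical new step added after dp_rx_fst_attach() would therefore get
 * a new label placed *above* fail9:
 *
 *	if (dp_step_x(pdev))		// hypothetical new init step
 *		goto fail10;
 *	...
 * fail10:
 *	dp_step_x_undo(pdev);		// hypothetical undo, falls into fail9
 * fail9:
 *	dp_ipa_uc_detach(soc, pdev);
 */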

/*
 * dp_pdev_init_wifi3() - Init txrx pdev
 * @txrx_soc: dp soc handle
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: pdev id of the pdev to be initialized
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
				     HTC_HANDLE htc_handle,
				     qdf_device_t qdf_osdev,
				     uint8_t pdev_id)
{
	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
}
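
/*
 * A hedged usage sketch (assumptions: dp_pdev_attach_wifi3() has already
 * populated soc->pdev_list[pdev_id], and the names match the forward
 * declarations at the top of this file): the control path is expected to
 * drive attach before init, mirroring the soc-level attach/init split:
 *
 *	QDF_STATUS status;
 *
 *	status = dp_pdev_attach_wifi3(txrx_soc, htc_handle, qdf_osdev, 0);
 *	if (QDF_IS_STATUS_SUCCESS(status))
 *		status = dp_pdev_init_wifi3(txrx_soc, htc_handle,
 *					    qdf_osdev, 0);
 */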