1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 #ifndef __DP_TX_H 20 #define __DP_TX_H 21 22 #include <qdf_types.h> 23 #include <qdf_nbuf.h> 24 #include "dp_types.h" 25 #ifdef FEATURE_PERPKT_INFO 26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \ 27 defined(QCA_TX_CAPTURE_SUPPORT) || \ 28 defined(QCA_MCOPY_SUPPORT) 29 #include "if_meta_hdr.h" 30 #endif 31 #endif 32 #include "dp_internal.h" 33 #include "hal_tx.h" 34 #include <qdf_tracepoint.h> 35 #ifdef CONFIG_SAWF 36 #include "dp_sawf.h" 37 #endif 38 #include <qdf_pkt_add_timestamp.h> 39 40 #define DP_INVALID_VDEV_ID 0xFF 41 42 #define DP_TX_MAX_NUM_FRAGS 6 43 44 /* 45 * DP_TX_DESC_FLAG_FRAG flags should always be defined to 0x1 46 * please do not change this flag's definition 47 */ 48 #define DP_TX_DESC_FLAG_FRAG 0x1 49 #define DP_TX_DESC_FLAG_TO_FW 0x2 50 #define DP_TX_DESC_FLAG_SIMPLE 0x4 51 #define DP_TX_DESC_FLAG_RAW 0x8 52 #define DP_TX_DESC_FLAG_MESH 0x10 53 #define DP_TX_DESC_FLAG_QUEUED_TX 0x20 54 #define DP_TX_DESC_FLAG_COMPLETED_TX 0x40 55 #define DP_TX_DESC_FLAG_ME 0x80 56 #define DP_TX_DESC_FLAG_TDLS_FRAME 0x100 57 #define 
DP_TX_DESC_FLAG_ALLOCATED 0x200 58 #define DP_TX_DESC_FLAG_MESH_MODE 0x400 59 #define DP_TX_DESC_FLAG_UNMAP_DONE 0x800 60 #define DP_TX_DESC_FLAG_TX_COMP_ERR 0x1000 61 #define DP_TX_DESC_FLAG_FLUSH 0x2000 62 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND 0x4000 63 64 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1 65 66 #define DP_TX_FREE_SINGLE_BUF(soc, buf) \ 67 do { \ 68 qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \ 69 qdf_nbuf_free(buf); \ 70 } while (0) 71 72 #define OCB_HEADER_VERSION 1 73 74 #ifdef TX_PER_PDEV_DESC_POOL 75 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 76 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id) 77 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */ 78 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id) 79 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 80 #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id) 81 #else 82 #ifdef TX_PER_VDEV_DESC_POOL 83 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id) 84 #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id) 85 #endif /* TX_PER_VDEV_DESC_POOL */ 86 #endif /* TX_PER_PDEV_DESC_POOL */ 87 #define DP_TX_QUEUE_MASK 0x3 88 89 #define MAX_CDP_SEC_TYPE 12 90 91 /* number of dwords for htt_tx_msdu_desc_ext2_t */ 92 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7 93 94 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params) 95 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params) 96 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params) 97 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params) 98 #define dp_tx_info(params...) \ 99 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params) 100 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params) 101 102 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params) 103 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params) 104 #define dp_tx_comp_warn(params...) 
QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params) 105 #define dp_tx_comp_info(params...) \ 106 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params) 107 #define dp_tx_comp_info_rl(params...) \ 108 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params) 109 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params) 110 111 #ifndef QCA_HOST_MODE_WIFI_DISABLED 112 113 /** 114 * struct dp_tx_frag_info_s 115 * @vaddr: hlos virtual address for buffer 116 * @paddr_lo: physical address lower 32bits 117 * @paddr_hi: physical address higher bits 118 * @len: length of the buffer 119 */ 120 struct dp_tx_frag_info_s { 121 uint8_t *vaddr; 122 uint32_t paddr_lo; 123 uint16_t paddr_hi; 124 uint16_t len; 125 }; 126 127 /** 128 * struct dp_tx_seg_info_s - Segmentation Descriptor 129 * @nbuf: NBUF pointer if segment corresponds to separate nbuf 130 * @frag_cnt: Fragment count in this segment 131 * @total_len: Total length of segment 132 * @frags: per-Fragment information 133 * @next: pointer to next MSDU segment 134 */ 135 struct dp_tx_seg_info_s { 136 qdf_nbuf_t nbuf; 137 uint16_t frag_cnt; 138 uint16_t total_len; 139 struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS]; 140 struct dp_tx_seg_info_s *next; 141 }; 142 143 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 144 145 /** 146 * struct dp_tx_sg_info_s - Scatter Gather Descriptor 147 * @num_segs: Number of segments (TSO/ME) in the frame 148 * @total_len: Total length of the frame 149 * @curr_seg: Points to current segment descriptor to be processed. Chain of 150 * descriptors for SG frames/multicast-unicast converted packets. 151 * 152 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to 153 * carry fragmentation information 154 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries 155 * indicated through flags in SKB CB (first_msdu and last_msdu). 
This will be 156 * converted into set of skb sg (nr_frags) structures. 157 */ 158 struct dp_tx_sg_info_s { 159 uint32_t num_segs; 160 uint32_t total_len; 161 struct dp_tx_seg_info_s *curr_seg; 162 }; 163 164 /** 165 * struct dp_tx_queue - Tx queue 166 * @desc_pool_id: Descriptor Pool to be used for the tx queue 167 * @ring_id: TCL descriptor ring ID corresponding to the tx queue 168 * 169 * Tx queue contains information of the software (Descriptor pool) 170 * and hardware resources (TCL ring id) to be used for a particular 171 * transmit queue (obtained from skb_queue_mapping in case of linux) 172 */ 173 struct dp_tx_queue { 174 uint8_t desc_pool_id; 175 uint8_t ring_id; 176 }; 177 178 /** 179 * struct dp_tx_msdu_info_s - MSDU Descriptor 180 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement 181 * @tx_queue: Tx queue on which this MSDU should be transmitted 182 * @num_seg: Number of segments (TSO) 183 * @tid: TID (override) that is sent from HLOS 184 * @u.tso_info: TSO information for TSO frame types 185 * (chain of the TSO segments, number of segments) 186 * @u.sg_info: Scatter Gather information for non-TSO SG frames 187 * @meta_data: Mesh meta header information 188 * @exception_fw: Duplicate frame to be sent to firmware 189 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions 190 * @ix_tx_sniffer: Indicates if the packet has to be sniffed 191 * @gsn: global sequence for reinjected mcast packets 192 * @vdev_id : vdev_id for reinjected mcast packets 193 * @skip_hp_update : Skip HP update for TSO segments and update in last segment 194 * 195 * This structure holds the complete MSDU information needed to program the 196 * Hardware TCL and MSDU extension descriptors for different frame types 197 * 198 */ 199 struct dp_tx_msdu_info_s { 200 enum dp_tx_frm_type frm_type; 201 struct dp_tx_queue tx_queue; 202 uint32_t num_seg; 203 uint8_t tid; 204 uint8_t exception_fw; 205 uint8_t is_tx_sniffer; 206 union { 207 struct qdf_tso_info_t 
tso_info; 208 struct dp_tx_sg_info_s sg_info; 209 } u; 210 uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS]; 211 uint16_t ppdu_cookie; 212 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 213 #ifdef WLAN_MCAST_MLO 214 uint16_t gsn; 215 uint8_t vdev_id; 216 #endif 217 #endif 218 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 219 uint8_t skip_hp_update; 220 #endif 221 }; 222 223 #ifndef QCA_HOST_MODE_WIFI_DISABLED 224 /** 225 * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index 226 * @soc: core txrx context 227 * @index: index of ring to deinit 228 * 229 * Deinit 1 TCL and 1 WBM2SW release ring on as needed basis using 230 * index of the respective TCL/WBM2SW release in soc structure. 231 * For example, if the index is 2 then &soc->tcl_data_ring[2] 232 * and &soc->tx_comp_ring[2] will be deinitialized. 233 * 234 * Return: none 235 */ 236 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index); 237 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 238 239 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool); 240 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool); 241 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool); 242 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool); 243 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc, 244 uint8_t num_pool, 245 uint32_t num_desc); 246 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc, 247 uint8_t num_pool, 248 uint32_t num_desc); 249 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc, 250 bool delayed_free); 251 void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id); 252 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc, 253 uint8_t tid, uint8_t ring_id); 254 void dp_tx_comp_process_tx_status(struct dp_soc *soc, 255 struct dp_tx_desc_s *tx_desc, 256 struct hal_tx_completion_status *ts, 257 struct dp_txrx_peer *txrx_peer, 258 
uint8_t ring_id); 259 void dp_tx_comp_process_desc(struct dp_soc *soc, 260 struct dp_tx_desc_s *desc, 261 struct hal_tx_completion_status *ts, 262 struct dp_txrx_peer *txrx_peer); 263 void dp_tx_reinject_handler(struct dp_soc *soc, 264 struct dp_vdev *vdev, 265 struct dp_tx_desc_s *tx_desc, 266 uint8_t *status, 267 uint8_t reinject_reason); 268 void dp_tx_inspect_handler(struct dp_soc *soc, 269 struct dp_vdev *vdev, 270 struct dp_tx_desc_s *tx_desc, 271 uint8_t *status); 272 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, 273 uint32_t length, uint8_t tx_status, 274 bool update); 275 276 #ifdef DP_UMAC_HW_RESET_SUPPORT 277 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf); 278 279 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 280 qdf_nbuf_t nbuf, 281 struct cdp_tx_exception_metadata *tx_exc_metadata); 282 #endif 283 #ifndef QCA_HOST_MODE_WIFI_DISABLED 284 /** 285 * dp_tso_attach() - TSO Attach handler 286 * @txrx_soc: Opaque Dp handle 287 * 288 * Reserve TSO descriptor buffers 289 * 290 * Return: QDF_STATUS_E_FAILURE on failure or 291 * QDF_STATUS_SUCCESS on success 292 */ 293 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc); 294 295 /** 296 * dp_tso_detach() - TSO Detach handler 297 * @txrx_soc: Opaque Dp handle 298 * 299 * Deallocate TSO descriptor buffers 300 * 301 * Return: QDF_STATUS_E_FAILURE on failure or 302 * QDF_STATUS_SUCCESS on success 303 */ 304 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc); 305 306 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf); 307 308 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id, 309 qdf_nbuf_t nbuf); 310 311 qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, 312 qdf_nbuf_t nbuf, 313 struct cdp_tx_exception_metadata *tx_exc); 314 315 qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc, 316 uint8_t vdev_id, 317 qdf_nbuf_t nbuf, 318 struct 
cdp_tx_exception_metadata *tx_exc); 319 320 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, 321 qdf_nbuf_t nbuf); 322 qdf_nbuf_t 323 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 324 struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id, 325 struct cdp_tx_exception_metadata *tx_exc_metadata); 326 327 #if QDF_LOCK_STATS 328 noinline qdf_nbuf_t 329 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 330 struct dp_tx_msdu_info_s *msdu_info); 331 #else 332 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 333 struct dp_tx_msdu_info_s *msdu_info); 334 #endif 335 #ifdef FEATURE_WLAN_TDLS 336 /** 337 * dp_tx_non_std() - Allow the control-path SW to send data frames 338 * @soc_hdl: Datapath soc handle 339 * @vdev_id: id of vdev 340 * @tx_spec: what non-standard handling to apply to the tx data frames 341 * @msdu_list: NULL-terminated list of tx MSDUs 342 * 343 * Return: NULL on success, 344 * nbuf when it fails to send 345 */ 346 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 347 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list); 348 #endif 349 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac); 350 351 /** 352 * dp_tx_comp_handler() - Tx completion handler 353 * @int_ctx: pointer to DP interrupt context 354 * @soc: core txrx main context 355 * @hal_srng: Opaque HAL SRNG pointer 356 * @ring_id: completion ring id 357 * @quota: No. of packets/descriptors that can be serviced in one loop 358 * 359 * This function will collect hardware release ring element contents and 360 * handle descriptor contents. 
Based on contents, free packet or handle error 361 * conditions 362 * 363 * Return: Number of TX completions processed 364 */ 365 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc, 366 hal_ring_handle_t hal_srng, uint8_t ring_id, 367 uint32_t quota); 368 369 QDF_STATUS 370 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 371 372 QDF_STATUS 373 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 374 375 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 376 377 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE) 378 static inline void dp_tx_me_exit(struct dp_pdev *pdev) 379 { 380 return; 381 } 382 #endif 383 384 /** 385 * dp_tx_pdev_init() - dp tx pdev init 386 * @pdev: physical device instance 387 * 388 * Return: QDF_STATUS_SUCCESS: success 389 * QDF_STATUS_E_RESOURCES: Error return 390 */ 391 static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev) 392 { 393 struct dp_soc *soc = pdev->soc; 394 395 /* Initialize Flow control counters */ 396 qdf_atomic_init(&pdev->num_tx_outstanding); 397 pdev->tx_descs_max = 0; 398 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { 399 /* Initialize descriptors in TCL Ring */ 400 hal_tx_init_data_ring(soc->hal_soc, 401 soc->tcl_data_ring[pdev->pdev_id].hal_srng); 402 } 403 404 return QDF_STATUS_SUCCESS; 405 } 406 407 /** 408 * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc 409 * @soc: Handle to HAL Soc structure 410 * @hal_soc: HAL SOC handle 411 * @num_avail_for_reap: descriptors available for reap 412 * @hal_ring_hdl: ring pointer 413 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor 414 * @last_prefetched_sw_desc: pointer to last prefetch SW desc 415 * 416 * Return: None 417 */ 418 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH 419 static inline 420 void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc, 421 hal_soc_handle_t hal_soc, 422 uint32_t num_avail_for_reap, 423 hal_ring_handle_t hal_ring_hdl, 424 void 
**last_prefetched_hw_desc, 425 struct dp_tx_desc_s 426 **last_prefetched_sw_desc) 427 { 428 if (*last_prefetched_sw_desc) { 429 qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf); 430 qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64); 431 } 432 433 if (num_avail_for_reap && *last_prefetched_hw_desc) { 434 soc->arch_ops.tx_comp_get_params_from_hal_desc(soc, 435 *last_prefetched_hw_desc, 436 last_prefetched_sw_desc); 437 438 if ((uintptr_t)*last_prefetched_hw_desc & 0x3f) 439 *last_prefetched_hw_desc = 440 hal_srng_dst_prefetch_next_cached_desc( 441 hal_soc, 442 hal_ring_hdl, 443 (uint8_t *)*last_prefetched_hw_desc); 444 else 445 *last_prefetched_hw_desc = 446 hal_srng_dst_get_next_32_byte_desc(hal_soc, 447 hal_ring_hdl, 448 (uint8_t *)*last_prefetched_hw_desc); 449 } 450 } 451 #else 452 static inline 453 void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc, 454 hal_soc_handle_t hal_soc, 455 uint32_t num_avail_for_reap, 456 hal_ring_handle_t hal_ring_hdl, 457 void **last_prefetched_hw_desc, 458 struct dp_tx_desc_s 459 **last_prefetched_sw_desc) 460 { 461 } 462 #endif 463 464 #ifndef FEATURE_WDS 465 static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status) 466 { 467 return; 468 } 469 #endif 470 471 #ifndef QCA_MULTIPASS_SUPPORT 472 static inline 473 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev, 474 qdf_nbuf_t nbuf, 475 struct dp_tx_msdu_info_s *msdu_info) 476 { 477 return true; 478 } 479 480 static inline 481 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev) 482 { 483 } 484 485 #else 486 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev, 487 qdf_nbuf_t nbuf, 488 struct dp_tx_msdu_info_s *msdu_info); 489 490 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev); 491 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 492 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev, 493 struct dp_tx_msdu_info_s *msdu_info, 494 uint16_t group_key); 495 #endif 496 497 /** 
498 * dp_tx_hw_to_qdf()- convert hw status to qdf status 499 * @status: hw status 500 * 501 * Return: qdf tx rx status 502 */ 503 static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status) 504 { 505 switch (status) { 506 case HAL_TX_TQM_RR_FRAME_ACKED: 507 return QDF_TX_RX_STATUS_OK; 508 case HAL_TX_TQM_RR_REM_CMD_TX: 509 return QDF_TX_RX_STATUS_NO_ACK; 510 case HAL_TX_TQM_RR_REM_CMD_REM: 511 case HAL_TX_TQM_RR_REM_CMD_NOTX: 512 case HAL_TX_TQM_RR_REM_CMD_AGED: 513 return QDF_TX_RX_STATUS_FW_DISCARD; 514 default: 515 return QDF_TX_RX_STATUS_DEFAULT; 516 } 517 } 518 519 #ifndef QCA_HOST_MODE_WIFI_DISABLED 520 /** 521 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame 522 * @vdev: DP Virtual device handle 523 * @nbuf: Buffer pointer 524 * @queue: queue ids container for nbuf 525 * 526 * TX packet queue has 2 instances, software descriptors id and dma ring id 527 * Based on tx feature and hardware configuration queue id combination could be 528 * different. 
529 * For example - 530 * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id 531 * With no XPS,lock based resource protection, Descriptor pool ids are different 532 * for each vdev, dma ring id will be same as single pdev id 533 * 534 * Return: None 535 */ 536 #ifdef QCA_OL_TX_MULTIQ_SUPPORT 537 static inline void dp_tx_get_queue(struct dp_vdev *vdev, 538 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) 539 { 540 queue->ring_id = qdf_get_cpu(); 541 queue->desc_pool_id = queue->ring_id; 542 } 543 544 /* 545 * dp_tx_get_hal_ring_hdl()- Get the hal_tx_ring_hdl for data transmission 546 * @dp_soc - DP soc structure pointer 547 * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled 548 * 549 * Return - HAL ring handle 550 */ 551 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc, 552 uint8_t ring_id) 553 { 554 if (ring_id == soc->num_tcl_data_rings) 555 return soc->tcl_cmd_credit_ring.hal_srng; 556 557 return soc->tcl_data_ring[ring_id].hal_srng; 558 } 559 560 #else /* QCA_OL_TX_MULTIQ_SUPPORT */ 561 562 #ifdef TX_MULTI_TCL 563 #ifdef IPA_OFFLOAD 564 static inline void dp_tx_get_queue(struct dp_vdev *vdev, 565 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) 566 { 567 /* get flow id */ 568 queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev); 569 if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled) 570 queue->ring_id = DP_TX_GET_RING_ID(vdev); 571 else 572 queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) % 573 vdev->pdev->soc->num_tcl_data_rings); 574 } 575 #else 576 static inline void dp_tx_get_queue(struct dp_vdev *vdev, 577 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) 578 { 579 /* get flow id */ 580 queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev); 581 queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) % 582 vdev->pdev->soc->num_tcl_data_rings); 583 } 584 #endif 585 #else 586 static inline void dp_tx_get_queue(struct dp_vdev *vdev, 587 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) 588 { 589 /* get flow id */ 590 
queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev); 591 queue->ring_id = DP_TX_GET_RING_ID(vdev); 592 } 593 #endif 594 595 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc, 596 uint8_t ring_id) 597 { 598 return soc->tcl_data_ring[ring_id].hal_srng; 599 } 600 #endif 601 602 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS 603 /* 604 * dp_tx_hal_ring_access_start()- hal_tx_ring access for data transmission 605 * @dp_soc - DP soc structure pointer 606 * @hal_ring_hdl - HAL ring handle 607 * 608 * Return - None 609 */ 610 static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc, 611 hal_ring_handle_t hal_ring_hdl) 612 { 613 return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl); 614 } 615 616 /* 617 * dp_tx_hal_ring_access_end()- hal_tx_ring access for data transmission 618 * @dp_soc - DP soc structure pointer 619 * @hal_ring_hdl - HAL ring handle 620 * 621 * Return - None 622 */ 623 static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc, 624 hal_ring_handle_t hal_ring_hdl) 625 { 626 hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl); 627 } 628 629 /* 630 * dp_tx_hal_ring_access_reap()- hal_tx_ring access for data transmission 631 * @dp_soc - DP soc structure pointer 632 * @hal_ring_hdl - HAL ring handle 633 * 634 * Return - None 635 */ 636 static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc, 637 hal_ring_handle_t 638 hal_ring_hdl) 639 { 640 } 641 642 #else 643 static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc, 644 hal_ring_handle_t hal_ring_hdl) 645 { 646 return hal_srng_access_start(soc->hal_soc, hal_ring_hdl); 647 } 648 649 static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc, 650 hal_ring_handle_t hal_ring_hdl) 651 { 652 hal_srng_access_end(soc->hal_soc, hal_ring_hdl); 653 } 654 655 static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc, 656 hal_ring_handle_t 657 hal_ring_hdl) 658 { 659 hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl); 660 } 661 #endif 662 663 
#ifdef ATH_TX_PRI_OVERRIDE
/* Override the MSDU's TID with the nbuf priority when enabled */
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif

/* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	return;
}
/* TODO TX_FEATURE_NOT_YET */

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free);
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type);
#else /* QCA_HOST_MODE_WIFI_DISABLED */

/* Host-mode-disabled stubs: descriptor pools are not used, so all
 * alloc/init paths report success and free/deinit paths are no-ops.
 */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
	defined(QCA_TX_CAPTURE_SUPPORT) || \
	defined(QCA_MCOPY_SUPPORT)
#ifdef FEATURE_PERPKT_INFO
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency);

void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf);
#endif
#else
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc,
				       struct hal_tx_completion_status *ts);
#else
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Returns: none
 */
void dp_tx_update_stats(struct dp_soc *soc,
			struct dp_tx_desc_s *tx_desc,
			uint8_t ring_id);

/**
 * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
 * @soc: Datapath soc handle
 * @vdev: DP vdev handle
 * @tx_desc: tx packet descriptor
 * @tid: TID for pkt transmission
 * @msdu_info: MSDU info of tx packet
 * @ring_id: TCL ring id
 *
 * Returns: 1, if coalescing is to be done
 *          0, if coalescing is not to be done
 */
int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id);

/**
 * dp_tx_ring_access_end() - HAL ring access end for data transmission
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Returns: none
 */
void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce);
#else
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Returns: none
 */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id){ }

static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

/* No SW latency manager: never coalesce */
static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	return 0;
}

#endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */

#ifdef FEATURE_RUNTIME_PM
/**
 * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
 * @soc_hdl: DP soc handle
 * @is_high_tput: flag to indicate whether throughput is high
 *
 * Returns: none
 */
static inline
void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
					 bool is_high_tput)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
}

void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce);
#else
#ifdef DP_POWER_SAVE
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce);
#else
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
#endif

static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef DP_TX_HW_DESC_HISTORY
/* Record the posted TCL descriptor into the per-soc HW descriptor history
 * ring buffer along with timestamp, ring id and SW head/tail pointers.
 */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	if (!tx_hw_desc_history->allocated)
		return;

	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = ring_id;
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
#else
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
}
#endif

#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
/**
 * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
 * @ts: Tx completion status
 * @delta_tsf: Difference between TSF clock and qtimer
 * @delay_us: Delay in microseconds
 *
 * Return: QDF_STATUS_SUCCESS   : Success
 *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
 *                                delay_us is NULL
 *         QDF_STATUS_E_FAILURE : Error in delay calculation
 */
QDF_STATUS
dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
			  uint32_t delta_tsf,
			  uint32_t *delay_us);

/**
 * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @delta_tsf: difference between TSF clock and qtimer
 *
 * Return: None
 */
void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint32_t delta_tsf);
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
/**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @enable: true to enable and false to disable
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, bool enable);

/**
 * dp_get_uplink_delay() - Get uplink delay value
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @val: pointer to save uplink delay value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */

/**
 * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
 *
 * Return: True if any tx pkt tracepoint is enabled else false
 */
static inline
bool dp_tx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_pkt_enabled());
}

#ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
static inline
void
dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc) 1011 { 1012 tx_desc->timestamp_tick = qdf_system_ticks(); 1013 } 1014 1015 /** 1016 * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor 1017 * @tx_desc: tx descriptor 1018 * 1019 * Check for corruption in tx descriptor, if magic pattern is not matching 1020 * trigger self recovery 1021 * 1022 * Return: none 1023 */ 1024 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc); 1025 #else 1026 static inline 1027 void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc) 1028 { 1029 } 1030 1031 static inline 1032 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc) 1033 { 1034 } 1035 #endif 1036 1037 #ifndef CONFIG_SAWF 1038 static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf) 1039 { 1040 return false; 1041 } 1042 #endif 1043 1044 #ifdef HW_TX_DELAY_STATS_ENABLE 1045 /** 1046 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor 1047 * @vdev: DP vdev handle 1048 * @tx_desc: tx descriptor 1049 * 1050 * Return: true when descriptor is timestamped, false otherwise 1051 */ 1052 static inline 1053 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev, 1054 struct dp_tx_desc_s *tx_desc) 1055 { 1056 if (qdf_unlikely(vdev->pdev->delay_stats_flag) || 1057 qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) || 1058 qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) || 1059 qdf_unlikely(vdev->pdev->soc->peerstats_enabled) || 1060 qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) { 1061 tx_desc->timestamp = qdf_ktime_real_get(); 1062 return true; 1063 } 1064 return false; 1065 } 1066 #else 1067 static inline 1068 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev, 1069 struct dp_tx_desc_s *tx_desc) 1070 { 1071 if (qdf_unlikely(vdev->pdev->delay_stats_flag) || 1072 qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) || 1073 qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) || 1074 qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) { 1075 
tx_desc->timestamp = qdf_ktime_real_get(); 1076 return true; 1077 } 1078 return false; 1079 } 1080 #endif 1081 1082 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP 1083 /** 1084 * dp_pkt_add_timestamp() - add timestamp in data payload 1085 * 1086 * @vdev: dp vdev 1087 * @index: index to decide offset in payload 1088 * @time: timestamp to add in data payload 1089 * @nbuf: network buffer 1090 * 1091 * Return: none 1092 */ 1093 void dp_pkt_add_timestamp(struct dp_vdev *vdev, 1094 enum qdf_pkt_timestamp_index index, uint64_t time, 1095 qdf_nbuf_t nbuf); 1096 /** 1097 * dp_pkt_get_timestamp() - get current system time 1098 * 1099 * @time: return current system time 1100 * 1101 * Return: none 1102 */ 1103 void dp_pkt_get_timestamp(uint64_t *time); 1104 #else 1105 #define dp_pkt_add_timestamp(vdev, index, time, nbuf) 1106 1107 static inline 1108 void dp_pkt_get_timestamp(uint64_t *time) 1109 { 1110 } 1111 #endif 1112 1113 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS 1114 /** 1115 * dp_update_tx_desc_stats - Update the increase or decrease in 1116 * outstanding tx desc count 1117 * values on pdev and soc 1118 * @vdev: DP pdev handle 1119 * 1120 * Return: void 1121 */ 1122 static inline void 1123 dp_update_tx_desc_stats(struct dp_pdev *pdev) 1124 { 1125 int32_t tx_descs_cnt = 1126 qdf_atomic_read(&pdev->num_tx_outstanding); 1127 if (pdev->tx_descs_max < tx_descs_cnt) 1128 pdev->tx_descs_max = tx_descs_cnt; 1129 qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding, 1130 pdev->tx_descs_max); 1131 } 1132 1133 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */ 1134 1135 static inline void 1136 dp_update_tx_desc_stats(struct dp_pdev *pdev) 1137 { 1138 } 1139 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */ 1140 1141 #ifdef QCA_TX_LIMIT_CHECK 1142 /** 1143 * dp_tx_limit_check - Check if allocated tx descriptors reached 1144 * soc max limit and pdev max limit 1145 * @vdev: DP vdev handle 1146 * 1147 * Return: true if allocated tx descriptors reached max configured value, else 1148 * false 1149 */ 1150 static inline bool 
1151 dp_tx_limit_check(struct dp_vdev *vdev) 1152 { 1153 struct dp_pdev *pdev = vdev->pdev; 1154 struct dp_soc *soc = pdev->soc; 1155 1156 if (qdf_atomic_read(&soc->num_tx_outstanding) >= 1157 soc->num_tx_allowed) { 1158 dp_tx_info("queued packets are more than max tx, drop the frame"); 1159 DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); 1160 return true; 1161 } 1162 1163 if (qdf_atomic_read(&pdev->num_tx_outstanding) >= 1164 pdev->num_tx_allowed) { 1165 dp_tx_info("queued packets are more than max tx, drop the frame"); 1166 DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); 1167 DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1); 1168 return true; 1169 } 1170 return false; 1171 } 1172 1173 /** 1174 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors 1175 * reached soc max limit 1176 * @vdev: DP vdev handle 1177 * 1178 * Return: true if allocated tx descriptors reached max configured value, else 1179 * false 1180 */ 1181 static inline bool 1182 dp_tx_exception_limit_check(struct dp_vdev *vdev) 1183 { 1184 struct dp_pdev *pdev = vdev->pdev; 1185 struct dp_soc *soc = pdev->soc; 1186 1187 if (qdf_atomic_read(&soc->num_tx_exception) >= 1188 soc->num_msdu_exception_desc) { 1189 dp_info("exc packets are more than max drop the exc pkt"); 1190 DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1); 1191 return true; 1192 } 1193 1194 return false; 1195 } 1196 1197 /** 1198 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc 1199 * @vdev: DP pdev handle 1200 * 1201 * Return: void 1202 */ 1203 static inline void 1204 dp_tx_outstanding_inc(struct dp_pdev *pdev) 1205 { 1206 struct dp_soc *soc = pdev->soc; 1207 1208 qdf_atomic_inc(&pdev->num_tx_outstanding); 1209 qdf_atomic_inc(&soc->num_tx_outstanding); 1210 dp_update_tx_desc_stats(pdev); 1211 } 1212 1213 /** 1214 * dp_tx_outstanding__dec - Decrement outstanding tx desc values on pdev and soc 1215 * @vdev: DP pdev handle 1216 * 1217 * Return: void 1218 */ 1219 static 
inline void 1220 dp_tx_outstanding_dec(struct dp_pdev *pdev) 1221 { 1222 struct dp_soc *soc = pdev->soc; 1223 1224 qdf_atomic_dec(&pdev->num_tx_outstanding); 1225 qdf_atomic_dec(&soc->num_tx_outstanding); 1226 dp_update_tx_desc_stats(pdev); 1227 } 1228 1229 #else //QCA_TX_LIMIT_CHECK 1230 static inline bool 1231 dp_tx_limit_check(struct dp_vdev *vdev) 1232 { 1233 return false; 1234 } 1235 1236 static inline bool 1237 dp_tx_exception_limit_check(struct dp_vdev *vdev) 1238 { 1239 return false; 1240 } 1241 1242 static inline void 1243 dp_tx_outstanding_inc(struct dp_pdev *pdev) 1244 { 1245 qdf_atomic_inc(&pdev->num_tx_outstanding); 1246 dp_update_tx_desc_stats(pdev); 1247 } 1248 1249 static inline void 1250 dp_tx_outstanding_dec(struct dp_pdev *pdev) 1251 { 1252 qdf_atomic_dec(&pdev->num_tx_outstanding); 1253 dp_update_tx_desc_stats(pdev); 1254 } 1255 #endif //QCA_TX_LIMIT_CHECK 1256 #endif 1257