/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_TX_H
#define __DP_TX_H

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "dp_types.h"

#define DP_TX_MAX_NUM_FRAGS 6

#define DP_TX_DESC_FLAG_SIMPLE		0x1
#define DP_TX_DESC_FLAG_TO_FW		0x2
#define DP_TX_DESC_FLAG_FRAG		0x4
#define DP_TX_DESC_FLAG_RAW		0x8
#define DP_TX_DESC_FLAG_MESH		0x10
#define DP_TX_DESC_FLAG_QUEUED_TX	0x20
#define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
#define DP_TX_DESC_FLAG_ME		0x80
#define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
#define DP_TX_DESC_FLAG_ALLOCATED	0x200

#define DP_TX_FREE_SINGLE_BUF(soc, buf)                            \
do {                                                               \
	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);       \
	qdf_nbuf_free(buf);                                        \
} while (0)

#define OCB_HEADER_VERSION	1

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */
#define DP_TX_QUEUE_MASK 0x3

/**
 * struct dp_tx_frag_info_s - TX fragment information
 * @vaddr: HLOS virtual address of the buffer
 * @paddr_lo: lower 32 bits of the buffer's physical address
 * @paddr_hi: upper bits of the buffer's physical address
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};

/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
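/*
 * Illustrative sketch (not part of the driver API): how a fragment entry
 * could be filled from a DMA-mapped nbuf. The "nbuf" variable is an assumed
 * placeholder; the lo/hi split mirrors the field widths declared above.
 *
 *	struct dp_tx_frag_info_s frag;
 *	qdf_dma_addr_t paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *
 *	frag.vaddr    = qdf_nbuf_data(nbuf);
 *	frag.paddr_lo = (uint32_t)(paddr & 0xffffffff);
 *	frag.paddr_hi = (uint16_t)((paddr >> 32) & 0xffff);
 *	frag.len      = (uint16_t)qdf_nbuf_len(nbuf);
 */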
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *            descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw frames are handed over to the driver as an SKB chain with MPDU
 * boundaries indicated through flags in the SKB CB (first_msdu and
 * last_msdu). This is converted into a set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};

/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};

/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @u.tso_info: TSO information for TSO frame types
 *              (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @exception_fw: Duplicate frame to be sent to firmware
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[7];
	uint8_t exception_fw;
	uint16_t ppdu_cookie;
	uint8_t is_tx_sniffer;
};
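/*
 * Illustrative sketch: minimal setup of an MSDU descriptor for a regular
 * (non-TSO, non-SG) frame before calling dp_tx_send_msdu_single(), declared
 * below. The vdev/nbuf variables and the use of HTT_TX_EXT_TID_INVALID as
 * the "no TID override" sentinel are assumptions drawn from typical
 * datapath usage, not guarantees of this header.
 *
 *	struct dp_tx_msdu_info_s msdu_info;
 *
 *	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *	msdu_info.frm_type = dp_tx_frm_std;
 *	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 */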
/**
 * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
 * @soc: core txrx context
 * @index: index of ring to deinit
 *
 * Deinit 1 TCL and 1 WBM2SW release ring on an as-needed basis using
 * the index of the respective TCL/WBM2SW release ring in the soc structure.
 * For example, if the index is 2 then &soc->tcl_data_ring[2]
 * and &soc->tx_comp_ring[2] will be deinitialized.
 *
 * Return: none
 */
void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);

QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc);
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc);
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);

void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);

/**
 * dp_tso_soc_attach() - TSO Attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);

/**
 * dp_tso_soc_detach() - TSO Detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);

QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev);

qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);

qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
				qdf_nbuf_t nbuf,
				struct cdp_tx_exception_metadata *tx_exc);
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf);
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata);

#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			 struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info);
#endif
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
#endif
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac,
			uint8_t *dstmac);
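/*
 * Illustrative sketch of the dp_tx_send() contract declared above: on
 * success the nbuf is consumed by the driver and NULL is returned; on
 * failure the original nbuf is handed back so the caller can free it
 * (the same convention dp_tx_non_std() documents).
 *
 *	qdf_nbuf_t ret = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *
 *	if (ret)
 *		qdf_nbuf_free(ret);
 */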
/**
 * dp_tx_comp_handler() - Tx completion handler
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_srng: Opaque HAL SRNG pointer
 * @ring_id: completion ring id
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on the contents, it frees the packet or
 * handles error conditions.
 *
 * Return: Number of TX completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_srng, uint8_t ring_id,
			    uint32_t quota);

QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifndef FEATURE_WDS
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
#endif

#ifndef ATH_SUPPORT_IQUE
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}

#else
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info);

void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
#endif

/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * A TX queue is identified by two IDs: the software descriptor pool id and
 * the DMA (TCL) ring id. Depending on the tx feature set and hardware
 * configuration, the combination of queue ids differs.
 * For example -
 * With XPS enabled, all TX descriptor pools and DMA rings are assigned per
 * CPU id.
 * With no XPS (lock based resource protection), descriptor pool ids are
 * different for each vdev, and the DMA ring id is the single pdev id.
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) &
				DP_TX_QUEUE_MASK;

	queue->desc_pool_id = queue_offset;
	queue->ring_id = qdf_get_cpu();

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s, pool_id:%d ring_id: %d",
		  __func__, queue->desc_pool_id, queue->ring_id);
}

/**
 * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring id to be used when XPS is enabled
 *
 * Return: HAL ring handle
 */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
							uint8_t ring_id)
{
	if (ring_id == soc->num_tcl_data_rings)
		return soc->tcl_cmd_credit_ring.hal_srng;

	return soc->tcl_data_ring[ring_id].hal_srng;
}
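/*
 * Illustrative sketch: with QCA_OL_TX_MULTIQ_SUPPORT, an nbuf whose skb
 * queue_mapping is 5 lands in desc_pool_id (5 & DP_TX_QUEUE_MASK) = 1,
 * while ring_id follows the CPU the caller runs on. The vdev/nbuf/soc
 * variables are assumed caller context.
 *
 *	struct dp_tx_queue queue;
 *	hal_ring_handle_t hdl;
 *
 *	dp_tx_get_queue(vdev, nbuf, &queue);
 *	hdl = dp_tx_get_hal_ring_hdl(soc, queue.ring_id);
 */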
/**
 * dp_tx_get_rbm_id() - Get the RBM ID for data transmission completion
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring id to be used when XPS is enabled
 *
 * Return: RBM ID of the WBM2SW release ring used for completions
 */
static inline uint8_t dp_tx_get_rbm_id(struct dp_soc *soc,
				       uint8_t ring_id)
{
	return (ring_id ? HAL_WBM_SW0_BM_ID + (ring_id - 1) :
			  HAL_WBM_SW2_BM_ID);
}

#else /* QCA_OL_TX_MULTIQ_SUPPORT */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s, pool_id:%d ring_id: %d",
		  __func__, queue->desc_pool_id, queue->ring_id);
}

static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
							uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}

static inline uint8_t dp_tx_get_rbm_id(struct dp_soc *soc,
				       uint8_t ring_id)
{
	return (ring_id + HAL_WBM_SW0_BM_ID);
}
#endif /* QCA_OL_TX_MULTIQ_SUPPORT */

#ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success; non-zero if the ring could not be accessed
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}

#else
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
#endif

#ifdef FEATURE_PERPKT_INFO
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency);

void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf);
#endif

void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl);

#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif

void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type);

/* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
}
/* TODO TX_FEATURE_NOT_YET */
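/*
 * Illustrative sketch: the dp_tx_hal_ring_access_*() helpers declared
 * earlier in this header are meant to be paired around TCL descriptor
 * programming. With QCA_OL_TX_LOCK_LESS_ACCESS they map to the unlocked
 * hal_srng variants; otherwise they take the SRNG lock. Returning the nbuf
 * on the error path follows the dp_tx_send() convention of handing unsent
 * frames back to the caller.
 *
 *	hal_ring_handle_t hal_ring_hdl =
 *		dp_tx_get_hal_ring_hdl(soc, msdu_info->tx_queue.ring_id);
 *
 *	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl)))
 *		return nbuf;
 *	... program the TCL descriptor ...
 *	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
 */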
#ifndef WLAN_TX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_tx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  uint8_t value, uint8_t *peer_mac)
{
	return QDF_STATUS_SUCCESS;
}
#endif
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free);
#endif /* __DP_TX_H */