/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/*
 * Software Tx descriptor cookie layout, 21 bits total:
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* ???Ring ID needed??? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0

/*
 * Compilation assert on tx desc size
 *
 * if assert is hit please update POOL_MASK,
 * PAGE_MASK according to updated size
 *
 * for current PAGE mask allowed size range of tx_desc
 * is between 128 and 256
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
			((sizeof(struct dp_tx_desc_s)) <=
			 (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
			((sizeof(struct dp_tx_desc_s)) >
			 (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
		       );

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
 * With flow-control V2 each flow pool carries its own flow_pool_lock,
 * so the per-pool TX_DESC_LOCK_* wrappers compile out to nothing.
 */
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do {                                             \
	(_tx_desc_pool)->elem_size = 0;          \
	(_tx_desc_pool)->freelist = NULL;        \
	(_tx_desc_pool)->pool_size = 0;          \
	(_tx_desc_pool)->avail_desc = 0;         \
	(_tx_desc_pool)->start_th = 0;           \
	(_tx_desc_pool)->stop_th = 0;            \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/* Without flow-control V2 the pool is protected by a plain BH spinlock. */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do {                                             \
	(_tx_desc_pool)->elem_size = 0;          \
	(_tx_desc_pool)->num_allocated = 0;      \
	(_tx_desc_pool)->freelist = NULL;        \
	(_tx_desc_pool)->elem_count = 0;         \
	(_tx_desc_pool)->num_free = 0;           \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

#ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_magic() - stamp a debug magic pattern into the descriptor
 * @tx_desc: descriptor to stamp
 * @magic_pattern: pattern marking the descriptor state (in-use / free)
 */
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
	tx_desc->magic = magic_pattern;
}
#else
/* Tracking disabled: compiles out to nothing. */
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large
 * since based on number of clients (1024 clients x 3 radios),
 * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly
 * large.
 *
 * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf
 * function to allocate memory
 * in multiple pages. It then iterates through the memory allocated across pages
 * and links each descriptor
 * to next descriptor, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated,
 * one for each ring;
 * This minimizes lock contention when hard_start_xmit is called
 * from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV level
 * flow control.
 *
 * Return: Status code. 0 for success.
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem);

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem);

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that tracks the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that tracks the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that tracks the
 *                                   fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
				     tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
						 uint8_t flow_pool_id,
						 uint32_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
				       uint8_t flow_type, uint8_t flow_pool_id,
				       uint32_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
				   uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			      struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	/* per-AC threshold arrays, not scalars as in the non-AC variant */
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if current avail desc meet threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	/* Exactly hitting any per-AC stop threshold triggers a pause. */
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
 * @soc: dp soc
 * @pool: flow pool
 *
 * Recomputes the pool status from avail_desc against the per-AC stop
 * thresholds, then issues pause callbacks for every queue tier at or
 * below the resulting state (intentional switch fallthrough).
 */
static inline void
dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
			     struct dp_tx_desc_pool_s *pool)
{
	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		return;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
		pool->status = FLOW_POOL_BE_BK_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
		pool->status = FLOW_POOL_VI_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_VO_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_ACTIVE_PAUSED;
	}

	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_PRI);
		fallthrough;

	case FLOW_POOL_VO_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VO_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VO);
		fallthrough;

	case FLOW_POOL_VI_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VI_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VI);
		fallthrough;

	case FLOW_POOL_BE_BK_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_BE_BK_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_BE_BK);
		break;
	default:
		dp_err("Invalid pool status:%u to adjust", pool->status);
	}
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
	enum netif_reason_type reason;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc &&
			       pool->status != FLOW_POOL_INVALID &&
			       pool->status != FLOW_POOL_INACTIVE)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(pool->status ==
					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
				/* reattached pool: recompute state instead of
				 * stepping one tier
				 */
				dp_tx_adjust_flow_pool_state(soc, pool);
				is_pause = false;
			}

			if (qdf_unlikely(is_pause)) {
				/* step the pool one pause tier deeper */
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE\BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VI;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VO;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_PRI;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      reason);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		dp_err_rl("NULL desc pool pool_id %d", desc_pool_id);
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Returning the descriptor may cross a per-AC start threshold, in which
 * case the corresponding netif queue is resumed (one tier per free).
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
	enum netif_reason_type reason;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_PRI;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VO;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VI;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_NETIF_BE_BK_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		/* deleted pool: release it once the last descriptor returns */
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	};

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, reason);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

/* Non-AC variant: single scalar stop threshold for the whole pool. */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: Tx descriptor or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					      WLAN_STOP_ALL_NETIF_QUEUE,
					      WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: Descriptor to free
 * @desc_pool_id: Descriptor pool Id
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		/* resume all queues once above the start threshold */
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				      WLAN_WAKE_ALL_NETIF_QUEUE,
				      WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		/* deleted pool: release it once the last descriptor returns */
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	};

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

/*
 * dp_tx_desc_thresh_reached() - check the vdev's pool against its
 * flow-control threshold; returns false when the vdev cannot be
 * referenced.
 */
static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_tx_desc_pool_s *pool;
	bool status;

	if (!vdev)
		return false;

	pool = vdev->pool;
	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return status;
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

/* Flow-control V2 disabled: the handlers below are no-op stubs. */
static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint32_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
/* Warm the cache line of the next descriptor to be allocated. */
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc)
		prefetch(tx_desc);
}
#else
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id
 *
 * Return: Tx Descriptor or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						    uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;
	dp_tx_prefetch_desc(pool->freelist);

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 *                               from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id should pick up
 * @num_requested: number of required descriptor
 *
 * allocate multiple tx descriptor and make a link
 *
 * Return: first descriptor pointer or NULL
862 */ 863 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple( 864 struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested) 865 { 866 struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL; 867 uint8_t count; 868 struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id]; 869 870 TX_DESC_LOCK_LOCK(&pool->lock); 871 872 if ((num_requested == 0) || 873 (pool->num_free < num_requested)) { 874 TX_DESC_LOCK_UNLOCK(&pool->lock); 875 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 876 "%s, No Free Desc: Available(%d) num_requested(%d)", 877 __func__, pool->num_free, 878 num_requested); 879 return NULL; 880 } 881 882 h_desc = pool->freelist; 883 884 /* h_desc should never be NULL since num_free > requested */ 885 qdf_assert_always(h_desc); 886 887 c_desc = h_desc; 888 for (count = 0; count < (num_requested - 1); count++) { 889 c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; 890 c_desc = c_desc->next; 891 } 892 pool->num_free -= count; 893 pool->num_allocated += count; 894 pool->freelist = c_desc->next; 895 c_desc->next = NULL; 896 897 TX_DESC_LOCK_UNLOCK(&pool->lock); 898 return h_desc; 899 } 900 901 /** 902 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list 903 * @soc: Handle to DP SoC structure 904 * @tx_desc: descriptor to free 905 * @desc_pool_id: ID of the free pool 906 */ 907 static inline void 908 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, 909 uint8_t desc_pool_id) 910 { 911 struct dp_tx_desc_pool_s *pool = NULL; 912 tx_desc->vdev_id = DP_INVALID_VDEV_ID; 913 tx_desc->nbuf = NULL; 914 tx_desc->flags = 0; 915 916 pool = &soc->tx_desc[desc_pool_id]; 917 TX_DESC_LOCK_LOCK(&pool->lock); 918 tx_desc->next = pool->freelist; 919 pool->freelist = tx_desc; 920 pool->num_allocated--; 921 pool->num_free++; 922 TX_DESC_LOCK_UNLOCK(&pool->lock); 923 } 924 925 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 926 927 #ifdef QCA_DP_TX_DESC_ID_CHECK 928 /** 929 * dp_tx_is_desc_id_valid() - check is the tx desc id valid 930 * @soc: Handle 
to DP SoC structure
 * @tx_desc_id: tx descriptor cookie (pool/page/offset bit-fields)
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/*
	 * Pool ID is out of limit.
	 * NOTE(review): pool ids are 0-based while the helper returns a
	 * count, so '>' lets pool_id == num_pools slip through — confirm
	 * whether '>=' is intended here.
	 */
	if (pool_id > wlan_cfg_get_num_tx_desc_pool(
			soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
			DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
			DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
/*
 * Mark the descriptor eligible for the simple (fast) completion path,
 * unless it is already flagged for FW consumption.
 */
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
	    qdf_likely(allow_fast_comp)) {
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
	}
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool id
 * @page_id: page index within the pool
 * @offset: element index within the page
 *
 * Use page and offset to find the corresponding descriptor object in
 * the given descriptor pool.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: allocated element, or NULL when the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
						   uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor pointer should release
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return;
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptor and
 *                                  attach it to free list
 * @soc: Handle to DP SoC structure
 * @elem: tx descriptor should be freed
 * @desc_pool_id: pool id should pick up
 * @num_free: number of descriptors should be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
	uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee atleast list of num_free nodes */
	qdf_assert_always(elem);

	/* walk to the num_free-th node; it becomes the tail of the chain */
	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee atleast list of num_free nodes */
	qdf_assert_always(tail);

qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 1131 tail->next = soc->tx_ext_desc[desc_pool_id].freelist; 1132 soc->tx_ext_desc[desc_pool_id].freelist = head; 1133 soc->tx_ext_desc[desc_pool_id].num_free += num_free; 1134 qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 1135 1136 return; 1137 } 1138 1139 #if defined(FEATURE_TSO) 1140 /** 1141 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment 1142 * @soc: device soc instance 1143 * @pool_id: pool id should pick up tso descriptor 1144 * 1145 * Allocates a TSO segment element from the free list held in 1146 * the soc 1147 * 1148 * Return: tso_seg, tso segment memory pointer 1149 */ 1150 static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc( 1151 struct dp_soc *soc, uint8_t pool_id) 1152 { 1153 struct qdf_tso_seg_elem_t *tso_seg = NULL; 1154 1155 qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock); 1156 if (soc->tx_tso_desc[pool_id].freelist) { 1157 soc->tx_tso_desc[pool_id].num_free--; 1158 tso_seg = soc->tx_tso_desc[pool_id].freelist; 1159 soc->tx_tso_desc[pool_id].freelist = 1160 soc->tx_tso_desc[pool_id].freelist->next; 1161 } 1162 qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock); 1163 1164 return tso_seg; 1165 } 1166 1167 /** 1168 * dp_tx_tso_desc_free() - function to free a TSO segment 1169 * @soc: device soc instance 1170 * @pool_id: pool id should pick up tso descriptor 1171 * @tso_seg: tso segment memory pointer 1172 * 1173 * Returns a TSO segment element to the free list held in the 1174 * HTT pdev 1175 * 1176 * Return: none 1177 */ 1178 static inline void dp_tx_tso_desc_free(struct dp_soc *soc, 1179 uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg) 1180 { 1181 qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock); 1182 tso_seg->next = soc->tx_tso_desc[pool_id].freelist; 1183 soc->tx_tso_desc[pool_id].freelist = tso_seg; 1184 soc->tx_tso_desc[pool_id].num_free++; 1185 qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock); 1186 } 1187 1188 static inline 1189 struct 
qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc, 1190 uint8_t pool_id) 1191 { 1192 struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL; 1193 1194 qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock); 1195 if (soc->tx_tso_num_seg[pool_id].freelist) { 1196 soc->tx_tso_num_seg[pool_id].num_free--; 1197 tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist; 1198 soc->tx_tso_num_seg[pool_id].freelist = 1199 soc->tx_tso_num_seg[pool_id].freelist->next; 1200 } 1201 qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock); 1202 1203 return tso_num_seg; 1204 } 1205 1206 static inline 1207 void dp_tso_num_seg_free(struct dp_soc *soc, 1208 uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg) 1209 { 1210 qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock); 1211 tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist; 1212 soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg; 1213 soc->tx_tso_num_seg[pool_id].num_free++; 1214 qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock); 1215 } 1216 #endif 1217 1218 /** 1219 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool 1220 * @pdev: DP_PDEV handle for datapath 1221 * 1222 * Return: tx descriptor on success, NULL on error 1223 */ 1224 static inline struct dp_tx_me_buf_t* 1225 dp_tx_me_alloc_buf(struct dp_pdev *pdev) 1226 { 1227 struct dp_tx_me_buf_t *buf = NULL; 1228 qdf_spin_lock_bh(&pdev->tx_mutex); 1229 if (pdev->me_buf.freelist) { 1230 buf = pdev->me_buf.freelist; 1231 pdev->me_buf.freelist = pdev->me_buf.freelist->next; 1232 pdev->me_buf.buf_in_use++; 1233 } else { 1234 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1235 "Error allocating memory in pool"); 1236 qdf_spin_unlock_bh(&pdev->tx_mutex); 1237 return NULL; 1238 } 1239 qdf_spin_unlock_bh(&pdev->tx_mutex); 1240 return buf; 1241 } 1242 1243 /** 1244 * dp_tx_me_free_buf() - Unmap the buffer holding the dest 1245 * address, free me descriptor and add it to the free-pool 1246 * @pdev: DP_PDEV handle for datapath 1247 * @buf : Allocated ME BUF 
1248 * 1249 * Return:void 1250 */ 1251 static inline void 1252 dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf) 1253 { 1254 /* 1255 * If the buf containing mac address was mapped, 1256 * it must be unmapped before freeing the me_buf. 1257 * The "paddr_macbuf" member in the me_buf structure 1258 * holds the mapped physical address and it must be 1259 * set to 0 after unmapping. 1260 */ 1261 if (buf->paddr_macbuf) { 1262 qdf_mem_unmap_nbytes_single(pdev->soc->osdev, 1263 buf->paddr_macbuf, 1264 QDF_DMA_TO_DEVICE, 1265 QDF_MAC_ADDR_SIZE); 1266 buf->paddr_macbuf = 0; 1267 } 1268 qdf_spin_lock_bh(&pdev->tx_mutex); 1269 buf->next = pdev->me_buf.freelist; 1270 pdev->me_buf.freelist = buf; 1271 pdev->me_buf.buf_in_use--; 1272 qdf_spin_unlock_bh(&pdev->tx_mutex); 1273 } 1274 #endif /* DP_TX_DESC_H */ 1275