/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/*
 * Descriptor pool / TCL ring selection depends on how pools are
 * partitioned at build time: per pdev (with or without flow control V2)
 * or per vdev.
 */
#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/**
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* NOTE(review): unclear whether a ring ID also needs to be encoded in the
 * cookie (original comment asked "???Ring ID needed???") - TODO confirm
 */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0

/*
 * With flow control V2, flow pools carry their own flow_pool_lock, so the
 * per-pool TX_DESC_LOCK_* macros compile away to nothing.
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS
dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
	uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
	uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	/* NOTE(review): assumes freelist is non-NULL; caller must have
	 * verified avail_desc under the pool lock before calling.
	 */
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
		struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	/* start_th/stop_th are per-AC arrays in the AC-based scheme */
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if current avail desc meet threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Uses exact-match comparison so each stop threshold fires only at the
 * moment avail_desc crosses that level (one pause action per level).
 *
 * Return: true if threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 *
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_WAKE_NON_PRIORITY_QUEUE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
					pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				/* Each stop threshold hit pauses the next
				 * lower-priority queue group and steps the
				 * pool status one level deeper.
				 */
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE\BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_ERROR,
						"%s %d pool is %d status!",
						__func__, __LINE__,
						pool->status);
					break;
				}
				pool->latest_pause_time[level] =
					qdf_get_system_timestamp();
				/* NOTE(review): pause_cb runs with
				 * flow_pool_lock held here, unlike the
				 * non-AC variant which unlocks first -
				 * confirm the callback cannot reacquire
				 * the pool lock.
				 */
				soc->pause_cb(desc_pool_id,
					      act, WLAN_DATA_FLOW_CONTROL);
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	/* Unwind in the reverse order of the pause steps taken in
	 * dp_tx_desc_alloc(): crossing a start threshold wakes one queue
	 * group and steps the pool status back one level.
	 */
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		/* Pool is marked for deletion: once the last outstanding
		 * descriptor returns, release the whole pool.
		 */
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s %d pool is freed!!",
				__func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d pool is INACTIVE State!!",
			__func__, __LINE__);
		break;
	};

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
				act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
				pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
						WLAN_STOP_ALL_NETIF_QUEUE,
						WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(soc->hif_handle);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}


	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: pool id the descriptor belongs to
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			/* enough descriptors returned: wake all queues */
			soc->pause_cb(pool->flow_pool_id,
					WLAN_WAKE_ALL_NETIF_QUEUE,
					WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		/* pool marked for deletion; free it on last return */
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				__func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
break; 471 default: 472 qdf_print("%s %d pool is INACTIVE State!!", 473 __func__, __LINE__); 474 break; 475 }; 476 477 qdf_spin_unlock_bh(&pool->flow_pool_lock); 478 479 out: 480 /** 481 * Decrement PM usage count if the packet has been sent. This 482 * should be tied with the success of freeing one descriptor. 483 */ 484 hif_pm_runtime_put(soc->hif_handle); 485 } 486 487 #endif /* QCA_AC_BASED_FLOW_CONTROL */ 488 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */ 489 490 static inline void dp_tx_flow_control_init(struct dp_soc *handle) 491 { 492 } 493 494 static inline void dp_tx_flow_control_deinit(struct dp_soc *handle) 495 { 496 } 497 498 static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, 499 uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id, 500 uint16_t flow_pool_size) 501 { 502 return QDF_STATUS_SUCCESS; 503 } 504 505 static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, 506 uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id) 507 { 508 } 509 510 /** 511 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool 512 * 513 * @param soc Handle to DP SoC structure 514 * @param pool_id 515 * 516 * Return: 517 */ 518 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc, 519 uint8_t desc_pool_id) 520 { 521 struct dp_tx_desc_s *tx_desc = NULL; 522 523 TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); 524 525 tx_desc = soc->tx_desc[desc_pool_id].freelist; 526 527 /* Pool is exhausted */ 528 if (!tx_desc) { 529 TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); 530 return NULL; 531 } 532 533 soc->tx_desc[desc_pool_id].freelist = 534 soc->tx_desc[desc_pool_id].freelist->next; 535 soc->tx_desc[desc_pool_id].num_allocated++; 536 soc->tx_desc[desc_pool_id].num_free--; 537 538 tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get()); 539 540 tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; 541 542 TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); 543 544 return tx_desc; 545 } 546 547 /** 548 * 
dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors 549 * from given pool 550 * @soc: Handle to DP SoC structure 551 * @pool_id: pool id should pick up 552 * @num_requested: number of required descriptor 553 * 554 * allocate multiple tx descriptor and make a link 555 * 556 * Return: h_desc first descriptor pointer 557 */ 558 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple( 559 struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested) 560 { 561 struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL; 562 uint8_t count; 563 564 TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); 565 566 if ((num_requested == 0) || 567 (soc->tx_desc[desc_pool_id].num_free < num_requested)) { 568 TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); 569 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 570 "%s, No Free Desc: Available(%d) num_requested(%d)", 571 __func__, soc->tx_desc[desc_pool_id].num_free, 572 num_requested); 573 return NULL; 574 } 575 576 h_desc = soc->tx_desc[desc_pool_id].freelist; 577 578 /* h_desc should never be NULL since num_free > requested */ 579 qdf_assert_always(h_desc); 580 581 c_desc = h_desc; 582 for (count = 0; count < (num_requested - 1); count++) { 583 c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; 584 c_desc = c_desc->next; 585 } 586 soc->tx_desc[desc_pool_id].num_free -= count; 587 soc->tx_desc[desc_pool_id].num_allocated += count; 588 soc->tx_desc[desc_pool_id].freelist = c_desc->next; 589 c_desc->next = NULL; 590 591 TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); 592 return h_desc; 593 } 594 595 /** 596 * dp_tx_desc_free() - Fee a tx descriptor and attach it to free list 597 * 598 * @soc Handle to DP SoC structure 599 * @pool_id 600 * @tx_desc 601 */ 602 static inline void 603 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, 604 uint8_t desc_pool_id) 605 { 606 TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); 607 608 tx_desc->vdev = NULL; 609 tx_desc->nbuf = NULL; 610 tx_desc->flags = 
0; 611 tx_desc->next = soc->tx_desc[desc_pool_id].freelist; 612 soc->tx_desc[desc_pool_id].freelist = tx_desc; 613 soc->tx_desc[desc_pool_id].num_allocated--; 614 soc->tx_desc[desc_pool_id].num_free++; 615 616 617 TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); 618 } 619 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 620 621 #ifdef QCA_DP_TX_DESC_ID_CHECK 622 /** 623 * dp_tx_is_desc_id_valid() - check is the tx desc id valid 624 * 625 * @soc Handle to DP SoC structure 626 * @tx_desc_id 627 * 628 * Return: true or false 629 */ 630 static inline bool 631 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id) 632 { 633 uint8_t pool_id; 634 uint16_t page_id, offset; 635 struct dp_tx_desc_pool_s *pool; 636 637 pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >> 638 DP_TX_DESC_ID_POOL_OS; 639 /* Pool ID is out of limit */ 640 if (pool_id > wlan_cfg_get_num_tx_desc_pool( 641 soc->wlan_cfg_ctx)) { 642 QDF_TRACE(QDF_MODULE_ID_DP, 643 QDF_TRACE_LEVEL_FATAL, 644 "%s:Tx Comp pool id %d not valid", 645 __func__, 646 pool_id); 647 goto warn_exit; 648 } 649 650 pool = &soc->tx_desc[pool_id]; 651 /* the pool is freed */ 652 if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) { 653 QDF_TRACE(QDF_MODULE_ID_DP, 654 QDF_TRACE_LEVEL_FATAL, 655 "%s:the pool %d has been freed", 656 __func__, 657 pool_id); 658 goto warn_exit; 659 } 660 661 page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >> 662 DP_TX_DESC_ID_PAGE_OS; 663 /* the page id is out of limit */ 664 if (page_id >= pool->desc_pages.num_pages) { 665 QDF_TRACE(QDF_MODULE_ID_DP, 666 QDF_TRACE_LEVEL_FATAL, 667 "%s:the page id %d invalid, pool id %d, num_page %d", 668 __func__, 669 page_id, 670 pool_id, 671 pool->desc_pages.num_pages); 672 goto warn_exit; 673 } 674 675 offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >> 676 DP_TX_DESC_ID_OFFSET_OS; 677 /* the offset is out of limit */ 678 if (offset >= pool->desc_pages.num_element_per_page) { 679 QDF_TRACE(QDF_MODULE_ID_DP, 680 QDF_TRACE_LEVEL_FATAL, 681 "%s:offset %d invalid, 
pool%d,num_elem_per_page %d", 682 __func__, 683 offset, 684 pool_id, 685 pool->desc_pages.num_element_per_page); 686 goto warn_exit; 687 } 688 689 return true; 690 691 warn_exit: 692 QDF_TRACE(QDF_MODULE_ID_DP, 693 QDF_TRACE_LEVEL_FATAL, 694 "%s:Tx desc id 0x%x not valid", 695 __func__, 696 tx_desc_id); 697 qdf_assert_always(0); 698 return false; 699 } 700 701 #else 702 static inline bool 703 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id) 704 { 705 return true; 706 } 707 #endif /* QCA_DP_TX_DESC_ID_CHECK */ 708 709 /** 710 * dp_tx_desc_find() - find dp tx descriptor from cokie 711 * @soc - handle for the device sending the data 712 * @tx_desc_id - the ID of the descriptor in question 713 * @return the descriptor object that has the specified ID 714 * 715 * Use a tx descriptor ID to find the corresponding descriptor object. 716 * 717 */ 718 static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc, 719 uint8_t pool_id, uint16_t page_id, uint16_t offset) 720 { 721 struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]); 722 723 return tx_desc_pool->desc_pages.cacheable_pages[page_id] + 724 tx_desc_pool->elem_size * offset; 725 } 726 727 /** 728 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool 729 * @soc: handle for the device sending the data 730 * @pool_id: target pool id 731 * 732 * Return: None 733 */ 734 static inline 735 struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc, 736 uint8_t desc_pool_id) 737 { 738 struct dp_tx_ext_desc_elem_s *c_elem; 739 740 qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 741 if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) { 742 qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 743 return NULL; 744 } 745 c_elem = soc->tx_ext_desc[desc_pool_id].freelist; 746 soc->tx_ext_desc[desc_pool_id].freelist = 747 soc->tx_ext_desc[desc_pool_id].freelist->next; 748 soc->tx_ext_desc[desc_pool_id].num_free--; 749 
qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 750 return c_elem; 751 } 752 753 /** 754 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool 755 * @soc: handle for the device sending the data 756 * @pool_id: target pool id 757 * @elem: ext descriptor pointer should release 758 * 759 * Return: None 760 */ 761 static inline void dp_tx_ext_desc_free(struct dp_soc *soc, 762 struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id) 763 { 764 qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 765 elem->next = soc->tx_ext_desc[desc_pool_id].freelist; 766 soc->tx_ext_desc[desc_pool_id].freelist = elem; 767 soc->tx_ext_desc[desc_pool_id].num_free++; 768 qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 769 return; 770 } 771 772 /** 773 * dp_tx_ext_desc_free_multiple() - Fee multiple tx extension descriptor and 774 * attach it to free list 775 * @soc: Handle to DP SoC structure 776 * @desc_pool_id: pool id should pick up 777 * @elem: tx descriptor should be freed 778 * @num_free: number of descriptors should be freed 779 * 780 * Return: none 781 */ 782 static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc, 783 struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id, 784 uint8_t num_free) 785 { 786 struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem; 787 uint8_t freed = num_free; 788 789 /* caller should always guarantee atleast list of num_free nodes */ 790 qdf_assert_always(head); 791 792 head = elem; 793 c_elem = head; 794 tail = head; 795 while (c_elem && freed) { 796 tail = c_elem; 797 c_elem = c_elem->next; 798 freed--; 799 } 800 801 /* caller should always guarantee atleast list of num_free nodes */ 802 qdf_assert_always(tail); 803 804 qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 805 tail->next = soc->tx_ext_desc[desc_pool_id].freelist; 806 soc->tx_ext_desc[desc_pool_id].freelist = head; 807 soc->tx_ext_desc[desc_pool_id].num_free += num_free; 808 qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); 

	return;
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id should pick up tso descriptor
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id should pick up tso descriptor
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * HTT pdev
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tso_num_seg_alloc() - pop a "num of segments" element from the pool
 * @soc: device soc instance
 * @pool_id: pool id to pick the element from
 *
 * Return: element from the freelist, or NULL if the pool is empty
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - return a "num of segments" element to its pool
 * @soc: device soc instance
 * @pool_id: owning pool id
 * @tso_num_seg: element to push back on the freelist
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/*
 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t(buf), or NULL when the freelist is empty
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;
	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - Free me descriptor and add it to pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
#endif /* DP_TX_DESC_H */