1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hal_hw_headers.h" 21 #include "dp_types.h" 22 #include "dp_rx.h" 23 #include "dp_tx.h" 24 #include "dp_peer.h" 25 #include "hal_rx.h" 26 #include "hal_api.h" 27 #include "qdf_nbuf.h" 28 #ifdef MESH_MODE_SUPPORT 29 #include "if_meta_hdr.h" 30 #endif 31 #include "dp_internal.h" 32 #include "dp_ipa.h" 33 #include "dp_hist.h" 34 #include "dp_rx_buffer_pool.h" 35 #ifdef WIFI_MONITOR_SUPPORT 36 #include "dp_htt.h" 37 #include <dp_mon.h> 38 #endif 39 #ifdef FEATURE_WDS 40 #include "dp_txrx_wds.h" 41 #endif 42 #ifdef DP_RATETABLE_SUPPORT 43 #include "dp_ratetable.h" 44 #endif 45 46 #ifdef DUP_RX_DESC_WAR 47 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 48 hal_ring_handle_t hal_ring, 49 hal_ring_desc_t ring_desc, 50 struct dp_rx_desc *rx_desc) 51 { 52 void *hal_soc = soc->hal_soc; 53 54 hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc); 55 dp_rx_desc_dump(rx_desc); 56 } 57 #else 58 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 59 hal_ring_handle_t hal_ring_hdl, 60 hal_ring_desc_t ring_desc, 61 struct dp_rx_desc *rx_desc) 62 { 63 hal_soc_handle_t hal_soc = soc->hal_soc; 64 65 dp_rx_desc_dump(rx_desc); 66 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc); 67 hal_srng_dump_ring(hal_soc, hal_ring_hdl); 68 qdf_assert_always(0); 69 } 70 #endif 71 72 #ifndef QCA_HOST_MODE_WIFI_DISABLED 73 #ifdef RX_DESC_SANITY_WAR 74 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 75 hal_ring_handle_t hal_ring_hdl, 76 hal_ring_desc_t ring_desc, 77 struct dp_rx_desc *rx_desc) 78 { 79 uint8_t return_buffer_manager; 80 81 if (qdf_unlikely(!rx_desc)) { 82 /* 83 * This is an unlikely case where the cookie obtained 84 * from the ring_desc is invalid and hence we are not 85 * able to find the corresponding rx_desc 86 */ 87 goto fail; 88 } 89 90 return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc); 91 if (qdf_unlikely(!(return_buffer_manager == 92 HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) || 93 return_buffer_manager == 94 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) { 95 goto fail; 96 } 97 98 return QDF_STATUS_SUCCESS; 99 100 fail: 101 DP_STATS_INC(soc, rx.err.invalid_cookie, 1); 102 dp_err("Ring Desc:"); 103 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, 104 ring_desc); 105 return QDF_STATUS_E_NULL_VALUE; 106 107 } 108 #endif 109 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 110 111 /** 112 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map 113 * 114 * @dp_soc: struct dp_soc * 115 * @nbuf_frag_info_t: nbuf frag info 116 * @dp_pdev: struct dp_pdev * 117 * @rx_desc_pool: Rx desc pool 118 * 119 * Return: QDF_STATUS 120 */ 121 
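/*
 * Illustrative caller-side sketch (not part of the driver): the helper
 * documented above returns distinct error codes (QDF_STATUS_E_NOMEM on
 * allocation failure, QDF_STATUS_E_FAULT on DMA map failure), which lets
 * a replenish loop decide whether to skip one entry or stop entirely.
 * Variable names here (frag_info, pool) are examples only.
 *
 *   status = dp_pdev_frag_alloc_and_map(soc, &frag_info, pdev, pool);
 *   if (status == QDF_STATUS_E_FAULT)
 *       continue;   // map failed: drop this entry, try the next one
 *   else if (QDF_IS_STATUS_ERROR(status))
 *       break;      // allocation failed (E_NOMEM): stop for now
 */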
#ifdef DP_RX_MON_MEM_FRAG 122 static inline QDF_STATUS 123 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 124 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 125 struct dp_pdev *dp_pdev, 126 struct rx_desc_pool *rx_desc_pool) 127 { 128 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 129 130 (nbuf_frag_info_t->virt_addr).vaddr = 131 qdf_frag_alloc(NULL, rx_desc_pool->buf_size); 132 133 if (!((nbuf_frag_info_t->virt_addr).vaddr)) { 134 dp_err("Frag alloc failed"); 135 DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1); 136 return QDF_STATUS_E_NOMEM; 137 } 138 139 ret = qdf_mem_map_page(dp_soc->osdev, 140 (nbuf_frag_info_t->virt_addr).vaddr, 141 QDF_DMA_FROM_DEVICE, 142 rx_desc_pool->buf_size, 143 &nbuf_frag_info_t->paddr); 144 145 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 146 qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr); 147 dp_err("Frag map failed"); 148 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 149 return QDF_STATUS_E_FAULT; 150 } 151 152 return QDF_STATUS_SUCCESS; 153 } 154 #else 155 static inline QDF_STATUS 156 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 157 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 158 struct dp_pdev *dp_pdev, 159 struct rx_desc_pool *rx_desc_pool) 160 { 161 return QDF_STATUS_SUCCESS; 162 } 163 #endif /* DP_RX_MON_MEM_FRAG */ 164 165 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 166 /** 167 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history 168 * @soc: Datapath soc structure 169 * @ring_num: Refill ring number 170 * @num_req: number of buffers requested for refill 171 * @num_refill: number of buffers refilled 172 * 173 * Returns: None 174 */ 175 static inline void 176 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 177 hal_ring_handle_t hal_ring_hdl, 178 uint32_t num_req, uint32_t num_refill) 179 { 180 struct dp_refill_info_record *record; 181 uint32_t idx; 182 uint32_t tp; 183 uint32_t hp; 184 185 if (qdf_unlikely(ring_num >= MAX_PDEV_CNT || 186 !soc->rx_refill_ring_history[ring_num])) 187 return; 188 189 idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index, 190 DP_RX_REFILL_HIST_MAX); 191 192 /* No NULL check needed for record since its an array */ 193 record = &soc->rx_refill_ring_history[ring_num]->entry[idx]; 194 195 hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp); 196 record->timestamp = qdf_get_log_timestamp(); 197 record->num_req = num_req; 198 record->num_refill = num_refill; 199 record->hp = hp; 200 record->tp = tp; 201 } 202 #else 203 static inline void 204 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 205 hal_ring_handle_t hal_ring_hdl, 206 uint32_t num_req, uint32_t num_refill) 207 { 208 } 209 #endif 210 211 /** 212 * dp_pdev_nbuf_alloc_and_map() - Allocate nbuf for desc buffer and map 213 * 214 * @dp_soc: struct dp_soc * 215 * @mac_id: Mac id 216 * @num_entries_avail: num_entries_avail 217 * @nbuf_frag_info_t: nbuf frag info 218 * @dp_pdev: struct dp_pdev * 219 * @rx_desc_pool: Rx desc pool 220 * 221 * Return: QDF_STATUS 222 */ 223 static inline QDF_STATUS 224 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc, 225 uint32_t mac_id, 226 uint32_t num_entries_avail, 227 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 228 struct dp_pdev *dp_pdev, 229 struct rx_desc_pool *rx_desc_pool) 230 { 231 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 232 233 (nbuf_frag_info_t->virt_addr).nbuf = 234 dp_rx_buffer_pool_nbuf_alloc(dp_soc, 235 mac_id, 236 rx_desc_pool, 237 num_entries_avail); 238 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 239 dp_err("nbuf alloc 
failed"); 240 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 241 return QDF_STATUS_E_NOMEM; 242 } 243 244 ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool, 245 nbuf_frag_info_t); 246 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 247 dp_rx_buffer_pool_nbuf_free(dp_soc, 248 (nbuf_frag_info_t->virt_addr).nbuf, mac_id); 249 dp_err("nbuf map failed"); 250 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 251 return QDF_STATUS_E_FAULT; 252 } 253 254 nbuf_frag_info_t->paddr = 255 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 256 257 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, 258 (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf), 259 rx_desc_pool->buf_size, 260 true); 261 262 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 263 &nbuf_frag_info_t->paddr, 264 rx_desc_pool); 265 if (ret == QDF_STATUS_E_FAILURE) { 266 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 267 return QDF_STATUS_E_ADDRNOTAVAIL; 268 } 269 270 return QDF_STATUS_SUCCESS; 271 } 272 273 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 274 QDF_STATUS 275 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id, 276 struct dp_srng *dp_rxdma_srng, 277 struct rx_desc_pool *rx_desc_pool) 278 { 279 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 280 uint32_t count; 281 void *rxdma_ring_entry; 282 union dp_rx_desc_list_elem_t *next = NULL; 283 void *rxdma_srng; 284 qdf_nbuf_t nbuf; 285 qdf_dma_addr_t paddr; 286 uint16_t num_entries_avail = 0; 287 uint16_t num_alloc_desc = 0; 288 union dp_rx_desc_list_elem_t *desc_list = NULL; 289 union dp_rx_desc_list_elem_t *tail = NULL; 290 int sync_hw_ptr = 0; 291 292 rxdma_srng = dp_rxdma_srng->hal_srng; 293 294 if (qdf_unlikely(!dp_pdev)) { 295 dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id); 296 return QDF_STATUS_E_FAILURE; 297 } 298 299 if (qdf_unlikely(!rxdma_srng)) { 300 dp_rx_debug("%pK: rxdma srng not initialized", soc); 301 return QDF_STATUS_E_FAILURE; 302 } 303 304 hal_srng_access_start(soc->hal_soc, rxdma_srng); 305 306 num_entries_avail = hal_srng_src_num_avail(soc->hal_soc, 307 rxdma_srng, 308 sync_hw_ptr); 309 310 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 311 soc, num_entries_avail); 312 313 if (qdf_unlikely(num_entries_avail < 314 ((dp_rxdma_srng->num_entries * 3) / 4))) { 315 hal_srng_access_end(soc->hal_soc, rxdma_srng); 316 return QDF_STATUS_E_FAILURE; 317 } 318 319 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 320 321 num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, 322 rx_desc_pool, 323 num_entries_avail, 324 &desc_list, 325 &tail); 326 327 if (!num_alloc_desc) { 328 dp_rx_err("%pK: no free rx_descs in freelist", soc); 329 DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail, 330 num_entries_avail); 331 hal_srng_access_end(soc->hal_soc, rxdma_srng); 332 return QDF_STATUS_E_NOMEM; 333 } 334 335 for (count = 0; count < num_alloc_desc; count++) { 336 next = desc_list->next; 337 qdf_prefetch(next); 338 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 339 if (qdf_unlikely(!nbuf)) { 340 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 341 break; 342 } 343 344 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 345 rx_desc_pool->buf_size); 346 347 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, 348 rxdma_srng); 349 qdf_assert_always(rxdma_ring_entry); 350 351 desc_list->rx_desc.nbuf = nbuf; 352 desc_list->rx_desc.rx_buf_start = nbuf->data; 353 desc_list->rx_desc.unmapped = 0; 354 355 /* rx_desc.in_use should be zero at this time*/ 356 
qdf_assert_always(desc_list->rx_desc.in_use == 0); 357 358 desc_list->rx_desc.in_use = 1; 359 desc_list->rx_desc.in_err_state = 0; 360 361 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 362 paddr, 363 desc_list->rx_desc.cookie, 364 rx_desc_pool->owner); 365 366 desc_list = next; 367 } 368 qdf_dsb(); 369 hal_srng_access_end(soc->hal_soc, rxdma_srng); 370 371 /* No need to count the number of bytes received during replenish. 372 * Therefore set replenish.pkts.bytes as 0. 373 */ 374 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 375 DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count)); 376 /* 377 * add any available free desc back to the free list 378 */ 379 if (desc_list) 380 dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail, 381 mac_id, rx_desc_pool); 382 383 return QDF_STATUS_SUCCESS; 384 } 385 386 QDF_STATUS 387 __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id, 388 struct dp_srng *dp_rxdma_srng, 389 struct rx_desc_pool *rx_desc_pool, 390 uint32_t num_req_buffers, 391 union dp_rx_desc_list_elem_t **desc_list, 392 union dp_rx_desc_list_elem_t **tail) 393 { 394 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 395 uint32_t count; 396 void *rxdma_ring_entry; 397 union dp_rx_desc_list_elem_t *next; 398 void *rxdma_srng; 399 qdf_nbuf_t nbuf; 400 qdf_dma_addr_t paddr; 401 402 rxdma_srng = dp_rxdma_srng->hal_srng; 403 404 if (qdf_unlikely(!dp_pdev)) { 405 dp_rx_err("%pK: pdev is null for mac_id = %d", 406 soc, mac_id); 407 return QDF_STATUS_E_FAILURE; 408 } 409 410 if (qdf_unlikely(!rxdma_srng)) { 411 dp_rx_debug("%pK: rxdma srng not initialized", soc); 412 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 413 return QDF_STATUS_E_FAILURE; 414 } 415 416 dp_rx_debug("%pK: requested %d buffers for replenish", 417 soc, num_req_buffers); 418 419 hal_srng_access_start(soc->hal_soc, rxdma_srng); 420 421 for (count = 0; count < num_req_buffers; count++) { 422 next = (*desc_list)->next; 423 qdf_prefetch(next); 424 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 425 if (qdf_unlikely(!nbuf)) { 426 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 427 break; 428 } 429 430 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 431 rx_desc_pool->buf_size); 432 rxdma_ring_entry = (struct dp_buffer_addr_info *) 433 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 434 if (!rxdma_ring_entry) 435 break; 436 437 qdf_assert_always(rxdma_ring_entry); 438 439 (*desc_list)->rx_desc.nbuf = nbuf; 440 (*desc_list)->rx_desc.rx_buf_start = nbuf->data; 441 (*desc_list)->rx_desc.unmapped = 0; 442 443 /* rx_desc.in_use should be zero at this time*/ 444 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 445 446 (*desc_list)->rx_desc.in_use = 1; 447 (*desc_list)->rx_desc.in_err_state = 0; 448 449 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 450 paddr, 451 (*desc_list)->rx_desc.cookie, 452 rx_desc_pool->owner); 453 454 *desc_list = next; 455 } 456 qdf_dsb(); 457 hal_srng_access_end(soc->hal_soc, rxdma_srng); 458 459 /* No need to count the number of bytes received during replenish. 460 * Therefore set replenish.pkts.bytes as 0. 
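 * Only 'count' (the number of entries actually programmed into the
 * ring) is recorded here; descriptors that could not be filled are
 * returned to the free list just below.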
461 */ 462 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 463 DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count)); 464 /* 465 * add any available free desc back to the free list 466 */ 467 if (*desc_list) 468 dp_rx_add_desc_list_to_free_list(soc, desc_list, tail, 469 mac_id, rx_desc_pool); 470 471 return QDF_STATUS_SUCCESS; 472 } 473 474 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc, 475 uint32_t mac_id, 476 struct dp_srng *dp_rxdma_srng, 477 struct rx_desc_pool *rx_desc_pool, 478 uint32_t num_req_buffers) 479 { 480 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 481 uint32_t count; 482 uint32_t nr_descs = 0; 483 void *rxdma_ring_entry; 484 union dp_rx_desc_list_elem_t *next; 485 void *rxdma_srng; 486 qdf_nbuf_t nbuf; 487 qdf_dma_addr_t paddr; 488 union dp_rx_desc_list_elem_t *desc_list = NULL; 489 union dp_rx_desc_list_elem_t *tail = NULL; 490 491 rxdma_srng = dp_rxdma_srng->hal_srng; 492 493 if (qdf_unlikely(!dp_pdev)) { 494 dp_rx_err("%pK: pdev is null for mac_id = %d", 495 soc, mac_id); 496 return QDF_STATUS_E_FAILURE; 497 } 498 499 if (qdf_unlikely(!rxdma_srng)) { 500 dp_rx_debug("%pK: rxdma srng not initialized", soc); 501 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 502 return QDF_STATUS_E_FAILURE; 503 } 504 505 dp_rx_debug("%pK: requested %d buffers for replenish", 506 soc, num_req_buffers); 507 508 nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool, 509 num_req_buffers, &desc_list, &tail); 510 if (!nr_descs) { 511 dp_err("no free rx_descs in freelist"); 512 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 513 return QDF_STATUS_E_NOMEM; 514 } 515 516 dp_debug("got %u RX descs for driver attach", nr_descs); 517 518 hal_srng_access_start(soc->hal_soc, rxdma_srng); 519 520 for (count = 0; count < nr_descs; count++) { 521 next = desc_list->next; 522 qdf_prefetch(next); 523 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 524 if (qdf_unlikely(!nbuf)) { 525 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 526 break; 527 } 528 529 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 530 rx_desc_pool->buf_size); 531 rxdma_ring_entry = (struct dp_buffer_addr_info *) 532 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 533 if (!rxdma_ring_entry) 534 break; 535 536 qdf_assert_always(rxdma_ring_entry); 537 538 desc_list->rx_desc.nbuf = nbuf; 539 desc_list->rx_desc.rx_buf_start = nbuf->data; 540 desc_list->rx_desc.unmapped = 0; 541 542 /* rx_desc.in_use should be zero at this time*/ 543 qdf_assert_always(desc_list->rx_desc.in_use == 0); 544 545 desc_list->rx_desc.in_use = 1; 546 desc_list->rx_desc.in_err_state = 0; 547 548 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 549 paddr, 550 desc_list->rx_desc.cookie, 551 rx_desc_pool->owner); 552 553 desc_list = next; 554 } 555 qdf_dsb(); 556 hal_srng_access_end(soc->hal_soc, rxdma_srng); 557 558 /* No need to count the number of bytes received during replenish. 559 * Therefore set replenish.pkts.bytes as 0. 560 */ 561 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 562 563 return QDF_STATUS_SUCCESS; 564 } 565 #endif 566 567 /* 568 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs 569 * called during dp rx initialization 570 * and at the end of dp_rx_process. 
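 *
 * Illustrative call sketch (caller-side variable names are examples
 * only; callers in this tree typically go through a wrapper in dp_rx.h
 * that supplies __func__):
 *
 *   union dp_rx_desc_list_elem_t *desc_list = NULL;
 *   union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *   __dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *                             num_buffers, &desc_list, &tail, __func__);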
571 * 572 * @soc: core txrx main context 573 * @mac_id: mac_id which is one of 3 mac_ids 574 * @dp_rxdma_srng: dp rxdma circular ring 575 * @rx_desc_pool: Pointer to free Rx descriptor pool 576 * @num_req_buffers: number of buffer to be replenished 577 * @desc_list: list of descs if called from dp_rx_process 578 * or NULL during dp rx initialization or out of buffer 579 * interrupt. 580 * @tail: tail of descs list 581 * @func_name: name of the caller function 582 * Return: return success or failure 583 */ 584 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 585 struct dp_srng *dp_rxdma_srng, 586 struct rx_desc_pool *rx_desc_pool, 587 uint32_t num_req_buffers, 588 union dp_rx_desc_list_elem_t **desc_list, 589 union dp_rx_desc_list_elem_t **tail, 590 const char *func_name) 591 { 592 uint32_t num_alloc_desc; 593 uint16_t num_desc_to_free = 0; 594 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 595 uint32_t num_entries_avail; 596 uint32_t count; 597 int sync_hw_ptr = 1; 598 struct dp_rx_nbuf_frag_info nbuf_frag_info = {0}; 599 void *rxdma_ring_entry; 600 union dp_rx_desc_list_elem_t *next; 601 QDF_STATUS ret; 602 void *rxdma_srng; 603 604 rxdma_srng = dp_rxdma_srng->hal_srng; 605 606 if (qdf_unlikely(!dp_pdev)) { 607 dp_rx_err("%pK: pdev is null for mac_id = %d", 608 dp_soc, mac_id); 609 return QDF_STATUS_E_FAILURE; 610 } 611 612 if (qdf_unlikely(!rxdma_srng)) { 613 dp_rx_debug("%pK: rxdma srng not initialized", dp_soc); 614 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 615 return QDF_STATUS_E_FAILURE; 616 } 617 618 dp_rx_debug("%pK: requested %d buffers for replenish", 619 dp_soc, num_req_buffers); 620 621 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 622 623 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 624 rxdma_srng, 625 sync_hw_ptr); 626 627 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 628 dp_soc, num_entries_avail); 629 630 if (!(*desc_list) && (num_entries_avail > 631 ((dp_rxdma_srng->num_entries * 3) / 4))) { 632 num_req_buffers = num_entries_avail; 633 } else if (num_entries_avail < num_req_buffers) { 634 num_desc_to_free = num_req_buffers - num_entries_avail; 635 num_req_buffers = num_entries_avail; 636 } 637 638 if (qdf_unlikely(!num_req_buffers)) { 639 num_desc_to_free = num_req_buffers; 640 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 641 goto free_descs; 642 } 643 644 /* 645 * if desc_list is NULL, allocate the descs from freelist 646 */ 647 if (!(*desc_list)) { 648 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, 649 rx_desc_pool, 650 num_req_buffers, 651 desc_list, 652 tail); 653 654 if (!num_alloc_desc) { 655 dp_rx_err("%pK: no free rx_descs in freelist", dp_soc); 656 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, 657 num_req_buffers); 658 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 659 return QDF_STATUS_E_NOMEM; 660 } 661 662 dp_rx_debug("%pK: %d rx desc allocated", dp_soc, num_alloc_desc); 663 num_req_buffers = num_alloc_desc; 664 } 665 666 667 count = 0; 668 669 while (count < num_req_buffers) { 670 /* Flag is set while pdev rx_desc_pool initialization */ 671 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 672 ret = dp_pdev_frag_alloc_and_map(dp_soc, 673 &nbuf_frag_info, 674 dp_pdev, 675 rx_desc_pool); 676 else 677 ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc, 678 mac_id, 679 num_entries_avail, &nbuf_frag_info, 680 dp_pdev, rx_desc_pool); 681 682 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 683 if (qdf_unlikely(ret == QDF_STATUS_E_FAULT)) 684 
continue; 685 break; 686 } 687 688 count++; 689 690 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, 691 rxdma_srng); 692 qdf_assert_always(rxdma_ring_entry); 693 694 next = (*desc_list)->next; 695 696 /* Flag is set while pdev rx_desc_pool initialization */ 697 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 698 dp_rx_desc_frag_prep(&((*desc_list)->rx_desc), 699 &nbuf_frag_info); 700 else 701 dp_rx_desc_prep(&((*desc_list)->rx_desc), 702 &nbuf_frag_info); 703 704 /* rx_desc.in_use should be zero at this time*/ 705 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 706 707 (*desc_list)->rx_desc.in_use = 1; 708 (*desc_list)->rx_desc.in_err_state = 0; 709 dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc, 710 func_name, RX_DESC_REPLENISHED); 711 dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d", 712 nbuf_frag_info.virt_addr.nbuf, 713 (unsigned long long)(nbuf_frag_info.paddr), 714 (*desc_list)->rx_desc.cookie); 715 716 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, 717 nbuf_frag_info.paddr, 718 (*desc_list)->rx_desc.cookie, 719 rx_desc_pool->owner); 720 721 *desc_list = next; 722 723 } 724 725 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng, 726 num_req_buffers, count); 727 728 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 729 730 dp_rx_schedule_refill_thread(dp_soc); 731 732 dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u", 733 count, num_desc_to_free); 734 735 /* No need to count the number of bytes received during replenish. 736 * Therefore set replenish.pkts.bytes as 0. 737 */ 738 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 739 740 free_descs: 741 DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free); 742 /* 743 * add any available free desc back to the free list 744 */ 745 if (*desc_list) 746 dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, 747 mac_id, rx_desc_pool); 748 749 return QDF_STATUS_SUCCESS; 750 } 751 752 qdf_export_symbol(__dp_rx_buffers_replenish); 753 754 /* 755 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the 756 * pkts to RAW mode simulation to 757 * decapsulate the pkt. 758 * 759 * @vdev: vdev on which RAW mode is enabled 760 * @nbuf_list: list of RAW pkts to process 761 * @txrx_peer: peer object from which the pkt is rx 762 * 763 * Return: void 764 */ 765 void 766 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, 767 struct dp_txrx_peer *txrx_peer) 768 { 769 qdf_nbuf_t deliver_list_head = NULL; 770 qdf_nbuf_t deliver_list_tail = NULL; 771 qdf_nbuf_t nbuf; 772 773 nbuf = nbuf_list; 774 while (nbuf) { 775 qdf_nbuf_t next = qdf_nbuf_next(nbuf); 776 777 DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf); 778 779 DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); 780 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1, 781 qdf_nbuf_len(nbuf)); 782 /* 783 * reset the chfrag_start and chfrag_end bits in nbuf cb 784 * as this is a non-amsdu pkt and RAW mode simulation expects 785 * these bit s to be 0 for non-amsdu pkt. 
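 * The bits are cleared only when both start and end are set, i.e. when
 * the whole MSDU fits in a single buffer.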
786 */ 787 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 788 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 789 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 790 qdf_nbuf_set_rx_chfrag_end(nbuf, 0); 791 } 792 793 nbuf = next; 794 } 795 796 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head, 797 &deliver_list_tail); 798 799 vdev->osif_rx(vdev->osif_vdev, deliver_list_head); 800 } 801 802 #ifndef QCA_HOST_MODE_WIFI_DISABLED 803 #ifndef FEATURE_WDS 804 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr, 805 struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf) 806 { 807 } 808 #endif 809 810 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES 811 /* 812 * dp_classify_critical_pkts() - API for marking critical packets 813 * @soc: dp_soc context 814 * @vdev: vdev on which packet is to be sent 815 * @nbuf: nbuf that has to be classified 816 * 817 * The function parses the packet, identifies whether its a critical frame and 818 * marks QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the nbuf. 819 * Code for marking which frames are CRITICAL is accessed via callback. 820 * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames. 821 * 822 * Return: None 823 */ 824 static 825 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 826 qdf_nbuf_t nbuf) 827 { 828 if (vdev->tx_classify_critical_pkt_cb) 829 vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf); 830 } 831 #else 832 static inline 833 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 834 qdf_nbuf_t nbuf) 835 { 836 } 837 #endif 838 839 #ifdef QCA_OL_TX_MULTIQ_SUPPORT 840 static inline 841 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 842 { 843 qdf_nbuf_set_queue_mapping(nbuf, ring_id); 844 } 845 #else 846 static inline 847 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 848 { 849 } 850 #endif 851 852 /* 853 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets 854 * 855 * @soc: core txrx main context 856 * @ta_peer : source peer entry 857 * @rx_tlv_hdr : start address of rx tlvs 858 * @nbuf : nbuf that has to be intrabss forwarded 859 * @tid_stats : tid stats pointer 860 * 861 * Return: bool: true if it is forwarded else false 862 */ 863 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 864 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 865 struct cdp_tid_rx_stats *tid_stats) 866 { 867 uint16_t len; 868 qdf_nbuf_t nbuf_copy; 869 870 if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr, 871 nbuf)) 872 return true; 873 874 if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf)) 875 return false; 876 877 /* If the source peer in the isolation list 878 * then dont forward instead push to bridge stack 879 */ 880 if (dp_get_peer_isolation(ta_peer)) 881 return false; 882 883 nbuf_copy = qdf_nbuf_copy(nbuf); 884 if (!nbuf_copy) 885 return false; 886 887 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 888 889 qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb)); 890 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy); 891 892 dp_rx_nbuf_queue_mapping_set(nbuf_copy, qdf_get_cpu()); 893 if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy, 894 tid_stats)) 895 return false; 896 897 if (dp_tx_send((struct cdp_soc_t *)soc, 898 ta_peer->vdev->vdev_id, nbuf_copy)) { 899 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 900 len); 901 tid_stats->fail_cnt[INTRABSS_DROP]++; 902 dp_rx_nbuf_free(nbuf_copy); 903 } else { 904 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 905 len); 906 
tid_stats->intrabss_cnt++; 907 } 908 return false; 909 } 910 911 /* 912 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets 913 * 914 * @soc: core txrx main context 915 * @ta_peer: source peer entry 916 * @tx_vdev_id: VDEV ID for Intra-BSS TX 917 * @rx_tlv_hdr: start address of rx tlvs 918 * @nbuf: nbuf that has to be intrabss forwarded 919 * @tid_stats: tid stats pointer 920 * 921 * Return: bool: true if it is forwarded else false 922 */ 923 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 924 uint8_t tx_vdev_id, 925 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 926 struct cdp_tid_rx_stats *tid_stats) 927 { 928 uint16_t len; 929 930 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 931 932 /* linearize the nbuf just before we send to 933 * dp_tx_send() 934 */ 935 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 936 if (qdf_nbuf_linearize(nbuf) == -ENOMEM) 937 return false; 938 939 nbuf = qdf_nbuf_unshare(nbuf); 940 if (!nbuf) { 941 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, 942 rx.intra_bss.fail, 943 1, len); 944 /* return true even though the pkt is 945 * not forwarded. Basically skb_unshare 946 * failed and we want to continue with 947 * next nbuf. 948 */ 949 tid_stats->fail_cnt[INTRABSS_DROP]++; 950 return false; 951 } 952 } 953 954 qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb)); 955 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf); 956 957 dp_rx_nbuf_queue_mapping_set(nbuf, qdf_get_cpu()); 958 if (!dp_tx_send((struct cdp_soc_t *)soc, 959 tx_vdev_id, nbuf)) { 960 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 961 len); 962 } else { 963 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 964 len); 965 tid_stats->fail_cnt[INTRABSS_DROP]++; 966 return false; 967 } 968 969 return true; 970 } 971 972 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 973 974 #ifdef MESH_MODE_SUPPORT 975 976 /** 977 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats 978 * 979 * @vdev: DP Virtual device handle 980 * @nbuf: Buffer pointer 981 * @rx_tlv_hdr: start of rx tlv header 982 * @txrx_peer: pointer to peer 983 * 984 * This function allocated memory for mesh receive stats and fill the 985 * required stats. Stores the memory address in skb cb. 
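 * The rx_info pointer is attached to the nbuf control block
 * (CB_FTYPE_MESH_RX_INFO); the upper layer that consumes the nbuf is
 * responsible for freeing it.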
986 * 987 * Return: void 988 */ 989 990 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 991 uint8_t *rx_tlv_hdr, 992 struct dp_txrx_peer *txrx_peer) 993 { 994 struct mesh_recv_hdr_s *rx_info = NULL; 995 uint32_t pkt_type; 996 uint32_t nss; 997 uint32_t rate_mcs; 998 uint32_t bw; 999 uint8_t primary_chan_num; 1000 uint32_t center_chan_freq; 1001 struct dp_soc *soc = vdev->pdev->soc; 1002 struct dp_peer *peer; 1003 struct dp_peer *primary_link_peer; 1004 struct dp_soc *link_peer_soc; 1005 cdp_peer_stats_param_t buf = {0}; 1006 1007 /* fill recv mesh stats */ 1008 rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); 1009 1010 /* upper layers are resposible to free this memory */ 1011 1012 if (!rx_info) { 1013 dp_rx_err("%pK: Memory allocation failed for mesh rx stats", 1014 vdev->pdev->soc); 1015 DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1); 1016 return; 1017 } 1018 1019 rx_info->rs_flags = MESH_RXHDR_VER1; 1020 if (qdf_nbuf_is_rx_chfrag_start(nbuf)) 1021 rx_info->rs_flags |= MESH_RX_FIRST_MSDU; 1022 1023 if (qdf_nbuf_is_rx_chfrag_end(nbuf)) 1024 rx_info->rs_flags |= MESH_RX_LAST_MSDU; 1025 1026 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH); 1027 if (peer) { 1028 if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) { 1029 rx_info->rs_flags |= MESH_RX_DECRYPTED; 1030 rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc, 1031 rx_tlv_hdr); 1032 if (vdev->osif_get_key) 1033 vdev->osif_get_key(vdev->osif_vdev, 1034 &rx_info->rs_decryptkey[0], 1035 &peer->mac_addr.raw[0], 1036 rx_info->rs_keyix); 1037 } 1038 1039 dp_peer_unref_delete(peer, DP_MOD_ID_MESH); 1040 } 1041 1042 primary_link_peer = dp_get_primary_link_peer_by_id(soc, 1043 txrx_peer->peer_id, 1044 DP_MOD_ID_MESH); 1045 1046 if (qdf_likely(primary_link_peer)) { 1047 link_peer_soc = primary_link_peer->vdev->pdev->soc; 1048 dp_monitor_peer_get_stats_param(link_peer_soc, 1049 primary_link_peer, 1050 cdp_peer_rx_snr, &buf); 1051 rx_info->rs_snr = buf.rx_snr; 1052 dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH); 1053 } 1054 1055 rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR; 1056 1057 soc = vdev->pdev->soc; 1058 primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr); 1059 center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16; 1060 1061 if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) { 1062 rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band( 1063 soc->ctrl_psoc, 1064 vdev->pdev->pdev_id, 1065 center_chan_freq); 1066 } 1067 rx_info->rs_channel = primary_chan_num; 1068 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 1069 rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 1070 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 1071 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 1072 rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) | 1073 (bw << 24); 1074 1075 qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO); 1076 1077 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED, 1078 FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"), 1079 rx_info->rs_flags, 1080 rx_info->rs_rssi, 1081 rx_info->rs_channel, 1082 rx_info->rs_ratephy1, 1083 rx_info->rs_keyix, 1084 rx_info->rs_snr); 1085 1086 } 1087 1088 /** 1089 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets 1090 * 1091 * @vdev: DP Virtual device handle 1092 * @nbuf: Buffer pointer 1093 * @rx_tlv_hdr: start of rx tlv header 1094 * 1095 * This checks if the received packet is 
matching any filter out 1096 * catogery and and drop the packet if it matches. 1097 * 1098 * Return: status(0 indicates drop, 1 indicate to no drop) 1099 */ 1100 1101 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1102 uint8_t *rx_tlv_hdr) 1103 { 1104 union dp_align_mac_addr mac_addr; 1105 struct dp_soc *soc = vdev->pdev->soc; 1106 1107 if (qdf_unlikely(vdev->mesh_rx_filter)) { 1108 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS) 1109 if (hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1110 rx_tlv_hdr)) 1111 return QDF_STATUS_SUCCESS; 1112 1113 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS) 1114 if (hal_rx_mpdu_get_to_ds(soc->hal_soc, 1115 rx_tlv_hdr)) 1116 return QDF_STATUS_SUCCESS; 1117 1118 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS) 1119 if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1120 rx_tlv_hdr) && 1121 !hal_rx_mpdu_get_to_ds(soc->hal_soc, 1122 rx_tlv_hdr)) 1123 return QDF_STATUS_SUCCESS; 1124 1125 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) { 1126 if (hal_rx_mpdu_get_addr1(soc->hal_soc, 1127 rx_tlv_hdr, 1128 &mac_addr.raw[0])) 1129 return QDF_STATUS_E_FAILURE; 1130 1131 if (!qdf_mem_cmp(&mac_addr.raw[0], 1132 &vdev->mac_addr.raw[0], 1133 QDF_MAC_ADDR_SIZE)) 1134 return QDF_STATUS_SUCCESS; 1135 } 1136 1137 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) { 1138 if (hal_rx_mpdu_get_addr2(soc->hal_soc, 1139 rx_tlv_hdr, 1140 &mac_addr.raw[0])) 1141 return QDF_STATUS_E_FAILURE; 1142 1143 if (!qdf_mem_cmp(&mac_addr.raw[0], 1144 &vdev->mac_addr.raw[0], 1145 QDF_MAC_ADDR_SIZE)) 1146 return QDF_STATUS_SUCCESS; 1147 } 1148 } 1149 1150 return QDF_STATUS_E_FAILURE; 1151 } 1152 1153 #else 1154 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1155 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer) 1156 { 1157 } 1158 1159 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1160 uint8_t *rx_tlv_hdr) 1161 { 1162 return QDF_STATUS_E_FAILURE; 1163 } 1164 1165 #endif 1166 1167 #ifdef FEATURE_NAC_RSSI 1168 /** 1169 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac 1170 * @soc: DP SOC handle 1171 * @mpdu: mpdu for which peer is invalid 1172 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1173 * pool_id has same mapping) 1174 * 1175 * return: integer type 1176 */ 1177 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1178 uint8_t mac_id) 1179 { 1180 struct dp_invalid_peer_msg msg; 1181 struct dp_vdev *vdev = NULL; 1182 struct dp_pdev *pdev = NULL; 1183 struct ieee80211_frame *wh; 1184 qdf_nbuf_t curr_nbuf, next_nbuf; 1185 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1186 uint8_t *rx_pkt_hdr = NULL; 1187 int i = 0; 1188 1189 if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) { 1190 dp_rx_debug("%pK: Drop decapped frames", soc); 1191 goto free; 1192 } 1193 1194 /* In RAW packet, packet header will be part of data */ 1195 rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size; 1196 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1197 1198 if (!DP_FRAME_IS_DATA(wh)) { 1199 dp_rx_debug("%pK: NAWDS valid only for data frames", soc); 1200 goto free; 1201 } 1202 1203 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 1204 dp_rx_err("%pK: Invalid nbuf length", soc); 1205 goto free; 1206 } 1207 1208 /* In DMAC case the rx_desc_pools are common across PDEVs 1209 * so PDEV cannot be derived from the pool_id. 1210 * 1211 * link_id need to derived from the TLV tag word which is 1212 * disabled by default. 
For now, a WAR is added to find the vdev
1213 * by brute force; this needs to be fixed once word based subscription
1214 * support is added by enabling the TLV tag word
1215 */
1216 if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
1217 for (i = 0; i < MAX_PDEV_CNT; i++) {
1218 pdev = soc->pdev_list[i];
1219
1220 if (!pdev || qdf_unlikely(pdev->is_pdev_down))
1221 continue;
1222
1223 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1224 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
1225 QDF_MAC_ADDR_SIZE) == 0) {
1226 goto out;
1227 }
1228 }
1229 }
1230 } else {
1231 pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1232
1233 if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
1234 dp_rx_err("%pK: PDEV %s",
1235 soc, !pdev ? "not found" : "down");
1236 goto free;
1237 }
1238
1239 if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
1240 QDF_STATUS_SUCCESS)
1241 return 0;
1242
1243 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1244 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
1245 QDF_MAC_ADDR_SIZE) == 0) {
1246 goto out;
1247 }
1248 }
1249 }
1250
1251 if (!vdev) {
1252 dp_rx_err("%pK: VDEV not found", soc);
1253 goto free;
1254 }
1255 out:
1256 msg.wh = wh;
1257 qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
1258 msg.nbuf = mpdu;
1259 msg.vdev_id = vdev->vdev_id;
1260
1261 /*
1262 * NOTE: Only valid for HKv1.
1263 * If smart monitor mode is enabled on the RE, we receive invalid
1264 * peer frames with RA as the STA mac of the RE and the TA not
1265 * matching any NAC list or the BSSID. Such frames need to be dropped
1266 * in order to avoid HM_WDS false addition.
1267 */
1268 if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
1269 if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
1270 dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
1271 soc, wh->i_addr1);
1272 goto free;
1273 }
1274 pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
1275 (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
1276 pdev->pdev_id, &msg);
1277 }
1278
1279 free:
1280 /* Drop and free packet */
1281 curr_nbuf = mpdu;
1282 while (curr_nbuf) {
1283 next_nbuf = qdf_nbuf_next(curr_nbuf);
1284 dp_rx_nbuf_free(curr_nbuf);
1285 curr_nbuf = next_nbuf;
1286 }
1287
1288 return 0;
1289 }
1290
1291 /**
1292 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
1293 * @soc: DP SOC handle
1294 * @mpdu: mpdu for which peer is invalid
1295 * @mpdu_done: if an mpdu is completed
1296 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1297 * pool_id have the same mapping)
1298 *
1299 * Return: void
1300 */
1301 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1302 qdf_nbuf_t mpdu, bool mpdu_done,
1303 uint8_t mac_id)
1304 {
1305 /* Only trigger the process when mpdu is completed */
1306 if (mpdu_done)
1307 dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1308 }
1309 #else
1310 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
1311 uint8_t mac_id)
1312 {
1313 qdf_nbuf_t curr_nbuf, next_nbuf;
1314 struct dp_pdev *pdev;
1315 struct dp_vdev *vdev = NULL;
1316 struct ieee80211_frame *wh;
1317 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
1318 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
1319
1320 wh = (struct ieee80211_frame *)rx_pkt_hdr;
1321
1322 if (!DP_FRAME_IS_DATA(wh)) {
1323 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
1324 "only for data frames");
1325 goto free;
1326 }
1327
1328 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
1329 dp_rx_info_rl("%pK: Invalid nbuf length", soc);
1330 goto free;
1331 }
1332
1333 pdev =
dp_get_pdev_for_lmac_id(soc, mac_id); 1334 if (!pdev) { 1335 dp_rx_info_rl("%pK: PDEV not found", soc); 1336 goto free; 1337 } 1338 1339 qdf_spin_lock_bh(&pdev->vdev_list_lock); 1340 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { 1341 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1342 QDF_MAC_ADDR_SIZE) == 0) { 1343 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1344 goto out; 1345 } 1346 } 1347 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1348 1349 if (!vdev) { 1350 dp_rx_info_rl("%pK: VDEV not found", soc); 1351 goto free; 1352 } 1353 1354 out: 1355 if (soc->cdp_soc.ol_ops->rx_invalid_peer) 1356 soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh); 1357 free: 1358 /* reset the head and tail pointers */ 1359 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1360 if (pdev) { 1361 pdev->invalid_peer_head_msdu = NULL; 1362 pdev->invalid_peer_tail_msdu = NULL; 1363 } 1364 1365 /* Drop and free packet */ 1366 curr_nbuf = mpdu; 1367 while (curr_nbuf) { 1368 next_nbuf = qdf_nbuf_next(curr_nbuf); 1369 dp_rx_nbuf_free(curr_nbuf); 1370 curr_nbuf = next_nbuf; 1371 } 1372 1373 /* Reset the head and tail pointers */ 1374 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1375 if (pdev) { 1376 pdev->invalid_peer_head_msdu = NULL; 1377 pdev->invalid_peer_tail_msdu = NULL; 1378 } 1379 1380 return 0; 1381 } 1382 1383 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1384 qdf_nbuf_t mpdu, bool mpdu_done, 1385 uint8_t mac_id) 1386 { 1387 /* Process the nbuf */ 1388 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1389 } 1390 #endif 1391 1392 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1393 1394 #ifdef RECEIVE_OFFLOAD 1395 /** 1396 * dp_rx_print_offload_info() - Print offload info from RX TLV 1397 * @soc: dp soc handle 1398 * @msdu: MSDU for which the offload info is to be printed 1399 * 1400 * Return: None 1401 */ 1402 static void dp_rx_print_offload_info(struct dp_soc *soc, 1403 qdf_nbuf_t msdu) 1404 { 1405 dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------"); 1406 dp_verbose_debug("lro_eligible 0x%x", 1407 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)); 1408 dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu)); 1409 dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu)); 1410 dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu)); 1411 dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu)); 1412 dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu)); 1413 dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu)); 1414 dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu)); 1415 dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu)); 1416 dp_verbose_debug("---------------------------------------------------------"); 1417 } 1418 1419 /** 1420 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb 1421 * @soc: DP SOC handle 1422 * @rx_tlv: RX TLV received for the msdu 1423 * @msdu: msdu for which GRO info needs to be filled 1424 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets 1425 * 1426 * Return: None 1427 */ 1428 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 1429 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) 1430 { 1431 struct hal_offload_info offload_info; 1432 1433 if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) 1434 return; 1435 1436 if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info)) 1437 return; 1438 1439 *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1; 1440 1441 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible; 1442 
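/*
 * Propagate the remaining TCP/flow fields parsed from the RX TLVs into
 * the nbuf control block so the stack's GRO path can consume them.
 */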
QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack; 1443 QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = 1444 hal_rx_tlv_get_tcp_chksum(soc->hal_soc, 1445 rx_tlv); 1446 QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num; 1447 QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num; 1448 QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win; 1449 QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto; 1450 QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto; 1451 QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset; 1452 QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id; 1453 1454 dp_rx_print_offload_info(soc, msdu); 1455 } 1456 #endif /* RECEIVE_OFFLOAD */ 1457 1458 /** 1459 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf. 1460 * 1461 * @soc: DP soc handle 1462 * @nbuf: pointer to msdu. 1463 * @mpdu_len: mpdu length 1464 * @l3_pad_len: L3 padding length by HW 1465 * 1466 * Return: returns true if nbuf is last msdu of mpdu else retuns false. 1467 */ 1468 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc, 1469 qdf_nbuf_t nbuf, 1470 uint16_t *mpdu_len, 1471 uint32_t l3_pad_len) 1472 { 1473 bool last_nbuf; 1474 uint32_t pkt_hdr_size; 1475 1476 pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len; 1477 1478 if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) { 1479 qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE); 1480 last_nbuf = false; 1481 *mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size); 1482 } else { 1483 qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size)); 1484 last_nbuf = true; 1485 *mpdu_len = 0; 1486 } 1487 1488 return last_nbuf; 1489 } 1490 1491 /** 1492 * dp_get_l3_hdr_pad_len() - get L3 header padding length. 1493 * 1494 * @soc: DP soc handle 1495 * @nbuf: pointer to msdu. 1496 * 1497 * Return: returns padding length in bytes. 1498 */ 1499 static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc, 1500 qdf_nbuf_t nbuf) 1501 { 1502 uint32_t l3_hdr_pad = 0; 1503 uint8_t *rx_tlv_hdr; 1504 struct hal_rx_msdu_metadata msdu_metadata; 1505 1506 while (nbuf) { 1507 if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 1508 /* scattered msdu end with continuation is 0 */ 1509 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1510 hal_rx_msdu_metadata_get(soc->hal_soc, 1511 rx_tlv_hdr, 1512 &msdu_metadata); 1513 l3_hdr_pad = msdu_metadata.l3_hdr_pad; 1514 break; 1515 } 1516 nbuf = nbuf->next; 1517 } 1518 1519 return l3_hdr_pad; 1520 } 1521 1522 /** 1523 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across 1524 * multiple nbufs. 1525 * @soc: DP SOC handle 1526 * @nbuf: pointer to the first msdu of an amsdu. 1527 * 1528 * This function implements the creation of RX frag_list for cases 1529 * where an MSDU is spread across multiple nbufs. 1530 * 1531 * Return: returns the head nbuf which contains complete frag_list. 1532 */ 1533 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) 1534 { 1535 qdf_nbuf_t parent, frag_list, next = NULL; 1536 uint16_t frag_list_len = 0; 1537 uint16_t mpdu_len; 1538 bool last_nbuf; 1539 uint32_t l3_hdr_pad_offset = 0; 1540 1541 /* 1542 * Use msdu len got from REO entry descriptor instead since 1543 * there is case the RX PKT TLV is corrupted while msdu_len 1544 * from REO descriptor is right for non-raw RX scatter msdu. 1545 */ 1546 mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1547 1548 /* 1549 * this is a case where the complete msdu fits in one single nbuf. 
1550 * in this case HW sets both start and end bit and we only need to 1551 * reset these bits for RAW mode simulator to decap the pkt 1552 */ 1553 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 1554 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1555 qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size); 1556 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1557 return nbuf; 1558 } 1559 1560 l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf); 1561 /* 1562 * This is a case where we have multiple msdus (A-MSDU) spread across 1563 * multiple nbufs. here we create a fraglist out of these nbufs. 1564 * 1565 * the moment we encounter a nbuf with continuation bit set we 1566 * know for sure we have an MSDU which is spread across multiple 1567 * nbufs. We loop through and reap nbufs till we reach last nbuf. 1568 */ 1569 parent = nbuf; 1570 frag_list = nbuf->next; 1571 nbuf = nbuf->next; 1572 1573 /* 1574 * set the start bit in the first nbuf we encounter with continuation 1575 * bit set. This has the proper mpdu length set as it is the first 1576 * msdu of the mpdu. this becomes the parent nbuf and the subsequent 1577 * nbufs will form the frag_list of the parent nbuf. 1578 */ 1579 qdf_nbuf_set_rx_chfrag_start(parent, 1); 1580 /* 1581 * L3 header padding is only needed for the 1st buffer 1582 * in a scattered msdu 1583 */ 1584 last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len, 1585 l3_hdr_pad_offset); 1586 1587 /* 1588 * MSDU cont bit is set but reported MPDU length can fit 1589 * in to single buffer 1590 * 1591 * Increment error stats and avoid SG list creation 1592 */ 1593 if (last_nbuf) { 1594 DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1); 1595 qdf_nbuf_pull_head(parent, 1596 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1597 return parent; 1598 } 1599 1600 /* 1601 * this is where we set the length of the fragments which are 1602 * associated to the parent nbuf. We iterate through the frag_list 1603 * till we hit the last_nbuf of the list. 
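 *
 * Resulting layout (illustrative):
 *
 *   parent (first msdu, chfrag_start set)
 *     |-> frag_list: nbuf2 -> nbuf3 -> ... -> last nbuf (cont bit clear)
 *
 * parent->next is then linked to the nbuf that followed the last
 * fragment in the original chain.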
1604 */
1605 do {
1606 last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
1607 qdf_nbuf_pull_head(nbuf,
1608 soc->rx_pkt_tlv_size);
1609 frag_list_len += qdf_nbuf_len(nbuf);
1610
1611 if (last_nbuf) {
1612 next = nbuf->next;
1613 nbuf->next = NULL;
1614 break;
1615 } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1616 dp_err("Invalid packet length\n");
1617 qdf_assert_always(0);
1618 }
1619 nbuf = nbuf->next;
1620 } while (!last_nbuf);
1621
1622 qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1623 qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1624 parent->next = next;
1625
1626 qdf_nbuf_pull_head(parent,
1627 soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
1628 return parent;
1629 }
1630
1631 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1632
1633 #ifdef QCA_PEER_EXT_STATS
1634 /*
1635 * dp_rx_compute_tid_delay() - Compute per TID delay stats
1636 * @stats: pointer to per TID cdp_delay_tid_stats
1637 * @nbuf: rx nbuf
1638 *
1639 * Return: Void
1640 */
1641 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1642 qdf_nbuf_t nbuf)
1643 {
1644 struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
1645 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1646
1647 dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
1648 }
1649 #endif /* QCA_PEER_EXT_STATS */
1650
1651 /**
1652 * dp_rx_compute_delay() - Compute reap-to-stack and inter-frame
1653 * delay stats for a received frame
1654 *
1655 * @vdev: vdev handle
1656 * @nbuf: rx nbuf
1657 *
1658 * Return: none
1659 */
1660 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1661 {
1662 uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1663 int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1664 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1665 uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1666 uint32_t interframe_delay =
1667 (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1668 struct cdp_tid_rx_stats *rstats =
1669 &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1670
1671 dp_update_delay_stats(NULL, rstats, to_stack, tid,
1672 CDP_DELAY_STATS_REAP_STACK, ring_id, false);
1673 /*
1674 * Update interframe delay stats calculated at deliver_data_ol point.
1675 * Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame,
1676 * so the interframe delay will not be calculated correctly for it.
1677 * On the other hand, this avoids an extra per packet check of
1678 * vdev->prev_rx_deliver_tstamp.
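 * For example, if the previous frame was delivered at t = 1000 ms and
 * the current one at t = 1004 ms, interframe_delay is recorded as 4 ms.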
1679 */ 1680 dp_update_delay_stats(NULL, rstats, interframe_delay, tid, 1681 CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false); 1682 vdev->prev_rx_deliver_tstamp = current_ts; 1683 } 1684 1685 /** 1686 * dp_rx_drop_nbuf_list() - drop an nbuf list 1687 * @pdev: dp pdev reference 1688 * @buf_list: buffer list to be dropepd 1689 * 1690 * Return: int (number of bufs dropped) 1691 */ 1692 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, 1693 qdf_nbuf_t buf_list) 1694 { 1695 struct cdp_tid_rx_stats *stats = NULL; 1696 uint8_t tid = 0, ring_id = 0; 1697 int num_dropped = 0; 1698 qdf_nbuf_t buf, next_buf; 1699 1700 buf = buf_list; 1701 while (buf) { 1702 ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); 1703 next_buf = qdf_nbuf_queue_next(buf); 1704 tid = qdf_nbuf_get_tid_val(buf); 1705 if (qdf_likely(pdev)) { 1706 stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1707 stats->fail_cnt[INVALID_PEER_VDEV]++; 1708 stats->delivered_to_stack--; 1709 } 1710 dp_rx_nbuf_free(buf); 1711 buf = next_buf; 1712 num_dropped++; 1713 } 1714 1715 return num_dropped; 1716 } 1717 1718 #ifdef QCA_SUPPORT_WDS_EXTENDED 1719 /** 1720 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta 1721 * @soc: core txrx main context 1722 * @vdev: vdev 1723 * @txrx_peer: txrx peer 1724 * @nbuf_head: skb list head 1725 * 1726 * Return: true if packet is delivered to netdev per STA. 1727 */ 1728 static inline bool 1729 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1730 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1731 { 1732 /* 1733 * When extended WDS is disabled, frames are sent to AP netdevice. 1734 */ 1735 if (qdf_likely(!vdev->wds_ext_enabled)) 1736 return false; 1737 1738 /* 1739 * There can be 2 cases: 1740 * 1. Send frame to parent netdev if its not for netdev per STA 1741 * 2. If frame is meant for netdev per STA: 1742 * a. Send frame to appropriate netdev using registered fp. 1743 * b. If fp is NULL, drop the frames. 1744 */ 1745 if (!txrx_peer->wds_ext.init) 1746 return false; 1747 1748 if (txrx_peer->osif_rx) 1749 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); 1750 else 1751 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 1752 1753 return true; 1754 } 1755 1756 #else 1757 static inline bool 1758 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1759 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1760 { 1761 return false; 1762 } 1763 #endif 1764 1765 #ifdef PEER_CACHE_RX_PKTS 1766 /** 1767 * dp_rx_flush_rx_cached() - flush cached rx frames 1768 * @peer: peer 1769 * @drop: flag to drop frames or forward to net stack 1770 * 1771 * Return: None 1772 */ 1773 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 1774 { 1775 struct dp_peer_cached_bufq *bufqi; 1776 struct dp_rx_cached_buf *cache_buf = NULL; 1777 ol_txrx_rx_fp data_rx = NULL; 1778 int num_buff_elem; 1779 QDF_STATUS status; 1780 1781 /* 1782 * Flush dp cached frames only for mld peers and legacy peers, as 1783 * link peers don't store cached frames 1784 */ 1785 if (IS_MLO_DP_LINK_PEER(peer)) 1786 return; 1787 1788 if (!peer->txrx_peer) { 1789 dp_err("txrx_peer NULL!! 
peer mac_addr("QDF_MAC_ADDR_FMT")", 1790 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 1791 return; 1792 } 1793 1794 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 1795 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 1796 return; 1797 } 1798 1799 qdf_spin_lock_bh(&peer->peer_info_lock); 1800 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 1801 data_rx = peer->vdev->osif_rx; 1802 else 1803 drop = true; 1804 qdf_spin_unlock_bh(&peer->peer_info_lock); 1805 1806 bufqi = &peer->txrx_peer->bufq_info; 1807 1808 qdf_spin_lock_bh(&bufqi->bufq_lock); 1809 qdf_list_remove_front(&bufqi->cached_bufq, 1810 (qdf_list_node_t **)&cache_buf); 1811 while (cache_buf) { 1812 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 1813 cache_buf->buf); 1814 bufqi->entries -= num_buff_elem; 1815 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1816 if (drop) { 1817 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 1818 cache_buf->buf); 1819 } else { 1820 /* Flush the cached frames to OSIF DEV */ 1821 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 1822 if (status != QDF_STATUS_SUCCESS) 1823 bufqi->dropped = dp_rx_drop_nbuf_list( 1824 peer->vdev->pdev, 1825 cache_buf->buf); 1826 } 1827 qdf_mem_free(cache_buf); 1828 cache_buf = NULL; 1829 qdf_spin_lock_bh(&bufqi->bufq_lock); 1830 qdf_list_remove_front(&bufqi->cached_bufq, 1831 (qdf_list_node_t **)&cache_buf); 1832 } 1833 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1834 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 1835 } 1836 1837 /** 1838 * dp_rx_enqueue_rx() - cache rx frames 1839 * @peer: peer 1840 * @rx_buf_list: cache buffer list 1841 * 1842 * Return: None 1843 */ 1844 static QDF_STATUS 1845 dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list) 1846 { 1847 struct dp_rx_cached_buf *cache_buf; 1848 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; 1849 int num_buff_elem; 1850 QDF_STATUS ret = QDF_STATUS_SUCCESS; 1851 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 1852 struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 1853 DP_MOD_ID_RX); 1854 1855 if (!peer) { 1856 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 1857 rx_buf_list); 1858 return QDF_STATUS_E_INVAL; 1859 } 1860 1861 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 1862 bufqi->dropped); 1863 if (!peer->valid) { 1864 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 1865 rx_buf_list); 1866 ret = QDF_STATUS_E_INVAL; 1867 goto fail; 1868 } 1869 1870 qdf_spin_lock_bh(&bufqi->bufq_lock); 1871 if (bufqi->entries >= bufqi->thresh) { 1872 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 1873 rx_buf_list); 1874 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1875 ret = QDF_STATUS_E_RESOURCES; 1876 goto fail; 1877 } 1878 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1879 1880 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 1881 1882 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 1883 if (!cache_buf) { 1884 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1885 "Failed to allocate buf to cache rx frames"); 1886 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 1887 rx_buf_list); 1888 ret = QDF_STATUS_E_NOMEM; 1889 goto fail; 1890 } 1891 1892 cache_buf->buf = rx_buf_list; 1893 1894 qdf_spin_lock_bh(&bufqi->bufq_lock); 1895 qdf_list_insert_back(&bufqi->cached_bufq, 1896 &cache_buf->node); 1897 bufqi->entries += num_buff_elem; 1898 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1899 1900 fail: 1901 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 1902 return ret; 
1903 } 1904 1905 static inline 1906 bool dp_rx_is_peer_cache_bufq_supported(void) 1907 { 1908 return true; 1909 } 1910 #else 1911 static inline 1912 bool dp_rx_is_peer_cache_bufq_supported(void) 1913 { 1914 return false; 1915 } 1916 1917 static inline QDF_STATUS 1918 dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list) 1919 { 1920 return QDF_STATUS_SUCCESS; 1921 } 1922 #endif 1923 1924 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 1925 /** 1926 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 1927 * using the appropriate call back functions. 1928 * @soc: soc 1929 * @vdev: vdev 1930 * @peer: peer 1931 * @nbuf_head: skb list head 1932 * @nbuf_tail: skb list tail 1933 * 1934 * Return: None 1935 */ 1936 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 1937 struct dp_vdev *vdev, 1938 struct dp_txrx_peer *txrx_peer, 1939 qdf_nbuf_t nbuf_head) 1940 { 1941 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 1942 txrx_peer, nbuf_head))) 1943 return; 1944 1945 /* Function pointer initialized only when FISA is enabled */ 1946 if (vdev->osif_fisa_rx) 1947 /* on failure send it via regular path */ 1948 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 1949 else 1950 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 1951 } 1952 1953 #else 1954 /** 1955 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 1956 * using the appropriate call back functions. 1957 * @soc: soc 1958 * @vdev: vdev 1959 * @txrx_peer: txrx peer 1960 * @nbuf_head: skb list head 1961 * @nbuf_tail: skb list tail 1962 * 1963 * Check the return status of the call back function and drop 1964 * the packets if the return status indicates a failure. 1965 * 1966 * Return: None 1967 */ 1968 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 1969 struct dp_vdev *vdev, 1970 struct dp_txrx_peer *txrx_peer, 1971 qdf_nbuf_t nbuf_head) 1972 { 1973 int num_nbuf = 0; 1974 QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; 1975 1976 /* Function pointer initialized only when FISA is enabled */ 1977 if (vdev->osif_fisa_rx) 1978 /* on failure send it via regular path */ 1979 ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); 1980 else if (vdev->osif_rx) 1981 ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); 1982 1983 if (!QDF_IS_STATUS_SUCCESS(ret_val)) { 1984 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 1985 DP_STATS_INC(soc, rx.err.rejected, num_nbuf); 1986 if (txrx_peer) 1987 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, 1988 num_nbuf); 1989 } 1990 } 1991 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ 1992 1993 /* 1994 * dp_rx_validate_rx_callbacks() - validate rx callbacks 1995 * @soc DP soc 1996 * @vdev: DP vdev handle 1997 * @txrx_peer: pointer to the txrx peer object 1998 * nbuf_head: skb list head 1999 * 2000 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2001 * QDF_STATUS_E_FAILURE 2002 */ 2003 static inline QDF_STATUS 2004 dp_rx_validate_rx_callbacks(struct dp_soc *soc, 2005 struct dp_vdev *vdev, 2006 struct dp_txrx_peer *txrx_peer, 2007 qdf_nbuf_t nbuf_head) 2008 { 2009 int num_nbuf; 2010 2011 if (qdf_unlikely(!vdev || vdev->delete.pending)) { 2012 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); 2013 /* 2014 * This is a special case where vdev is invalid, 2015 * so we cannot know the pdev to which this packet 2016 * belonged. Hence we update the soc rx error stats. 2017 */ 2018 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); 2019 return QDF_STATUS_E_FAILURE; 2020 } 2021 2022 /* 2023 * highly unlikely to have a vdev without a registered rx 2024 * callback function. 
if so let us free the nbuf_list. 2025 */ 2026 if (qdf_unlikely(!vdev->osif_rx)) { 2027 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2028 dp_rx_enqueue_rx(txrx_peer, nbuf_head); 2029 } else { 2030 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2031 nbuf_head); 2032 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2033 vdev->pdev->enhanced_stats_en); 2034 } 2035 return QDF_STATUS_E_FAILURE; 2036 } 2037 2038 return QDF_STATUS_SUCCESS; 2039 } 2040 2041 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2042 struct dp_vdev *vdev, 2043 struct dp_txrx_peer *txrx_peer, 2044 qdf_nbuf_t nbuf_head, 2045 qdf_nbuf_t nbuf_tail) 2046 { 2047 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2048 QDF_STATUS_SUCCESS) 2049 return QDF_STATUS_E_FAILURE; 2050 2051 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2052 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2053 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2054 &nbuf_tail); 2055 } 2056 2057 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2058 2059 return QDF_STATUS_SUCCESS; 2060 } 2061 2062 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2063 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2064 struct dp_vdev *vdev, 2065 struct dp_txrx_peer *txrx_peer, 2066 qdf_nbuf_t nbuf_head, 2067 qdf_nbuf_t nbuf_tail) 2068 { 2069 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2070 QDF_STATUS_SUCCESS) 2071 return QDF_STATUS_E_FAILURE; 2072 2073 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2074 2075 return QDF_STATUS_SUCCESS; 2076 } 2077 #endif 2078 2079 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2080 #ifdef VDEV_PEER_PROTOCOL_COUNT 2081 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 2082 { \ 2083 qdf_nbuf_t nbuf_local; \ 2084 struct dp_txrx_peer *txrx_peer_local; \ 2085 struct dp_vdev *vdev_local = vdev_hdl; \ 2086 do { \ 2087 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2088 break; \ 2089 nbuf_local = nbuf; \ 2090 txrx_peer_local = txrx_peer; \ 2091 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2092 break; \ 2093 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2094 break; \ 2095 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2096 (nbuf_local), \ 2097 (txrx_peer_local), 0, 1); \ 2098 } while (0); \ 2099 } 2100 #else 2101 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) 2102 #endif 2103 2104 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 2105 /** 2106 * dp_rx_rates_stats_update() - update rate stats 2107 * from rx msdu. 2108 * @soc: datapath soc handle 2109 * @nbuf: received msdu buffer 2110 * @rx_tlv_hdr: rx tlv header 2111 * @txrx_peer: datapath txrx_peer handle 2112 * @sgi: Short Guard Interval 2113 * @mcs: Modulation and Coding Set 2114 * @nss: Number of Spatial Streams 2115 * @bw: BandWidth 2116 * @pkt_type: Corresponds to preamble 2117 * 2118 * To be precisely record rates, following factors are considered: 2119 * Exclude specific frames, ARP, DHCP, ssdp, etc. 2120 * Make sure to affect rx throughput as least as possible. 
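 * (In the function body below, these two constraints map to the early
 * return taken when soc->high_throughput is set or when
 * dp_rx_data_is_specific() classifies the frame as one of those
 * special types.)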
2121 * 2122 * Return: void 2123 */ 2124 static void 2125 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2126 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2127 uint32_t sgi, uint32_t mcs, 2128 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2129 { 2130 uint32_t rix; 2131 uint16_t ratecode; 2132 uint32_t avg_rx_rate; 2133 uint32_t ratekbps; 2134 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 2135 2136 if (soc->high_throughput || 2137 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) { 2138 return; 2139 } 2140 2141 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs); 2142 2143 /* here pkt_type corresponds to preamble */ 2144 ratekbps = dp_getrateindex(sgi, 2145 mcs, 2146 nss, 2147 pkt_type, 2148 bw, 2149 punc_mode, 2150 &rix, 2151 &ratecode); 2152 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps); 2153 avg_rx_rate = 2154 dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate, 2155 ratekbps); 2156 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate); 2157 } 2158 #else 2159 static void 2160 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2161 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2162 uint32_t sgi, uint32_t mcs, 2163 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2164 { 2165 } 2166 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 2167 2168 #ifndef QCA_ENHANCED_STATS_SUPPORT 2169 /** 2170 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer 2171 * 2172 * @soc: datapath soc handle 2173 * @nbuf: received msdu buffer 2174 * @rx_tlv_hdr: rx tlv header 2175 * @txrx_peer: datapath txrx_peer handle 2176 * 2177 * Return: void 2178 */ 2179 static inline 2180 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2181 uint8_t *rx_tlv_hdr, 2182 struct dp_txrx_peer *txrx_peer) 2183 { 2184 bool is_ampdu; 2185 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 2186 uint8_t dst_mcs_idx; 2187 2188 /* 2189 * TODO - For KIWI this field is present in ring_desc 2190 * Try to use ring desc instead of tlv. 2191 */ 2192 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2193 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu); 2194 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); 2195 2196 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2197 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2198 tid = qdf_nbuf_get_tid_val(nbuf); 2199 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2200 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2201 rx_tlv_hdr); 2202 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2203 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2204 /* do HW to SW pkt type conversion */ 2205 pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX : 2206 hal_2_dp_pkt_type_map[pkt_type]); 2207 2208 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2209 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2210 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2211 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2212 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1); 2213 /* 2214 * only if nss > 0 and pkt_type is 11N/AC/AX, 2215 * then increase index [nss - 1] in array counter. 
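 * e.g. an MPDU received on two spatial streams (nss == 2) with an
 * 11N/AC/AX preamble increments rx.nss[1].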
2216 */ 2217 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2218 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1); 2219 2220 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1); 2221 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2222 hal_rx_tlv_mic_err_get(soc->hal_soc, 2223 rx_tlv_hdr)); 2224 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2225 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2226 rx_tlv_hdr)); 2227 2228 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); 2229 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1); 2230 2231 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2232 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2233 DP_PEER_EXTD_STATS_INC(txrx_peer, 2234 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2235 1); 2236 2237 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2238 sgi, mcs, nss, bw, pkt_type); 2239 } 2240 #else 2241 static inline 2242 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2243 uint8_t *rx_tlv_hdr, 2244 struct dp_txrx_peer *txrx_peer) 2245 { 2246 } 2247 #endif 2248 2249 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2250 static inline void 2251 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2252 qdf_nbuf_t nbuf) 2253 { 2254 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2255 2256 /* only count stats per lmac for MLO connection*/ 2257 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2258 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2259 txrx_peer->mld_peer); 2260 } 2261 #else 2262 static inline void 2263 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2264 qdf_nbuf_t nbuf) 2265 { 2266 } 2267 #endif 2268 2269 /** 2270 * dp_rx_msdu_stats_update() - update per msdu stats. 2271 * @soc: core txrx main context 2272 * @nbuf: pointer to the first msdu of an amsdu. 2273 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 2274 * @txrx_peer: pointer to the txrx peer object. 2275 * @ring_id: reo dest ring number on which pkt is reaped. 2276 * @tid_stats: per tid rx stats. 2277 * 2278 * update all the per msdu stats for that nbuf. 
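 * This covers the per-REO-ring rcvd_reo byte/packet counts, the
 * amsdu/non-amsdu and retry counters, per-lmac counts, the per-tid
 * msdu_cnt and the multicast/broadcast counters for ethernet-decapped
 * frames, and finally the extended per-peer stats via
 * dp_rx_msdu_extd_stats_update().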
2279 * Return: void 2280 */ 2281 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2282 uint8_t *rx_tlv_hdr, 2283 struct dp_txrx_peer *txrx_peer, 2284 uint8_t ring_id, 2285 struct cdp_tid_rx_stats *tid_stats) 2286 { 2287 bool is_not_amsdu; 2288 struct dp_vdev *vdev = txrx_peer->vdev; 2289 bool enh_flag; 2290 qdf_ether_header_t *eh; 2291 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2292 2293 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2294 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2295 qdf_nbuf_is_rx_chfrag_end(nbuf); 2296 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2297 msdu_len); 2298 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2299 is_not_amsdu); 2300 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu); 2301 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, 2302 qdf_nbuf_is_rx_retry_flag(nbuf)); 2303 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf); 2304 tid_stats->msdu_cnt++; 2305 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2306 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2307 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2308 enh_flag = vdev->pdev->enhanced_stats_en; 2309 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2310 tid_stats->mcast_msdu_cnt++; 2311 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2312 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2313 tid_stats->bcast_msdu_cnt++; 2314 } 2315 } 2316 2317 txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks(); 2318 2319 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer); 2320 } 2321 2322 #ifndef WDS_VENDOR_EXTENSION 2323 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2324 struct dp_vdev *vdev, 2325 struct dp_txrx_peer *txrx_peer) 2326 { 2327 return 1; 2328 } 2329 #endif 2330 2331 #ifdef RX_DESC_DEBUG_CHECK 2332 /** 2333 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr 2334 * corruption 2335 * 2336 * @ring_desc: REO ring descriptor 2337 * @rx_desc: Rx descriptor 2338 * 2339 * Return: NONE 2340 */ 2341 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 2342 hal_ring_desc_t ring_desc, 2343 struct dp_rx_desc *rx_desc) 2344 { 2345 struct hal_buf_info hbi; 2346 2347 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2348 /* Sanity check for possible buffer paddr corruption */ 2349 if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr)) 2350 return QDF_STATUS_SUCCESS; 2351 2352 return QDF_STATUS_E_FAILURE; 2353 } 2354 2355 /** 2356 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer 2357 * out of bound access from H.W 2358 * 2359 * @soc: DP soc 2360 * @pkt_len: Packet length received from H.W 2361 * 2362 * Return: NONE 2363 */ 2364 static inline void 2365 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, 2366 uint32_t pkt_len) 2367 { 2368 struct rx_desc_pool *rx_desc_pool; 2369 2370 rx_desc_pool = &soc->rx_desc_buf[0]; 2371 qdf_assert_always(pkt_len <= rx_desc_pool->buf_size); 2372 } 2373 #else 2374 static inline void 2375 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { } 2376 #endif 2377 2378 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2379 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2380 /** 2381 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received 2382 * during roaming 2383 * @vdev: dp_vdev pointer 2384 * @rx_tlv_hdr: rx tlv header 2385 * @nbuf: pkt skb pointer 2386 * 2387 * This function will check if rx udp data is received from authorised 2388 * roamed peer before peer map indication is 
received from FW after
2389 * roaming. This is needed for VoIP scenarios in which the packet loss
2390 * expected during roaming is minimal.
2391 *
2392 * Return: bool
2393 */
2394 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
2395 uint8_t *rx_tlv_hdr,
2396 qdf_nbuf_t nbuf)
2397 {
2398 char *hdr_desc;
2399 struct ieee80211_frame *wh = NULL;
2400
2401 hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
2402 rx_tlv_hdr);
2403 wh = (struct ieee80211_frame *)hdr_desc;
2404
2405 if (vdev->roaming_peer_status ==
2406 WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
2407 !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
2408 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
2409 qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
2410 return true;
2411
2412 return false;
2413 }
2414 #else
2415 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
2416 uint8_t *rx_tlv_hdr,
2417 qdf_nbuf_t nbuf)
2418 {
2419 return false;
2420 }
2421 #endif
2422 /**
2423 * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
2424 * no corresponding peer is found
2425 * @soc: core txrx main context
2426 * @nbuf: pkt skb pointer
2427 *
2428 * This function will try to deliver some RX special frames to the stack
2429 * even if no matching peer is found. For instance, in the LFR case, some
2430 * EAPOL data will be sent to the host before peer_map is done.
2431 *
2432 * Return: None
2433 */
2434 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
2435 {
2436 uint16_t peer_id;
2437 uint8_t vdev_id;
2438 struct dp_vdev *vdev = NULL;
2439 uint32_t l2_hdr_offset = 0;
2440 uint16_t msdu_len = 0;
2441 uint32_t pkt_len = 0;
2442 uint8_t *rx_tlv_hdr;
2443 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
2444 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
2445
2446 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
2447 if (peer_id > soc->max_peer_id)
2448 goto deliver_fail;
2449
2450 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
2451 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
2452 if (!vdev || vdev->delete.pending || !vdev->osif_rx)
2453 goto deliver_fail;
2454
2455 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
2456 goto deliver_fail;
2457
2458 rx_tlv_hdr = qdf_nbuf_data(nbuf);
2459 l2_hdr_offset =
2460 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
2461
2462 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2463 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
2464 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
2465
2466 qdf_nbuf_set_pktlen(nbuf, pkt_len);
2467 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);
2468
2469 if (dp_rx_is_special_frame(nbuf, frame_mask) ||
2470 dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, nbuf)) {
2471 qdf_nbuf_set_exc_frame(nbuf, 1);
2472 if (QDF_STATUS_SUCCESS !=
2473 vdev->osif_rx(vdev->osif_vdev, nbuf))
2474 goto deliver_fail;
2475 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
2476 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
2477 return;
2478 }
2479
2480 deliver_fail:
2481 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
2482 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2483 dp_rx_nbuf_free(nbuf);
2484 if (vdev)
2485 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
2486 }
2487 #else
2488 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
2489 {
2490 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
2491 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2492 dp_rx_nbuf_free(nbuf);
2493 }
2494 #endif
2495
2496 /**
2497 * dp_rx_srng_get_num_pending() - get number of pending entries
2498 * @hal_soc: hal soc
opaque pointer
2499 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
2500 * @num_entries: number of entries in the hal_ring.
2501 * @near_full: pointer to a boolean. This is set if ring is near full.
2502 *
2503 * The function returns the number of entries in a destination ring which are
2504 * yet to be reaped. The function also checks if the ring is near full.
2505 * If more than half of the ring needs to be reaped, the ring is considered
2506 * approaching full.
2507 * The function uses hal_srng_dst_num_valid_locked to get the number of valid
2508 * entries. It should not be called within an SRNG lock. HW pointer value is
2509 * synced into cached_hp.
2510 *
2511 * Return: Number of pending entries if any
2512 */
2513 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
2514 hal_ring_handle_t hal_ring_hdl,
2515 uint32_t num_entries,
2516 bool *near_full)
2517 {
2518 uint32_t num_pending = 0;
2519
2520 num_pending = hal_srng_dst_num_valid_locked(hal_soc,
2521 hal_ring_hdl,
2522 true);
2523
2524 if (num_entries && (num_pending >= num_entries >> 1))
2525 *near_full = true;
2526 else
2527 *near_full = false;
2528
2529 return num_pending;
2530 }
2531
2532 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2533
2534 #ifdef WLAN_SUPPORT_RX_FISA
2535 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
2536 {
2537 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
2538 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
2539 }
2540 #else
2541 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
2542 {
2543 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
2544 }
2545 #endif
2546
2547 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2548
2549 #ifdef DP_RX_DROP_RAW_FRM
2550 /**
2551 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
2552 * @nbuf: pkt skb pointer
2553 *
2554 * Return: true - raw frame, dropped
2555 * false - not raw frame, do nothing
2556 */
2557 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2558 {
2559 if (qdf_nbuf_is_raw_frame(nbuf)) {
2560 dp_rx_nbuf_free(nbuf);
2561 return true;
2562 }
2563
2564 return false;
2565 }
2566 #endif
2567
2568 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2569 /**
2570 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
2571 * @soc: Datapath soc structure
2572 * @ring_num: REO ring number
2573 * @ring_desc: REO ring descriptor
2574 *
2575 * Returns: None
2576 */
2577 void
2578 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
2579 hal_ring_desc_t ring_desc)
2580 {
2581 struct dp_buf_info_record *record;
2582 struct hal_buf_info hbi;
2583 uint32_t idx;
2584
2585 if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
2586 return;
2587
2588 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2589
2590 /* buffer_addr_info is the first element of ring_desc */
2591 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
2592 &hbi);
2593
2594 idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
2595 DP_RX_HIST_MAX);
2596
2597 /* No NULL check needed for record since it's an array */
2598 record = &soc->rx_ring_history[ring_num]->entry[idx];
2599
2600 record->timestamp = qdf_get_log_timestamp();
2601 record->hbi.paddr = hbi.paddr;
2602 record->hbi.sw_cookie = hbi.sw_cookie;
2603 record->hbi.rbm = hbi.rbm;
2604 }
2605 #endif
2606
2607 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2608 /**
2609 * dp_rx_update_stats() - Update soc level rx packet count
2610 * @soc: DP soc handle
2611 * @nbuf: nbuf received
2612 *
2613 * Returns: none
2614 */
2615 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
2616 {
2617 DP_STATS_INC_PKT(soc, rx.ingress, 1,
2618 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2619 }
2620 #endif
2621
2622 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
2623 /**
2624 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
2625 * @soc: dp_soc handle
2626 * @pdev: dp_pdev handle
2627 * @peer_id: peer_id of the peer for which completion came
2628 * @is_offload: offload flag
2629 * @netbuf: Buffer pointer
2630 *
2631 * This function is used to deliver rx packet to packet capture
2632 */
2633 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
2634 uint16_t peer_id, uint32_t is_offload,
2635 qdf_nbuf_t netbuf)
2636 {
2637 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
2638 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
2639 peer_id, is_offload, pdev->pdev_id);
2640 }
2641
2642 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2643 uint32_t is_offload)
2644 {
2645 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
2646 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
2647 soc, nbuf, HTT_INVALID_VDEV,
2648 is_offload, 0);
2649 }
2650 #endif
2651
2652 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2653
2654 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2655 {
2656 QDF_STATUS ret;
2657
2658 if (vdev->osif_rx_flush) {
2659 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2660 if (!QDF_IS_STATUS_SUCCESS(ret)) {
2661 dp_err("Failed to flush rx pkts for vdev %d\n",
2662 vdev->vdev_id);
2663 return ret;
2664 }
2665 }
2666
2667 return QDF_STATUS_SUCCESS;
2668 }
2669
2670 static QDF_STATUS
2671 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
2672 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
2673 struct dp_pdev *dp_pdev,
2674 struct rx_desc_pool *rx_desc_pool)
2675 {
2676 QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2677
2678 (nbuf_frag_info_t->virt_addr).nbuf =
2679 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
2680 RX_BUFFER_RESERVATION,
2681 rx_desc_pool->buf_alignment, FALSE);
2682 if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
2683 dp_err("nbuf alloc failed");
2684 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2685 return ret;
2686 }
2687
2688 ret =
qdf_nbuf_map_nbytes_single(dp_soc->osdev, 2689 (nbuf_frag_info_t->virt_addr).nbuf, 2690 QDF_DMA_FROM_DEVICE, 2691 rx_desc_pool->buf_size); 2692 2693 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 2694 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 2695 dp_err("nbuf map failed"); 2696 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 2697 return ret; 2698 } 2699 2700 nbuf_frag_info_t->paddr = 2701 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 2702 2703 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 2704 &nbuf_frag_info_t->paddr, 2705 rx_desc_pool); 2706 if (ret == QDF_STATUS_E_FAILURE) { 2707 dp_err("nbuf check x86 failed"); 2708 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 2709 return ret; 2710 } 2711 2712 return QDF_STATUS_SUCCESS; 2713 } 2714 2715 QDF_STATUS 2716 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 2717 struct dp_srng *dp_rxdma_srng, 2718 struct rx_desc_pool *rx_desc_pool, 2719 uint32_t num_req_buffers) 2720 { 2721 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 2722 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 2723 union dp_rx_desc_list_elem_t *next; 2724 void *rxdma_ring_entry; 2725 qdf_dma_addr_t paddr; 2726 struct dp_rx_nbuf_frag_info *nf_info; 2727 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 2728 uint32_t buffer_index, nbuf_ptrs_per_page; 2729 qdf_nbuf_t nbuf; 2730 QDF_STATUS ret; 2731 int page_idx, total_pages; 2732 union dp_rx_desc_list_elem_t *desc_list = NULL; 2733 union dp_rx_desc_list_elem_t *tail = NULL; 2734 int sync_hw_ptr = 1; 2735 uint32_t num_entries_avail; 2736 2737 if (qdf_unlikely(!dp_pdev)) { 2738 dp_rx_err("%pK: pdev is null for mac_id = %d", 2739 dp_soc, mac_id); 2740 return QDF_STATUS_E_FAILURE; 2741 } 2742 2743 if (qdf_unlikely(!rxdma_srng)) { 2744 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 2745 return QDF_STATUS_E_FAILURE; 2746 } 2747 2748 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 2749 2750 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 2751 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 2752 rxdma_srng, 2753 sync_hw_ptr); 2754 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 2755 2756 if (!num_entries_avail) { 2757 dp_err("Num of available entries is zero, nothing to do"); 2758 return QDF_STATUS_E_NOMEM; 2759 } 2760 2761 if (num_entries_avail < num_req_buffers) 2762 num_req_buffers = num_entries_avail; 2763 2764 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 2765 num_req_buffers, &desc_list, &tail); 2766 if (!nr_descs) { 2767 dp_err("no free rx_descs in freelist"); 2768 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 2769 return QDF_STATUS_E_NOMEM; 2770 } 2771 2772 dp_debug("got %u RX descs for driver attach", nr_descs); 2773 2774 /* 2775 * Try to allocate pointers to the nbuf one page at a time. 2776 * Take pointers that can fit in one page of memory and 2777 * iterate through the total descriptors that need to be 2778 * allocated in order of pages. Reuse the pointers that 2779 * have been allocated to fit in one page across each 2780 * iteration to index into the nbuf. 
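 * For example, assuming DP_BLOCKMEM_SIZE is one 4 KB page and
 * sizeof(*nf_info) is 16 bytes, each iteration covers 256 descriptors,
 * so 1000 descriptors need four pages (three full pages plus one for
 * the remainder); the same page of pointers is zeroed and reused on
 * every iteration.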
2781 */ 2782 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 2783 2784 /* 2785 * Add an extra page to store the remainder if any 2786 */ 2787 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 2788 total_pages++; 2789 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 2790 if (!nf_info) { 2791 dp_err("failed to allocate nbuf array"); 2792 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 2793 QDF_BUG(0); 2794 return QDF_STATUS_E_NOMEM; 2795 } 2796 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 2797 2798 for (page_idx = 0; page_idx < total_pages; page_idx++) { 2799 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 2800 2801 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 2802 /* 2803 * The last page of buffer pointers may not be required 2804 * completely based on the number of descriptors. Below 2805 * check will ensure we are allocating only the 2806 * required number of descriptors. 2807 */ 2808 if (nr_nbuf_total >= nr_descs) 2809 break; 2810 /* Flag is set while pdev rx_desc_pool initialization */ 2811 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 2812 ret = dp_pdev_frag_alloc_and_map(dp_soc, 2813 &nf_info[nr_nbuf], dp_pdev, 2814 rx_desc_pool); 2815 else 2816 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 2817 &nf_info[nr_nbuf], dp_pdev, 2818 rx_desc_pool); 2819 if (QDF_IS_STATUS_ERROR(ret)) 2820 break; 2821 2822 nr_nbuf_total++; 2823 } 2824 2825 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 2826 2827 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 2828 rxdma_ring_entry = 2829 hal_srng_src_get_next(dp_soc->hal_soc, 2830 rxdma_srng); 2831 qdf_assert_always(rxdma_ring_entry); 2832 2833 next = desc_list->next; 2834 paddr = nf_info[buffer_index].paddr; 2835 nbuf = nf_info[buffer_index].virt_addr.nbuf; 2836 2837 /* Flag is set while pdev rx_desc_pool initialization */ 2838 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 2839 dp_rx_desc_frag_prep(&desc_list->rx_desc, 2840 &nf_info[buffer_index]); 2841 else 2842 dp_rx_desc_prep(&desc_list->rx_desc, 2843 &nf_info[buffer_index]); 2844 desc_list->rx_desc.in_use = 1; 2845 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 2846 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 2847 __func__, 2848 RX_DESC_REPLENISHED); 2849 2850 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc ,rxdma_ring_entry, paddr, 2851 desc_list->rx_desc.cookie, 2852 rx_desc_pool->owner); 2853 dp_ipa_handle_rx_buf_smmu_mapping( 2854 dp_soc, nbuf, 2855 rx_desc_pool->buf_size, 2856 true); 2857 2858 desc_list = next; 2859 } 2860 2861 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 2862 rxdma_srng, nr_nbuf, nr_nbuf); 2863 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 2864 } 2865 2866 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 2867 qdf_mem_free(nf_info); 2868 2869 if (!nr_nbuf_total) { 2870 dp_err("No nbuf's allocated"); 2871 QDF_BUG(0); 2872 return QDF_STATUS_E_RESOURCES; 2873 } 2874 2875 /* No need to count the number of bytes received during replenish. 2876 * Therefore set replenish.pkts.bytes as 0. 2877 */ 2878 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 2879 2880 return QDF_STATUS_SUCCESS; 2881 } 2882 2883 qdf_export_symbol(dp_pdev_rx_buffers_attach); 2884 2885 /** 2886 * dp_rx_enable_mon_dest_frag() - Enable frag processing for 2887 * monitor destination ring via frag. 2888 * 2889 * Enable this flag only for monitor destination buffer processing 2890 * if DP_RX_MON_MEM_FRAG feature is enabled. 
2891 * If flag is set then frag based function will be called for alloc, 2892 * map, prep desc and free ops for desc buffer else normal nbuf based 2893 * function will be called. 2894 * 2895 * @rx_desc_pool: Rx desc pool 2896 * @is_mon_dest_desc: Is it for monitor dest buffer 2897 * 2898 * Return: None 2899 */ 2900 #ifdef DP_RX_MON_MEM_FRAG 2901 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 2902 bool is_mon_dest_desc) 2903 { 2904 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 2905 if (is_mon_dest_desc) 2906 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 2907 } 2908 #else 2909 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 2910 bool is_mon_dest_desc) 2911 { 2912 rx_desc_pool->rx_mon_dest_frag_enable = false; 2913 if (is_mon_dest_desc) 2914 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 2915 } 2916 #endif 2917 2918 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 2919 2920 /* 2921 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor 2922 * pool 2923 * 2924 * @pdev: core txrx pdev context 2925 * 2926 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2927 * QDF_STATUS_E_NOMEM 2928 */ 2929 QDF_STATUS 2930 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 2931 { 2932 struct dp_soc *soc = pdev->soc; 2933 uint32_t rxdma_entries; 2934 uint32_t rx_sw_desc_num; 2935 struct dp_srng *dp_rxdma_srng; 2936 struct rx_desc_pool *rx_desc_pool; 2937 uint32_t status = QDF_STATUS_SUCCESS; 2938 int mac_for_pdev; 2939 2940 mac_for_pdev = pdev->lmac_id; 2941 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 2942 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 2943 soc, mac_for_pdev); 2944 return status; 2945 } 2946 2947 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 2948 rxdma_entries = dp_rxdma_srng->num_entries; 2949 2950 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 2951 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 2952 2953 rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE; 2954 status = dp_rx_desc_pool_alloc(soc, 2955 rx_sw_desc_num, 2956 rx_desc_pool); 2957 if (status != QDF_STATUS_SUCCESS) 2958 return status; 2959 2960 return status; 2961 } 2962 2963 /* 2964 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool 2965 * 2966 * @pdev: core txrx pdev context 2967 */ 2968 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 2969 { 2970 int mac_for_pdev = pdev->lmac_id; 2971 struct dp_soc *soc = pdev->soc; 2972 struct rx_desc_pool *rx_desc_pool; 2973 2974 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 2975 2976 dp_rx_desc_pool_free(soc, rx_desc_pool); 2977 } 2978 2979 /* 2980 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors 2981 * 2982 * @pdev: core txrx pdev context 2983 * 2984 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2985 * QDF_STATUS_E_NOMEM 2986 */ 2987 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 2988 { 2989 int mac_for_pdev = pdev->lmac_id; 2990 struct dp_soc *soc = pdev->soc; 2991 uint32_t rxdma_entries; 2992 uint32_t rx_sw_desc_num; 2993 struct dp_srng *dp_rxdma_srng; 2994 struct rx_desc_pool *rx_desc_pool; 2995 2996 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 2997 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 2998 /** 2999 * If NSS is enabled, rx_desc_pool is already filled. 3000 * Hence, just disable desc_pool frag flag. 
3001 */ 3002 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3003 3004 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3005 soc, mac_for_pdev); 3006 return QDF_STATUS_SUCCESS; 3007 } 3008 3009 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3010 return QDF_STATUS_E_NOMEM; 3011 3012 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3013 rxdma_entries = dp_rxdma_srng->num_entries; 3014 3015 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3016 3017 rx_sw_desc_num = 3018 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3019 3020 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3021 rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; 3022 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3023 /* Disable monitor dest processing via frag */ 3024 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3025 3026 dp_rx_desc_pool_init(soc, mac_for_pdev, 3027 rx_sw_desc_num, rx_desc_pool); 3028 return QDF_STATUS_SUCCESS; 3029 } 3030 3031 /* 3032 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools 3033 * @pdev: core txrx pdev context 3034 * 3035 * This function resets the freelist of rx descriptors and destroys locks 3036 * associated with this list of descriptors. 3037 */ 3038 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3039 { 3040 int mac_for_pdev = pdev->lmac_id; 3041 struct dp_soc *soc = pdev->soc; 3042 struct rx_desc_pool *rx_desc_pool; 3043 3044 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3045 3046 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3047 } 3048 3049 /* 3050 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring 3051 * 3052 * @pdev: core txrx pdev context 3053 * 3054 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3055 * QDF_STATUS_E_NOMEM 3056 */ 3057 QDF_STATUS 3058 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3059 { 3060 int mac_for_pdev = pdev->lmac_id; 3061 struct dp_soc *soc = pdev->soc; 3062 struct dp_srng *dp_rxdma_srng; 3063 struct rx_desc_pool *rx_desc_pool; 3064 uint32_t rxdma_entries; 3065 3066 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3067 rxdma_entries = dp_rxdma_srng->num_entries; 3068 3069 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3070 3071 /* Initialize RX buffer pool which will be 3072 * used during low memory conditions 3073 */ 3074 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3075 3076 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3077 dp_rxdma_srng, 3078 rx_desc_pool, 3079 rxdma_entries - 1); 3080 } 3081 3082 /* 3083 * dp_rx_pdev_buffers_free - Free nbufs (skbs) 3084 * 3085 * @pdev: core txrx pdev context 3086 */ 3087 void 3088 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3089 { 3090 int mac_for_pdev = pdev->lmac_id; 3091 struct dp_soc *soc = pdev->soc; 3092 struct rx_desc_pool *rx_desc_pool; 3093 3094 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3095 3096 dp_rx_desc_nbuf_free(soc, rx_desc_pool); 3097 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3098 } 3099 3100 #ifdef DP_RX_SPECIAL_FRAME_NEED 3101 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3102 struct dp_txrx_peer *txrx_peer, 3103 qdf_nbuf_t nbuf, uint32_t frame_mask, 3104 uint8_t *rx_tlv_hdr) 3105 { 3106 uint32_t l2_hdr_offset = 0; 3107 uint16_t msdu_len = 0; 3108 uint32_t skip_len; 3109 3110 l2_hdr_offset = 3111 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3112 3113 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3114 skip_len = l2_hdr_offset; 3115 } else { 3116 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3117 skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size; 
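		/*
		 * A linear (non-frag) nbuf still has the RX TLVs at its head,
		 * so account for both the TLV size and the L3 padding when
		 * setting the packet length below; the same skip_len is then
		 * pulled off the head further down.
		 */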
3118 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3119 } 3120 3121 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3122 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3123 qdf_nbuf_pull_head(nbuf, skip_len); 3124 3125 if (txrx_peer->vdev) { 3126 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, 3127 QDF_TX_RX_STATUS_OK); 3128 } 3129 3130 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3131 dp_info("special frame, mpdu sn 0x%x", 3132 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 3133 qdf_nbuf_set_exc_frame(nbuf, 1); 3134 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, 3135 nbuf, NULL); 3136 return true; 3137 } 3138 3139 return false; 3140 } 3141 #endif 3142 3143 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET 3144 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 3145 uint8_t *rx_tlv, 3146 qdf_nbuf_t nbuf) 3147 { 3148 struct dp_soc *soc; 3149 3150 if (!pdev->is_first_wakeup_packet) 3151 return; 3152 3153 soc = pdev->soc; 3154 if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) { 3155 qdf_nbuf_mark_wakeup_frame(nbuf); 3156 dp_info("First packet after WOW Wakeup rcvd"); 3157 } 3158 } 3159 #endif 3160
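/*
 * Illustrative sketch only (not part of the driver): one plausible ordering
 * of the per-pdev RX descriptor pool and buffer APIs defined above, as a
 * hypothetical caller might invoke them during pdev attach and detach. The
 * dp_rx_pdev_attach_sketch()/dp_rx_pdev_detach_sketch() names are made up
 * for illustration and error handling is kept minimal.
 *
 *	static QDF_STATUS dp_rx_pdev_attach_sketch(struct dp_pdev *pdev)
 *	{
 *		QDF_STATUS status;
 *
 *		status = dp_rx_pdev_desc_pool_alloc(pdev);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		status = dp_rx_pdev_desc_pool_init(pdev);
 *		if (QDF_IS_STATUS_ERROR(status)) {
 *			dp_rx_pdev_desc_pool_free(pdev);
 *			return status;
 *		}
 *
 *		return dp_rx_pdev_buffers_alloc(pdev);
 *	}
 *
 *	static void dp_rx_pdev_detach_sketch(struct dp_pdev *pdev)
 *	{
 *		dp_rx_pdev_buffers_free(pdev);
 *		dp_rx_pdev_desc_pool_deinit(pdev);
 *		dp_rx_pdev_desc_pool_free(pdev);
 *	}
 */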