/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include "enet.h"

#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */

#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
	qdf_assert_always(0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	uint8_t return_buffer_manager;

	if (qdf_unlikely(!rx_desc)) {
		/*
		 * This is an unlikely case where the cookie obtained
		 * from the ring_desc is invalid and hence we are not
		 * able to find the corresponding rx_desc
		 */
		goto fail;
	}

	return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
	if (qdf_unlikely(!(return_buffer_manager ==
			 HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
			 return_buffer_manager ==
			 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
		goto fail;
	}

	return QDF_STATUS_SUCCESS;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	dp_err_rl("Sanity failed for ring Desc:");
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
				ring_desc);

	return QDF_STATUS_E_NULL_VALUE;
}
#endif

uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full)
{
	uint32_t num_pending = 0;

	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
						    hal_ring_hdl,
						    true);

	if (num_entries && (num_pending >= num_entries >> 1))
		*near_full = true;
	else
		*near_full = false;

	return num_pending;
}

#ifdef RX_DESC_DEBUG_CHECK
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer
 *				      out of bound access from H.W
 *
 * @soc: DP soc
 * @pkt_len: Packet length received from H.W
 *
 * Return: NONE
 */
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
				 uint32_t pkt_len)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[0];
	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
}
#else
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
#endif

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
	struct dp_buf_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
				  &hbi);

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = hbi.paddr;
	record->hbi.sw_cookie = hbi.sw_cookie;
	record->hbi.rbm = hbi.rbm;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc;

	if (!pdev->is_first_wakeup_packet)
		return;

	soc = pdev->soc;
	if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
		qdf_nbuf_mark_wakeup_frame(nbuf);
dp_info("First packet after WOW Wakeup rcvd"); 215 } 216 } 217 #endif 218 219 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 220 #endif /* WLAN_SOFTUMAC_SUPPORT */ 221 222 /** 223 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map 224 * 225 * @dp_soc: struct dp_soc * 226 * @nbuf_frag_info_t: nbuf frag info 227 * @dp_pdev: struct dp_pdev * 228 * @rx_desc_pool: Rx desc pool 229 * 230 * Return: QDF_STATUS 231 */ 232 #ifdef DP_RX_MON_MEM_FRAG 233 static inline QDF_STATUS dp_pdev_frag_alloc_and_map(struct dp_soc * dp_soc,struct dp_rx_nbuf_frag_info * nbuf_frag_info_t,struct dp_pdev * dp_pdev,struct rx_desc_pool * rx_desc_pool)234 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 235 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 236 struct dp_pdev *dp_pdev, 237 struct rx_desc_pool *rx_desc_pool) 238 { 239 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 240 241 (nbuf_frag_info_t->virt_addr).vaddr = 242 qdf_frag_alloc(&rx_desc_pool->pf_cache, rx_desc_pool->buf_size); 243 244 if (!((nbuf_frag_info_t->virt_addr).vaddr)) { 245 dp_err("Frag alloc failed"); 246 DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1); 247 return QDF_STATUS_E_NOMEM; 248 } 249 250 ret = qdf_mem_map_page(dp_soc->osdev, 251 (nbuf_frag_info_t->virt_addr).vaddr, 252 QDF_DMA_FROM_DEVICE, 253 rx_desc_pool->buf_size, 254 &nbuf_frag_info_t->paddr); 255 256 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 257 qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr); 258 dp_err("Frag map failed"); 259 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 260 return QDF_STATUS_E_FAULT; 261 } 262 263 return QDF_STATUS_SUCCESS; 264 } 265 #else 266 static inline QDF_STATUS dp_pdev_frag_alloc_and_map(struct dp_soc * dp_soc,struct dp_rx_nbuf_frag_info * nbuf_frag_info_t,struct dp_pdev * dp_pdev,struct rx_desc_pool * rx_desc_pool)267 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 268 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 269 struct dp_pdev *dp_pdev, 270 struct rx_desc_pool *rx_desc_pool) 271 { 272 return QDF_STATUS_SUCCESS; 273 } 274 #endif /* DP_RX_MON_MEM_FRAG */ 275 276 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 277 /** 278 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history 279 * @soc: Datapath soc structure 280 * @ring_num: Refill ring number 281 * @hal_ring_hdl: 282 * @num_req: number of buffers requested for refill 283 * @num_refill: number of buffers refilled 284 * 285 * Return: None 286 */ 287 static inline void dp_rx_refill_ring_record_entry(struct dp_soc * soc,uint8_t ring_num,hal_ring_handle_t hal_ring_hdl,uint32_t num_req,uint32_t num_refill)288 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 289 hal_ring_handle_t hal_ring_hdl, 290 uint32_t num_req, uint32_t num_refill) 291 { 292 struct dp_refill_info_record *record; 293 uint32_t idx; 294 uint32_t tp; 295 uint32_t hp; 296 297 if (qdf_unlikely(ring_num >= MAX_PDEV_CNT || 298 !soc->rx_refill_ring_history[ring_num])) 299 return; 300 301 idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index, 302 DP_RX_REFILL_HIST_MAX); 303 304 /* No NULL check needed for record since its an array */ 305 record = &soc->rx_refill_ring_history[ring_num]->entry[idx]; 306 307 hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp); 308 record->timestamp = qdf_get_log_timestamp(); 309 record->num_req = num_req; 310 record->num_refill = num_refill; 311 record->hp = hp; 312 record->tp = tp; 313 } 314 #else 315 static inline void dp_rx_refill_ring_record_entry(struct dp_soc * soc,uint8_t ring_num,hal_ring_handle_t hal_ring_hdl,uint32_t 
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
}
#endif

/**
 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and
 *					    map
 * @dp_soc: struct dp_soc *
 * @mac_id: Mac id
 * @num_entries_avail: num_entries_avail
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
				     uint32_t mac_id,
				     uint32_t num_entries_avail,
				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
				     struct dp_pdev *dp_pdev,
				     struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
					     mac_id,
					     rx_desc_pool,
					     num_entries_avail);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
					 nbuf_frag_info_t);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		dp_rx_buffer_pool_nbuf_free(dp_soc,
			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)(
					  (nbuf_frag_info_t->virt_addr).nbuf),
					  rx_desc_pool->buf_size,
					  true, __func__, __LINE__);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return QDF_STATUS_E_ADDRNOTAVAIL;
	}

	return QDF_STATUS_SUCCESS;
}

#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    bool force_replenish)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next = NULL;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	uint16_t num_entries_avail = 0;
	uint16_t num_alloc_desc = 0;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 0;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

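	/*
	 * Example (ring size chosen only for illustration): with a
	 * 4096-entry refill ring, the low-threshold replenish below
	 * proceeds only when at least 3072 entries (3/4 of the ring)
	 * are free, i.e. the ring is mostly depleted of posted
	 * buffers; otherwise it bails out unless force_replenish
	 * is set.
	 */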
	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
		    soc, num_entries_avail);

	if (qdf_unlikely(!force_replenish && (num_entries_avail <
			 ((dp_rxdma_srng->num_entries * 3) / 4)))) {
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_FAILURE;
	}

	DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
						  rx_desc_pool,
						  num_entries_avail,
						  &desc_list,
						  &tail);

	if (!num_alloc_desc) {
		dp_rx_err("%pK: no free rx_descs in freelist", soc);
		DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
			     num_entries_avail);
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_NOMEM;
	}

	for (count = 0; count < num_alloc_desc; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.paddr_buf_start = paddr;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));
	/*
	 * add any available free desc back to the free list
	 */
	if (desc_list)
		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_nbuf_t nbuf_next;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	/* Allocate required number of nbufs */
	for (count = 0; count < num_req_buffers; count++) {
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			/* Update num_req_buffers to nbufs allocated count */
			num_req_buffers = count;
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		QDF_NBUF_CB_PADDR(nbuf) = paddr;
		DP_RX_LIST_APPEND(nbuf_head,
				  nbuf_tail,
				  nbuf);
	}
	qdf_dsb();

	nbuf = nbuf_head;
	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		nbuf_next = nbuf->next;
		qdf_prefetch(next);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

		if (!rxdma_ring_entry)
			break;

		(*desc_list)->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&(*desc_list)->rx_desc, nbuf);
		(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
		(*desc_list)->rx_desc.paddr_buf_start = QDF_NBUF_CB_PADDR(nbuf);
		(*desc_list)->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     QDF_NBUF_CB_PADDR(nbuf),
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
		nbuf = nbuf_next;
	}
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	while (nbuf) {
		nbuf_next = nbuf->next;
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		qdf_nbuf_free(nbuf);
		nbuf = nbuf_next;
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_SUPPORT_PPEDS
QDF_STATUS
__dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
			      struct dp_srng *dp_rxdma_srng,
			      struct rx_desc_pool *rx_desc_pool,
			      uint32_t num_req_buffers,
			      union dp_rx_desc_list_elem_t **desc_list,
			      union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	union dp_rx_desc_list_elem_t *cur;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		qdf_prefetch(next);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

		if (!rxdma_ring_entry)
			break;

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		(*desc_list)->rx_desc.nbuf = (*desc_list)->rx_desc.reuse_nbuf;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     (*desc_list)->rx_desc.paddr_buf_start,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));

	/*
	 * add any available free desc back to the free list
	 */
	cur = *desc_list;
	for ( ; count < num_req_buffers; count++) {
		next = cur->next;
		qdf_prefetch(next);

		nbuf = cur->rx_desc.reuse_nbuf;

		cur->rx_desc.nbuf = NULL;
		cur->rx_desc.in_use = 0;
		cur->rx_desc.has_reuse_nbuf = false;
		cur->rx_desc.reuse_nbuf = NULL;
		if (!nbuf->recycled_for_ds)
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);

		nbuf->recycled_for_ds = 0;
		nbuf->fast_recycled = 0;
		qdf_nbuf_free(nbuf);
		cur = next;
	}

	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
#endif

QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	uint32_t nr_descs = 0;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    soc, num_req_buffers);

	nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < nr_descs; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);
		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
		if (!rxdma_ring_entry) {
			qdf_nbuf_free(nbuf);
			break;
		}

		desc_list->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.paddr_buf_start = paddr;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
next; 784 } 785 qdf_dsb(); 786 hal_srng_access_end(soc->hal_soc, rxdma_srng); 787 788 /* No need to count the number of bytes received during replenish. 789 * Therefore set replenish.pkts.bytes as 0. 790 */ 791 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 792 793 return QDF_STATUS_SUCCESS; 794 } 795 #endif 796 797 #ifdef DP_UMAC_HW_RESET_SUPPORT 798 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 799 static inline dp_rx_rep_retrieve_paddr(struct dp_soc * dp_soc,qdf_nbuf_t nbuf,uint32_t buf_size)800 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 801 uint32_t buf_size) 802 { 803 return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size); 804 } 805 #else 806 static inline dp_rx_rep_retrieve_paddr(struct dp_soc * dp_soc,qdf_nbuf_t nbuf,uint32_t buf_size)807 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 808 uint32_t buf_size) 809 { 810 return qdf_nbuf_get_frag_paddr(nbuf, 0); 811 } 812 #endif 813 814 /** 815 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time 816 * @soc: core txrx main context 817 * @dp_rxdma_srng: rxdma ring 818 * @rx_desc_pool: rx descriptor pool 819 * @rx_desc:rx descriptor 820 * 821 * Return: void 822 */ 823 static inline dp_rx_desc_replenish(struct dp_soc * soc,struct dp_srng * dp_rxdma_srng,struct rx_desc_pool * rx_desc_pool,struct dp_rx_desc * rx_desc)824 void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng, 825 struct rx_desc_pool *rx_desc_pool, 826 struct dp_rx_desc *rx_desc) 827 { 828 void *rxdma_srng; 829 void *rxdma_ring_entry; 830 qdf_dma_addr_t paddr; 831 832 rxdma_srng = dp_rxdma_srng->hal_srng; 833 834 /* No one else should be accessing the srng at this point */ 835 hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng); 836 837 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 838 839 qdf_assert_always(rxdma_ring_entry); 840 rx_desc->in_err_state = 0; 841 842 paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf, 843 rx_desc_pool->buf_size); 844 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr, 845 rx_desc->cookie, rx_desc_pool->owner); 846 847 hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng); 848 } 849 dp_rx_desc_reuse(struct dp_soc * soc,qdf_nbuf_t * nbuf_list)850 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list) 851 { 852 int mac_id, i, j; 853 union dp_rx_desc_list_elem_t *head = NULL; 854 union dp_rx_desc_list_elem_t *tail = NULL; 855 856 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 857 struct dp_srng *dp_rxdma_srng = 858 &soc->rx_refill_buf_ring[mac_id]; 859 struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id]; 860 uint32_t rx_sw_desc_num = rx_desc_pool->pool_size; 861 /* Only fill up 1/3 of the ring size */ 862 uint32_t num_req_decs; 863 864 if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng || 865 !rx_desc_pool->array) 866 continue; 867 868 num_req_decs = dp_rxdma_srng->num_entries / 3; 869 870 for (i = 0, j = 0; i < rx_sw_desc_num; i++) { 871 struct dp_rx_desc *rx_desc = 872 (struct dp_rx_desc *)&rx_desc_pool->array[i]; 873 874 if (rx_desc->in_use) { 875 if (j < (dp_rxdma_srng->num_entries - 1)) { 876 dp_rx_desc_replenish(soc, dp_rxdma_srng, 877 rx_desc_pool, 878 rx_desc); 879 } else { 880 dp_rx_nbuf_unmap(soc, rx_desc, 0); 881 rx_desc->unmapped = 0; 882 883 rx_desc->nbuf->next = *nbuf_list; 884 *nbuf_list = rx_desc->nbuf; 885 886 dp_rx_add_to_free_desc_list(&head, 887 &tail, 888 rx_desc); 889 } 890 j++; 891 } 892 } 893 894 if (head) 895 
			dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
							 mac_id, rx_desc_pool);

		/* If the number of descs in use was smaller, we need to
		 * replenish the ring with some buffers.
		 */
		head = NULL;
		tail = NULL;

		if (j < (num_req_decs - 1))
			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						((num_req_decs - 1) - j),
						&head, &tail, true);
	}
}
#endif

QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only, bool force_replenish,
				     const char *func_name)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	uint32_t extra_buffers;
	int sync_hw_ptr = 1;
	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;
	union dp_rx_desc_list_elem_t *desc_list_append = NULL;
	union dp_rx_desc_list_elem_t *tail_append = NULL;
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_verbose_debug("%pK: requested %d buffers for replenish",
			 dp_soc, num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
			 dp_soc, num_entries_avail);

	if (!req_only && !(*desc_list) &&
	    (force_replenish || (num_entries_avail >
	     ((dp_rxdma_srng->num_entries * 3) / 4)))) {
		num_req_buffers = num_entries_avail;
		DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	} else if ((*desc_list) &&
		   dp_rxdma_srng->num_entries - num_entries_avail <
		   CRITICAL_BUFFER_THRESHOLD) {
		/* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if
		 * the total number of buffers requested after adding the
		 * extra buffers is less than or equal to the number of
		 * entries available; else set it to the maximum possible
		 * additional buffers available at that moment.
		 */
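		/* Worked example (numbers are only illustrative): if
		 * CRITICAL_BUFFER_THRESHOLD were 64 and the caller asked
		 * for 100 buffers with 120 ring entries available, then
		 * 100 + 64 > 120, so extra_buffers = 120 - 100 = 20;
		 * with 200 entries available, extra_buffers = 64.
		 */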
		extra_buffers =
			((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) >
			 num_entries_avail) ?
			(num_entries_avail - num_req_buffers) :
			CRITICAL_BUFFER_THRESHOLD;
		/* Append some free descriptors to tail */
		num_alloc_desc =
			dp_rx_get_free_desc_list(dp_soc, mac_id,
						 rx_desc_pool,
						 extra_buffers,
						 &desc_list_append,
						 &tail_append);

		if (num_alloc_desc) {
			temp_list = *desc_list;
			*desc_list = desc_list_append;
			tail_append->next = temp_list;
			num_req_buffers += num_alloc_desc;

			DP_STATS_DEC(dp_pdev,
				     replenish.free_list,
				     num_alloc_desc);
		} else
			dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_verbose_debug("%pK: %d rx desc allocated", dp_soc,
				 num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			ret = dp_pdev_frag_alloc_and_map(dp_soc,
							 &nbuf_frag_info,
							 dp_pdev,
							 rx_desc_pool);
		else
			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
							mac_id,
							num_entries_avail,
							&nbuf_frag_info,
							dp_pdev, rx_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
					     &nbuf_frag_info);
		else
			dp_rx_desc_prep(&((*desc_list)->rx_desc),
					&nbuf_frag_info);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
					   func_name, RX_DESC_REPLENISHED);
		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
				 nbuf_frag_info.virt_addr.nbuf,
				 (unsigned long long)(nbuf_frag_info.paddr),
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
					     nbuf_frag_info.paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
				       num_req_buffers, count);

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_schedule_refill_thread(dp_soc);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 count, num_desc_to_free);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);

void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
					      qdf_nbuf_len(nbuf), link_id);

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/**
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical frame
 * and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the
 * nbuf. The code for marking which frames are CRITICAL is accessed via a
 * callback. EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical
 * frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
	if (vdev->tx_classify_critical_pkt_cb)
		vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif

bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats,
			     uint8_t link_id)
{
	uint16_t len;
	qdf_nbuf_t nbuf_copy;

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf, link_id))
		return false;

	/* If the source peer is in the isolation list,
	 * don't forward; instead push to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer))
		return false;

	nbuf_copy = qdf_nbuf_copy(nbuf);
	if (!nbuf_copy)
		return false;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);

	if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
						       nbuf_copy,
						       tid_stats,
						       link_id))
		return false;

	/* Don't send packets if tx is paused */
	if (!soc->is_tx_pause &&
	    !dp_tx_send((struct cdp_soc_t *)soc,
			ta_peer->vdev->vdev_id, nbuf_copy)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len, link_id);
		tid_stats->intrabss_cnt++;
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len, link_id);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		dp_rx_nbuf_free(nbuf_copy);
	}
	return false;
}

bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats,
			      uint8_t link_id)
{
	uint16_t len;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/* linearize the nbuf just before we send to
	 * dp_tx_send()
	 */
	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
			return false;

		nbuf = qdf_nbuf_unshare(nbuf);
		if (!nbuf) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
						      rx.intra_bss.fail,
						      1, len, link_id);
			/* skb_unshare failed; the pkt is not
			 * forwarded, just count the drop and
			 * continue with the next nbuf.
			 */
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			return false;
		}
	}

	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);

	/* Don't send packets if tx is paused */
	if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc,
					     tx_vdev_id, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len, link_id);
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len, link_id);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		return false;
	}

	return true;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef MESH_MODE_SUPPORT

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr,
			   struct dp_txrx_peer *txrx_peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;
	uint8_t primary_chan_num;
	uint32_t center_chan_freq;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_peer *peer;
	struct dp_peer *primary_link_peer;
	struct dp_soc *link_peer_soc;
	cdp_peer_stats_param_t buf = {0};

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible for freeing this memory */

	if (!rx_info) {
		dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
			  vdev->pdev->soc);
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
	if (peer) {
		if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
			rx_info->rs_flags |= MESH_RX_DECRYPTED;
			rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
								  rx_tlv_hdr);
			if (vdev->osif_get_key)
				vdev->osif_get_key(vdev->osif_vdev,
						   &rx_info->rs_decryptkey[0],
						   &peer->mac_addr.raw[0],
						   rx_info->rs_keyix);
		}

		dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
	}

	primary_link_peer = dp_get_primary_link_peer_by_id(soc,
							   txrx_peer->peer_id,
							   DP_MOD_ID_MESH);

	if (qdf_likely(primary_link_peer)) {
		link_peer_soc = primary_link_peer->vdev->pdev->soc;
		dp_monitor_peer_get_stats_param(link_peer_soc,
						primary_link_peer,
						cdp_peer_rx_snr, &buf);
		rx_info->rs_snr = buf.rx_snr;
		dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
	}

	rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;

	soc = vdev->pdev->soc;
	primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
	center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;

	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
							soc->ctrl_psoc,
							vdev->pdev->pdev_id,
							center_chan_freq);
	}
	rx_info->rs_channel = primary_chan_num;
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
	rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);

	/*
	 * The MCS index does not start with 0 when NSS>1 in HT mode.
	 * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1):
	 * ------------------------------------------------------
	 *        NSS      |   1   |   2    |    3    |    4
	 * ------------------------------------------------------
	 * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
	 * ------------------------------------------------------
	 * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
	 * ------------------------------------------------------
	 * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1)
	 */
	if ((pkt_type == DOT11_N) && (nss == 2))
		rate_mcs += 8;

	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix,
		  rx_info->rs_snr);
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;
	struct dp_soc *soc = vdev->pdev->soc;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						   rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
						   rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef RX_PEER_INVALID_ENH
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = NULL;
	int i = 0;
	uint32_t nbuf_len;

	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
		dp_rx_debug("%pK: Drop decapped frames", soc);
		goto free;
	}

	/* In RAW packet, packet header will be part of data */
	rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
		goto free;
	}

	nbuf_len = qdf_nbuf_len(mpdu);
	if (nbuf_len < sizeof(struct ieee80211_frame)) {
		dp_rx_err("%pK: Invalid nbuf length: %u", soc, nbuf_len);
		goto free;
	}

	/* In the DMAC case the rx_desc_pools are common across PDEVs,
	 * so the PDEV cannot be derived from the pool_id.
	 *
	 * link_id needs to be derived from the TLV tag word, which is
	 * disabled by default. For now add a WAR to get the vdev by
	 * brute force; this needs to be fixed once word based
	 * subscription support is added by enabling the TLV tag word.
	 */
	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		for (i = 0; i < MAX_PDEV_CNT; i++) {
			pdev = soc->pdev_list[i];

			if (!pdev || qdf_unlikely(pdev->is_pdev_down))
				continue;

			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
				if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
						QDF_MAC_ADDR_SIZE) == 0) {
					goto out;
				}
			}
		}
	} else {
		pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

		if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
			dp_rx_err("%pK: PDEV %s",
				  soc, !pdev ? "not found" : "down");
			goto free;
		}

		if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
		    QDF_STATUS_SUCCESS)
			return 0;

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		dp_rx_err("%pK: VDEV not found", soc);
		goto free;
	}
out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;

	/*
	 * NOTE: Only valid for HKv1.
	 * If smart monitor mode is enabled on the RE, we get invalid peer
	 * frames with RA as the STA mac of the RE and a TA not matching
	 * any NAC list entry or the BSSID. Such frames need to be dropped
	 * in order to avoid HM_WDS false addition.
	 */
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
		if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
			dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
				   soc, wh->i_addr1);
			goto free;
		}
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
				pdev->pdev_id, &msg);
	}

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	struct dp_peer *peer = NULL;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
	uint32_t nbuf_len;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	nbuf_len = qdf_nbuf_len(mpdu);
	if (nbuf_len < sizeof(struct ieee80211_frame)) {
		dp_rx_info_rl("%pK: Invalid nbuf length: %u", soc, nbuf_len);
		goto free;
	}

	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (!pdev) {
		dp_rx_info_rl("%pK: PDEV not found", soc);
		goto free;
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
			goto out;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	if (!vdev) {
		dp_rx_info_rl("%pK: VDEV not found", soc);
		goto free;
	}

out:
	if (vdev->opmode == wlan_op_mode_ap) {
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      vdev->vdev_id,
					      DP_MOD_ID_RX_ERR);
		/* If SA is a valid peer in vdev,
		 * don't send disconnect
		 */
		if (peer) {
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
			DP_STATS_INC(soc, rx.err.decrypt_err_drop, 1);
			dp_err_rl("invalid peer frame with correct SA/RA is freed");
			goto free;
		}
	}

	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* Reset the head and tail pointers */
	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (pdev) {
		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	return 0;
}

void
dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
				   qdf_nbuf_t mpdu, bool mpdu_done,
				   uint8_t mac_id)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @soc: dp soc handle
 * @msdu: MSDU for which the offload info is to be printed
 * @ofl_info: offload info saved in hal_offload_info structure
 *
 * Return: None
 */
static void dp_rx_print_offload_info(struct dp_soc *soc,
				     qdf_nbuf_t msdu,
				     struct hal_offload_info *ofl_info)
{
	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_verbose_debug("lro_eligible 0x%x",
			 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
	dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
	dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
	dp_verbose_debug("TCP seq num 0x%x", ofl_info->tcp_seq_num);
	dp_verbose_debug("TCP ack num 0x%x", ofl_info->tcp_ack_num);
	dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
	dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
	dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
	dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
	dp_verbose_debug("---------------------------------------------------------");
}

void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
	struct hal_offload_info offload_info;

	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
		return;

	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
					  rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
	QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;

	dp_rx_print_offload_info(soc, msdu, &offload_info);
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 * @l3_pad_len: L3 padding length by HW
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
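/*
 * For illustration only (buffer and TLV sizes below are hypothetical, with
 * l3_pad_len taken as 0): given a 2048-byte Rx buffer, a 128-byte packet TLV
 * area and a remaining mpdu_len of 3000, the first call trims the nbuf to
 * 2048 bytes, returns false and leaves mpdu_len = 3000 - (2048 - 128) = 1080;
 * a later call that fits sets the exact length, zeroes mpdu_len and returns
 * true.
 */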
1772 */ dp_rx_adjust_nbuf_len(struct dp_soc * soc,qdf_nbuf_t nbuf,uint16_t * mpdu_len,uint32_t l3_pad_len)1773 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc, 1774 qdf_nbuf_t nbuf, 1775 uint16_t *mpdu_len, 1776 uint32_t l3_pad_len) 1777 { 1778 bool last_nbuf; 1779 uint32_t pkt_hdr_size; 1780 uint16_t buf_size; 1781 1782 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 1783 1784 pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len; 1785 1786 if ((*mpdu_len + pkt_hdr_size) > buf_size) { 1787 qdf_nbuf_set_pktlen(nbuf, buf_size); 1788 last_nbuf = false; 1789 *mpdu_len -= (buf_size - pkt_hdr_size); 1790 } else { 1791 qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size)); 1792 last_nbuf = true; 1793 *mpdu_len = 0; 1794 } 1795 1796 return last_nbuf; 1797 } 1798 1799 /** 1800 * dp_get_l3_hdr_pad_len() - get L3 header padding length. 1801 * 1802 * @soc: DP soc handle 1803 * @nbuf: pointer to msdu. 1804 * 1805 * Return: returns padding length in bytes. 1806 */ dp_get_l3_hdr_pad_len(struct dp_soc * soc,qdf_nbuf_t nbuf)1807 static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc, 1808 qdf_nbuf_t nbuf) 1809 { 1810 uint32_t l3_hdr_pad = 0; 1811 uint8_t *rx_tlv_hdr; 1812 struct hal_rx_msdu_metadata msdu_metadata; 1813 1814 while (nbuf) { 1815 if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 1816 /* scattered msdu end with continuation is 0 */ 1817 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1818 hal_rx_msdu_metadata_get(soc->hal_soc, 1819 rx_tlv_hdr, 1820 &msdu_metadata); 1821 l3_hdr_pad = msdu_metadata.l3_hdr_pad; 1822 break; 1823 } 1824 nbuf = nbuf->next; 1825 } 1826 1827 return l3_hdr_pad; 1828 } 1829 dp_rx_sg_create(struct dp_soc * soc,qdf_nbuf_t nbuf)1830 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) 1831 { 1832 qdf_nbuf_t parent, frag_list, frag_tail, next = NULL; 1833 uint16_t frag_list_len = 0; 1834 uint16_t mpdu_len; 1835 bool last_nbuf; 1836 uint32_t l3_hdr_pad_offset = 0; 1837 1838 /* 1839 * Use msdu len got from REO entry descriptor instead since 1840 * there is case the RX PKT TLV is corrupted while msdu_len 1841 * from REO descriptor is right for non-raw RX scatter msdu. 1842 */ 1843 mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1844 1845 /* 1846 * If MSDU length of the first fragment is zero, need to 1847 * use the length of the last fragment to overwrite. 1848 */ 1849 if (!mpdu_len) { 1850 frag_tail = nbuf; 1851 while (frag_tail && qdf_nbuf_is_rx_chfrag_cont(frag_tail)) 1852 frag_tail = frag_tail->next; 1853 1854 if (frag_tail) 1855 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 1856 QDF_NBUF_CB_RX_PKT_LEN(frag_tail); 1857 } 1858 /* 1859 * this is a case where the complete msdu fits in one single nbuf. 1860 * in this case HW sets both start and end bit and we only need to 1861 * reset these bits for RAW mode simulator to decap the pkt 1862 */ 1863 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 1864 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1865 qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size); 1866 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1867 return nbuf; 1868 } 1869 1870 l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf); 1871 /* 1872 * This is a case where we have multiple msdus (A-MSDU) spread across 1873 * multiple nbufs. here we create a fraglist out of these nbufs. 1874 * 1875 * the moment we encounter a nbuf with continuation bit set we 1876 * know for sure we have an MSDU which is spread across multiple 1877 * nbufs. We loop through and reap nbufs till we reach last nbuf. 
1878 */ 1879 parent = nbuf; 1880 frag_list = nbuf->next; 1881 nbuf = nbuf->next; 1882 1883 /* 1884 * set the start bit in the first nbuf we encounter with continuation 1885 * bit set. This has the proper mpdu length set as it is the first 1886 * msdu of the mpdu. this becomes the parent nbuf and the subsequent 1887 * nbufs will form the frag_list of the parent nbuf. 1888 */ 1889 qdf_nbuf_set_rx_chfrag_start(parent, 1); 1890 /* 1891 * L3 header padding is only needed for the 1st buffer 1892 * in a scattered msdu 1893 */ 1894 last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len, 1895 l3_hdr_pad_offset); 1896 1897 /* 1898 * MSDU cont bit is set but reported MPDU length can fit 1899 * in to single buffer 1900 * 1901 * Increment error stats and avoid SG list creation 1902 */ 1903 if (last_nbuf) { 1904 DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1); 1905 qdf_nbuf_pull_head(parent, 1906 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1907 return parent; 1908 } 1909 1910 /* 1911 * this is where we set the length of the fragments which are 1912 * associated to the parent nbuf. We iterate through the frag_list 1913 * till we hit the last_nbuf of the list. 1914 */ 1915 do { 1916 last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0); 1917 qdf_nbuf_pull_head(nbuf, 1918 soc->rx_pkt_tlv_size); 1919 frag_list_len += qdf_nbuf_len(nbuf); 1920 1921 if (last_nbuf) { 1922 next = nbuf->next; 1923 nbuf->next = NULL; 1924 break; 1925 } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1926 dp_err("Invalid packet length"); 1927 qdf_assert_always(0); 1928 } 1929 nbuf = nbuf->next; 1930 } while (!last_nbuf); 1931 1932 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 1933 qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len); 1934 parent->next = next; 1935 1936 qdf_nbuf_pull_head(parent, 1937 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1938 return parent; 1939 } 1940 1941 #ifdef DP_RX_SG_FRAME_SUPPORT dp_rx_is_sg_supported(void)1942 bool dp_rx_is_sg_supported(void) 1943 { 1944 return true; 1945 } 1946 #else dp_rx_is_sg_supported(void)1947 bool dp_rx_is_sg_supported(void) 1948 { 1949 return false; 1950 } 1951 #endif 1952 1953 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1954 1955 #ifdef QCA_PEER_EXT_STATS dp_rx_compute_tid_delay(struct cdp_delay_tid_stats * stats,qdf_nbuf_t nbuf)1956 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, 1957 qdf_nbuf_t nbuf) 1958 { 1959 struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay; 1960 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1961 1962 dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack); 1963 } 1964 #endif /* QCA_PEER_EXT_STATS */ 1965 dp_rx_compute_delay(struct dp_vdev * vdev,qdf_nbuf_t nbuf)1966 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 1967 { 1968 uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); 1969 int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get()); 1970 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1971 uint8_t tid = qdf_nbuf_get_tid_val(nbuf); 1972 uint32_t interframe_delay = 1973 (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp); 1974 struct cdp_tid_rx_stats *rstats = 1975 &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1976 1977 dp_update_delay_stats(NULL, rstats, to_stack, tid, 1978 CDP_DELAY_STATS_REAP_STACK, ring_id, false); 1979 /* 1980 * Update interframe delay stats calculated at deliver_data_ol point. 1981 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so 1982 * interframe delay will not be calculate correctly for 1st frame. 
1983  * On the other side, this will help in avoiding extra per packet check
1984  * of vdev->prev_rx_deliver_tstamp.
1985  */
1986 	dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
1987 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
1988 	vdev->prev_rx_deliver_tstamp = current_ts;
1989 }
1990 
1991 /**
1992  * dp_rx_drop_nbuf_list() - drop an nbuf list
1993  * @pdev: dp pdev reference
1994  * @buf_list: buffer list to be dropped
1995  *
1996  * Return: int (number of bufs dropped)
1997  */
1998 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1999 				       qdf_nbuf_t buf_list)
2000 {
2001 	struct cdp_tid_rx_stats *stats = NULL;
2002 	uint8_t tid = 0, ring_id = 0;
2003 	int num_dropped = 0;
2004 	qdf_nbuf_t buf, next_buf;
2005 
2006 	buf = buf_list;
2007 	while (buf) {
2008 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
2009 		next_buf = qdf_nbuf_queue_next(buf);
2010 		tid = qdf_nbuf_get_tid_val(buf);
2011 		if (qdf_likely(pdev)) {
2012 			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2013 			stats->fail_cnt[INVALID_PEER_VDEV]++;
2014 			stats->delivered_to_stack--;
2015 		}
2016 		dp_rx_nbuf_free(buf);
2017 		buf = next_buf;
2018 		num_dropped++;
2019 	}
2020 
2021 	return num_dropped;
2022 }
2023 
2024 #ifdef QCA_SUPPORT_WDS_EXTENDED
2025 /**
2026  * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
2027  * @soc: core txrx main context
2028  * @vdev: vdev
2029  * @txrx_peer: txrx peer
2030  * @nbuf_head: skb list head
2031  *
2032  * Return: true if packet is delivered to netdev per STA.
2033  */
2034 bool
2035 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
2036 			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
2037 {
2038 	/*
2039 	 * When extended WDS is disabled, frames are sent to AP netdevice.
2040 	 */
2041 	if (qdf_likely(!vdev->wds_ext_enabled))
2042 		return false;
2043 
2044 	/*
2045 	 * There can be 2 cases:
2046 	 * 1. Send frame to parent netdev if it's not for netdev per STA
2047 	 * 2. If frame is meant for netdev per STA:
2048 	 *    a. Send frame to appropriate netdev using registered fp.
2049 	 *    b. If fp is NULL, drop the frames.
2050 */ 2051 if (!txrx_peer->wds_ext.init) 2052 return false; 2053 2054 if (txrx_peer->osif_rx) 2055 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); 2056 else 2057 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2058 2059 return true; 2060 } 2061 2062 #else 2063 static inline bool dp_rx_deliver_to_stack_ext(struct dp_soc * soc,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf_head)2064 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 2065 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 2066 { 2067 return false; 2068 } 2069 #endif 2070 2071 #ifdef PEER_CACHE_RX_PKTS 2072 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 2073 /** 2074 * dp_set_nbuf_band() - Set band in nbuf cb 2075 * @peer: dp_peer 2076 * @nbuf: nbuf 2077 * 2078 * Return: None 2079 */ 2080 static inline void dp_set_nbuf_band(struct dp_peer * peer,qdf_nbuf_t nbuf)2081 dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf) 2082 { 2083 uint8_t link_id = 0; 2084 2085 link_id = dp_rx_get_stats_arr_idx_from_link_id(nbuf, peer->txrx_peer); 2086 dp_rx_set_nbuf_band(nbuf, peer->txrx_peer, link_id); 2087 } 2088 #else 2089 static inline void dp_set_nbuf_band(struct dp_peer * peer,qdf_nbuf_t nbuf)2090 dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf) 2091 { 2092 } 2093 #endif 2094 dp_rx_flush_rx_cached(struct dp_peer * peer,bool drop)2095 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 2096 { 2097 struct dp_peer_cached_bufq *bufqi; 2098 struct dp_rx_cached_buf *cache_buf = NULL; 2099 ol_txrx_rx_fp data_rx = NULL; 2100 int num_buff_elem; 2101 QDF_STATUS status; 2102 2103 /* 2104 * Flush dp cached frames only for mld peers and legacy peers, as 2105 * link peers don't store cached frames 2106 */ 2107 if (IS_MLO_DP_LINK_PEER(peer)) 2108 return; 2109 2110 if (!peer->txrx_peer) { 2111 dp_err("txrx_peer NULL!! 
peer mac_addr("QDF_MAC_ADDR_FMT")", 2112 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 2113 return; 2114 } 2115 2116 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 2117 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2118 return; 2119 } 2120 2121 qdf_spin_lock_bh(&peer->peer_info_lock); 2122 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 2123 data_rx = peer->vdev->osif_rx; 2124 else 2125 drop = true; 2126 qdf_spin_unlock_bh(&peer->peer_info_lock); 2127 2128 bufqi = &peer->txrx_peer->bufq_info; 2129 2130 qdf_spin_lock_bh(&bufqi->bufq_lock); 2131 qdf_list_remove_front(&bufqi->cached_bufq, 2132 (qdf_list_node_t **)&cache_buf); 2133 while (cache_buf) { 2134 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 2135 cache_buf->buf); 2136 bufqi->entries -= num_buff_elem; 2137 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2138 if (drop) { 2139 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 2140 cache_buf->buf); 2141 } else { 2142 dp_set_nbuf_band(peer, cache_buf->buf); 2143 /* Flush the cached frames to OSIF DEV */ 2144 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 2145 if (status != QDF_STATUS_SUCCESS) 2146 bufqi->dropped = dp_rx_drop_nbuf_list( 2147 peer->vdev->pdev, 2148 cache_buf->buf); 2149 } 2150 qdf_mem_free(cache_buf); 2151 cache_buf = NULL; 2152 qdf_spin_lock_bh(&bufqi->bufq_lock); 2153 qdf_list_remove_front(&bufqi->cached_bufq, 2154 (qdf_list_node_t **)&cache_buf); 2155 } 2156 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2157 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2158 } 2159 2160 /** 2161 * dp_rx_enqueue_rx() - cache rx frames 2162 * @peer: peer 2163 * @txrx_peer: DP txrx_peer 2164 * @rx_buf_list: cache buffer list 2165 * 2166 * Return: None 2167 */ 2168 static QDF_STATUS dp_rx_enqueue_rx(struct dp_peer * peer,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t rx_buf_list)2169 dp_rx_enqueue_rx(struct dp_peer *peer, 2170 struct dp_txrx_peer *txrx_peer, 2171 qdf_nbuf_t rx_buf_list) 2172 { 2173 struct dp_rx_cached_buf *cache_buf; 2174 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; 2175 int num_buff_elem; 2176 QDF_STATUS ret = QDF_STATUS_SUCCESS; 2177 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 2178 struct dp_peer *ta_peer = NULL; 2179 2180 /* 2181 * If peer id is invalid which likely peer map has not completed, 2182 * then need caller provide dp_peer pointer, else it's ok to use 2183 * txrx_peer->peer_id to get dp_peer. 
2184 */ 2185 if (peer) { 2186 if (QDF_STATUS_SUCCESS == 2187 dp_peer_get_ref(soc, peer, DP_MOD_ID_RX)) 2188 ta_peer = peer; 2189 } else { 2190 ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2191 DP_MOD_ID_RX); 2192 } 2193 2194 if (!ta_peer) { 2195 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2196 rx_buf_list); 2197 return QDF_STATUS_E_INVAL; 2198 } 2199 2200 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 2201 bufqi->dropped); 2202 if (!ta_peer->valid) { 2203 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2204 rx_buf_list); 2205 ret = QDF_STATUS_E_INVAL; 2206 goto fail; 2207 } 2208 2209 qdf_spin_lock_bh(&bufqi->bufq_lock); 2210 if (bufqi->entries >= bufqi->thresh) { 2211 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2212 rx_buf_list); 2213 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2214 ret = QDF_STATUS_E_RESOURCES; 2215 goto fail; 2216 } 2217 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2218 2219 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 2220 2221 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 2222 if (!cache_buf) { 2223 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2224 "Failed to allocate buf to cache rx frames"); 2225 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2226 rx_buf_list); 2227 ret = QDF_STATUS_E_NOMEM; 2228 goto fail; 2229 } 2230 2231 cache_buf->buf = rx_buf_list; 2232 2233 qdf_spin_lock_bh(&bufqi->bufq_lock); 2234 qdf_list_insert_back(&bufqi->cached_bufq, 2235 &cache_buf->node); 2236 bufqi->entries += num_buff_elem; 2237 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2238 2239 fail: 2240 dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX); 2241 return ret; 2242 } 2243 2244 static inline dp_rx_is_peer_cache_bufq_supported(void)2245 bool dp_rx_is_peer_cache_bufq_supported(void) 2246 { 2247 return true; 2248 } 2249 #else 2250 static inline dp_rx_is_peer_cache_bufq_supported(void)2251 bool dp_rx_is_peer_cache_bufq_supported(void) 2252 { 2253 return false; 2254 } 2255 2256 static inline QDF_STATUS dp_rx_enqueue_rx(struct dp_peer * peer,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t rx_buf_list)2257 dp_rx_enqueue_rx(struct dp_peer *peer, 2258 struct dp_txrx_peer *txrx_peer, 2259 qdf_nbuf_t rx_buf_list) 2260 { 2261 return QDF_STATUS_SUCCESS; 2262 } 2263 #endif 2264 2265 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 2266 /** 2267 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2268 * using the appropriate call back functions. 2269 * @soc: soc 2270 * @vdev: vdev 2271 * @txrx_peer: peer 2272 * @nbuf_head: skb list head 2273 * 2274 * Return: None 2275 */ dp_rx_check_delivery_to_stack(struct dp_soc * soc,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf_head)2276 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2277 struct dp_vdev *vdev, 2278 struct dp_txrx_peer *txrx_peer, 2279 qdf_nbuf_t nbuf_head) 2280 { 2281 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 2282 txrx_peer, nbuf_head))) 2283 return; 2284 2285 /* Function pointer initialized only when FISA is enabled */ 2286 if (vdev->osif_fisa_rx) 2287 /* on failure send it via regular path */ 2288 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2289 else 2290 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2291 } 2292 2293 #else 2294 /** 2295 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2296 * using the appropriate call back functions. 
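 * This variant is used when DELIVERY_TO_STACK_STATUS_CHECK is defined.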
2297 * @soc: soc 2298 * @vdev: vdev 2299 * @txrx_peer: txrx peer 2300 * @nbuf_head: skb list head 2301 * 2302 * Check the return status of the call back function and drop 2303 * the packets if the return status indicates a failure. 2304 * 2305 * Return: None 2306 */ dp_rx_check_delivery_to_stack(struct dp_soc * soc,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf_head)2307 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2308 struct dp_vdev *vdev, 2309 struct dp_txrx_peer *txrx_peer, 2310 qdf_nbuf_t nbuf_head) 2311 { 2312 int num_nbuf = 0; 2313 QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; 2314 2315 /* Function pointer initialized only when FISA is enabled */ 2316 if (vdev->osif_fisa_rx) 2317 /* on failure send it via regular path */ 2318 ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2319 else if (vdev->osif_rx) 2320 ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2321 2322 if (!QDF_IS_STATUS_SUCCESS(ret_val)) { 2323 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2324 DP_STATS_INC(soc, rx.err.rejected, num_nbuf); 2325 if (txrx_peer) 2326 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, 2327 num_nbuf); 2328 } 2329 } 2330 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ 2331 2332 /** 2333 * dp_rx_validate_rx_callbacks() - validate rx callbacks 2334 * @soc: DP soc 2335 * @vdev: DP vdev handle 2336 * @txrx_peer: pointer to the txrx peer object 2337 * @nbuf_head: skb list head 2338 * 2339 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2340 * QDF_STATUS_E_FAILURE 2341 */ 2342 static inline QDF_STATUS dp_rx_validate_rx_callbacks(struct dp_soc * soc,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf_head)2343 dp_rx_validate_rx_callbacks(struct dp_soc *soc, 2344 struct dp_vdev *vdev, 2345 struct dp_txrx_peer *txrx_peer, 2346 qdf_nbuf_t nbuf_head) 2347 { 2348 int num_nbuf; 2349 2350 if (qdf_unlikely(!vdev || vdev->delete.pending)) { 2351 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); 2352 /* 2353 * This is a special case where vdev is invalid, 2354 * so we cannot know the pdev to which this packet 2355 * belonged. Hence we update the soc rx error stats. 2356 */ 2357 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); 2358 return QDF_STATUS_E_FAILURE; 2359 } 2360 2361 /* 2362 * highly unlikely to have a vdev without a registered rx 2363 * callback function. if so let us free the nbuf_list. 
2364 */ 2365 if (qdf_unlikely(!vdev->osif_rx)) { 2366 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2367 dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head); 2368 } else { 2369 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2370 nbuf_head); 2371 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2372 vdev->pdev->enhanced_stats_en); 2373 } 2374 return QDF_STATUS_E_FAILURE; 2375 } 2376 2377 return QDF_STATUS_SUCCESS; 2378 } 2379 2380 #if defined(WLAN_FEATURE_11BE_MLO) && defined(RAW_PKT_MLD_ADDR_CONVERSION) dp_rx_raw_pkt_mld_addr_conv(struct dp_soc * soc,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf_head)2381 static void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc, 2382 struct dp_vdev *vdev, 2383 struct dp_txrx_peer *txrx_peer, 2384 qdf_nbuf_t nbuf_head) 2385 { 2386 qdf_nbuf_t nbuf, next; 2387 struct dp_peer *peer = NULL; 2388 struct ieee80211_frame *wh = NULL; 2389 2390 if (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi) 2391 return; 2392 2393 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2394 DP_MOD_ID_RX); 2395 2396 if (!peer) 2397 return; 2398 2399 if (!IS_MLO_DP_MLD_PEER(peer)) { 2400 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2401 return; 2402 } 2403 2404 nbuf = nbuf_head; 2405 while (nbuf) { 2406 next = nbuf->next; 2407 wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf); 2408 qdf_mem_copy(wh->i_addr1, vdev->mld_mac_addr.raw, 2409 QDF_MAC_ADDR_SIZE); 2410 qdf_mem_copy(wh->i_addr2, peer->mac_addr.raw, 2411 QDF_MAC_ADDR_SIZE); 2412 nbuf = next; 2413 } 2414 2415 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2416 } 2417 #else 2418 static inline dp_rx_raw_pkt_mld_addr_conv(struct dp_soc * soc,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf_head)2419 void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc, 2420 struct dp_vdev *vdev, 2421 struct dp_txrx_peer *txrx_peer, 2422 qdf_nbuf_t nbuf_head) 2423 { } 2424 #endif 2425 dp_rx_deliver_to_stack(struct dp_soc * soc,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf_head,qdf_nbuf_t nbuf_tail)2426 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2427 struct dp_vdev *vdev, 2428 struct dp_txrx_peer *txrx_peer, 2429 qdf_nbuf_t nbuf_head, 2430 qdf_nbuf_t nbuf_tail) 2431 { 2432 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2433 QDF_STATUS_SUCCESS) 2434 return QDF_STATUS_E_FAILURE; 2435 2436 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2437 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2438 dp_rx_raw_pkt_mld_addr_conv(soc, vdev, txrx_peer, nbuf_head); 2439 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2440 &nbuf_tail); 2441 } 2442 2443 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2444 2445 return QDF_STATUS_SUCCESS; 2446 } 2447 2448 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT dp_rx_eapol_deliver_to_stack(struct dp_soc * soc,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf_head,qdf_nbuf_t nbuf_tail)2449 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2450 struct dp_vdev *vdev, 2451 struct dp_txrx_peer *txrx_peer, 2452 qdf_nbuf_t nbuf_head, 2453 qdf_nbuf_t nbuf_tail) 2454 { 2455 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2456 QDF_STATUS_SUCCESS) 2457 return QDF_STATUS_E_FAILURE; 2458 2459 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2460 2461 return QDF_STATUS_SUCCESS; 2462 } 2463 #endif 2464 2465 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2466 #ifdef VDEV_PEER_PROTOCOL_COUNT 2467 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 
2468 { \ 2469 qdf_nbuf_t nbuf_local; \ 2470 struct dp_txrx_peer *txrx_peer_local; \ 2471 struct dp_vdev *vdev_local = vdev_hdl; \ 2472 do { \ 2473 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2474 break; \ 2475 nbuf_local = nbuf; \ 2476 txrx_peer_local = txrx_peer; \ 2477 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2478 break; \ 2479 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2480 break; \ 2481 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2482 (nbuf_local), \ 2483 (txrx_peer_local), 0, 1); \ 2484 } while (0); \ 2485 } 2486 #else 2487 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) 2488 #endif 2489 2490 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 2491 /** 2492 * dp_rx_rates_stats_update() - update rate stats 2493 * from rx msdu. 2494 * @soc: datapath soc handle 2495 * @nbuf: received msdu buffer 2496 * @rx_tlv_hdr: rx tlv header 2497 * @txrx_peer: datapath txrx_peer handle 2498 * @sgi: Short Guard Interval 2499 * @mcs: Modulation and Coding Set 2500 * @nss: Number of Spatial Streams 2501 * @bw: BandWidth 2502 * @pkt_type: Corresponds to preamble 2503 * @link_id: Link Id on which packet is received 2504 * 2505 * To be precisely record rates, following factors are considered: 2506 * Exclude specific frames, ARP, DHCP, ssdp, etc. 2507 * Make sure to affect rx throughput as least as possible. 2508 * 2509 * Return: void 2510 */ 2511 static void dp_rx_rates_stats_update(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,struct dp_txrx_peer * txrx_peer,uint32_t sgi,uint32_t mcs,uint32_t nss,uint32_t bw,uint32_t pkt_type,uint8_t link_id)2512 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2513 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2514 uint32_t sgi, uint32_t mcs, 2515 uint32_t nss, uint32_t bw, uint32_t pkt_type, 2516 uint8_t link_id) 2517 { 2518 uint32_t rix; 2519 uint16_t ratecode; 2520 uint32_t avg_rx_rate; 2521 uint32_t ratekbps; 2522 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 2523 2524 if (soc->high_throughput || 2525 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) { 2526 return; 2527 } 2528 2529 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id); 2530 2531 /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */ 2532 if (qdf_unlikely(pkt_type == DOT11_B)) 2533 nss = 1; 2534 2535 /* here pkt_type corresponds to preamble */ 2536 ratekbps = dp_getrateindex(sgi, 2537 mcs, 2538 nss - 1, 2539 pkt_type, 2540 bw, 2541 punc_mode, 2542 &rix, 2543 &ratecode); 2544 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id); 2545 avg_rx_rate = 2546 dp_ath_rate_lpf( 2547 txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate, 2548 ratekbps); 2549 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id); 2550 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id); 2551 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id); 2552 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id); 2553 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id); 2554 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id); 2555 } 2556 #else 2557 static inline void dp_rx_rates_stats_update(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,struct dp_txrx_peer * txrx_peer,uint32_t sgi,uint32_t mcs,uint32_t nss,uint32_t bw,uint32_t pkt_type,uint8_t link_id)2558 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2559 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2560 uint32_t sgi, uint32_t mcs, 2561 
uint32_t nss, uint32_t bw, uint32_t pkt_type, 2562 uint8_t link_id) 2563 { 2564 } 2565 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 2566 2567 #ifndef QCA_ENHANCED_STATS_SUPPORT 2568 /** 2569 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer 2570 * 2571 * @soc: datapath soc handle 2572 * @nbuf: received msdu buffer 2573 * @rx_tlv_hdr: rx tlv header 2574 * @txrx_peer: datapath txrx_peer handle 2575 * @link_id: link id on which the packet is received 2576 * 2577 * Return: void 2578 */ 2579 static inline dp_rx_msdu_extd_stats_update(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,struct dp_txrx_peer * txrx_peer,uint8_t link_id)2580 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2581 uint8_t *rx_tlv_hdr, 2582 struct dp_txrx_peer *txrx_peer, 2583 uint8_t link_id) 2584 { 2585 bool is_ampdu; 2586 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 2587 uint8_t dst_mcs_idx; 2588 2589 /* 2590 * TODO - For KIWI this field is present in ring_desc 2591 * Try to use ring desc instead of tlv. 2592 */ 2593 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2594 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id); 2595 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu), 2596 link_id); 2597 2598 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2599 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2600 tid = qdf_nbuf_get_tid_val(nbuf); 2601 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2602 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2603 rx_tlv_hdr); 2604 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2605 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2606 /* do HW to SW pkt type conversion */ 2607 pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX : 2608 hal_2_dp_pkt_type_map[pkt_type]); 2609 2610 /* 2611 * The MCS index does not start with 0 when NSS>1 in HT mode. 2612 * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1): 2613 * ------------------------------------------------------ 2614 * NSS | 1 | 2 | 3 | 4 2615 * ------------------------------------------------------ 2616 * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 2617 * ------------------------------------------------------ 2618 * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 2619 * ------------------------------------------------------ 2620 * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1) 2621 */ 2622 if ((pkt_type == DOT11_N) && (nss == 2)) 2623 mcs += 8; 2624 2625 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2626 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)), 2627 link_id); 2628 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2629 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)), 2630 link_id); 2631 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id); 2632 /* 2633 * only if nss > 0 and pkt_type is 11N/AC/AX, 2634 * then increase index [nss - 1] in array counter. 
2635 */ 2636 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2637 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id); 2638 2639 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id); 2640 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2641 hal_rx_tlv_mic_err_get(soc->hal_soc, 2642 rx_tlv_hdr), link_id); 2643 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2644 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2645 rx_tlv_hdr), link_id); 2646 2647 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1, 2648 link_id); 2649 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1, 2650 link_id); 2651 2652 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2653 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2654 DP_PEER_EXTD_STATS_INC(txrx_peer, 2655 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2656 1, link_id); 2657 2658 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2659 sgi, mcs, nss, bw, pkt_type, link_id); 2660 } 2661 #else 2662 static inline dp_rx_msdu_extd_stats_update(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,struct dp_txrx_peer * txrx_peer,uint8_t link_id)2663 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2664 uint8_t *rx_tlv_hdr, 2665 struct dp_txrx_peer *txrx_peer, 2666 uint8_t link_id) 2667 { 2668 } 2669 #endif 2670 2671 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2672 static inline void dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf,uint8_t link_id)2673 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2674 qdf_nbuf_t nbuf, uint8_t link_id) 2675 { 2676 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2677 2678 if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) { 2679 dp_err_rl("Invalid lmac_id: %u vdev_id: %u", 2680 lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf)); 2681 2682 if (qdf_likely(txrx_peer)) 2683 dp_err_rl("peer_id: %u", txrx_peer->peer_id); 2684 2685 return; 2686 } 2687 2688 /* only count stats per lmac for MLO connection*/ 2689 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2690 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2691 txrx_peer->is_mld_peer, link_id); 2692 } 2693 #else 2694 static inline void dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf,uint8_t link_id)2695 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2696 qdf_nbuf_t nbuf, uint8_t link_id) 2697 { 2698 } 2699 #endif 2700 dp_rx_msdu_stats_update(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,struct dp_txrx_peer * txrx_peer,uint8_t ring_id,struct cdp_tid_rx_stats * tid_stats,uint8_t link_id)2701 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2702 uint8_t *rx_tlv_hdr, 2703 struct dp_txrx_peer *txrx_peer, 2704 uint8_t ring_id, 2705 struct cdp_tid_rx_stats *tid_stats, 2706 uint8_t link_id) 2707 { 2708 bool is_not_amsdu; 2709 struct dp_vdev *vdev = txrx_peer->vdev; 2710 uint8_t enh_flag; 2711 qdf_ether_header_t *eh; 2712 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2713 2714 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2715 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2716 qdf_nbuf_is_rx_chfrag_end(nbuf); 2717 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2718 msdu_len, link_id); 2719 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2720 is_not_amsdu, link_id); 2721 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, 2722 !is_not_amsdu, link_id); 2723 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, 
rx.rx_retries, 1, 2724 qdf_nbuf_is_rx_retry_flag(nbuf), link_id); 2725 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id); 2726 tid_stats->msdu_cnt++; 2727 enh_flag = vdev->pdev->enhanced_stats_en; 2728 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2729 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2730 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2731 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id); 2732 tid_stats->mcast_msdu_cnt++; 2733 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2734 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, 2735 enh_flag, link_id); 2736 tid_stats->bcast_msdu_cnt++; 2737 } 2738 } else { 2739 DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len, 2740 enh_flag, link_id); 2741 } 2742 2743 txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts = 2744 qdf_system_ticks(); 2745 2746 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, 2747 txrx_peer, link_id); 2748 } 2749 2750 #ifndef WDS_VENDOR_EXTENSION dp_wds_rx_policy_check(uint8_t * rx_tlv_hdr,struct dp_vdev * vdev,struct dp_txrx_peer * txrx_peer)2751 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2752 struct dp_vdev *vdev, 2753 struct dp_txrx_peer *txrx_peer) 2754 { 2755 return 1; 2756 } 2757 #endif 2758 2759 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2760 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2761 /** 2762 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received 2763 * during roaming 2764 * @vdev: dp_vdev pointer 2765 * @rx_tlv_hdr: rx tlv header 2766 * @nbuf: pkt skb pointer 2767 * 2768 * This function will check if rx udp data is received from authorised 2769 * roamed peer before peer map indication is received from FW after 2770 * roaming. This is needed for VoIP scenarios in which packet loss 2771 * expected during roaming is minimal. 2772 * 2773 * Return: bool 2774 */ dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev * vdev,uint8_t * rx_tlv_hdr,qdf_nbuf_t nbuf)2775 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2776 uint8_t *rx_tlv_hdr, 2777 qdf_nbuf_t nbuf) 2778 { 2779 char *hdr_desc; 2780 struct ieee80211_frame *wh = NULL; 2781 2782 hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc, 2783 rx_tlv_hdr); 2784 wh = (struct ieee80211_frame *)hdr_desc; 2785 2786 if (vdev->roaming_peer_status == 2787 WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED && 2788 !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2, 2789 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || 2790 qdf_nbuf_is_ipv6_udp_pkt(nbuf))) 2791 return true; 2792 2793 return false; 2794 } 2795 #else dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev * vdev,uint8_t * rx_tlv_hdr,qdf_nbuf_t nbuf)2796 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2797 uint8_t *rx_tlv_hdr, 2798 qdf_nbuf_t nbuf) 2799 { 2800 return false; 2801 } 2802 #endif 2803 2804 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 2805 /** 2806 * dp_rx_nbuf_band_set() - set nbuf band. 
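 * The band is resolved by looking up the MLD peer that matches the
 * frame's source MAC address and reading the link band recorded for
 * the nbuf's logical link id.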
2807 * @soc: dp soc handle 2808 * @nbuf: nbuf handle 2809 * 2810 * Return: None 2811 */ 2812 static inline void dp_rx_nbuf_band_set(struct dp_soc * soc,qdf_nbuf_t nbuf)2813 dp_rx_nbuf_band_set(struct dp_soc *soc, qdf_nbuf_t nbuf) 2814 { 2815 struct qdf_mac_addr *mac_addr; 2816 struct dp_peer *peer; 2817 struct dp_txrx_peer *txrx_peer; 2818 2819 uint8_t link_id; 2820 2821 mac_addr = (struct qdf_mac_addr *)(qdf_nbuf_data(nbuf) + 2822 QDF_NBUF_SRC_MAC_OFFSET); 2823 2824 peer = dp_mld_peer_find_hash_find(soc, mac_addr->bytes, 0, 2825 DP_VDEV_ALL, DP_MOD_ID_RX); 2826 if (qdf_likely(peer)) { 2827 txrx_peer = dp_get_txrx_peer(peer); 2828 if (qdf_likely(txrx_peer)) { 2829 link_id = QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf); 2830 qdf_nbuf_rx_set_band(nbuf, txrx_peer->ll_band[link_id]); 2831 } 2832 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2833 } 2834 } 2835 #else 2836 static inline void dp_rx_nbuf_band_set(struct dp_soc * soc,qdf_nbuf_t nbuf)2837 dp_rx_nbuf_band_set(struct dp_soc *soc, qdf_nbuf_t nbuf) 2838 { 2839 } 2840 #endif 2841 dp_rx_deliver_to_stack_no_peer(struct dp_soc * soc,qdf_nbuf_t nbuf)2842 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2843 { 2844 uint16_t peer_id; 2845 uint8_t vdev_id; 2846 struct dp_vdev *vdev = NULL; 2847 uint32_t l2_hdr_offset = 0; 2848 uint16_t msdu_len = 0; 2849 uint32_t pkt_len = 0; 2850 uint8_t *rx_tlv_hdr; 2851 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 2852 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP | 2853 FRAME_MASK_DNS_QUERY | FRAME_MASK_DNS_RESP; 2854 2855 bool is_special_frame = false; 2856 struct dp_peer *peer = NULL; 2857 2858 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 2859 if (peer_id > soc->max_peer_id) 2860 goto deliver_fail; 2861 2862 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); 2863 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX); 2864 if (!vdev || vdev->delete.pending) 2865 goto deliver_fail; 2866 2867 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) 2868 goto deliver_fail; 2869 2870 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2871 l2_hdr_offset = 2872 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 2873 2874 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2875 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 2876 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 2877 2878 qdf_nbuf_set_pktlen(nbuf, pkt_len); 2879 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset); 2880 2881 is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask); 2882 if (qdf_likely(vdev->osif_rx)) { 2883 if (is_special_frame || 2884 dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, 2885 nbuf)) { 2886 dp_rx_nbuf_band_set(soc, nbuf); 2887 qdf_nbuf_set_exc_frame(nbuf, 1); 2888 if (QDF_STATUS_SUCCESS != 2889 vdev->osif_rx(vdev->osif_vdev, nbuf)) 2890 goto deliver_fail; 2891 2892 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); 2893 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2894 return; 2895 } 2896 } else if (is_special_frame) { 2897 /* 2898 * If MLO connection, txrx_peer for link peer does not exist, 2899 * try to store these RX packets to txrx_peer's bufq of MLD 2900 * peer until vdev->osif_rx is registered from CP and flush 2901 * them to stack. 
2902 */ 2903 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, 2904 DP_MOD_ID_RX); 2905 if (!peer) 2906 goto deliver_fail; 2907 2908 /* only check for MLO connection */ 2909 if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer && 2910 dp_rx_is_peer_cache_bufq_supported()) { 2911 qdf_nbuf_set_exc_frame(nbuf, 1); 2912 2913 if (QDF_STATUS_SUCCESS == 2914 dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) { 2915 DP_STATS_INC(soc, 2916 rx.err.pkt_delivered_no_peer, 2917 1); 2918 } else { 2919 DP_STATS_INC(soc, 2920 rx.err.rx_invalid_peer.num, 2921 1); 2922 } 2923 2924 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2925 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2926 return; 2927 } 2928 2929 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2930 } 2931 2932 deliver_fail: 2933 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2934 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2935 dp_rx_nbuf_free(nbuf); 2936 if (vdev) 2937 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2938 } 2939 #else dp_rx_deliver_to_stack_no_peer(struct dp_soc * soc,qdf_nbuf_t nbuf)2940 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2941 { 2942 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2943 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2944 dp_rx_nbuf_free(nbuf); 2945 } 2946 #endif 2947 2948 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2949 2950 #ifdef WLAN_SUPPORT_RX_FISA dp_fisa_config(ol_txrx_soc_handle cdp_soc,uint8_t pdev_id,enum cdp_fisa_config_id config_id,union cdp_fisa_config * cfg)2951 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id, 2952 enum cdp_fisa_config_id config_id, 2953 union cdp_fisa_config *cfg) 2954 { 2955 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 2956 struct dp_pdev *pdev; 2957 QDF_STATUS status; 2958 2959 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2960 if (!pdev) { 2961 dp_err("pdev is NULL for pdev_id %u", pdev_id); 2962 return QDF_STATUS_E_INVAL; 2963 } 2964 2965 switch (config_id) { 2966 case CDP_FISA_HTT_RX_FISA_CFG: 2967 status = dp_htt_rx_fisa_config(pdev, cfg->fisa_config); 2968 break; 2969 case CDP_FISA_HTT_RX_FSE_OP_CFG: 2970 status = dp_htt_rx_flow_fse_operation(pdev, cfg->fse_op_cmd); 2971 break; 2972 case CDP_FISA_HTT_RX_FSE_SETUP_CFG: 2973 status = dp_htt_rx_flow_fst_setup(pdev, cfg->fse_setup_info); 2974 break; 2975 default: 2976 status = QDF_STATUS_E_INVAL; 2977 } 2978 2979 return status; 2980 } 2981 dp_rx_skip_tlvs(struct dp_soc * soc,qdf_nbuf_t nbuf,uint32_t l3_padding)2982 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2983 { 2984 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 2985 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2986 } 2987 #else dp_rx_skip_tlvs(struct dp_soc * soc,qdf_nbuf_t nbuf,uint32_t l3_padding)2988 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2989 { 2990 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2991 } 2992 #endif 2993 2994 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2995 2996 #ifdef DP_RX_DROP_RAW_FRM dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)2997 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2998 { 2999 if (qdf_nbuf_is_raw_frame(nbuf)) { 3000 dp_rx_nbuf_free(nbuf); 3001 return true; 3002 } 3003 3004 return false; 3005 } 3006 #endif 3007 3008 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR dp_rx_update_stats(struct dp_soc * soc,qdf_nbuf_t nbuf)3009 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 3010 { 3011 DP_STATS_INC_PKT(soc, rx.ingress, 1, 3012 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 3013 } 3014 #endif 3015 3016 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 
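/*
 * Packet-capture hooks: when packet capture mode is enabled in the wlan
 * cfg context, received data frames are forwarded to the WDI packet
 * capture event handlers, with or without a valid peer.
 */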
dp_rx_deliver_to_pkt_capture(struct dp_soc * soc,struct dp_pdev * pdev,uint16_t peer_id,uint32_t is_offload,qdf_nbuf_t netbuf)3017 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 3018 uint16_t peer_id, uint32_t is_offload, 3019 qdf_nbuf_t netbuf) 3020 { 3021 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 3022 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf, 3023 peer_id, is_offload, pdev->pdev_id); 3024 } 3025 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc * soc,qdf_nbuf_t nbuf,uint32_t is_offload)3026 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 3027 uint32_t is_offload) 3028 { 3029 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 3030 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER, 3031 soc, nbuf, HTT_INVALID_VDEV, 3032 is_offload, 0); 3033 } 3034 #endif 3035 3036 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 3037 dp_rx_vdev_detach(struct dp_vdev * vdev)3038 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 3039 { 3040 QDF_STATUS ret; 3041 3042 if (vdev->osif_rx_flush) { 3043 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 3044 if (!QDF_IS_STATUS_SUCCESS(ret)) { 3045 dp_err("Failed to flush rx pkts for vdev %d", 3046 vdev->vdev_id); 3047 return ret; 3048 } 3049 } 3050 3051 return QDF_STATUS_SUCCESS; 3052 } 3053 3054 static QDF_STATUS dp_pdev_nbuf_alloc_and_map(struct dp_soc * dp_soc,struct dp_rx_nbuf_frag_info * nbuf_frag_info_t,struct dp_pdev * dp_pdev,struct rx_desc_pool * rx_desc_pool,bool dp_buf_page_frag_alloc_enable)3055 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 3056 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 3057 struct dp_pdev *dp_pdev, 3058 struct rx_desc_pool *rx_desc_pool, 3059 bool dp_buf_page_frag_alloc_enable) 3060 { 3061 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 3062 3063 if (dp_buf_page_frag_alloc_enable) { 3064 (nbuf_frag_info_t->virt_addr).nbuf = 3065 qdf_nbuf_frag_alloc(dp_soc->osdev, 3066 rx_desc_pool->buf_size, 3067 RX_BUFFER_RESERVATION, 3068 rx_desc_pool->buf_alignment, FALSE); 3069 } else { 3070 (nbuf_frag_info_t->virt_addr).nbuf = 3071 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 3072 RX_BUFFER_RESERVATION, 3073 rx_desc_pool->buf_alignment, FALSE); 3074 } 3075 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 3076 dp_err("nbuf alloc failed"); 3077 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 3078 return ret; 3079 } 3080 3081 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 3082 (nbuf_frag_info_t->virt_addr).nbuf, 3083 QDF_DMA_FROM_DEVICE, 3084 rx_desc_pool->buf_size); 3085 3086 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 3087 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 3088 dp_err("nbuf map failed"); 3089 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 3090 return ret; 3091 } 3092 3093 nbuf_frag_info_t->paddr = 3094 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 3095 3096 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 3097 &nbuf_frag_info_t->paddr, 3098 rx_desc_pool); 3099 if (ret == QDF_STATUS_E_FAILURE) { 3100 dp_err("nbuf check x86 failed"); 3101 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 3102 return ret; 3103 } 3104 3105 return QDF_STATUS_SUCCESS; 3106 } 3107 3108 QDF_STATUS dp_pdev_rx_buffers_attach(struct dp_soc * dp_soc,uint32_t mac_id,struct dp_srng * dp_rxdma_srng,struct rx_desc_pool * rx_desc_pool,uint32_t num_req_buffers)3109 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 3110 struct dp_srng *dp_rxdma_srng, 3111 struct rx_desc_pool *rx_desc_pool, 3112 
uint32_t num_req_buffers) 3113 { 3114 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 3115 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 3116 union dp_rx_desc_list_elem_t *next; 3117 void *rxdma_ring_entry; 3118 qdf_dma_addr_t paddr; 3119 struct dp_rx_nbuf_frag_info *nf_info; 3120 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 3121 uint32_t buffer_index, nbuf_ptrs_per_page; 3122 qdf_nbuf_t nbuf; 3123 QDF_STATUS ret; 3124 int page_idx, total_pages; 3125 union dp_rx_desc_list_elem_t *desc_list = NULL; 3126 union dp_rx_desc_list_elem_t *tail = NULL; 3127 int sync_hw_ptr = 1; 3128 uint32_t num_entries_avail; 3129 bool dp_buf_page_frag_alloc_enable; 3130 3131 if (qdf_unlikely(!dp_pdev)) { 3132 dp_rx_err("%pK: pdev is null for mac_id = %d", 3133 dp_soc, mac_id); 3134 return QDF_STATUS_E_FAILURE; 3135 } 3136 3137 dp_buf_page_frag_alloc_enable = 3138 wlan_cfg_is_dp_buf_page_frag_alloc_enable(dp_soc->wlan_cfg_ctx); 3139 3140 if (qdf_unlikely(!rxdma_srng)) { 3141 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3142 return QDF_STATUS_E_FAILURE; 3143 } 3144 3145 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 3146 3147 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3148 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 3149 rxdma_srng, 3150 sync_hw_ptr); 3151 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3152 3153 if (!num_entries_avail) { 3154 dp_err("Num of available entries is zero, nothing to do"); 3155 return QDF_STATUS_E_NOMEM; 3156 } 3157 3158 if (num_entries_avail < num_req_buffers) 3159 num_req_buffers = num_entries_avail; 3160 3161 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 3162 num_req_buffers, &desc_list, &tail); 3163 if (!nr_descs) { 3164 dp_err("no free rx_descs in freelist"); 3165 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 3166 return QDF_STATUS_E_NOMEM; 3167 } 3168 3169 dp_debug("got %u RX descs for driver attach", nr_descs); 3170 3171 /* 3172 * Try to allocate pointers to the nbuf one page at a time. 3173 * Take pointers that can fit in one page of memory and 3174 * iterate through the total descriptors that need to be 3175 * allocated in order of pages. Reuse the pointers that 3176 * have been allocated to fit in one page across each 3177 * iteration to index into the nbuf. 3178 */ 3179 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 3180 3181 /* 3182 * Add an extra page to store the remainder if any 3183 */ 3184 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 3185 total_pages++; 3186 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 3187 if (!nf_info) { 3188 dp_err("failed to allocate nbuf array"); 3189 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3190 QDF_BUG(0); 3191 return QDF_STATUS_E_NOMEM; 3192 } 3193 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 3194 3195 for (page_idx = 0; page_idx < total_pages; page_idx++) { 3196 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 3197 3198 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 3199 /* 3200 * The last page of buffer pointers may not be required 3201 * completely based on the number of descriptors. Below 3202 * check will ensure we are allocating only the 3203 * required number of descriptors. 
3204 */ 3205 if (nr_nbuf_total >= nr_descs) 3206 break; 3207 /* Flag is set while pdev rx_desc_pool initialization */ 3208 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3209 ret = dp_pdev_frag_alloc_and_map(dp_soc, 3210 &nf_info[nr_nbuf], dp_pdev, 3211 rx_desc_pool); 3212 else 3213 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 3214 &nf_info[nr_nbuf], dp_pdev, 3215 rx_desc_pool, 3216 dp_buf_page_frag_alloc_enable); 3217 if (QDF_IS_STATUS_ERROR(ret)) 3218 break; 3219 3220 nr_nbuf_total++; 3221 } 3222 3223 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3224 3225 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 3226 rxdma_ring_entry = 3227 hal_srng_src_get_next(dp_soc->hal_soc, 3228 rxdma_srng); 3229 qdf_assert_always(rxdma_ring_entry); 3230 3231 next = desc_list->next; 3232 paddr = nf_info[buffer_index].paddr; 3233 nbuf = nf_info[buffer_index].virt_addr.nbuf; 3234 3235 /* Flag is set while pdev rx_desc_pool initialization */ 3236 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3237 dp_rx_desc_frag_prep(&desc_list->rx_desc, 3238 &nf_info[buffer_index]); 3239 else 3240 dp_rx_desc_prep(&desc_list->rx_desc, 3241 &nf_info[buffer_index]); 3242 desc_list->rx_desc.in_use = 1; 3243 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 3244 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 3245 __func__, 3246 RX_DESC_REPLENISHED); 3247 3248 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc ,rxdma_ring_entry, paddr, 3249 desc_list->rx_desc.cookie, 3250 rx_desc_pool->owner); 3251 3252 dp_ipa_handle_rx_buf_smmu_mapping( 3253 dp_soc, nbuf, 3254 rx_desc_pool->buf_size, true, 3255 __func__, __LINE__); 3256 3257 dp_audio_smmu_map(dp_soc->osdev, 3258 qdf_mem_paddr_from_dmaaddr(dp_soc->osdev, 3259 QDF_NBUF_CB_PADDR(nbuf)), 3260 QDF_NBUF_CB_PADDR(nbuf), 3261 rx_desc_pool->buf_size); 3262 3263 desc_list = next; 3264 } 3265 3266 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 3267 rxdma_srng, nr_nbuf, nr_nbuf); 3268 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3269 } 3270 3271 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 3272 qdf_mem_free(nf_info); 3273 3274 if (!nr_nbuf_total) { 3275 dp_err("No nbuf's allocated"); 3276 QDF_BUG(0); 3277 return QDF_STATUS_E_RESOURCES; 3278 } 3279 3280 /* No need to count the number of bytes received during replenish. 3281 * Therefore set replenish.pkts.bytes as 0. 
3282 */ 3283 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 3284 3285 return QDF_STATUS_SUCCESS; 3286 } 3287 3288 qdf_export_symbol(dp_pdev_rx_buffers_attach); 3289 3290 #ifdef DP_RX_MON_MEM_FRAG dp_rx_enable_mon_dest_frag(struct rx_desc_pool * rx_desc_pool,bool is_mon_dest_desc)3291 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3292 bool is_mon_dest_desc) 3293 { 3294 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 3295 if (is_mon_dest_desc) 3296 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 3297 else 3298 qdf_frag_cache_drain(&rx_desc_pool->pf_cache); 3299 } 3300 #else dp_rx_enable_mon_dest_frag(struct rx_desc_pool * rx_desc_pool,bool is_mon_dest_desc)3301 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3302 bool is_mon_dest_desc) 3303 { 3304 rx_desc_pool->rx_mon_dest_frag_enable = false; 3305 if (is_mon_dest_desc) 3306 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 3307 } 3308 #endif 3309 3310 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 3311 3312 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev * pdev)3313 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 3314 { 3315 struct dp_soc *soc = pdev->soc; 3316 uint32_t rxdma_entries; 3317 uint32_t rx_sw_desc_num; 3318 struct dp_srng *dp_rxdma_srng; 3319 struct rx_desc_pool *rx_desc_pool; 3320 uint32_t status = QDF_STATUS_SUCCESS; 3321 int mac_for_pdev; 3322 3323 mac_for_pdev = pdev->lmac_id; 3324 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3325 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3326 soc, mac_for_pdev); 3327 return status; 3328 } 3329 3330 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3331 rxdma_entries = dp_rxdma_srng->num_entries; 3332 3333 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3334 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3335 3336 rx_desc_pool->desc_type = QDF_DP_RX_DESC_BUF_TYPE; 3337 status = dp_rx_desc_pool_alloc(soc, 3338 rx_sw_desc_num, 3339 rx_desc_pool); 3340 if (status != QDF_STATUS_SUCCESS) 3341 return status; 3342 3343 return status; 3344 } 3345 dp_rx_pdev_desc_pool_free(struct dp_pdev * pdev)3346 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 3347 { 3348 int mac_for_pdev = pdev->lmac_id; 3349 struct dp_soc *soc = pdev->soc; 3350 struct rx_desc_pool *rx_desc_pool; 3351 3352 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3353 3354 dp_rx_desc_pool_free(soc, rx_desc_pool); 3355 } 3356 dp_rx_pdev_desc_pool_init(struct dp_pdev * pdev)3357 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3358 { 3359 int mac_for_pdev = pdev->lmac_id; 3360 struct dp_soc *soc = pdev->soc; 3361 uint32_t rxdma_entries; 3362 uint32_t rx_sw_desc_num; 3363 struct dp_srng *dp_rxdma_srng; 3364 struct rx_desc_pool *rx_desc_pool; 3365 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3366 uint16_t buf_size; 3367 3368 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 3369 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3370 3371 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3372 /* 3373 * If NSS is enabled, rx_desc_pool is already filled. 3374 * Hence, just disable desc_pool frag flag. 
3375 */ 3376 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3377 3378 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3379 soc, mac_for_pdev); 3380 return QDF_STATUS_SUCCESS; 3381 } 3382 3383 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3384 return QDF_STATUS_E_NOMEM; 3385 3386 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3387 rxdma_entries = dp_rxdma_srng->num_entries; 3388 3389 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3390 3391 rx_sw_desc_num = 3392 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3393 3394 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3395 rx_desc_pool->buf_size = buf_size; 3396 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3397 /* Disable monitor dest processing via frag */ 3398 if (target_type == TARGET_TYPE_QCN9160) { 3399 rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE; 3400 rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT; 3401 dp_rx_enable_mon_dest_frag(rx_desc_pool, true); 3402 } else { 3403 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3404 } 3405 3406 dp_rx_desc_pool_init(soc, mac_for_pdev, 3407 rx_sw_desc_num, rx_desc_pool); 3408 return QDF_STATUS_SUCCESS; 3409 } 3410 dp_rx_pdev_desc_pool_deinit(struct dp_pdev * pdev)3411 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3412 { 3413 int mac_for_pdev = pdev->lmac_id; 3414 struct dp_soc *soc = pdev->soc; 3415 struct rx_desc_pool *rx_desc_pool; 3416 3417 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3418 3419 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3420 } 3421 3422 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev * pdev)3423 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3424 { 3425 int mac_for_pdev = pdev->lmac_id; 3426 struct dp_soc *soc = pdev->soc; 3427 struct dp_srng *dp_rxdma_srng; 3428 struct rx_desc_pool *rx_desc_pool; 3429 uint32_t rxdma_entries; 3430 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3431 3432 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3433 rxdma_entries = dp_rxdma_srng->num_entries; 3434 3435 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3436 3437 /* Initialize RX buffer pool which will be 3438 * used during low memory conditions 3439 */ 3440 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3441 3442 if (target_type == TARGET_TYPE_QCN9160) 3443 return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, 3444 dp_rxdma_srng, 3445 rx_desc_pool, 3446 rxdma_entries - 1); 3447 else 3448 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3449 dp_rxdma_srng, 3450 rx_desc_pool, 3451 rxdma_entries - 1); 3452 } 3453 3454 void dp_rx_pdev_buffers_free(struct dp_pdev * pdev)3455 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3456 { 3457 int mac_for_pdev = pdev->lmac_id; 3458 struct dp_soc *soc = pdev->soc; 3459 struct rx_desc_pool *rx_desc_pool; 3460 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3461 3462 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3463 3464 if (target_type == TARGET_TYPE_QCN9160) 3465 dp_rx_desc_frag_free(soc, rx_desc_pool); 3466 else 3467 dp_rx_desc_nbuf_free(soc, rx_desc_pool, false); 3468 3469 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3470 } 3471 3472 #ifdef DP_RX_SPECIAL_FRAME_NEED dp_rx_deliver_special_frame(struct dp_soc * soc,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf,uint32_t frame_mask,uint8_t * rx_tlv_hdr)3473 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3474 struct dp_txrx_peer *txrx_peer, 3475 qdf_nbuf_t nbuf, uint32_t frame_mask, 3476 uint8_t *rx_tlv_hdr) 3477 { 3478 uint32_t l2_hdr_offset = 0; 3479 uint16_t msdu_len = 0; 
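	/*
	 * skip_len: bytes pulled from the head before delivery (the L3
	 * header padding reported by HW, plus the RX pkt TLVs when the
	 * buffer is not a frag).
	 */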
3480 uint32_t skip_len; 3481 3482 l2_hdr_offset = 3483 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3484 3485 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3486 skip_len = l2_hdr_offset; 3487 } else { 3488 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3489 skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size; 3490 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3491 } 3492 3493 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3494 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3495 qdf_nbuf_pull_head(nbuf, skip_len); 3496 3497 if (txrx_peer->vdev) { 3498 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, 3499 QDF_TX_RX_STATUS_OK); 3500 } 3501 3502 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3503 dp_info("special frame, mpdu sn 0x%x", 3504 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 3505 qdf_nbuf_set_exc_frame(nbuf, 1); 3506 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, 3507 nbuf, NULL); 3508 return true; 3509 } 3510 3511 return false; 3512 } 3513 #endif 3514 3515 #ifdef QCA_MULTIPASS_SUPPORT dp_rx_multipass_process(struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf,uint8_t tid)3516 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf, 3517 uint8_t tid) 3518 { 3519 struct vlan_ethhdr *vethhdrp; 3520 3521 if (qdf_unlikely(!txrx_peer->vlan_id)) 3522 return true; 3523 3524 vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf); 3525 /* 3526 * h_vlan_proto & h_vlan_TCI should be 0x8100 & zero respectively 3527 * as it is expected to be padded by 0 3528 * return false if frame doesn't have above tag so that caller will 3529 * drop the frame. 3530 */ 3531 if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) || 3532 qdf_unlikely(vethhdrp->h_vlan_TCI != 0)) 3533 return false; 3534 3535 vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) | 3536 (txrx_peer->vlan_id & VLAN_VID_MASK)); 3537 3538 if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE)) 3539 dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf); 3540 3541 return true; 3542 } 3543 #endif /* QCA_MULTIPASS_SUPPORT */ 3544
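/*
 * Illustrative sketch (not part of the driver, kept under #if 0): how the
 * 802.1Q TCI written by dp_rx_multipass_process() above is composed. The
 * EX_* constants are local stand-ins for the standard Linux
 * VLAN_PRIO_SHIFT (13) and VLAN_VID_MASK (0x0fff) values so that the
 * example stays self-contained.
 */
#if 0
#include <stdint.h>

#define EX_VLAN_PRIO_SHIFT	13	/* priority lives in TCI bits 15:13 */
#define EX_VLAN_VID_MASK	0x0fff	/* VLAN id lives in TCI bits 11:0 */

/* Build the host-order TCI from a TID (0..7) and a 12-bit VLAN id. */
static uint16_t ex_build_vlan_tci(uint8_t tid, uint16_t vlan_id)
{
	return (uint16_t)(((tid & 0x7) << EX_VLAN_PRIO_SHIFT) |
			  (vlan_id & EX_VLAN_VID_MASK));
}

/*
 * Example: tid 5 and vlan_id 100 give 0xa064. The driver stores the value
 * in network byte order with htons() before handing the frame up.
 */
#endif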