/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_be_hw_headers.h"
#include "dp_types.h"
#include "hal_be_tx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "hal_be_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "dp_mon.h"
#include <dp_mon_2.0.h>
#include <dp_tx_mon_2.0.h>
#include <dp_be.h>
#include <hal_be_api_mon.h>
#include <dp_mon_filter_2.0.h>
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

/* Number of consecutive zero-work reap passes (while Tx monitor is
 * enabled) after which the Tx monitor block is declared stuck.
 */
#define MAX_TX_MONITOR_STUCK 50

#ifdef TXMON_DEBUG
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be - pointer to dp_pdev_tx_monitor_be
 * @work_done - tx monitor work done
 *
 * Debug variant: in addition to the stuck-detection accounting it emits a
 * rate-limited trace of the ppdu_info list depths and the status/packet
 * buffer counters.
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
		       uint32_t work_done)
{
	/* Count consecutive no-work polls while enabled; any progress
	 * resets the counter.
	 */
	if (tx_mon_be->mode && !work_done)
		tx_mon_be->stats.tx_mon_stuck++;
	else if (tx_mon_be->mode && work_done)
		tx_mon_be->stats.tx_mon_stuck = 0;

	if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
		dp_mon_warn("Tx monitor block got stuck!!!!!");
		tx_mon_be->stats.tx_mon_stuck = 0;
		tx_mon_be->stats.total_tx_mon_stuck++;
	}

	dp_mon_debug_rl("tx_ppdu_info[%u :D %u] STATUS[R %llu: F %llu] PKT_BUF[R %llu: F %llu : P %llu : S %llu]",
			tx_mon_be->tx_ppdu_info_list_depth,
			tx_mon_be->defer_ppdu_info_list_depth,
			tx_mon_be->stats.status_buf_recv,
			tx_mon_be->stats.status_buf_free,
			tx_mon_be->stats.pkt_buf_recv,
			tx_mon_be->stats.pkt_buf_free,
			tx_mon_be->stats.pkt_buf_processed,
			tx_mon_be->stats.pkt_buf_to_stack);
}

#else
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be - pointer to dp_pdev_tx_monitor_be
 * @work_done - tx monitor work done
 *
 * Non-debug variant: only maintains the stuck-detection counters; no
 * rate-limited trace output.
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
		       uint32_t work_done)
{
	/* Count consecutive no-work polls while enabled; any progress
	 * resets the counter.
	 */
	if (tx_mon_be->mode && !work_done)
		tx_mon_be->stats.tx_mon_stuck++;
	else if (tx_mon_be->mode && work_done)
		tx_mon_be->stats.tx_mon_stuck = 0;

	if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
		dp_mon_warn("Tx monitor block got stuck!!!!!");
		tx_mon_be->stats.tx_mon_stuck = 0;
		tx_mon_be->stats.total_tx_mon_stuck++;
	}
}
#endif

/*
 * dp_tx_mon_srng_process_2_0() - reap the Tx monitor destination ring
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_id: LMAC id of the ring to service
 * @quota: maximum number of ring entries to consume in this pass
 *
 * Reaps up to @quota descriptors, accounts HW-reported drops for empty
 * descriptors, frees/forwards status buffers based on the end reason and
 * replenishes the reaped monitor descriptors back to the buffer ring.
 *
 * Return: number of ring entries processed (work done)
 */
static inline uint32_t
dp_tx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
			   uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *tx_mon_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	uint32_t work_done = 0;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
	struct dp_mon_desc_pool *tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;
	struct dp_tx_mon_desc_list mon_desc_list;
	uint32_t replenish_cnt = 0;

	if (!pdev) {
		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_dst_srng = mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng;

	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
			   soc, mon_dst_srng);
		return work_done;
	}

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return work_done;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	hal_soc = soc->hal_soc;

	qdf_assert((hal_soc && pdev));

	/* mon_lock serializes this reap pass against monitor
	 * (re)configuration on the same pdev.
	 */
	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	mon_desc_list.desc_list = NULL;
	mon_desc_list.tail = NULL;
	mon_desc_list.tx_mon_reap_cnt = 0;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
			   __func__, __LINE__, mon_dst_srng);
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return work_done;
	}

	/* Peek (do not advance) so the descriptor is only consumed after
	 * its status buffer has been taken over below.
	 */
	while (qdf_likely((tx_mon_dst_ring_desc =
			(void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
			&& quota--)) {
		struct hal_mon_desc hal_mon_tx_desc = {0};
		struct dp_mon_desc *mon_desc = NULL;
		qdf_frag_t status_frag = NULL;
		uint32_t end_offset = 0;

		hal_be_get_mon_dest_status(soc->hal_soc,
					   tx_mon_dst_ring_desc,
					   &hal_mon_tx_desc);

		/* Empty descriptor: HW reports drop counts only, there is
		 * no status buffer attached to this entry.
		 */
		if (hal_mon_tx_desc.empty_descriptor) {
			/* update stats counter */
			dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d DROP[PPDU:%d MPDU:%d TLV:%d] E_O_PPDU:%d",
				     hal_mon_tx_desc.ppdu_id,
				     hal_mon_tx_desc.initiator,
				     hal_mon_tx_desc.empty_descriptor,
				     hal_mon_tx_desc.ring_id,
				     hal_mon_tx_desc.looping_count,
				     hal_mon_tx_desc.ppdu_drop_count,
				     hal_mon_tx_desc.mpdu_drop_count,
				     hal_mon_tx_desc.tlv_drop_count,
				     hal_mon_tx_desc.end_of_ppdu_dropped);

			tx_mon_be->stats.ppdu_drop_cnt +=
				hal_mon_tx_desc.ppdu_drop_count;
			tx_mon_be->stats.mpdu_drop_cnt +=
				hal_mon_tx_desc.mpdu_drop_count;
			tx_mon_be->stats.tlv_drop_cnt +=
				hal_mon_tx_desc.tlv_drop_count;
			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
			     hal_mon_tx_desc.ppdu_id,
			     hal_mon_tx_desc.initiator,
			     hal_mon_tx_desc.empty_descriptor,
			     hal_mon_tx_desc.ring_id,
			     hal_mon_tx_desc.looping_count,
			     hal_mon_tx_desc.buf_addr,
			     hal_mon_tx_desc.end_offset,
			     hal_mon_tx_desc.end_reason);

		/* buf_addr carries the SW cookie: a dp_mon_desc pointer */
		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_tx_desc.buf_addr);
		qdf_assert_always(mon_desc);

		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
					   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}

		/* Magic check guards against a corrupted/foreign cookie */
		if (mon_desc->magic != DP_MON_DESC_MAGIC) {
			dp_mon_err("Invalid monitor descriptor");
			qdf_assert_always(0);
		}

		end_offset = hal_mon_tx_desc.end_offset;

		/* Take ownership of the status buffer from the descriptor */
		status_frag = (qdf_frag_t)(mon_desc->buf_addr);
		mon_desc->buf_addr = NULL;
		/* increment reap count */
		++mon_desc_list.tx_mon_reap_cnt;

		/* add the mon_desc to free list */
		dp_mon_add_to_free_desc_list(&mon_desc_list.desc_list,
					     &mon_desc_list.tail, mon_desc);

		if (qdf_unlikely(!status_frag)) {
			dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
				     hal_mon_tx_desc.ppdu_id,
				     hal_mon_tx_desc.initiator,
				     hal_mon_tx_desc.empty_descriptor,
				     hal_mon_tx_desc.ring_id,
				     hal_mon_tx_desc.looping_count,
				     hal_mon_tx_desc.buf_addr,
				     hal_mon_tx_desc.end_offset,
				     hal_mon_tx_desc.end_reason);

			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		tx_mon_be->stats.status_buf_recv++;

		/* Flushed/truncated PPDU: record the end reason and release
		 * the packet buffers referenced by this status buffer
		 * instead of processing the TLVs.
		 */
		if ((hal_mon_tx_desc.end_reason == HAL_MON_FLUSH_DETECTED) ||
		    (hal_mon_tx_desc.end_reason == HAL_MON_PPDU_TRUNCATED)) {
			tx_mon_be->be_ppdu_id = hal_mon_tx_desc.ppdu_id;

			dp_tx_mon_update_end_reason(mon_pdev,
						    hal_mon_tx_desc.ppdu_id,
						    hal_mon_tx_desc.end_reason);
			/* check and free packet buffer from status buffer */
			dp_tx_mon_status_free_packet_buf(pdev, status_frag,
							 end_offset,
							 &mon_desc_list);

			tx_mon_be->stats.status_buf_free++;
			qdf_frag_free(status_frag);

			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		/* Normal path: parse the status TLVs (consumes status_frag) */
		dp_tx_mon_process_status_tlv(soc, pdev,
					     &hal_mon_tx_desc,
					     status_frag,
					     end_offset,
					     &mon_desc_list);

		work_done++;
		hal_srng_dst_get_next(hal_soc, mon_dst_srng);
	}
	dp_srng_access_end(int_ctx, soc, mon_dst_srng);

	/* Return the reaped descriptors to the Tx monitor buffer ring */
	if (mon_desc_list.tx_mon_reap_cnt) {
		dp_mon_buffers_replenish(soc, &mon_soc_be->tx_mon_buf_ring,
					 tx_mon_desc_pool,
					 mon_desc_list.tx_mon_reap_cnt,
					 &mon_desc_list.desc_list,
					 &mon_desc_list.tail,
					 &replenish_cnt);
	}
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
	dp_mon_debug("mac_id: %d, work_done:%d tx_monitor_reap_cnt:%d",
		     mac_id, work_done, mon_desc_list.tx_mon_reap_cnt);

	tx_mon_be->stats.total_tx_mon_reap_cnt += mon_desc_list.tx_mon_reap_cnt;
	tx_mon_be->stats.totat_tx_mon_replenish_cnt += replenish_cnt;
	dp_tx_mon_debug_status(tx_mon_be, work_done);

	return work_done;
}

/*
 * dp_tx_mon_process_2_0() - Tx monitor processing entry point
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_id: LMAC id
 * @quota: max ring entries to process
 *
 * Thin wrapper over dp_tx_mon_srng_process_2_0().
 *
 * Return: work done
 */
uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_tx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);

	return work_done;
}

/*
 * dp_tx_mon_buf_desc_pool_deinit() - deinit the Tx monitor descriptor pool
 * @soc: DP soc handle
 *
 * Return: void
 */
void
dp_tx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	dp_mon_desc_pool_deinit(&mon_soc_be->tx_desc_mon);
}

/*
 * dp_tx_mon_buf_desc_pool_init() - init the Tx monitor descriptor pool,
 * sized from the Tx monitor buffer ring configuration
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_mon_buf_desc_pool_init(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	uint32_t num_entries;

	num_entries =
		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc->wlan_cfg_ctx);

	return dp_mon_desc_pool_init(&mon_soc_be->tx_desc_mon, num_entries);
}

/*
 * dp_tx_mon_buf_desc_pool_free() - free the Tx monitor descriptor pool
 * @soc: DP soc handle
 *
 * Return: void
 */
void dp_tx_mon_buf_desc_pool_free(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (mon_soc_be)
		dp_mon_desc_pool_free(&mon_soc_be->tx_desc_mon);
}

/*
 * dp_tx_mon_buf_desc_pool_alloc() - allocate the Tx monitor descriptor
 * pool, sized from the Tx monitor buffer ring configuration
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
{
	struct dp_srng *mon_buf_ring;
	struct dp_mon_desc_pool *tx_mon_desc_pool;
	int entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);

	mon_buf_ring = &mon_soc_be->tx_mon_buf_ring;

	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;

	/* NOTE(review): qdf_print looks like a debug left-over; the rest of
	 * this file uses the dp_mon_* trace macros — consider dp_mon_debug.
	 */
	qdf_print("%s:%d tx mon buf desc pool entries: %d", __func__, __LINE__, entries);
	return dp_mon_desc_pool_alloc(entries, tx_mon_desc_pool);
}

/*
 * dp_tx_mon_buffers_free() - unmap and free all Tx monitor pool buffers
 * @soc: DP soc handle
 *
 * Return: void
 */
void
dp_tx_mon_buffers_free(struct dp_soc *soc)
{
	struct dp_mon_desc_pool *tx_mon_desc_pool;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;

	dp_mon_pool_frag_unmap_and_free(soc, tx_mon_desc_pool);
}

/*
 * dp_tx_mon_buffers_alloc() - replenish @size buffers into the Tx monitor
 * buffer ring from the descriptor pool
 * @soc: DP soc handle
 * @size: number of buffers to allocate
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
{
	struct dp_srng *mon_buf_ring;
	struct dp_mon_desc_pool *tx_mon_desc_pool;
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	mon_buf_ring = &mon_soc_be->tx_mon_buf_ring;

	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;

	return dp_mon_buffers_replenish(soc, mon_buf_ring,
					tx_mon_desc_pool,
					size,
					&desc_list, &tail, NULL);
}

#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE

/*
 * dp_tx_mon_nbuf_get_num_frag() - get total number of fragments
 * @nbuf: Network buf instance (may be NULL; returns 0 in that case)
 *
 * Return: number of fragments across the nbuf's frag list
 */
static inline
uint32_t dp_tx_mon_nbuf_get_num_frag(qdf_nbuf_t nbuf)
{
	uint32_t num_frag = 0;

	if (qdf_unlikely(!nbuf))
		return num_frag;

	num_frag = qdf_nbuf_get_nr_frags_in_fraglist(nbuf);

	return num_frag;
}

/*
 * dp_tx_mon_free_usr_mpduq() - API to free user mpduq
 * @tx_ppdu_info - pointer to tx_ppdu_info
 * @usr_idx - user index
 * @tx_mon_be - pointer to tx capture be
 *
 * Drains and frees every mpdu queued for the given user, accounting the
 * freed fragments in pkt_buf_free.
 *
 * Return: void
 */
void dp_tx_mon_free_usr_mpduq(struct dp_tx_ppdu_info *tx_ppdu_info,
			      uint8_t usr_idx,
			      struct dp_pdev_tx_monitor_be *tx_mon_be)
{
	qdf_nbuf_queue_t *mpdu_q;
	uint32_t num_frag = 0;
	qdf_nbuf_t buf = NULL;

	if (qdf_unlikely(!tx_ppdu_info))
		return;

	mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);

	while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) {
		num_frag += dp_tx_mon_nbuf_get_num_frag(buf);
		qdf_nbuf_free(buf);
	}
	tx_mon_be->stats.pkt_buf_free += num_frag;
}

/*
 * dp_tx_mon_free_ppdu_info() - API to free dp_tx_ppdu_info
 * @tx_ppdu_info - pointer to tx_ppdu_info
 * @tx_mon_be - pointer to tx capture be
 *
 * Frees all per-user mpdu queues and then the ppdu_info itself.
 *
 * Return: void
 */
void dp_tx_mon_free_ppdu_info(struct dp_tx_ppdu_info *tx_ppdu_info,
			      struct dp_pdev_tx_monitor_be *tx_mon_be)
{
	uint32_t user = 0;

	for (; user < TXMON_PPDU_HAL(tx_ppdu_info, num_users); user++) {
		qdf_nbuf_queue_t *mpdu_q;
		uint32_t num_frag = 0;
		qdf_nbuf_t buf = NULL;

		mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user, mpdu_q);

		while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) {
			num_frag += dp_tx_mon_nbuf_get_num_frag(buf);
			qdf_nbuf_free(buf);
		}
		tx_mon_be->stats.pkt_buf_free += num_frag;
	}

	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0;
	qdf_mem_free(tx_ppdu_info);
}

/*
 * dp_tx_mon_get_ppdu_info() - API to allocate dp_tx_ppdu_info
 * @pdev - pdev handle
 * @type - type of ppdu_info data or protection
 * @num_user - number user in a ppdu_info
 * @ppdu_id - ppdu_id number
 *
 * Allocates a ppdu_info with trailing per-user status entries, initializes
 * each user's mpdu queue, and caches the pointer (and a zeroed status_info)
 * on the pdev's tx monitor context as either the protection or data ppdu.
 *
 * Return: pointer to dp_tx_ppdu_info, NULL on allocation failure
 */
struct dp_tx_ppdu_info *dp_tx_mon_get_ppdu_info(struct dp_pdev *pdev,
						enum tx_ppdu_info_type type,
						uint8_t num_user,
						uint32_t ppdu_id)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
		&mon_pdev_be->tx_monitor_be;
	struct dp_tx_ppdu_info *tx_ppdu_info;
	size_t sz_ppdu_info = 0;
	uint8_t i;

	/* allocate new tx_ppdu_info: base struct plus one
	 * mon_rx_user_status per user
	 */
	sz_ppdu_info = (sizeof(struct dp_tx_ppdu_info) +
			(sizeof(struct mon_rx_user_status) * num_user));

	tx_ppdu_info = (struct dp_tx_ppdu_info *)qdf_mem_malloc(sz_ppdu_info);
	if (!tx_ppdu_info) {
		dp_mon_err("allocation of tx_ppdu_info type[%d] failed!!!",
			   type);
		return NULL;
	}

	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0;
	TXMON_PPDU_HAL(tx_ppdu_info, num_users) = num_user;
	TXMON_PPDU_HAL(tx_ppdu_info, ppdu_id) = ppdu_id;
	TXMON_PPDU(tx_ppdu_info, ppdu_id) = ppdu_id;

	for (i = 0; i < num_user; i++) {
		qdf_nbuf_queue_t *mpdu_q;

		mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, i, mpdu_q);
		qdf_nbuf_queue_init(mpdu_q);
	}

	/* assign tx_ppdu_info to monitor pdev for reference */
	if (type == TX_PROT_PPDU_INFO) {
		qdf_mem_zero(&tx_mon_be->prot_status_info, sizeof(struct hal_tx_status_info));
		tx_mon_be->tx_prot_ppdu_info = tx_ppdu_info;
		TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 0;
	} else {
		qdf_mem_zero(&tx_mon_be->data_status_info, sizeof(struct hal_tx_status_info));
		tx_mon_be->tx_data_ppdu_info = tx_ppdu_info;
		TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 1;
	}

	return tx_ppdu_info;
}

/*
 * dp_print_pdev_tx_monitor_stats_2_0: print tx capture stats
 * @pdev: DP PDEV handle
 *
 * Prints a snapshot copy of the pdev's Tx monitor counters.
 *
 * return: void
 */
void dp_print_pdev_tx_monitor_stats_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
		&mon_pdev_be->tx_monitor_be;
	struct dp_tx_monitor_drop_stats stats = {0};

	/* snapshot the live counters so the print is self-consistent */
	qdf_mem_copy(&stats, &tx_mon_be->stats,
		     sizeof(struct dp_tx_monitor_drop_stats));

	/* TX monitor stats needed for beryllium */
	DP_PRINT_STATS("\n\tTX Capture BE stats mode[%d]:", tx_mon_be->mode);
	DP_PRINT_STATS("\tbuffer pending : %u", tx_mon_be->last_frag_q_idx);
	DP_PRINT_STATS("\treplenish count: %llu",
		       stats.totat_tx_mon_replenish_cnt);
	DP_PRINT_STATS("\treap count : %llu", stats.total_tx_mon_reap_cnt);
	DP_PRINT_STATS("\tmonitor stuck : %u", stats.total_tx_mon_stuck);
	DP_PRINT_STATS("\tStatus buffer");
	DP_PRINT_STATS("\t\treceived : %llu", stats.status_buf_recv);
	DP_PRINT_STATS("\t\tfree : %llu", stats.status_buf_free);
	DP_PRINT_STATS("\tPacket buffer");
	DP_PRINT_STATS("\t\treceived : %llu", stats.pkt_buf_recv);
	DP_PRINT_STATS("\t\tfree : %llu", stats.pkt_buf_free);
	DP_PRINT_STATS("\t\tprocessed : %llu", stats.pkt_buf_processed);
	DP_PRINT_STATS("\t\tto stack : %llu", stats.pkt_buf_to_stack);
	DP_PRINT_STATS("\tppdu info");
	DP_PRINT_STATS("\t\tthreshold : %llu", stats.ppdu_info_drop_th);
	DP_PRINT_STATS("\t\tflush : %llu", stats.ppdu_info_drop_flush);
	DP_PRINT_STATS("\t\ttruncated : %llu", stats.ppdu_info_drop_trunc);
	DP_PRINT_STATS("\tDrop stats");
	DP_PRINT_STATS("\t\tppdu drop : %llu", stats.ppdu_drop_cnt);
	DP_PRINT_STATS("\t\tmpdu drop : %llu", stats.mpdu_drop_cnt);
	DP_PRINT_STATS("\t\ttlv drop : %llu", stats.tlv_drop_cnt);
}

/*
 * dp_config_enh_tx_monitor_2_0()- API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value (TX_MON_BE_DISABLE/FULL_CAPTURE/PEER_FILTER)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_config_enh_tx_monitor_2_0(struct dp_pdev *pdev, uint8_t val)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
		&mon_pdev_be->tx_monitor_be;
	struct dp_soc *soc = pdev->soc;
	uint16_t num_of_buffers;
	QDF_STATUS status;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	switch (val) {
	case TX_MON_BE_DISABLE:
	{
		tx_mon_be->mode = TX_MON_BE_DISABLE;
		mon_pdev_be->tx_mon_mode = 0;
		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B;
		break;
	}
	case TX_MON_BE_FULL_CAPTURE:
	{
		/* full capture: fill the ring to its configured size */
		num_of_buffers = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
								   num_of_buffers);
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("Tx monitor buffer allocation failed");
			return status;
		}
		qdf_mem_zero(&tx_mon_be->stats,
			     sizeof(struct dp_tx_monitor_drop_stats));
		tx_mon_be->last_tsft = 0;
		tx_mon_be->last_ppdu_timestamp = 0;
		tx_mon_be->mode = TX_MON_BE_FULL_CAPTURE;
		mon_pdev_be->tx_mon_mode = 1;
		mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH;
		break;
	}
	case TX_MON_BE_PEER_FILTER:
	{
		/* peer filter: default fill level is sufficient */
		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
								   DP_MON_RING_FILL_LEVEL_DEFAULT);
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("Tx monitor buffer allocation failed");
			return status;
		}
		tx_mon_be->mode = TX_MON_BE_PEER_FILTER;
		mon_pdev_be->tx_mon_mode = 2;
		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_256B;
		break;
	}
	default:
	{
		return QDF_STATUS_E_INVAL;
	}
	}

	dp_mon_info("Tx monitor mode:%d mon_mode_flag:%d config_length:%d",
		    tx_mon_be->mode, mon_pdev_be->tx_mon_mode,
		    mon_pdev_be->tx_mon_filter_length);

	/* push the new mode into the monitor filter configuration */
	dp_mon_filter_setup_tx_mon_mode(pdev);
	dp_tx_mon_filter_update(pdev);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_set_tx_capture_enabled_2_0() - add tx monitor peer filter
 * @pdev: Datapath PDEV handle
 * @peer: Datapath PEER handle
 * @is_tx_pkt_cap_enable: flag for tx capture enable/disable
 * @peer_mac: peer mac address
 *
 * NOTE(review): stub — per-peer filter programming is not implemented yet;
 * always reports success.
 *
 * Return: status
 */
QDF_STATUS dp_peer_set_tx_capture_enabled_2_0(struct dp_pdev *pdev_handle,
					      struct dp_peer *peer_handle,
					      uint8_t is_tx_pkt_cap_enable,
					      uint8_t *peer_mac)
{
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_LITE_MONITOR
/*
 * dp_fill_lite_mon_vdev() - point the capture info at the lite monitor
 * vdev's OS interface, when one is configured
 */
static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info,
				  struct dp_mon_pdev_be *mon_pdev_be)
{
	struct dp_lite_mon_config *config;
	struct dp_vdev *lite_mon_vdev;

	config = &mon_pdev_be->lite_mon_tx_config->tx_config;
	lite_mon_vdev = config->lite_mon_vdev;

	if (lite_mon_vdev)
		tx_cap_info->osif_vdev = lite_mon_vdev->osif_vdev;
}

/**
 * dp_lite_mon_filter_ppdu() - Filter frames at ppdu level
 * @mpdu_count: mpdu count in the nbuf queue
 * @level: Lite monitor filter level
 *
 * At PPDU level only the first mpdu of a ppdu is forwarded.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_lite_mon_filter_ppdu(uint8_t mpdu_count, uint8_t level)
{
	if (level == CDP_LITE_MON_LEVEL_PPDU && mpdu_count > 1)
		return QDF_STATUS_E_CANCELED;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_lite_mon_filter_subtype() - filter frames with subtype
 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure
 * @config: Lite monitor configuration
 * @buf: frame buffer whose 802.11 header is inspected
 *
 * Matches the frame's FC0 type/subtype against the configured FP-mode
 * mgmt/ctrl/data filter bitmaps.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_lite_mon_filter_subtype(struct dp_tx_ppdu_info *tx_ppdu_info,
			   struct dp_lite_mon_tx_config *config, qdf_nbuf_t buf)
{
	uint16_t mgmt_filter, ctrl_filter, data_filter, type, subtype;
	struct ieee80211_frame_min_one *wh;
	uint8_t is_mcast = 0;
	qdf_nbuf_t nbuf;

	/* Return here if subtype filtering is not required */
	if (!config->subtype_filtering)
		return QDF_STATUS_SUCCESS;

	mgmt_filter = config->tx_config.mgmt_filter[DP_MON_FRM_FILTER_MODE_FP];
	ctrl_filter = config->tx_config.ctrl_filter[DP_MON_FRM_FILTER_MODE_FP];
	data_filter = config->tx_config.data_filter[DP_MON_FRM_FILTER_MODE_FP];

	/* Locate the 802.11 header: first frag if present, else the first
	 * nbuf on the ext list.
	 */
	if (dp_tx_mon_nbuf_get_num_frag(buf)) {
		wh = (struct ieee80211_frame_min_one *)qdf_nbuf_get_frag_addr(buf, 0);
	} else {
		nbuf = qdf_nbuf_get_ext_list(buf);
		if (nbuf)
			wh = (struct ieee80211_frame_min_one *)qdf_nbuf_data(nbuf);
		else
			return QDF_STATUS_E_INVAL;
	}

	type = (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK);
	subtype = ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
		   IEEE80211_FC0_SUBTYPE_SHIFT);

	switch (type) {
	case IEEE80211_FC0_TYPE_MGT:
		if (mgmt_filter >> subtype & 0x1)
			return QDF_STATUS_SUCCESS;
		else
			return QDF_STATUS_E_ABORTED;
	case IEEE80211_FC0_TYPE_CTL:
		if (ctrl_filter >> subtype & 0x1)
			return QDF_STATUS_SUCCESS;
		else
			return QDF_STATUS_E_ABORTED;
	case IEEE80211_FC0_TYPE_DATA:
		/* data frames filter on mcast/ucast, not subtype */
		is_mcast = DP_FRAME_IS_MULTICAST(wh->i_addr1);
		if ((is_mcast && (data_filter & FILTER_DATA_MCAST)) ||
		    (!is_mcast && (data_filter & FILTER_DATA_UCAST)))
			return QDF_STATUS_SUCCESS;
		return QDF_STATUS_E_ABORTED;
	default:
		return QDF_STATUS_E_INVAL;
	}
}

/**
 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor
 * @pdev: Pointer to physical device
 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure
 * @buf: qdf nbuf structure of buffer
 * @mpdu_count: mpdu count in the nbuf queue
 *
 * Applies PPDU-level then subtype filtering; a non-zero status means the
 * caller should drop @buf.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_tx_lite_mon_filtering(struct dp_pdev *pdev,
			 struct dp_tx_ppdu_info *tx_ppdu_info,
			 qdf_nbuf_t buf, int mpdu_count)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_lite_mon_tx_config *config =
		mon_pdev_be->lite_mon_tx_config;
	QDF_STATUS ret;

	if (!dp_lite_mon_is_tx_enabled(mon_pdev))
		return QDF_STATUS_SUCCESS;

	/* PPDU level filtering */
	ret = dp_lite_mon_filter_ppdu(mpdu_count, config->tx_config.level);
	if (ret)
		return ret;

	/* Subtype filtering */
	ret = dp_lite_mon_filter_subtype(tx_ppdu_info, config, buf);
	if (ret)
		return ret;

	return QDF_STATUS_SUCCESS;
}

#else
/* lite monitor disabled: no vdev to fill */
static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info,
				  struct dp_mon_pdev_be *mon_pdev_be)
{
}

/**
 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor
 * @pdev: Pointer to physical device
 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure
 * @buf: qdf nbuf structure of buffer
 * @mpdu_count: mpdu count in the nbuf queue
 *
 * Stub for builds without QCA_SUPPORT_LITE_MONITOR: never filters.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_tx_lite_mon_filtering(struct dp_pdev *pdev,
			 struct dp_tx_ppdu_info *tx_ppdu_info,
			 qdf_nbuf_t buf, int mpdu_count)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_tx_mon_send_to_stack() - API to send to stack
 * @pdev: pdev Handle
 * @mpdu: pointer to mpdu
 * @num_frag: number of frag in mpdu
 * @ppdu_id: ppdu id of the mpdu
 *
 * Delivers the mpdu via the WDI event path; lite monitor deliveries get
 * the lite monitor vdev filled in and use the LITE_MON_TX event instead.
 *
 * Return: void
 */
static void
dp_tx_mon_send_to_stack(struct dp_pdev *pdev, qdf_nbuf_t mpdu,
			uint32_t num_frag, uint32_t ppdu_id)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
		&mon_pdev_be->tx_monitor_be;
	struct cdp_tx_indication_info tx_capture_info = {0};

	tx_mon_be->stats.pkt_buf_to_stack += num_frag;

	tx_capture_info.radiotap_done = 1;
	tx_capture_info.mpdu_nbuf = mpdu;
	tx_capture_info.mpdu_info.ppdu_id = ppdu_id;
	if (!dp_lite_mon_is_tx_enabled(mon_pdev)) {
		dp_wdi_event_handler(WDI_EVENT_TX_PKT_CAPTURE,
				     pdev->soc,
				     &tx_capture_info,
				     HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	} else {
		dp_fill_lite_mon_vdev(&tx_capture_info, mon_pdev_be);
		dp_wdi_event_handler(WDI_EVENT_LITE_MON_TX,
				     pdev->soc,
				     &tx_capture_info,
				     HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	}
	/* NOTE(review): presumably the WDI consumer clears mpdu_nbuf on
	 * take-over; otherwise the nbuf is freed here — confirm ownership.
	 */
	if (tx_capture_info.mpdu_nbuf)
		qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
}

/**
 * dp_tx_mon_send_per_usr_mpdu() - API to send per usr mpdu to stack
 * @pdev: pdev Handle
 * @ppdu_info: pointer to dp_tx_ppdu_info
 * @user_idx: current user index
 *
 * Drains the user's mpdu queue: applies lite monitor filtering, attaches
 * the radiotap header and hands each surviving mpdu to the stack.
 *
 * Return: void
 */
static void
dp_tx_mon_send_per_usr_mpdu(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *ppdu_info,
			    uint8_t user_idx)
{
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
	qdf_nbuf_t buf = NULL;
	uint8_t mpdu_count = 0;

	usr_mpdu_q = &TXMON_PPDU_USR(ppdu_info, user_idx, mpdu_q);

	while ((buf = qdf_nbuf_queue_remove(usr_mpdu_q)) != NULL) {
		uint32_t num_frag = dp_tx_mon_nbuf_get_num_frag(buf);

		/* radiotap helpers read the per-user status via rx_status */
		ppdu_info->hal_txmon.rx_status.rx_user_status =
			&ppdu_info->hal_txmon.rx_user_status[user_idx];

		if (dp_tx_lite_mon_filtering(pdev, ppdu_info, buf,
890 ++mpdu_count)) { 891 qdf_nbuf_free(buf); 892 continue; 893 } 894 895 qdf_nbuf_update_radiotap(&ppdu_info->hal_txmon.rx_status, 896 buf, qdf_nbuf_headroom(buf)); 897 898 dp_tx_mon_send_to_stack(pdev, buf, num_frag, 899 TXMON_PPDU(ppdu_info, ppdu_id)); 900 } 901 } 902 903 #define PHY_MEDIUM_MHZ 960 904 #define PHY_TIMESTAMP_WRAP (0xFFFFFFFF / PHY_MEDIUM_MHZ) 905 906 /** 907 * dp_populate_tsft_from_phy_timestamp() - API to get tsft from phy timestamp 908 * @pdev: pdev Handle 909 * @ppdu_info: ppdi_info Handle 910 * 911 * Return: QDF_STATUS 912 */ 913 static QDF_STATUS 914 dp_populate_tsft_from_phy_timestamp(struct dp_pdev *pdev, 915 struct dp_tx_ppdu_info *ppdu_info) 916 { 917 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 918 struct dp_mon_pdev_be *mon_pdev_be = 919 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 920 struct dp_pdev_tx_monitor_be *tx_mon_be = 921 &mon_pdev_be->tx_monitor_be; 922 uint64_t tsft = 0; 923 uint32_t ppdu_timestamp = 0; 924 925 tsft = TXMON_PPDU_COM(ppdu_info, tsft); 926 ppdu_timestamp = TXMON_PPDU_COM(ppdu_info, ppdu_timestamp); 927 928 if (tsft && ppdu_timestamp) { 929 /* update tsft and ppdu timestamp */ 930 tx_mon_be->last_tsft = tsft; 931 tx_mon_be->last_ppdu_timestamp = ppdu_timestamp; 932 } else if (!tx_mon_be->last_ppdu_timestamp || !tx_mon_be->last_tsft) { 933 return QDF_STATUS_E_EMPTY; 934 } 935 936 if (!tsft && ppdu_timestamp) { 937 /* response window */ 938 uint32_t cur_usec = ppdu_timestamp / PHY_MEDIUM_MHZ; 939 uint32_t last_usec = (tx_mon_be->last_ppdu_timestamp / 940 PHY_MEDIUM_MHZ); 941 uint32_t diff = 0; 942 943 if (last_usec < cur_usec) { 944 diff = cur_usec - last_usec; 945 tsft = tx_mon_be->last_tsft + diff; 946 } else { 947 diff = (PHY_TIMESTAMP_WRAP - last_usec) + cur_usec; 948 tsft = tx_mon_be->last_tsft + diff; 949 } 950 TXMON_PPDU_COM(ppdu_info, tsft) = tsft; 951 /* update tsft and ppdu timestamp */ 952 tx_mon_be->last_tsft = tsft; 953 tx_mon_be->last_ppdu_timestamp = ppdu_timestamp; 954 } 955 956 if 
(!TXMON_PPDU_COM(ppdu_info, tsft) && 957 !TXMON_PPDU_COM(ppdu_info, ppdu_timestamp)) 958 return QDF_STATUS_E_EMPTY; 959 960 return QDF_STATUS_SUCCESS; 961 } 962 963 /** 964 * dp_tx_mon_update_radiotap() - API to update radiotap information 965 * @pdev: pdev Handle 966 * @ppdu_info: pointer to dp_tx_ppdu_info 967 * 968 * Return: void 969 */ 970 static void 971 dp_tx_mon_update_radiotap(struct dp_pdev *pdev, 972 struct dp_tx_ppdu_info *ppdu_info) 973 { 974 uint32_t usr_idx = 0; 975 uint32_t num_users = 0; 976 977 num_users = TXMON_PPDU_HAL(ppdu_info, num_users); 978 979 if (qdf_unlikely(TXMON_PPDU_COM(ppdu_info, chan_num) == 0)) 980 TXMON_PPDU_COM(ppdu_info, chan_num) = 981 pdev->operating_channel.num; 982 983 if (qdf_unlikely(TXMON_PPDU_COM(ppdu_info, chan_freq) == 0)) 984 TXMON_PPDU_COM(ppdu_info, chan_freq) = 985 pdev->operating_channel.freq; 986 987 if (QDF_STATUS_SUCCESS != 988 dp_populate_tsft_from_phy_timestamp(pdev, ppdu_info)) 989 return; 990 991 for (usr_idx = 0; usr_idx < num_users; usr_idx++) { 992 qdf_nbuf_queue_t *mpdu_q = NULL; 993 994 /* set AMPDU flag if number mpdu is more than 1 */ 995 mpdu_q = &TXMON_PPDU_USR(ppdu_info, usr_idx, mpdu_q); 996 if (mpdu_q && (qdf_nbuf_queue_len(mpdu_q) > 1)) { 997 TXMON_PPDU_COM(ppdu_info, 998 rs_flags) |= IEEE80211_AMPDU_FLAG; 999 TXMON_PPDU_USR(ppdu_info, usr_idx, is_ampdu) = 1; 1000 } 1001 1002 if (qdf_unlikely(!TXMON_PPDU_COM(ppdu_info, rate))) { 1003 uint32_t rate = 0; 1004 uint32_t rix = 0; 1005 uint16_t ratecode = 0; 1006 1007 rate = dp_getrateindex(TXMON_PPDU_COM(ppdu_info, sgi), 1008 TXMON_PPDU_USR(ppdu_info, 1009 usr_idx, mcs), 1010 TXMON_PPDU_COM(ppdu_info, nss), 1011 TXMON_PPDU_COM(ppdu_info, 1012 preamble_type), 1013 TXMON_PPDU_COM(ppdu_info, bw), 1014 0, 1015 &rix, &ratecode); 1016 1017 /* update rate */ 1018 TXMON_PPDU_COM(ppdu_info, rate) = rate; 1019 } 1020 1021 dp_tx_mon_send_per_usr_mpdu(pdev, ppdu_info, usr_idx); 1022 } 1023 } 1024 1025 /** 1026 * dp_tx_mon_ppdu_process - Deferred PPDU stats 
handler 1027 * @context: Opaque work context (PDEV) 1028 * 1029 * Return: none 1030 */ 1031 void dp_tx_mon_ppdu_process(void *context) 1032 { 1033 struct dp_pdev *pdev = (struct dp_pdev *)context; 1034 struct dp_mon_pdev *mon_pdev; 1035 struct dp_mon_pdev_be *mon_pdev_be; 1036 struct dp_tx_ppdu_info *defer_ppdu_info = NULL; 1037 struct dp_tx_ppdu_info *defer_ppdu_info_next = NULL; 1038 struct dp_pdev_tx_monitor_be *tx_mon_be; 1039 1040 /* sanity check */ 1041 if (qdf_unlikely(!pdev)) 1042 return; 1043 1044 mon_pdev = pdev->monitor_pdev; 1045 1046 if (qdf_unlikely(!mon_pdev)) 1047 return; 1048 1049 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1050 if (qdf_unlikely(!mon_pdev_be)) 1051 return; 1052 1053 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1054 if (qdf_unlikely(TX_MON_BE_DISABLE == tx_mon_be->mode && 1055 !dp_lite_mon_is_tx_enabled(mon_pdev))) 1056 return; 1057 1058 /* take lock here */ 1059 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1060 STAILQ_CONCAT(&tx_mon_be->defer_tx_ppdu_info_queue, 1061 &tx_mon_be->tx_ppdu_info_queue); 1062 tx_mon_be->defer_ppdu_info_list_depth += 1063 tx_mon_be->tx_ppdu_info_list_depth; 1064 tx_mon_be->tx_ppdu_info_list_depth = 0; 1065 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1066 1067 STAILQ_FOREACH_SAFE(defer_ppdu_info, 1068 &tx_mon_be->defer_tx_ppdu_info_queue, 1069 tx_ppdu_info_queue_elem, defer_ppdu_info_next) { 1070 /* remove dp_tx_ppdu_info from the list */ 1071 STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue, 1072 defer_ppdu_info, 1073 dp_tx_ppdu_info, 1074 tx_ppdu_info_queue_elem); 1075 tx_mon_be->defer_ppdu_info_list_depth--; 1076 1077 dp_tx_mon_update_radiotap(pdev, defer_ppdu_info); 1078 1079 /* free the ppdu_info */ 1080 dp_tx_mon_free_ppdu_info(defer_ppdu_info, tx_mon_be); 1081 defer_ppdu_info = NULL; 1082 } 1083 } 1084 1085 /** 1086 * dp_tx_ppdu_stats_attach_2_0 - Initialize Tx PPDU stats and enhanced capture 1087 * @pdev: DP PDEV 1088 * 1089 * Return: none 1090 */ 1091 void 
dp_tx_ppdu_stats_attach_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	/* queue of ppdu_info pending hand-off to deferred processing */
	STAILQ_INIT(&tx_mon_be->tx_ppdu_info_queue);
	tx_mon_be->tx_ppdu_info_list_depth = 0;

	/* queue consumed by dp_tx_mon_ppdu_process() */
	STAILQ_INIT(&tx_mon_be->defer_tx_ppdu_info_queue);
	tx_mon_be->defer_ppdu_info_list_depth = 0;

	qdf_spinlock_create(&tx_mon_be->tx_mon_list_lock);
	/* Work queue setup for TX MONITOR post handling */
	qdf_create_work(0, &tx_mon_be->post_ppdu_work,
			dp_tx_mon_ppdu_process, pdev);

	/* NOTE(review): allocation result is not checked here; confirm
	 * users of post_ppdu_workqueue tolerate NULL - TODO confirm
	 */
	tx_mon_be->post_ppdu_workqueue =
			qdf_alloc_unbound_workqueue("tx_mon_ppdu_work_queue");
}

/**
 * dp_tx_ppdu_stats_detach_2_0 - Cleanup Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Return: none
 */
void dp_tx_ppdu_stats_detach_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct dp_tx_ppdu_info *tx_ppdu_info = NULL;
	struct dp_tx_ppdu_info *tx_ppdu_info_next = NULL;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	/* TODO: disable tx_monitor, to avoid further packet from HW */
	dp_monitor_config_enh_tx_capture(pdev, TX_MON_BE_DISABLE);

	/* flush workqueue */
	qdf_flush_workqueue(0, tx_mon_be->post_ppdu_workqueue);
	qdf_destroy_workqueue(0, tx_mon_be->post_ppdu_workqueue);

	/*
	 * TODO: iterate both tx_ppdu_info and defer_ppdu_info_list
	 * free the tx_ppdu_info and decrement depth
	 */
	/* drain the pending queue under the list lock */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->tx_ppdu_info_queue, tx_ppdu_info,
			      dp_tx_ppdu_info, tx_ppdu_info_queue_elem);
		/* decrement list length */
		tx_mon_be->tx_ppdu_info_list_depth--;
		/* free tx_ppdu_info */
		dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be);
	}
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	/* drain the deferred queue under the list lock */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->defer_tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue,
			      tx_ppdu_info,
			      dp_tx_ppdu_info, tx_ppdu_info_queue_elem);
		/* decrement list length */
		tx_mon_be->defer_ppdu_info_list_depth--;
		/* free tx_ppdu_info */
		dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be);
	}
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	qdf_spinlock_destroy(&tx_mon_be->tx_mon_list_lock);
}
#endif /* WLAN_TX_PKT_CAPTURE_ENH_BE */

#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
/*
 * dp_config_enh_tx_core_monitor_2_0()- API to validate core framework
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_config_enh_tx_core_monitor_2_0(struct dp_pdev *pdev, uint8_t val)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1212 struct dp_pdev_tx_monitor_be *tx_mon_be = 1213 &mon_pdev_be->tx_monitor_be; 1214 struct dp_soc *soc = pdev->soc; 1215 uint16_t num_of_buffers; 1216 QDF_STATUS status; 1217 1218 soc_cfg_ctx = soc->wlan_cfg_ctx; 1219 switch (val) { 1220 case TX_MON_BE_FRM_WRK_DISABLE: 1221 { 1222 tx_mon_be->mode = val; 1223 mon_pdev_be->tx_mon_mode = 0; 1224 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B; 1225 break; 1226 } 1227 case TX_MON_BE_FRM_WRK_FULL_CAPTURE: 1228 { 1229 num_of_buffers = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx); 1230 status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev, 1231 num_of_buffers); 1232 if (status != QDF_STATUS_SUCCESS) { 1233 dp_mon_err("Tx monitor buffer allocation failed"); 1234 return status; 1235 } 1236 tx_mon_be->mode = val; 1237 qdf_mem_zero(&tx_mon_be->stats, 1238 sizeof(struct dp_tx_monitor_drop_stats)); 1239 tx_mon_be->mode = val; 1240 mon_pdev_be->tx_mon_mode = 1; 1241 mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH; 1242 break; 1243 } 1244 case TX_MON_BE_FRM_WRK_128B_CAPTURE: 1245 { 1246 status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev, 1247 DP_MON_RING_FILL_LEVEL_DEFAULT); 1248 if (status != QDF_STATUS_SUCCESS) { 1249 dp_mon_err("Tx monitor buffer allocation failed"); 1250 return status; 1251 } 1252 tx_mon_be->mode = val; 1253 mon_pdev_be->tx_mon_mode = 1; 1254 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_128B; 1255 break; 1256 } 1257 default: 1258 { 1259 return QDF_STATUS_E_INVAL; 1260 } 1261 } 1262 1263 dp_mon_debug("Tx monitor mode:%d mon_mode_flag:%d config_length:%d", 1264 tx_mon_be->mode, mon_pdev_be->tx_mon_mode, 1265 mon_pdev_be->tx_mon_filter_length); 1266 1267 /* send HTT msg to configure TLV based on mode */ 1268 dp_mon_filter_setup_tx_mon_mode(pdev); 1269 dp_tx_mon_filter_update(pdev); 1270 1271 return QDF_STATUS_SUCCESS; 1272 } 1273 #endif 1274