/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_be_hw_headers.h"
#include "dp_types.h"
#include "hal_be_tx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "hal_be_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "dp_mon.h"
#include <dp_mon_2.0.h>
#include <dp_tx_mon_2.0.h>
#include <dp_be.h>
#include <hal_be_api_mon.h>
#include <dp_mon_filter_2.0.h>
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

/* Number of consecutive zero-work polls (while tx monitor is enabled)
 * after which the tx monitor block is declared stuck.
 */
#define MAX_TX_MONITOR_STUCK 50

#ifdef TXMON_DEBUG
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be - pointer to dp_pdev_tx_monitor_be
 * @work_done - tx monitor work done
 *
 * Tracks consecutive no-work polls to detect a stuck tx monitor block and,
 * in this TXMON_DEBUG build, also rate-limit-prints the ppdu_info list
 * depths and status/packet buffer counters.
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
		       uint32_t work_done)
{
	/* stuck counter advances only while monitor mode is enabled */
	if (tx_mon_be->mode && !work_done)
		tx_mon_be->stats.tx_mon_stuck++;
	else if (tx_mon_be->mode && work_done)
		tx_mon_be->stats.tx_mon_stuck = 0;

	if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
		dp_mon_warn("Tx monitor block got stuck!!!!!");
		/* reset the streak but keep a cumulative stuck count */
		tx_mon_be->stats.tx_mon_stuck = 0;
		tx_mon_be->stats.total_tx_mon_stuck++;
	}

	dp_mon_debug_rl("tx_ppdu_info[%u :D %u] STATUS[R %llu: F %llu] PKT_BUF[R %llu: F %llu : P %llu : S %llu]",
			tx_mon_be->tx_ppdu_info_list_depth,
			tx_mon_be->defer_ppdu_info_list_depth,
			tx_mon_be->stats.status_buf_recv,
			tx_mon_be->stats.status_buf_free,
			tx_mon_be->stats.pkt_buf_recv,
			tx_mon_be->stats.pkt_buf_free,
			tx_mon_be->stats.pkt_buf_processed,
			tx_mon_be->stats.pkt_buf_to_stack);
}

#else
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be - pointer to dp_pdev_tx_monitor_be
 * @work_done - tx monitor work done
 *
 * Non-debug variant: only the stuck detection, no periodic counter dump.
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
		       uint32_t work_done)
{
	/* stuck counter advances only while monitor mode is enabled */
	if (tx_mon_be->mode && !work_done)
		tx_mon_be->stats.tx_mon_stuck++;
	else if (tx_mon_be->mode && work_done)
		tx_mon_be->stats.tx_mon_stuck = 0;

	if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
		dp_mon_warn("Tx monitor block got stuck!!!!!");
		/* reset the streak but keep a cumulative stuck count */
		tx_mon_be->stats.tx_mon_stuck = 0;
		tx_mon_be->stats.total_tx_mon_stuck++;
	}
}
#endif

/*
 * dp_tx_mon_srng_process_2_0() - reap the tx monitor destination SRNG
 * @soc: DP soc handle
 * @int_ctx: interrupt context (may be used by the SRNG access wrappers)
 * @mac_id: lmac id whose tx monitor destination ring is serviced
 * @quota: maximum number of ring entries to process in this poll
 *
 * Walks the tx monitor destination ring under mon_lock: empty descriptors
 * only update drop counters; flush/truncate-terminated status buffers are
 * freed after releasing any packet buffers they reference; all other status
 * buffers are handed to dp_tx_mon_process_status_tlv(). Reaped monitor
 * descriptors are batched on a local free list and replenished back to the
 * tx monitor buffer ring at the end.
 *
 * Return: number of ring entries processed (work done)
 */
static inline uint32_t
dp_tx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
			   uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *tx_mon_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	uint32_t work_done = 0;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
	struct dp_mon_desc_pool *tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;
	struct dp_tx_mon_desc_list mon_desc_list;
	uint32_t replenish_cnt = 0;

	if (!pdev) {
		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_dst_srng = mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng;

	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
			   soc, mon_dst_srng);
		return work_done;
	}

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return work_done;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	hal_soc = soc->hal_soc;

	qdf_assert((hal_soc && pdev));

	/* mon_lock serializes ring reap against filter/config changes */
	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	mon_desc_list.desc_list = NULL;
	mon_desc_list.tail = NULL;
	mon_desc_list.tx_mon_reap_cnt = 0;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
			   __func__, __LINE__, mon_dst_srng);
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return work_done;
	}

	/* peek (not advance) so the entry is consumed only after handling */
	while (qdf_likely((tx_mon_dst_ring_desc =
			(void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
			&& quota--)) {
		struct hal_mon_desc hal_mon_tx_desc = {0};
		struct dp_mon_desc *mon_desc = NULL;
		qdf_frag_t status_frag = NULL;
		uint32_t end_offset = 0;

		hal_be_get_mon_dest_status(soc->hal_soc,
					   tx_mon_dst_ring_desc,
					   &hal_mon_tx_desc);

		if (hal_mon_tx_desc.empty_descriptor) {
			/* update stats counter */
			dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d DROP[PPDU:%d MPDU:%d TLV:%d] E_O_PPDU:%d",
				     hal_mon_tx_desc.ppdu_id,
				     hal_mon_tx_desc.initiator,
				     hal_mon_tx_desc.empty_descriptor,
				     hal_mon_tx_desc.ring_id,
				     hal_mon_tx_desc.looping_count,
				     hal_mon_tx_desc.ppdu_drop_count,
				     hal_mon_tx_desc.mpdu_drop_count,
				     hal_mon_tx_desc.tlv_drop_count,
				     hal_mon_tx_desc.end_of_ppdu_dropped);

			/* empty descriptor carries only HW drop counters */
			tx_mon_be->stats.ppdu_drop_cnt +=
				hal_mon_tx_desc.ppdu_drop_count;
			tx_mon_be->stats.mpdu_drop_cnt +=
				hal_mon_tx_desc.mpdu_drop_count;
			tx_mon_be->stats.tlv_drop_cnt +=
				hal_mon_tx_desc.tlv_drop_count;
			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
			     hal_mon_tx_desc.ppdu_id,
			     hal_mon_tx_desc.initiator,
			     hal_mon_tx_desc.empty_descriptor,
			     hal_mon_tx_desc.ring_id,
			     hal_mon_tx_desc.looping_count,
			     hal_mon_tx_desc.buf_addr,
			     hal_mon_tx_desc.end_offset,
			     hal_mon_tx_desc.end_reason);

		/* buf_addr is the SW cookie: the dp_mon_desc itself */
		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_tx_desc.buf_addr);
		qdf_assert_always(mon_desc);

		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
					   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}

		if (mon_desc->magic != DP_MON_DESC_MAGIC) {
			dp_mon_err("Invalid monitor descriptor");
			qdf_assert_always(0);
		}

		end_offset = hal_mon_tx_desc.end_offset;

		/* take ownership of the status fragment from the desc */
		status_frag = (qdf_frag_t)(mon_desc->buf_addr);
		mon_desc->buf_addr = NULL;
		/* increment reap count */
		++mon_desc_list.tx_mon_reap_cnt;

		/* add the mon_desc to free list */
		dp_mon_add_to_free_desc_list(&mon_desc_list.desc_list,
					     &mon_desc_list.tail, mon_desc);

		if (qdf_unlikely(!status_frag)) {
			dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
				     hal_mon_tx_desc.ppdu_id,
				     hal_mon_tx_desc.initiator,
				     hal_mon_tx_desc.empty_descriptor,
				     hal_mon_tx_desc.ring_id,
				     hal_mon_tx_desc.looping_count,
				     hal_mon_tx_desc.buf_addr,
				     hal_mon_tx_desc.end_offset,
				     hal_mon_tx_desc.end_reason);

			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		tx_mon_be->stats.status_buf_recv++;

		if ((hal_mon_tx_desc.end_reason == HAL_MON_FLUSH_DETECTED) ||
		    (hal_mon_tx_desc.end_reason == HAL_MON_PPDU_TRUNCATED)) {
			tx_mon_be->be_ppdu_id = hal_mon_tx_desc.ppdu_id;

			dp_tx_mon_update_end_reason(mon_pdev,
						    hal_mon_tx_desc.ppdu_id,
						    hal_mon_tx_desc.end_reason);
			/* check and free packet buffer from status buffer */
			dp_tx_mon_status_free_packet_buf(pdev, status_frag,
							 end_offset,
							 &mon_desc_list);

			tx_mon_be->stats.status_buf_free++;
			qdf_frag_free(status_frag);

			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		/* normal end reason: parse status TLVs (consumes the frag) */
		dp_tx_mon_process_status_tlv(soc, pdev,
					     &hal_mon_tx_desc,
					     status_frag,
					     end_offset,
					     &mon_desc_list);

		work_done++;
		hal_srng_dst_get_next(hal_soc, mon_dst_srng);
	}
	dp_srng_access_end(int_ctx, soc, mon_dst_srng);

	/* return reaped descriptors to the tx monitor buffer ring */
	if (mon_desc_list.tx_mon_reap_cnt) {
		dp_mon_buffers_replenish(soc, &mon_soc_be->tx_mon_buf_ring,
					 tx_mon_desc_pool,
					 mon_desc_list.tx_mon_reap_cnt,
					 &mon_desc_list.desc_list,
					 &mon_desc_list.tail,
					 &replenish_cnt);
	}
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
	dp_mon_debug("mac_id: %d, work_done:%d tx_monitor_reap_cnt:%d",
		     mac_id, work_done, mon_desc_list.tx_mon_reap_cnt);

	tx_mon_be->stats.total_tx_mon_reap_cnt += mon_desc_list.tx_mon_reap_cnt;
	tx_mon_be->stats.totat_tx_mon_replenish_cnt += replenish_cnt;
	dp_tx_mon_debug_status(tx_mon_be, work_done);

	return work_done;
}

/*
 * dp_tx_mon_process_2_0() - tx monitor interrupt/poll entry point
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_id: lmac id to service
 * @quota: maximum ring entries to process
 *
 * Thin wrapper around dp_tx_mon_srng_process_2_0().
 *
 * Return: number of ring entries processed
 */
uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_tx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);

	return work_done;
}

/*
 * dp_tx_mon_buf_desc_pool_deinit() - deinit the tx monitor descriptor pool
 * @soc: DP soc handle
 *
 * Return: void
 */
void
dp_tx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	dp_mon_desc_pool_deinit(&mon_soc_be->tx_desc_mon);
}

QDF_STATUS
dp_tx_mon_buf_desc_pool_init(struct dp_soc *soc) 314 { 315 struct dp_mon_soc *mon_soc = soc->monitor_soc; 316 struct dp_mon_soc_be *mon_soc_be = 317 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 318 uint32_t num_entries; 319 320 num_entries = 321 wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc->wlan_cfg_ctx); 322 323 return dp_mon_desc_pool_init(&mon_soc_be->tx_desc_mon, num_entries); 324 } 325 326 void dp_tx_mon_buf_desc_pool_free(struct dp_soc *soc) 327 { 328 struct dp_mon_soc *mon_soc = soc->monitor_soc; 329 struct dp_mon_soc_be *mon_soc_be = 330 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 331 332 if (mon_soc_be) 333 dp_mon_desc_pool_free(&mon_soc_be->tx_desc_mon); 334 } 335 336 QDF_STATUS 337 dp_tx_mon_buf_desc_pool_alloc(struct dp_soc *soc) 338 { 339 struct dp_srng *mon_buf_ring; 340 struct dp_mon_desc_pool *tx_mon_desc_pool; 341 int entries; 342 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 343 struct dp_mon_soc *mon_soc = soc->monitor_soc; 344 struct dp_mon_soc_be *mon_soc_be = 345 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 346 347 soc_cfg_ctx = soc->wlan_cfg_ctx; 348 349 entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx); 350 351 mon_buf_ring = &mon_soc_be->tx_mon_buf_ring; 352 353 tx_mon_desc_pool = &mon_soc_be->tx_desc_mon; 354 355 qdf_print("%s:%d tx mon buf desc pool entries: %d", __func__, __LINE__, entries); 356 return dp_mon_desc_pool_alloc(entries, tx_mon_desc_pool); 357 } 358 359 void 360 dp_tx_mon_buffers_free(struct dp_soc *soc) 361 { 362 struct dp_mon_desc_pool *tx_mon_desc_pool; 363 struct dp_mon_soc *mon_soc = soc->monitor_soc; 364 struct dp_mon_soc_be *mon_soc_be = 365 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 366 367 tx_mon_desc_pool = &mon_soc_be->tx_desc_mon; 368 369 dp_mon_pool_frag_unmap_and_free(soc, tx_mon_desc_pool); 370 } 371 372 QDF_STATUS 373 dp_tx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size) 374 { 375 struct dp_srng *mon_buf_ring; 376 struct dp_mon_desc_pool *tx_mon_desc_pool; 377 union dp_mon_desc_list_elem_t 
*desc_list = NULL; 378 union dp_mon_desc_list_elem_t *tail = NULL; 379 struct dp_mon_soc *mon_soc = soc->monitor_soc; 380 struct dp_mon_soc_be *mon_soc_be = 381 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 382 383 mon_buf_ring = &mon_soc_be->tx_mon_buf_ring; 384 385 tx_mon_desc_pool = &mon_soc_be->tx_desc_mon; 386 387 return dp_mon_buffers_replenish(soc, mon_buf_ring, 388 tx_mon_desc_pool, 389 size, 390 &desc_list, &tail, NULL); 391 } 392 393 #ifdef WLAN_TX_PKT_CAPTURE_ENH_BE 394 395 /* 396 * dp_tx_mon_nbuf_get_num_frag() - get total number of fragments 397 * @buf: Network buf instance 398 * 399 * Return: number of fragments 400 */ 401 static inline 402 uint32_t dp_tx_mon_nbuf_get_num_frag(qdf_nbuf_t nbuf) 403 { 404 uint32_t num_frag = 0; 405 406 if (qdf_unlikely(!nbuf)) 407 return num_frag; 408 409 num_frag = qdf_nbuf_get_nr_frags_in_fraglist(nbuf); 410 411 return num_frag; 412 } 413 414 /* 415 * dp_tx_mon_free_usr_mpduq() - API to free user mpduq 416 * @tx_ppdu_info - pointer to tx_ppdu_info 417 * @usr_idx - user index 418 * @tx_mon_be - pointer to tx capture be 419 * 420 * Return: void 421 */ 422 void dp_tx_mon_free_usr_mpduq(struct dp_tx_ppdu_info *tx_ppdu_info, 423 uint8_t usr_idx, 424 struct dp_pdev_tx_monitor_be *tx_mon_be) 425 { 426 qdf_nbuf_queue_t *mpdu_q; 427 uint32_t num_frag = 0; 428 qdf_nbuf_t buf = NULL; 429 430 if (qdf_unlikely(!tx_ppdu_info)) 431 return; 432 433 mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q); 434 435 while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) { 436 num_frag += dp_tx_mon_nbuf_get_num_frag(buf); 437 qdf_nbuf_free(buf); 438 } 439 tx_mon_be->stats.pkt_buf_free += num_frag; 440 } 441 442 /* 443 * dp_tx_mon_free_ppdu_info() - API to free dp_tx_ppdu_info 444 * @tx_ppdu_info - pointer to tx_ppdu_info 445 * @tx_mon_be - pointer to tx capture be 446 * 447 * Return: void 448 */ 449 void dp_tx_mon_free_ppdu_info(struct dp_tx_ppdu_info *tx_ppdu_info, 450 struct dp_pdev_tx_monitor_be *tx_mon_be) 451 { 452 uint32_t user = 0; 
453 454 for (; user < TXMON_PPDU_HAL(tx_ppdu_info, num_users); user++) { 455 qdf_nbuf_queue_t *mpdu_q; 456 uint32_t num_frag = 0; 457 qdf_nbuf_t buf = NULL; 458 459 mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user, mpdu_q); 460 461 while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) { 462 num_frag += dp_tx_mon_nbuf_get_num_frag(buf); 463 qdf_nbuf_free(buf); 464 } 465 tx_mon_be->stats.pkt_buf_free += num_frag; 466 } 467 468 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0; 469 qdf_mem_free(tx_ppdu_info); 470 } 471 472 /* 473 * dp_tx_mon_get_ppdu_info() - API to allocate dp_tx_ppdu_info 474 * @pdev - pdev handle 475 * @type - type of ppdu_info data or protection 476 * @num_user - number user in a ppdu_info 477 * @ppdu_id - ppdu_id number 478 * 479 * Return: pointer to dp_tx_ppdu_info 480 */ 481 struct dp_tx_ppdu_info *dp_tx_mon_get_ppdu_info(struct dp_pdev *pdev, 482 enum tx_ppdu_info_type type, 483 uint8_t num_user, 484 uint32_t ppdu_id) 485 { 486 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 487 struct dp_mon_pdev_be *mon_pdev_be = 488 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 489 struct dp_pdev_tx_monitor_be *tx_mon_be = 490 &mon_pdev_be->tx_monitor_be; 491 struct dp_tx_ppdu_info *tx_ppdu_info; 492 size_t sz_ppdu_info = 0; 493 uint8_t i; 494 495 /* allocate new tx_ppdu_info */ 496 sz_ppdu_info = (sizeof(struct dp_tx_ppdu_info) + 497 (sizeof(struct mon_rx_user_status) * num_user)); 498 499 tx_ppdu_info = (struct dp_tx_ppdu_info *)qdf_mem_malloc(sz_ppdu_info); 500 if (!tx_ppdu_info) { 501 dp_mon_err("allocation of tx_ppdu_info type[%d] failed!!!", 502 type); 503 return NULL; 504 } 505 506 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0; 507 TXMON_PPDU_HAL(tx_ppdu_info, num_users) = num_user; 508 TXMON_PPDU_HAL(tx_ppdu_info, ppdu_id) = ppdu_id; 509 TXMON_PPDU(tx_ppdu_info, ppdu_id) = ppdu_id; 510 511 for (i = 0; i < num_user; i++) { 512 qdf_nbuf_queue_t *mpdu_q; 513 514 mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, i, mpdu_q); 515 qdf_nbuf_queue_init(mpdu_q); 516 } 517 518 /* 
assign tx_ppdu_info to monitor pdev for reference */ 519 if (type == TX_PROT_PPDU_INFO) { 520 tx_mon_be->tx_prot_ppdu_info = tx_ppdu_info; 521 TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 0; 522 } else { 523 tx_mon_be->tx_data_ppdu_info = tx_ppdu_info; 524 TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 1; 525 } 526 527 return tx_ppdu_info; 528 } 529 530 /* 531 * dp_print_pdev_tx_monitor_stats_2_0: print tx capture stats 532 * @pdev: DP PDEV handle 533 * 534 * return: void 535 */ 536 void dp_print_pdev_tx_monitor_stats_2_0(struct dp_pdev *pdev) 537 { 538 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 539 struct dp_mon_pdev_be *mon_pdev_be = 540 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 541 struct dp_pdev_tx_monitor_be *tx_mon_be = 542 &mon_pdev_be->tx_monitor_be; 543 struct dp_tx_monitor_drop_stats stats = {0}; 544 545 qdf_mem_copy(&stats, &tx_mon_be->stats, 546 sizeof(struct dp_tx_monitor_drop_stats)); 547 548 /* TX monitor stats needed for beryllium */ 549 DP_PRINT_STATS("\n\tTX Capture BE stats mode[%d]:", tx_mon_be->mode); 550 DP_PRINT_STATS("\tbuffer pending : %u", tx_mon_be->last_frag_q_idx); 551 DP_PRINT_STATS("\treplenish count: %llu", 552 stats.totat_tx_mon_replenish_cnt); 553 DP_PRINT_STATS("\treap count : %llu", stats.total_tx_mon_reap_cnt); 554 DP_PRINT_STATS("\tmonitor stuck : %u", stats.total_tx_mon_stuck); 555 DP_PRINT_STATS("\tStatus buffer"); 556 DP_PRINT_STATS("\t\treceived : %llu", stats.status_buf_recv); 557 DP_PRINT_STATS("\t\tfree : %llu", stats.status_buf_free); 558 DP_PRINT_STATS("\tPacket buffer"); 559 DP_PRINT_STATS("\t\treceived : %llu", stats.pkt_buf_recv); 560 DP_PRINT_STATS("\t\tfree : %llu", stats.pkt_buf_free); 561 DP_PRINT_STATS("\t\tprocessed : %llu", stats.pkt_buf_processed); 562 DP_PRINT_STATS("\t\tto stack : %llu", stats.pkt_buf_to_stack); 563 DP_PRINT_STATS("\tppdu info"); 564 DP_PRINT_STATS("\t\tthreshold : %llu", stats.ppdu_info_drop_th); 565 DP_PRINT_STATS("\t\tflush : %llu", stats.ppdu_info_drop_flush); 566 
DP_PRINT_STATS("\t\ttruncated : %llu", stats.ppdu_info_drop_trunc); 567 DP_PRINT_STATS("\tDrop stats"); 568 DP_PRINT_STATS("\t\tppdu drop : %llu", stats.ppdu_drop_cnt); 569 DP_PRINT_STATS("\t\tmpdu drop : %llu", stats.mpdu_drop_cnt); 570 DP_PRINT_STATS("\t\ttlv drop : %llu", stats.tlv_drop_cnt); 571 } 572 573 /* 574 * dp_config_enh_tx_monitor_2_0()- API to enable/disable enhanced tx capture 575 * @pdev_handle: DP_PDEV handle 576 * @val: user provided value 577 * 578 * Return: QDF_STATUS 579 */ 580 QDF_STATUS 581 dp_config_enh_tx_monitor_2_0(struct dp_pdev *pdev, uint8_t val) 582 { 583 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 584 struct dp_mon_pdev_be *mon_pdev_be = 585 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 586 struct dp_pdev_tx_monitor_be *tx_mon_be = 587 &mon_pdev_be->tx_monitor_be; 588 589 switch (val) { 590 case TX_MON_BE_DISABLE: 591 { 592 tx_mon_be->mode = TX_MON_BE_DISABLE; 593 mon_pdev_be->tx_mon_mode = 0; 594 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B; 595 break; 596 } 597 case TX_MON_BE_FULL_CAPTURE: 598 { 599 qdf_mem_zero(&tx_mon_be->stats, 600 sizeof(struct dp_tx_monitor_drop_stats)); 601 tx_mon_be->last_tsft = 0; 602 tx_mon_be->last_ppdu_timestamp = 0; 603 tx_mon_be->mode = TX_MON_BE_FULL_CAPTURE; 604 mon_pdev_be->tx_mon_mode = 1; 605 mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH; 606 break; 607 } 608 case TX_MON_BE_PEER_FILTER: 609 { 610 tx_mon_be->mode = TX_MON_BE_PEER_FILTER; 611 mon_pdev_be->tx_mon_mode = 2; 612 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_256B; 613 break; 614 } 615 default: 616 { 617 return QDF_STATUS_E_INVAL; 618 } 619 } 620 621 dp_mon_info("Tx monitor mode:%d mon_mode_flag:%d config_length:%d", 622 tx_mon_be->mode, mon_pdev_be->tx_mon_mode, 623 mon_pdev_be->tx_mon_filter_length); 624 625 dp_mon_filter_setup_tx_mon_mode(pdev); 626 dp_tx_mon_filter_update(pdev); 627 628 return QDF_STATUS_SUCCESS; 629 } 630 631 /* 632 * dp_peer_set_tx_capture_enabled_2_0() - add tx monitor peer filter 633 * 
@pdev: Datapath PDEV handle 634 * @peer: Datapath PEER handle 635 * @is_tx_pkt_cap_enable: flag for tx capture enable/disable 636 * @peer_mac: peer mac address 637 * 638 * Return: status 639 */ 640 QDF_STATUS dp_peer_set_tx_capture_enabled_2_0(struct dp_pdev *pdev_handle, 641 struct dp_peer *peer_handle, 642 uint8_t is_tx_pkt_cap_enable, 643 uint8_t *peer_mac) 644 { 645 return QDF_STATUS_SUCCESS; 646 } 647 648 #ifdef QCA_SUPPORT_LITE_MONITOR 649 static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info, 650 struct dp_mon_pdev_be *mon_pdev_be) 651 { 652 struct dp_lite_mon_config *config; 653 struct dp_vdev *lite_mon_vdev; 654 655 config = &mon_pdev_be->lite_mon_tx_config->tx_config; 656 lite_mon_vdev = config->lite_mon_vdev; 657 658 if (lite_mon_vdev) 659 tx_cap_info->osif_vdev = lite_mon_vdev->osif_vdev; 660 } 661 662 /** 663 * dp_lite_mon_filter_ppdu() - Filter frames at ppdu level 664 * @mpdu_count: mpdu count in the nbuf queue 665 * @level: Lite monitor filter level 666 * 667 * Return: QDF_STATUS 668 */ 669 static inline QDF_STATUS 670 dp_lite_mon_filter_ppdu(uint8_t mpdu_count, uint8_t level) 671 { 672 if (level == CDP_LITE_MON_LEVEL_PPDU && mpdu_count > 1) 673 return QDF_STATUS_E_CANCELED; 674 675 return QDF_STATUS_SUCCESS; 676 } 677 678 /** 679 * dp_lite_mon_filter_subtype() - filter frames with subtype 680 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure 681 * @config: Lite monitor configuration 682 * 683 * Return: QDF_STATUS 684 */ 685 static inline QDF_STATUS 686 dp_lite_mon_filter_subtype(struct dp_tx_ppdu_info *tx_ppdu_info, 687 struct dp_lite_mon_tx_config *config, qdf_nbuf_t buf) 688 { 689 uint16_t mgmt_filter, ctrl_filter, data_filter, type, subtype; 690 struct ieee80211_frame_min_one *wh; 691 uint8_t is_mcast = 0; 692 qdf_nbuf_t nbuf; 693 694 /* Return here if subtype filtering is not required */ 695 if (!config->subtype_filtering) 696 return QDF_STATUS_SUCCESS; 697 698 mgmt_filter = 
config->tx_config.mgmt_filter[DP_MON_FRM_FILTER_MODE_FP]; 699 ctrl_filter = config->tx_config.ctrl_filter[DP_MON_FRM_FILTER_MODE_FP]; 700 data_filter = config->tx_config.data_filter[DP_MON_FRM_FILTER_MODE_FP]; 701 702 if (dp_tx_mon_nbuf_get_num_frag(buf)) { 703 wh = (struct ieee80211_frame_min_one *)qdf_nbuf_get_frag_addr(buf, 0); 704 } else { 705 nbuf = qdf_nbuf_get_ext_list(buf); 706 if (nbuf) 707 wh = (struct ieee80211_frame_min_one *)qdf_nbuf_data(nbuf); 708 else 709 return QDF_STATUS_E_INVAL; 710 } 711 712 type = (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK); 713 subtype = ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 714 IEEE80211_FC0_SUBTYPE_SHIFT); 715 716 switch (type) { 717 case IEEE80211_FC0_TYPE_MGT: 718 if (mgmt_filter >> subtype & 0x1) 719 return QDF_STATUS_SUCCESS; 720 else 721 return QDF_STATUS_E_ABORTED; 722 case IEEE80211_FC0_TYPE_CTL: 723 if (ctrl_filter >> subtype & 0x1) 724 return QDF_STATUS_SUCCESS; 725 else 726 return QDF_STATUS_E_ABORTED; 727 case IEEE80211_FC0_TYPE_DATA: 728 is_mcast = DP_FRAME_IS_MULTICAST(wh->i_addr1); 729 if ((is_mcast && (data_filter & FILTER_DATA_MCAST)) || 730 (!is_mcast && (data_filter & FILTER_DATA_UCAST))) 731 return QDF_STATUS_SUCCESS; 732 return QDF_STATUS_E_ABORTED; 733 default: 734 return QDF_STATUS_E_INVAL; 735 } 736 } 737 738 /** 739 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor 740 * @pdev: Pointer to physical device 741 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure 742 * @buf: qdf nbuf structure of buffer 743 * @mpdu_count: mpdu count in the nbuf queue 744 * 745 * Return: QDF_STATUS 746 */ 747 static inline QDF_STATUS 748 dp_tx_lite_mon_filtering(struct dp_pdev *pdev, 749 struct dp_tx_ppdu_info *tx_ppdu_info, 750 qdf_nbuf_t buf, int mpdu_count) 751 { 752 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 753 struct dp_mon_pdev_be *mon_pdev_be = 754 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 755 struct dp_lite_mon_tx_config *config = 756 mon_pdev_be->lite_mon_tx_config; 757 
QDF_STATUS ret; 758 759 if (!dp_lite_mon_is_tx_enabled(mon_pdev)) 760 return QDF_STATUS_SUCCESS; 761 762 /* PPDU level filtering */ 763 ret = dp_lite_mon_filter_ppdu(mpdu_count, config->tx_config.level); 764 if (ret) 765 return ret; 766 767 /* Subtype filtering */ 768 ret = dp_lite_mon_filter_subtype(tx_ppdu_info, config, buf); 769 if (ret) 770 return ret; 771 772 return QDF_STATUS_SUCCESS; 773 } 774 775 #else 776 static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info, 777 struct dp_mon_pdev_be *mon_pdev_be) 778 { 779 } 780 781 /** 782 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor 783 * @pdev: Pointer to physical device 784 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure 785 * @buf: qdf nbuf structure of buffer 786 * @mpdu_count: mpdu count in the nbuf queue 787 * 788 * Return: QDF_STATUS 789 */ 790 static inline QDF_STATUS 791 dp_tx_lite_mon_filtering(struct dp_pdev *pdev, 792 struct dp_tx_ppdu_info *tx_ppdu_info, 793 qdf_nbuf_t buf, int mpdu_count) 794 { 795 return QDF_STATUS_SUCCESS; 796 } 797 #endif 798 799 /** 800 * dp_tx_mon_send_to_stack() - API to send to stack 801 * @pdev: pdev Handle 802 * @mpdu: pointer to mpdu 803 * @num_frag: number of frag in mpdu 804 * @ppdu_id: ppdu id of the mpdu 805 * 806 * Return: void 807 */ 808 static void 809 dp_tx_mon_send_to_stack(struct dp_pdev *pdev, qdf_nbuf_t mpdu, 810 uint32_t num_frag, uint32_t ppdu_id) 811 { 812 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 813 struct dp_mon_pdev_be *mon_pdev_be = 814 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 815 struct dp_pdev_tx_monitor_be *tx_mon_be = 816 &mon_pdev_be->tx_monitor_be; 817 struct cdp_tx_indication_info tx_capture_info = {0}; 818 819 tx_mon_be->stats.pkt_buf_to_stack += num_frag; 820 821 tx_capture_info.radiotap_done = 1; 822 tx_capture_info.mpdu_nbuf = mpdu; 823 tx_capture_info.mpdu_info.ppdu_id = ppdu_id; 824 if (!dp_lite_mon_is_tx_enabled(mon_pdev)) { 825 dp_wdi_event_handler(WDI_EVENT_TX_PKT_CAPTURE, 826 
pdev->soc, 827 &tx_capture_info, 828 HTT_INVALID_PEER, 829 WDI_NO_VAL, 830 pdev->pdev_id); 831 } else { 832 dp_fill_lite_mon_vdev(&tx_capture_info, mon_pdev_be); 833 dp_wdi_event_handler(WDI_EVENT_LITE_MON_TX, 834 pdev->soc, 835 &tx_capture_info, 836 HTT_INVALID_PEER, 837 WDI_NO_VAL, 838 pdev->pdev_id); 839 } 840 if (tx_capture_info.mpdu_nbuf) 841 qdf_nbuf_free(tx_capture_info.mpdu_nbuf); 842 } 843 844 /** 845 * dp_tx_mon_send_per_usr_mpdu() - API to send per usr mpdu to stack 846 * @pdev: pdev Handle 847 * @ppdu_info: pointer to dp_tx_ppdu_info 848 * @user_id: current user index 849 * 850 * Return: void 851 */ 852 static void 853 dp_tx_mon_send_per_usr_mpdu(struct dp_pdev *pdev, 854 struct dp_tx_ppdu_info *ppdu_info, 855 uint8_t user_idx) 856 { 857 qdf_nbuf_queue_t *usr_mpdu_q = NULL; 858 qdf_nbuf_t buf = NULL; 859 uint8_t mpdu_count = 0; 860 861 usr_mpdu_q = &TXMON_PPDU_USR(ppdu_info, user_idx, mpdu_q); 862 863 while ((buf = qdf_nbuf_queue_remove(usr_mpdu_q)) != NULL) { 864 uint32_t num_frag = dp_tx_mon_nbuf_get_num_frag(buf); 865 866 ppdu_info->hal_txmon.rx_status.rx_user_status = 867 &ppdu_info->hal_txmon.rx_user_status[user_idx]; 868 869 if (dp_tx_lite_mon_filtering(pdev, ppdu_info, buf, 870 ++mpdu_count)) { 871 qdf_nbuf_free(buf); 872 continue; 873 } 874 875 qdf_nbuf_update_radiotap(&ppdu_info->hal_txmon.rx_status, 876 buf, qdf_nbuf_headroom(buf)); 877 878 dp_tx_mon_send_to_stack(pdev, buf, num_frag, 879 TXMON_PPDU(ppdu_info, ppdu_id)); 880 } 881 } 882 883 #define PHY_MEDIUM_MHZ 960 884 #define PHY_TIMESTAMP_WRAP (0xFFFFFFFF / PHY_MEDIUM_MHZ) 885 886 /** 887 * dp_populate_tsft_from_phy_timestamp() - API to get tsft from phy timestamp 888 * @pdev: pdev Handle 889 * @ppdu_info: ppdi_info Handle 890 * 891 * Return: QDF_STATUS 892 */ 893 static QDF_STATUS 894 dp_populate_tsft_from_phy_timestamp(struct dp_pdev *pdev, 895 struct dp_tx_ppdu_info *ppdu_info) 896 { 897 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 898 struct dp_mon_pdev_be *mon_pdev_be = 899 
dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 900 struct dp_pdev_tx_monitor_be *tx_mon_be = 901 &mon_pdev_be->tx_monitor_be; 902 uint64_t tsft = 0; 903 uint32_t ppdu_timestamp = 0; 904 905 tsft = TXMON_PPDU_COM(ppdu_info, tsft); 906 ppdu_timestamp = TXMON_PPDU_COM(ppdu_info, ppdu_timestamp); 907 908 if (tsft && ppdu_timestamp) { 909 /* update tsft and ppdu timestamp */ 910 tx_mon_be->last_tsft = tsft; 911 tx_mon_be->last_ppdu_timestamp = ppdu_timestamp; 912 } else if (!tx_mon_be->last_ppdu_timestamp || !tx_mon_be->last_tsft) { 913 return QDF_STATUS_E_EMPTY; 914 } 915 916 if (!tsft && ppdu_timestamp) { 917 /* response window */ 918 uint32_t cur_usec = ppdu_timestamp / PHY_MEDIUM_MHZ; 919 uint32_t last_usec = (tx_mon_be->last_ppdu_timestamp / 920 PHY_MEDIUM_MHZ); 921 uint32_t diff = 0; 922 923 if (last_usec < cur_usec) { 924 diff = cur_usec - last_usec; 925 tsft = tx_mon_be->last_tsft + diff; 926 } else { 927 diff = (PHY_TIMESTAMP_WRAP - last_usec) + cur_usec; 928 tsft = tx_mon_be->last_tsft + diff; 929 } 930 TXMON_PPDU_COM(ppdu_info, tsft) = tsft; 931 /* update tsft and ppdu timestamp */ 932 tx_mon_be->last_tsft = tsft; 933 tx_mon_be->last_ppdu_timestamp = ppdu_timestamp; 934 } 935 936 return QDF_STATUS_SUCCESS; 937 } 938 939 /** 940 * dp_tx_mon_update_radiotap() - API to update radiotap information 941 * @pdev: pdev Handle 942 * @ppdu_info: pointer to dp_tx_ppdu_info 943 * 944 * Return: void 945 */ 946 static void 947 dp_tx_mon_update_radiotap(struct dp_pdev *pdev, 948 struct dp_tx_ppdu_info *ppdu_info) 949 { 950 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 951 struct dp_mon_pdev_be *mon_pdev_be = 952 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 953 struct dp_pdev_tx_monitor_be *tx_mon_be = 954 &mon_pdev_be->tx_monitor_be; 955 uint32_t usr_idx = 0; 956 uint32_t num_users = 0; 957 958 num_users = TXMON_PPDU_HAL(ppdu_info, num_users); 959 960 if (qdf_unlikely(TXMON_PPDU_COM(ppdu_info, chan_num) == 0)) 961 TXMON_PPDU_COM(ppdu_info, chan_num) = 962 
pdev->operating_channel.num; 963 964 if (qdf_unlikely(TXMON_PPDU_COM(ppdu_info, chan_freq) == 0)) 965 TXMON_PPDU_COM(ppdu_info, chan_freq) = 966 pdev->operating_channel.freq; 967 968 if (QDF_STATUS_SUCCESS != 969 dp_populate_tsft_from_phy_timestamp(pdev, ppdu_info)) { 970 /* free the ppdu_info */ 971 dp_tx_mon_free_ppdu_info(ppdu_info, tx_mon_be); 972 return; 973 } 974 975 for (usr_idx = 0; usr_idx < num_users; usr_idx++) { 976 qdf_nbuf_queue_t *mpdu_q = NULL; 977 978 /* set AMPDU flag if number mpdu is more than 1 */ 979 mpdu_q = &TXMON_PPDU_USR(ppdu_info, usr_idx, mpdu_q); 980 if (mpdu_q && (qdf_nbuf_queue_len(mpdu_q) > 1)) { 981 TXMON_PPDU_COM(ppdu_info, 982 rs_flags) |= IEEE80211_AMPDU_FLAG; 983 TXMON_PPDU_USR(ppdu_info, usr_idx, is_ampdu) = 1; 984 } 985 986 if (qdf_unlikely(!TXMON_PPDU_COM(ppdu_info, rate))) { 987 uint32_t rate = 0; 988 uint32_t rix = 0; 989 uint16_t ratecode = 0; 990 991 rate = dp_getrateindex(TXMON_PPDU_COM(ppdu_info, sgi), 992 TXMON_PPDU_USR(ppdu_info, 993 usr_idx, mcs), 994 TXMON_PPDU_COM(ppdu_info, nss), 995 TXMON_PPDU_COM(ppdu_info, 996 preamble_type), 997 TXMON_PPDU_COM(ppdu_info, bw), 998 0, 999 &rix, &ratecode); 1000 1001 /* update rate */ 1002 TXMON_PPDU_COM(ppdu_info, rate) = rate; 1003 } 1004 1005 dp_tx_mon_send_per_usr_mpdu(pdev, ppdu_info, usr_idx); 1006 } 1007 } 1008 1009 /** 1010 * dp_tx_mon_ppdu_process - Deferred PPDU stats handler 1011 * @context: Opaque work context (PDEV) 1012 * 1013 * Return: none 1014 */ 1015 void dp_tx_mon_ppdu_process(void *context) 1016 { 1017 struct dp_pdev *pdev = (struct dp_pdev *)context; 1018 struct dp_mon_pdev *mon_pdev; 1019 struct dp_mon_pdev_be *mon_pdev_be; 1020 struct dp_tx_ppdu_info *defer_ppdu_info = NULL; 1021 struct dp_tx_ppdu_info *defer_ppdu_info_next = NULL; 1022 struct dp_pdev_tx_monitor_be *tx_mon_be; 1023 1024 /* sanity check */ 1025 if (qdf_unlikely(!pdev)) 1026 return; 1027 1028 mon_pdev = pdev->monitor_pdev; 1029 1030 if (qdf_unlikely(!mon_pdev)) 1031 return; 1032 1033 
mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1034 if (qdf_unlikely(!mon_pdev_be)) 1035 return; 1036 1037 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1038 if (qdf_unlikely(TX_MON_BE_DISABLE == tx_mon_be->mode && 1039 !dp_lite_mon_is_tx_enabled(mon_pdev))) 1040 return; 1041 1042 /* take lock here */ 1043 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1044 STAILQ_CONCAT(&tx_mon_be->defer_tx_ppdu_info_queue, 1045 &tx_mon_be->tx_ppdu_info_queue); 1046 tx_mon_be->defer_ppdu_info_list_depth += 1047 tx_mon_be->tx_ppdu_info_list_depth; 1048 tx_mon_be->tx_ppdu_info_list_depth = 0; 1049 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1050 1051 STAILQ_FOREACH_SAFE(defer_ppdu_info, 1052 &tx_mon_be->defer_tx_ppdu_info_queue, 1053 tx_ppdu_info_queue_elem, defer_ppdu_info_next) { 1054 /* remove dp_tx_ppdu_info from the list */ 1055 STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue, 1056 defer_ppdu_info, 1057 dp_tx_ppdu_info, 1058 tx_ppdu_info_queue_elem); 1059 tx_mon_be->defer_ppdu_info_list_depth--; 1060 1061 dp_tx_mon_update_radiotap(pdev, defer_ppdu_info); 1062 1063 /* free the ppdu_info */ 1064 dp_tx_mon_free_ppdu_info(defer_ppdu_info, tx_mon_be); 1065 defer_ppdu_info = NULL; 1066 } 1067 } 1068 1069 /** 1070 * dp_tx_ppdu_stats_attach_2_0 - Initialize Tx PPDU stats and enhanced capture 1071 * @pdev: DP PDEV 1072 * 1073 * Return: none 1074 */ 1075 void dp_tx_ppdu_stats_attach_2_0(struct dp_pdev *pdev) 1076 { 1077 struct dp_mon_pdev *mon_pdev; 1078 struct dp_mon_pdev_be *mon_pdev_be; 1079 struct dp_pdev_tx_monitor_be *tx_mon_be; 1080 1081 if (qdf_unlikely(!pdev)) 1082 return; 1083 1084 mon_pdev = pdev->monitor_pdev; 1085 1086 if (qdf_unlikely(!mon_pdev)) 1087 return; 1088 1089 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1090 if (qdf_unlikely(!mon_pdev_be)) 1091 return; 1092 1093 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1094 1095 STAILQ_INIT(&tx_mon_be->tx_ppdu_info_queue); 1096 tx_mon_be->tx_ppdu_info_list_depth = 0; 1097 1098 
STAILQ_INIT(&tx_mon_be->defer_tx_ppdu_info_queue);
	tx_mon_be->defer_ppdu_info_list_depth = 0;

	qdf_spinlock_create(&tx_mon_be->tx_mon_list_lock);
	/* Work queue setup for TX MONITOR post handling */
	qdf_create_work(0, &tx_mon_be->post_ppdu_work,
			dp_tx_mon_ppdu_process, pdev);

	tx_mon_be->post_ppdu_workqueue =
			qdf_alloc_unbound_workqueue("tx_mon_ppdu_work_queue");
}

/**
 * dp_tx_ppdu_stats_detach_2_0 - Cleanup Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Disables tx capture, flushes and destroys the deferred-processing
 * workqueue, frees every queued ppdu_info on both lists and destroys
 * the list lock.
 *
 * Return: none
 */
void dp_tx_ppdu_stats_detach_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct dp_tx_ppdu_info *tx_ppdu_info = NULL;
	struct dp_tx_ppdu_info *tx_ppdu_info_next = NULL;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	/* TODO: disable tx_monitor, to avoid further packet from HW */
	dp_monitor_config_enh_tx_capture(pdev, TX_MON_BE_DISABLE);

	/* flush workqueue so no work item runs after destroy */
	qdf_flush_workqueue(0, tx_mon_be->post_ppdu_workqueue);
	qdf_destroy_workqueue(0, tx_mon_be->post_ppdu_workqueue);

	/*
	 * TODO: iterate both tx_ppdu_info and defer_ppdu_info_list
	 * free the tx_ppdu_info and decrement depth
	 */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->tx_ppdu_info_queue, tx_ppdu_info,
			      dp_tx_ppdu_info, tx_ppdu_info_queue_elem);
		/* decrement list length */
		tx_mon_be->tx_ppdu_info_list_depth--;
		/* free tx_ppdu_info */
		dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be);
	}
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->defer_tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue,
			      tx_ppdu_info,
			      dp_tx_ppdu_info, tx_ppdu_info_queue_elem);
		/* decrement list length */
		tx_mon_be->defer_ppdu_info_list_depth--;
		/* free tx_ppdu_info */
		dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be);
	}
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	qdf_spinlock_destroy(&tx_mon_be->tx_mon_list_lock);
}
#endif /* WLAN_TX_PKT_CAPTURE_ENH_BE */

#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
/*
 * dp_config_enh_tx_core_monitor_2_0() - API to validate core framework
 * @pdev: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_config_enh_tx_core_monitor_2_0(struct dp_pdev *pdev, uint8_t val)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;

	switch (val) {
	case TX_MON_BE_FRM_WRK_DISABLE:
	{
		/* disable: 64-byte filter length, monitor mode off */
		tx_mon_be->mode = val;
		mon_pdev_be->tx_mon_mode = 0;
		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B;
		break;
	}
	case TX_MON_BE_FRM_WRK_FULL_CAPTURE:
	{
		/* full capture: reset drop stats and use default length */
		tx_mon_be->mode = val;
		qdf_mem_zero(&tx_mon_be->stats,
			     sizeof(struct dp_tx_monitor_drop_stats));
		/* NOTE(review): mode is assigned twice in this case; the
		 * assignment below is redundant (qdf_mem_zero above only
		 * clears the stats member)
		 */
		tx_mon_be->mode = val;
		mon_pdev_be->tx_mon_mode = 1;
		mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH;
		break;
1215 } 1216 case TX_MON_BE_FRM_WRK_128B_CAPTURE: 1217 { 1218 tx_mon_be->mode = val; 1219 mon_pdev_be->tx_mon_mode = 1; 1220 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_128B; 1221 break; 1222 } 1223 default: 1224 { 1225 return QDF_STATUS_E_INVAL; 1226 } 1227 } 1228 1229 dp_mon_debug("Tx monitor mode:%d mon_mode_flag:%d config_length:%d", 1230 tx_mon_be->mode, mon_pdev_be->tx_mon_mode, 1231 mon_pdev_be->tx_mon_filter_length); 1232 1233 /* send HTT msg to configure TLV based on mode */ 1234 dp_mon_filter_setup_tx_mon_mode(pdev); 1235 dp_tx_mon_filter_update(pdev); 1236 1237 return QDF_STATUS_SUCCESS; 1238 } 1239 #endif 1240