/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_be_hw_headers.h"
#include "dp_types.h"
#include "hal_be_tx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "hal_be_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, free */
#include "dp_mon.h"
#include <dp_mon_2.0.h>
#include <dp_tx_mon_2.0.h>
#include <dp_be.h>
#include <hal_be_api_mon.h>
#include <dp_mon_filter_2.0.h>
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

/*
 * Number of consecutive polls that reap no work (while tx monitor mode is
 * enabled) before the Tx monitor block is reported as stuck.
 */
#define MAX_TX_MONITOR_STUCK 50

#ifdef TXMON_DEBUG
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be - pointer to dp_pdev_tx_monitor_be
 * @work_done - tx monitor work done
 *
 * Tracks consecutive zero-work polls while monitoring is enabled and warns
 * when the MAX_TX_MONITOR_STUCK threshold is crossed.  The TXMON_DEBUG
 * variant additionally dumps the buffer/list counters (rate limited).
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
                       uint32_t work_done)
{
    /* No progress while enabled -> count towards "stuck"; progress resets. */
    if (tx_mon_be->mode && !work_done)
        tx_mon_be->stats.tx_mon_stuck++;
    else if (tx_mon_be->mode && work_done)
        tx_mon_be->stats.tx_mon_stuck = 0;

    if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
        dp_mon_warn("Tx monitor block got stuck!!!!!");
        tx_mon_be->stats.tx_mon_stuck = 0;
        tx_mon_be->stats.total_tx_mon_stuck++;
    }

    dp_mon_debug_rl("tx_ppdu_info[%u :D %u] STATUS[R %llu: F %llu] PKT_BUF[R %llu: F %llu : P %llu : S %llu]",
                    tx_mon_be->tx_ppdu_info_list_depth,
                    tx_mon_be->defer_ppdu_info_list_depth,
                    tx_mon_be->stats.status_buf_recv,
                    tx_mon_be->stats.status_buf_free,
                    tx_mon_be->stats.pkt_buf_recv,
                    tx_mon_be->stats.pkt_buf_free,
                    tx_mon_be->stats.pkt_buf_processed,
                    tx_mon_be->stats.pkt_buf_to_stack);
}

#else
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be - pointer to dp_pdev_tx_monitor_be
 * @work_done - tx monitor work done
 *
 * Non-debug variant: only the stuck detection, no counter dump.
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
                       uint32_t work_done)
{
    if (tx_mon_be->mode && !work_done)
        tx_mon_be->stats.tx_mon_stuck++;
    else if (tx_mon_be->mode && work_done)
        tx_mon_be->stats.tx_mon_stuck = 0;

    if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
        dp_mon_warn("Tx monitor block got stuck!!!!!");
        tx_mon_be->stats.tx_mon_stuck = 0;
        tx_mon_be->stats.total_tx_mon_stuck++;
    }
}
#endif

/*
 * dp_tx_mon_srng_process_2_0() - Reap the Tx monitor destination SRNG
 * @soc: DP soc handle
 * @int_ctx: interrupt context (may be NULL for timer-driven polls)
 * @mac_id: lmac id whose Tx monitor ring is to be serviced
 * @quota: maximum number of ring entries to process in this poll
 *
 * For every destination-ring entry: translate the HW descriptor, unmap the
 * status buffer, queue the SW descriptor for replenish, and either drop
 * (empty/flush/truncated PPDUs) or hand the status TLVs to
 * dp_tx_mon_process_status_tlv().  Reaped descriptors are replenished back
 * to the Tx monitor buffer ring at the end of the poll.
 *
 * Return: number of ring entries processed (work done)
 */
static inline uint32_t
dp_tx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
                           uint32_t mac_id, uint32_t quota)
{
    struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    void *tx_mon_dst_ring_desc;
    hal_soc_handle_t hal_soc;
    void *mon_dst_srng;
    struct dp_mon_pdev *mon_pdev;
    struct dp_mon_pdev_be *mon_pdev_be;
    uint32_t work_done = 0;
    struct dp_mon_soc *mon_soc = soc->monitor_soc;
    struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
    struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
    struct dp_mon_desc_pool *tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;
    struct dp_tx_mon_desc_list mon_desc_list;
    uint32_t replenish_cnt = 0;

    if (!pdev) {
        dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
        return work_done;
    }

    mon_pdev = pdev->monitor_pdev;
    mon_dst_srng = mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng;

    if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
        dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
                   soc, mon_dst_srng);
        return work_done;
    }

    mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
    if (qdf_unlikely(!mon_pdev_be))
        return work_done;

    tx_mon_be = &mon_pdev_be->tx_monitor_be;
    hal_soc = soc->hal_soc;

    qdf_assert((hal_soc && pdev));

    qdf_spin_lock_bh(&mon_pdev->mon_lock);
    /* Local free-list of reaped SW descriptors, replenished after the loop */
    mon_desc_list.desc_list = NULL;
    mon_desc_list.tail = NULL;
    mon_desc_list.tx_mon_reap_cnt = 0;

    if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
        dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
                   __func__, __LINE__, mon_dst_srng);
        qdf_spin_unlock_bh(&mon_pdev->mon_lock);
        return work_done;
    }

    /* Peek (not pop) each entry; advance only after it is fully handled */
    while (qdf_likely((tx_mon_dst_ring_desc =
                       (void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
                      && quota--)) {
        struct hal_mon_desc hal_mon_tx_desc = {0};
        struct dp_mon_desc *mon_desc = NULL;
        qdf_frag_t status_frag = NULL;
        uint32_t end_offset = 0;

        hal_be_get_mon_dest_status(soc->hal_soc,
                                   tx_mon_dst_ring_desc,
                                   &hal_mon_tx_desc);

        if (hal_mon_tx_desc.empty_descriptor) {
            /* HW signalled drops with no buffer: update stats counter only */
            dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d DROP[PPDU:%d MPDU:%d TLV:%d] E_O_PPDU:%d",
                         hal_mon_tx_desc.ppdu_id,
                         hal_mon_tx_desc.initiator,
                         hal_mon_tx_desc.empty_descriptor,
                         hal_mon_tx_desc.ring_id,
                         hal_mon_tx_desc.looping_count,
                         hal_mon_tx_desc.ppdu_drop_count,
                         hal_mon_tx_desc.mpdu_drop_count,
                         hal_mon_tx_desc.tlv_drop_count,
                         hal_mon_tx_desc.end_of_ppdu_dropped);

            tx_mon_be->stats.ppdu_drop_cnt +=
                    hal_mon_tx_desc.ppdu_drop_count;
            tx_mon_be->stats.mpdu_drop_cnt +=
                    hal_mon_tx_desc.mpdu_drop_count;
            tx_mon_be->stats.tlv_drop_cnt +=
                    hal_mon_tx_desc.tlv_drop_count;
            work_done++;
            hal_srng_dst_get_next(hal_soc, mon_dst_srng);
            continue;
        }

        dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
                     hal_mon_tx_desc.ppdu_id,
                     hal_mon_tx_desc.initiator,
                     hal_mon_tx_desc.empty_descriptor,
                     hal_mon_tx_desc.ring_id,
                     hal_mon_tx_desc.looping_count,
                     hal_mon_tx_desc.buf_addr,
                     hal_mon_tx_desc.end_offset,
                     hal_mon_tx_desc.end_reason);

        /* buf_addr carries the SW descriptor cookie, not a DMA address */
        mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_tx_desc.buf_addr);
        qdf_assert_always(mon_desc);

        if (!mon_desc->unmapped) {
            qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
                               DP_MON_DATA_BUFFER_SIZE,
                               QDF_DMA_FROM_DEVICE);
            mon_desc->unmapped = 1;
        }

        if (mon_desc->magic != DP_MON_DESC_MAGIC) {
            dp_mon_err("Invalid monitor descriptor");
            qdf_assert_always(0);
        }

        end_offset = hal_mon_tx_desc.end_offset;

        /* Take ownership of the status fragment from the descriptor */
        status_frag = (qdf_frag_t)(mon_desc->buf_addr);
        mon_desc->buf_addr = NULL;
        /* increment reap count */
        ++mon_desc_list.tx_mon_reap_cnt;

        /* add the mon_desc to free list */
        dp_mon_add_to_free_desc_list(&mon_desc_list.desc_list,
                                     &mon_desc_list.tail, mon_desc);

        if (qdf_unlikely(!status_frag)) {
            dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
                         hal_mon_tx_desc.ppdu_id,
                         hal_mon_tx_desc.initiator,
                         hal_mon_tx_desc.empty_descriptor,
                         hal_mon_tx_desc.ring_id,
                         hal_mon_tx_desc.looping_count,
                         hal_mon_tx_desc.buf_addr,
                         hal_mon_tx_desc.end_offset,
                         hal_mon_tx_desc.end_reason);

            work_done++;
            hal_srng_dst_get_next(hal_soc, mon_dst_srng);
            continue;
        }

        tx_mon_be->stats.status_buf_recv++;

        if ((hal_mon_tx_desc.end_reason == HAL_MON_FLUSH_DETECTED) ||
            (hal_mon_tx_desc.end_reason == HAL_MON_PPDU_TRUNCATED)) {
            tx_mon_be->be_ppdu_id = hal_mon_tx_desc.ppdu_id;

            dp_tx_mon_update_end_reason(mon_pdev,
                                        hal_mon_tx_desc.ppdu_id,
                                        hal_mon_tx_desc.end_reason);
            /* check and free packet buffer from status buffer */
            dp_tx_mon_status_free_packet_buf(pdev, status_frag,
                                             end_offset,
                                             &mon_desc_list);

            tx_mon_be->stats.status_buf_free++;
            qdf_frag_free(status_frag);

            work_done++;
            hal_srng_dst_get_next(hal_soc, mon_dst_srng);
            continue;
        }

        /* Normal path: parse the status TLVs for this PPDU */
        dp_tx_mon_process_status_tlv(soc, pdev,
                                     &hal_mon_tx_desc,
                                     status_frag,
                                     end_offset,
                                     &mon_desc_list);

        work_done++;
        hal_srng_dst_get_next(hal_soc, mon_dst_srng);
    }
    dp_srng_access_end(int_ctx, soc, mon_dst_srng);

    /* Return reaped descriptors to the Tx monitor buffer ring */
    if (mon_desc_list.tx_mon_reap_cnt) {
        dp_mon_buffers_replenish(soc, &mon_soc_be->tx_mon_buf_ring,
                                 tx_mon_desc_pool,
                                 mon_desc_list.tx_mon_reap_cnt,
                                 &mon_desc_list.desc_list,
                                 &mon_desc_list.tail,
                                 &replenish_cnt);
    }
    qdf_spin_unlock_bh(&mon_pdev->mon_lock);
    dp_mon_debug("mac_id: %d, work_done:%d tx_monitor_reap_cnt:%d",
                 mac_id, work_done, mon_desc_list.tx_mon_reap_cnt);

    tx_mon_be->stats.total_tx_mon_reap_cnt += mon_desc_list.tx_mon_reap_cnt;
    tx_mon_be->stats.totat_tx_mon_replenish_cnt += replenish_cnt;
    dp_tx_mon_debug_status(tx_mon_be, work_done);

    return work_done;
}

/*
 * dp_tx_mon_process_2_0() - Entry point for Tx monitor ring processing
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_id: lmac id
 * @quota: maximum number of ring entries to process
 *
 * Return: work done (ring entries processed)
 */
uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
                      uint32_t mac_id, uint32_t quota)
{
    uint32_t work_done;

    work_done = dp_tx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);

    return work_done;
}

/*
 * dp_tx_mon_buf_desc_pool_deinit() - De-initialize the Tx monitor
 * descriptor pool
 * @soc: DP soc handle
 *
 * Return: void
 */
void
dp_tx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
{
    struct dp_mon_soc *mon_soc = soc->monitor_soc;
    struct dp_mon_soc_be *mon_soc_be =
        dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

    dp_mon_desc_pool_deinit(&mon_soc_be->tx_desc_mon);
}

QDF_STATUS
dp_tx_mon_buf_desc_pool_init(struct dp_soc *soc) 314 { 315 struct dp_mon_soc *mon_soc = soc->monitor_soc; 316 struct dp_mon_soc_be *mon_soc_be = 317 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 318 uint32_t num_entries; 319 320 num_entries = 321 wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc->wlan_cfg_ctx); 322 323 return dp_mon_desc_pool_init(&mon_soc_be->tx_desc_mon, num_entries); 324 } 325 326 void dp_tx_mon_buf_desc_pool_free(struct dp_soc *soc) 327 { 328 struct dp_mon_soc *mon_soc = soc->monitor_soc; 329 struct dp_mon_soc_be *mon_soc_be = 330 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 331 332 if (mon_soc_be) 333 dp_mon_desc_pool_free(&mon_soc_be->tx_desc_mon); 334 } 335 336 QDF_STATUS 337 dp_tx_mon_buf_desc_pool_alloc(struct dp_soc *soc) 338 { 339 struct dp_srng *mon_buf_ring; 340 struct dp_mon_desc_pool *tx_mon_desc_pool; 341 int entries; 342 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 343 struct dp_mon_soc *mon_soc = soc->monitor_soc; 344 struct dp_mon_soc_be *mon_soc_be = 345 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 346 347 soc_cfg_ctx = soc->wlan_cfg_ctx; 348 349 entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx); 350 351 mon_buf_ring = &mon_soc_be->tx_mon_buf_ring; 352 353 tx_mon_desc_pool = &mon_soc_be->tx_desc_mon; 354 355 qdf_print("%s:%d tx mon buf desc pool entries: %d", __func__, __LINE__, entries); 356 return dp_mon_desc_pool_alloc(entries, tx_mon_desc_pool); 357 } 358 359 void 360 dp_tx_mon_buffers_free(struct dp_soc *soc) 361 { 362 struct dp_mon_desc_pool *tx_mon_desc_pool; 363 struct dp_mon_soc *mon_soc = soc->monitor_soc; 364 struct dp_mon_soc_be *mon_soc_be = 365 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 366 367 tx_mon_desc_pool = &mon_soc_be->tx_desc_mon; 368 369 dp_mon_pool_frag_unmap_and_free(soc, tx_mon_desc_pool); 370 } 371 372 QDF_STATUS 373 dp_tx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size) 374 { 375 struct dp_srng *mon_buf_ring; 376 struct dp_mon_desc_pool *tx_mon_desc_pool; 377 union dp_mon_desc_list_elem_t 
*desc_list = NULL; 378 union dp_mon_desc_list_elem_t *tail = NULL; 379 struct dp_mon_soc *mon_soc = soc->monitor_soc; 380 struct dp_mon_soc_be *mon_soc_be = 381 dp_get_be_mon_soc_from_dp_mon_soc(mon_soc); 382 383 mon_buf_ring = &mon_soc_be->tx_mon_buf_ring; 384 385 tx_mon_desc_pool = &mon_soc_be->tx_desc_mon; 386 387 return dp_mon_buffers_replenish(soc, mon_buf_ring, 388 tx_mon_desc_pool, 389 size, 390 &desc_list, &tail, NULL); 391 } 392 393 #ifdef WLAN_TX_PKT_CAPTURE_ENH_BE 394 395 /* 396 * dp_tx_mon_nbuf_get_num_frag() - get total number of fragments 397 * @buf: Network buf instance 398 * 399 * Return: number of fragments 400 */ 401 static inline 402 uint32_t dp_tx_mon_nbuf_get_num_frag(qdf_nbuf_t nbuf) 403 { 404 uint32_t num_frag = 0; 405 406 if (qdf_unlikely(!nbuf)) 407 return num_frag; 408 409 num_frag = qdf_nbuf_get_nr_frags_in_fraglist(nbuf); 410 411 return num_frag; 412 } 413 414 /* 415 * dp_tx_mon_free_usr_mpduq() - API to free user mpduq 416 * @tx_ppdu_info - pointer to tx_ppdu_info 417 * @usr_idx - user index 418 * @tx_mon_be - pointer to tx capture be 419 * 420 * Return: void 421 */ 422 void dp_tx_mon_free_usr_mpduq(struct dp_tx_ppdu_info *tx_ppdu_info, 423 uint8_t usr_idx, 424 struct dp_pdev_tx_monitor_be *tx_mon_be) 425 { 426 qdf_nbuf_queue_t *mpdu_q; 427 uint32_t num_frag = 0; 428 qdf_nbuf_t buf = NULL; 429 430 if (qdf_unlikely(!tx_ppdu_info)) 431 return; 432 433 mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q); 434 435 while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) { 436 num_frag += dp_tx_mon_nbuf_get_num_frag(buf); 437 qdf_nbuf_free(buf); 438 } 439 tx_mon_be->stats.pkt_buf_free += num_frag; 440 } 441 442 /* 443 * dp_tx_mon_free_ppdu_info() - API to free dp_tx_ppdu_info 444 * @tx_ppdu_info - pointer to tx_ppdu_info 445 * @tx_mon_be - pointer to tx capture be 446 * 447 * Return: void 448 */ 449 void dp_tx_mon_free_ppdu_info(struct dp_tx_ppdu_info *tx_ppdu_info, 450 struct dp_pdev_tx_monitor_be *tx_mon_be) 451 { 452 uint32_t user = 0; 
453 454 for (; user < TXMON_PPDU_HAL(tx_ppdu_info, num_users); user++) { 455 qdf_nbuf_queue_t *mpdu_q; 456 uint32_t num_frag = 0; 457 qdf_nbuf_t buf = NULL; 458 459 mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user, mpdu_q); 460 461 while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) { 462 num_frag += dp_tx_mon_nbuf_get_num_frag(buf); 463 qdf_nbuf_free(buf); 464 } 465 tx_mon_be->stats.pkt_buf_free += num_frag; 466 } 467 468 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0; 469 qdf_mem_free(tx_ppdu_info); 470 } 471 472 /* 473 * dp_tx_mon_get_ppdu_info() - API to allocate dp_tx_ppdu_info 474 * @pdev - pdev handle 475 * @type - type of ppdu_info data or protection 476 * @num_user - number user in a ppdu_info 477 * @ppdu_id - ppdu_id number 478 * 479 * Return: pointer to dp_tx_ppdu_info 480 */ 481 struct dp_tx_ppdu_info *dp_tx_mon_get_ppdu_info(struct dp_pdev *pdev, 482 enum tx_ppdu_info_type type, 483 uint8_t num_user, 484 uint32_t ppdu_id) 485 { 486 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 487 struct dp_mon_pdev_be *mon_pdev_be = 488 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 489 struct dp_pdev_tx_monitor_be *tx_mon_be = 490 &mon_pdev_be->tx_monitor_be; 491 struct dp_tx_ppdu_info *tx_ppdu_info; 492 size_t sz_ppdu_info = 0; 493 uint8_t i; 494 495 /* allocate new tx_ppdu_info */ 496 sz_ppdu_info = (sizeof(struct dp_tx_ppdu_info) + 497 (sizeof(struct mon_rx_user_status) * num_user)); 498 499 tx_ppdu_info = (struct dp_tx_ppdu_info *)qdf_mem_malloc(sz_ppdu_info); 500 if (!tx_ppdu_info) { 501 dp_mon_err("allocation of tx_ppdu_info type[%d] failed!!!", 502 type); 503 return NULL; 504 } 505 506 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0; 507 TXMON_PPDU_HAL(tx_ppdu_info, num_users) = num_user; 508 TXMON_PPDU_HAL(tx_ppdu_info, ppdu_id) = ppdu_id; 509 TXMON_PPDU(tx_ppdu_info, ppdu_id) = ppdu_id; 510 511 for (i = 0; i < num_user; i++) { 512 qdf_nbuf_queue_t *mpdu_q; 513 514 mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, i, mpdu_q); 515 qdf_nbuf_queue_init(mpdu_q); 516 } 517 518 /* 
assign tx_ppdu_info to monitor pdev for reference */ 519 if (type == TX_PROT_PPDU_INFO) { 520 qdf_mem_zero(&tx_mon_be->prot_status_info, sizeof(struct hal_tx_status_info)); 521 tx_mon_be->tx_prot_ppdu_info = tx_ppdu_info; 522 TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 0; 523 } else { 524 qdf_mem_zero(&tx_mon_be->data_status_info, sizeof(struct hal_tx_status_info)); 525 tx_mon_be->tx_data_ppdu_info = tx_ppdu_info; 526 TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 1; 527 } 528 529 return tx_ppdu_info; 530 } 531 532 /* 533 * dp_print_pdev_tx_monitor_stats_2_0: print tx capture stats 534 * @pdev: DP PDEV handle 535 * 536 * return: void 537 */ 538 void dp_print_pdev_tx_monitor_stats_2_0(struct dp_pdev *pdev) 539 { 540 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 541 struct dp_mon_pdev_be *mon_pdev_be = 542 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 543 struct dp_pdev_tx_monitor_be *tx_mon_be = 544 &mon_pdev_be->tx_monitor_be; 545 struct dp_tx_monitor_drop_stats stats = {0}; 546 547 qdf_mem_copy(&stats, &tx_mon_be->stats, 548 sizeof(struct dp_tx_monitor_drop_stats)); 549 550 /* TX monitor stats needed for beryllium */ 551 DP_PRINT_STATS("\n\tTX Capture BE stats mode[%d]:", tx_mon_be->mode); 552 DP_PRINT_STATS("\tbuffer pending : %u", tx_mon_be->last_frag_q_idx); 553 DP_PRINT_STATS("\treplenish count: %llu", 554 stats.totat_tx_mon_replenish_cnt); 555 DP_PRINT_STATS("\treap count : %llu", stats.total_tx_mon_reap_cnt); 556 DP_PRINT_STATS("\tmonitor stuck : %u", stats.total_tx_mon_stuck); 557 DP_PRINT_STATS("\tStatus buffer"); 558 DP_PRINT_STATS("\t\treceived : %llu", stats.status_buf_recv); 559 DP_PRINT_STATS("\t\tfree : %llu", stats.status_buf_free); 560 DP_PRINT_STATS("\tPacket buffer"); 561 DP_PRINT_STATS("\t\treceived : %llu", stats.pkt_buf_recv); 562 DP_PRINT_STATS("\t\tfree : %llu", stats.pkt_buf_free); 563 DP_PRINT_STATS("\t\tprocessed : %llu", stats.pkt_buf_processed); 564 DP_PRINT_STATS("\t\tto stack : %llu", stats.pkt_buf_to_stack); 565 DP_PRINT_STATS("\tppdu 
info"); 566 DP_PRINT_STATS("\t\tthreshold : %llu", stats.ppdu_info_drop_th); 567 DP_PRINT_STATS("\t\tflush : %llu", stats.ppdu_info_drop_flush); 568 DP_PRINT_STATS("\t\ttruncated : %llu", stats.ppdu_info_drop_trunc); 569 DP_PRINT_STATS("\tDrop stats"); 570 DP_PRINT_STATS("\t\tppdu drop : %llu", stats.ppdu_drop_cnt); 571 DP_PRINT_STATS("\t\tmpdu drop : %llu", stats.mpdu_drop_cnt); 572 DP_PRINT_STATS("\t\ttlv drop : %llu", stats.tlv_drop_cnt); 573 } 574 575 /* 576 * dp_config_enh_tx_monitor_2_0()- API to enable/disable enhanced tx capture 577 * @pdev_handle: DP_PDEV handle 578 * @val: user provided value 579 * 580 * Return: QDF_STATUS 581 */ 582 QDF_STATUS 583 dp_config_enh_tx_monitor_2_0(struct dp_pdev *pdev, uint8_t val) 584 { 585 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 586 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 587 struct dp_mon_pdev_be *mon_pdev_be = 588 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 589 struct dp_pdev_tx_monitor_be *tx_mon_be = 590 &mon_pdev_be->tx_monitor_be; 591 struct dp_soc *soc = pdev->soc; 592 uint16_t num_of_buffers; 593 QDF_STATUS status; 594 595 soc_cfg_ctx = soc->wlan_cfg_ctx; 596 switch (val) { 597 case TX_MON_BE_DISABLE: 598 { 599 tx_mon_be->mode = TX_MON_BE_DISABLE; 600 mon_pdev_be->tx_mon_mode = 0; 601 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B; 602 break; 603 } 604 case TX_MON_BE_FULL_CAPTURE: 605 { 606 num_of_buffers = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx); 607 status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev, 608 num_of_buffers); 609 if (status != QDF_STATUS_SUCCESS) { 610 dp_mon_err("Tx monitor buffer allocation failed"); 611 return status; 612 } 613 qdf_mem_zero(&tx_mon_be->stats, 614 sizeof(struct dp_tx_monitor_drop_stats)); 615 tx_mon_be->last_tsft = 0; 616 tx_mon_be->last_ppdu_timestamp = 0; 617 tx_mon_be->mode = TX_MON_BE_FULL_CAPTURE; 618 mon_pdev_be->tx_mon_mode = 1; 619 mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH; 620 break; 621 } 622 case TX_MON_BE_PEER_FILTER: 
623 { 624 status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev, 625 DP_MON_RING_FILL_LEVEL_DEFAULT); 626 if (status != QDF_STATUS_SUCCESS) { 627 dp_mon_err("Tx monitor buffer allocation failed"); 628 return status; 629 } 630 tx_mon_be->mode = TX_MON_BE_PEER_FILTER; 631 mon_pdev_be->tx_mon_mode = 2; 632 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_256B; 633 break; 634 } 635 default: 636 { 637 return QDF_STATUS_E_INVAL; 638 } 639 } 640 641 dp_mon_info("Tx monitor mode:%d mon_mode_flag:%d config_length:%d", 642 tx_mon_be->mode, mon_pdev_be->tx_mon_mode, 643 mon_pdev_be->tx_mon_filter_length); 644 645 dp_mon_filter_setup_tx_mon_mode(pdev); 646 dp_tx_mon_filter_update(pdev); 647 648 return QDF_STATUS_SUCCESS; 649 } 650 651 /* 652 * dp_peer_set_tx_capture_enabled_2_0() - add tx monitor peer filter 653 * @pdev: Datapath PDEV handle 654 * @peer: Datapath PEER handle 655 * @is_tx_pkt_cap_enable: flag for tx capture enable/disable 656 * @peer_mac: peer mac address 657 * 658 * Return: status 659 */ 660 QDF_STATUS dp_peer_set_tx_capture_enabled_2_0(struct dp_pdev *pdev_handle, 661 struct dp_peer *peer_handle, 662 uint8_t is_tx_pkt_cap_enable, 663 uint8_t *peer_mac) 664 { 665 return QDF_STATUS_SUCCESS; 666 } 667 668 #ifdef QCA_SUPPORT_LITE_MONITOR 669 static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info, 670 struct dp_mon_pdev_be *mon_pdev_be) 671 { 672 struct dp_lite_mon_config *config; 673 struct dp_vdev *lite_mon_vdev; 674 675 config = &mon_pdev_be->lite_mon_tx_config->tx_config; 676 lite_mon_vdev = config->lite_mon_vdev; 677 678 if (lite_mon_vdev) 679 tx_cap_info->osif_vdev = lite_mon_vdev->osif_vdev; 680 } 681 682 /** 683 * dp_lite_mon_filter_ppdu() - Filter frames at ppdu level 684 * @mpdu_count: mpdu count in the nbuf queue 685 * @level: Lite monitor filter level 686 * 687 * Return: QDF_STATUS 688 */ 689 static inline QDF_STATUS 690 dp_lite_mon_filter_ppdu(uint8_t mpdu_count, uint8_t level) 691 { 692 if (level == CDP_LITE_MON_LEVEL_PPDU && 
mpdu_count > 1) 693 return QDF_STATUS_E_CANCELED; 694 695 return QDF_STATUS_SUCCESS; 696 } 697 698 /** 699 * dp_lite_mon_filter_peer() - filter frames with peer 700 * @config: Lite monitor configuration 701 * @wh: Pointer to ieee80211_frame 702 * 703 * Return: QDF_STATUS 704 */ 705 static inline QDF_STATUS 706 dp_lite_mon_filter_peer(struct dp_lite_mon_tx_config *config, 707 struct ieee80211_frame_min_one *wh) 708 { 709 struct dp_lite_mon_peer *peer; 710 711 /* Return here if peer filtering is not required */ 712 if (!config->tx_config.peer_count) 713 return QDF_STATUS_SUCCESS; 714 715 TAILQ_FOREACH(peer, &config->tx_config.peer_list, peer_list_elem) { 716 if (!qdf_mem_cmp(&peer->peer_mac.raw[0], 717 &wh->i_addr1[0], QDF_MAC_ADDR_SIZE)) { 718 return QDF_STATUS_SUCCESS; 719 } 720 } 721 722 return QDF_STATUS_E_ABORTED; 723 } 724 725 /** 726 * dp_lite_mon_filter_subtype() - filter frames with subtype 727 * @config: Lite monitor configuration 728 * @wh: Pointer to ieee80211_frame 729 * 730 * Return: QDF_STATUS 731 */ 732 static inline QDF_STATUS 733 dp_lite_mon_filter_subtype(struct dp_lite_mon_tx_config *config, 734 struct ieee80211_frame_min_one *wh) 735 { 736 uint16_t mgmt_filter, ctrl_filter, data_filter, type, subtype; 737 uint8_t is_mcast = 0; 738 739 /* Return here if subtype filtering is not required */ 740 if (!config->subtype_filtering) 741 return QDF_STATUS_SUCCESS; 742 743 mgmt_filter = config->tx_config.mgmt_filter[DP_MON_FRM_FILTER_MODE_FP]; 744 ctrl_filter = config->tx_config.ctrl_filter[DP_MON_FRM_FILTER_MODE_FP]; 745 data_filter = config->tx_config.data_filter[DP_MON_FRM_FILTER_MODE_FP]; 746 747 type = (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK); 748 subtype = ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 749 IEEE80211_FC0_SUBTYPE_SHIFT); 750 751 switch (type) { 752 case IEEE80211_FC0_TYPE_MGT: 753 if (mgmt_filter >> subtype & 0x1) 754 return QDF_STATUS_SUCCESS; 755 else 756 return QDF_STATUS_E_ABORTED; 757 case IEEE80211_FC0_TYPE_CTL: 758 if (ctrl_filter 
>> subtype & 0x1) 759 return QDF_STATUS_SUCCESS; 760 else 761 return QDF_STATUS_E_ABORTED; 762 case IEEE80211_FC0_TYPE_DATA: 763 is_mcast = DP_FRAME_IS_MULTICAST(wh->i_addr1); 764 if ((is_mcast && (data_filter & FILTER_DATA_MCAST)) || 765 (!is_mcast && (data_filter & FILTER_DATA_UCAST))) 766 return QDF_STATUS_SUCCESS; 767 return QDF_STATUS_E_ABORTED; 768 default: 769 return QDF_STATUS_E_INVAL; 770 } 771 } 772 773 /** 774 * dp_lite_mon_filter_peer_subtype() - filter frames with subtype and peer 775 * @config: Lite monitor configuration 776 * @buf: Pointer to nbuf 777 * 778 * Return: QDF_STATUS 779 */ 780 static inline QDF_STATUS 781 dp_lite_mon_filter_peer_subtype(struct dp_lite_mon_tx_config *config, 782 qdf_nbuf_t buf) 783 { 784 struct ieee80211_frame_min_one *wh; 785 qdf_nbuf_t nbuf; 786 QDF_STATUS ret; 787 788 /* Return here if subtype and peer filtering is not required */ 789 if (!config->subtype_filtering && !config->tx_config.peer_count) 790 return QDF_STATUS_SUCCESS; 791 792 if (dp_tx_mon_nbuf_get_num_frag(buf)) { 793 wh = (struct ieee80211_frame_min_one *)qdf_nbuf_get_frag_addr(buf, 0); 794 } else { 795 nbuf = qdf_nbuf_get_ext_list(buf); 796 if (nbuf) 797 wh = (struct ieee80211_frame_min_one *)qdf_nbuf_data(nbuf); 798 else 799 return QDF_STATUS_E_INVAL; 800 } 801 802 ret = dp_lite_mon_filter_subtype(config, wh); 803 if (ret) 804 return ret; 805 806 ret = dp_lite_mon_filter_peer(config, wh); 807 if (ret) 808 return ret; 809 810 return QDF_STATUS_SUCCESS; 811 } 812 813 /** 814 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor 815 * @pdev: Pointer to physical device 816 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure 817 * @buf: qdf nbuf structure of buffer 818 * @mpdu_count: mpdu count in the nbuf queue 819 * 820 * Return: QDF_STATUS 821 */ 822 static inline QDF_STATUS 823 dp_tx_lite_mon_filtering(struct dp_pdev *pdev, 824 struct dp_tx_ppdu_info *tx_ppdu_info, 825 qdf_nbuf_t buf, int mpdu_count) 826 { 827 struct dp_mon_pdev *mon_pdev 
= pdev->monitor_pdev; 828 struct dp_mon_pdev_be *mon_pdev_be = 829 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 830 struct dp_lite_mon_tx_config *config = 831 mon_pdev_be->lite_mon_tx_config; 832 QDF_STATUS ret; 833 834 if (!dp_lite_mon_is_tx_enabled(mon_pdev)) 835 return QDF_STATUS_SUCCESS; 836 837 /* PPDU level filtering */ 838 ret = dp_lite_mon_filter_ppdu(mpdu_count, config->tx_config.level); 839 if (ret) 840 return ret; 841 842 /* Subtype and peer filtering */ 843 ret = dp_lite_mon_filter_peer_subtype(config, buf); 844 if (ret) 845 return ret; 846 847 return QDF_STATUS_SUCCESS; 848 } 849 850 #else 851 static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info, 852 struct dp_mon_pdev_be *mon_pdev_be) 853 { 854 } 855 856 /** 857 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor 858 * @pdev: Pointer to physical device 859 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure 860 * @buf: qdf nbuf structure of buffer 861 * @mpdu_count: mpdu count in the nbuf queue 862 * 863 * Return: QDF_STATUS 864 */ 865 static inline QDF_STATUS 866 dp_tx_lite_mon_filtering(struct dp_pdev *pdev, 867 struct dp_tx_ppdu_info *tx_ppdu_info, 868 qdf_nbuf_t buf, int mpdu_count) 869 { 870 return QDF_STATUS_SUCCESS; 871 } 872 #endif 873 874 /** 875 * dp_tx_mon_send_to_stack() - API to send to stack 876 * @pdev: pdev Handle 877 * @mpdu: pointer to mpdu 878 * @num_frag: number of frag in mpdu 879 * @ppdu_id: ppdu id of the mpdu 880 * 881 * Return: void 882 */ 883 static void 884 dp_tx_mon_send_to_stack(struct dp_pdev *pdev, qdf_nbuf_t mpdu, 885 uint32_t num_frag, uint32_t ppdu_id) 886 { 887 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 888 struct dp_mon_pdev_be *mon_pdev_be = 889 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 890 struct dp_pdev_tx_monitor_be *tx_mon_be = 891 &mon_pdev_be->tx_monitor_be; 892 struct cdp_tx_indication_info tx_capture_info = {0}; 893 894 tx_mon_be->stats.pkt_buf_to_stack += num_frag; 895 896 
tx_capture_info.radiotap_done = 1; 897 tx_capture_info.mpdu_nbuf = mpdu; 898 tx_capture_info.mpdu_info.ppdu_id = ppdu_id; 899 if (!dp_lite_mon_is_tx_enabled(mon_pdev)) { 900 dp_wdi_event_handler(WDI_EVENT_TX_PKT_CAPTURE, 901 pdev->soc, 902 &tx_capture_info, 903 HTT_INVALID_PEER, 904 WDI_NO_VAL, 905 pdev->pdev_id); 906 } else { 907 dp_fill_lite_mon_vdev(&tx_capture_info, mon_pdev_be); 908 dp_wdi_event_handler(WDI_EVENT_LITE_MON_TX, 909 pdev->soc, 910 &tx_capture_info, 911 HTT_INVALID_PEER, 912 WDI_NO_VAL, 913 pdev->pdev_id); 914 } 915 if (tx_capture_info.mpdu_nbuf) 916 qdf_nbuf_free(tx_capture_info.mpdu_nbuf); 917 } 918 919 /** 920 * dp_tx_mon_send_per_usr_mpdu() - API to send per usr mpdu to stack 921 * @pdev: pdev Handle 922 * @ppdu_info: pointer to dp_tx_ppdu_info 923 * @user_id: current user index 924 * 925 * Return: void 926 */ 927 static void 928 dp_tx_mon_send_per_usr_mpdu(struct dp_pdev *pdev, 929 struct dp_tx_ppdu_info *ppdu_info, 930 uint8_t user_idx) 931 { 932 qdf_nbuf_queue_t *usr_mpdu_q = NULL; 933 qdf_nbuf_t buf = NULL; 934 uint8_t mpdu_count = 0; 935 936 usr_mpdu_q = &TXMON_PPDU_USR(ppdu_info, user_idx, mpdu_q); 937 938 while ((buf = qdf_nbuf_queue_remove(usr_mpdu_q)) != NULL) { 939 uint32_t num_frag = dp_tx_mon_nbuf_get_num_frag(buf); 940 941 ppdu_info->hal_txmon.rx_status.rx_user_status = 942 &ppdu_info->hal_txmon.rx_user_status[user_idx]; 943 944 if (dp_tx_lite_mon_filtering(pdev, ppdu_info, buf, 945 ++mpdu_count)) { 946 qdf_nbuf_free(buf); 947 continue; 948 } 949 950 qdf_nbuf_update_radiotap(&ppdu_info->hal_txmon.rx_status, 951 buf, qdf_nbuf_headroom(buf)); 952 953 dp_tx_mon_send_to_stack(pdev, buf, num_frag, 954 TXMON_PPDU(ppdu_info, ppdu_id)); 955 } 956 } 957 958 #define PHY_MEDIUM_MHZ 960 959 #define PHY_TIMESTAMP_WRAP (0xFFFFFFFF / PHY_MEDIUM_MHZ) 960 961 /** 962 * dp_populate_tsft_from_phy_timestamp() - API to get tsft from phy timestamp 963 * @pdev: pdev Handle 964 * @ppdu_info: ppdi_info Handle 965 * 966 * Return: QDF_STATUS 967 */ 968 
static QDF_STATUS
dp_populate_tsft_from_phy_timestamp(struct dp_pdev *pdev,
				    struct dp_tx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	uint64_t tsft = 0;
	uint32_t ppdu_timestamp = 0;

	tsft = TXMON_PPDU_COM(ppdu_info, tsft);
	ppdu_timestamp = TXMON_PPDU_COM(ppdu_info, ppdu_timestamp);

	if (tsft && ppdu_timestamp) {
		/* both present: just cache tsft and ppdu timestamp */
		tx_mon_be->last_tsft = tsft;
		tx_mon_be->last_ppdu_timestamp = ppdu_timestamp;
	} else if (!tx_mon_be->last_ppdu_timestamp || !tx_mon_be->last_tsft) {
		/* nothing cached yet, cannot derive a tsft */
		return QDF_STATUS_E_EMPTY;
	}

	if (!tsft && ppdu_timestamp) {
		/*
		 * response window: tsft missing, derive it from the delta
		 * between current and cached phy timestamps (converted from
		 * ticks to usec by dividing by PHY_MEDIUM_MHZ)
		 */
		uint32_t cur_usec = ppdu_timestamp / PHY_MEDIUM_MHZ;
		uint32_t last_usec = (tx_mon_be->last_ppdu_timestamp /
				      PHY_MEDIUM_MHZ);
		uint32_t diff = 0;

		if (last_usec < cur_usec) {
			diff = cur_usec - last_usec;
			tsft = tx_mon_be->last_tsft + diff;
		} else {
			/* phy timestamp wrapped between the two samples */
			diff = (PHY_TIMESTAMP_WRAP - last_usec) + cur_usec;
			tsft = tx_mon_be->last_tsft + diff;
		}
		TXMON_PPDU_COM(ppdu_info, tsft) = tsft;
		/* update cached tsft and ppdu timestamp */
		tx_mon_be->last_tsft = tsft;
		tx_mon_be->last_ppdu_timestamp = ppdu_timestamp;
	}

	/* still nothing usable in the ppdu_info: report empty */
	if (!TXMON_PPDU_COM(ppdu_info, tsft) &&
	    !TXMON_PPDU_COM(ppdu_info, ppdu_timestamp))
		return QDF_STATUS_E_EMPTY;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_mon_update_radiotap() - API to update radiotap information
 * @pdev: pdev Handle
 * @ppdu_info: pointer to dp_tx_ppdu_info
 *
 * Fills in channel/tsft/rate defaults missing from @ppdu_info, sets the
 * AMPDU flag per user when more than one mpdu is queued, then sends every
 * user's mpdu queue to the stack. Bails out without sending anything when
 * no tsft can be populated.
 *
 * Return: void
 */
static void
dp_tx_mon_update_radiotap(struct dp_pdev *pdev,
			  struct dp_tx_ppdu_info *ppdu_info)
{
	uint32_t usr_idx = 0;
	uint32_t num_users = 0;

	num_users = TXMON_PPDU_HAL(ppdu_info, num_users);

	/* fall back to the pdev operating channel when HW gave none */
	if (qdf_unlikely(TXMON_PPDU_COM(ppdu_info, chan_num) == 0))
		TXMON_PPDU_COM(ppdu_info, chan_num) =
				pdev->operating_channel.num;

	if (qdf_unlikely(TXMON_PPDU_COM(ppdu_info, chan_freq) == 0))
		TXMON_PPDU_COM(ppdu_info, chan_freq) =
				pdev->operating_channel.freq;

	if (QDF_STATUS_SUCCESS !=
	    dp_populate_tsft_from_phy_timestamp(pdev, ppdu_info))
		return;

	for (usr_idx = 0; usr_idx < num_users; usr_idx++) {
		qdf_nbuf_queue_t *mpdu_q = NULL;

		/* set AMPDU flag if number mpdu is more than 1 */
		mpdu_q = &TXMON_PPDU_USR(ppdu_info, usr_idx, mpdu_q);
		if (mpdu_q && (qdf_nbuf_queue_len(mpdu_q) > 1)) {
			TXMON_PPDU_COM(ppdu_info,
				       rs_flags) |= IEEE80211_AMPDU_FLAG;
			TXMON_PPDU_USR(ppdu_info, usr_idx, is_ampdu) = 1;
		}

		/* derive rate from mcs/nss/preamble/bw when not populated */
		if (qdf_unlikely(!TXMON_PPDU_COM(ppdu_info, rate))) {
			uint32_t rate = 0;
			uint32_t rix = 0;
			uint16_t ratecode = 0;

			rate = dp_getrateindex(TXMON_PPDU_COM(ppdu_info, sgi),
					       TXMON_PPDU_USR(ppdu_info,
							      usr_idx, mcs),
					       TXMON_PPDU_COM(ppdu_info, nss),
					       TXMON_PPDU_COM(ppdu_info,
							      preamble_type),
					       TXMON_PPDU_COM(ppdu_info, bw),
					       0,
					       &rix, &ratecode);

			/* update rate */
			TXMON_PPDU_COM(ppdu_info, rate) = rate;
		}

		dp_tx_mon_send_per_usr_mpdu(pdev, ppdu_info, usr_idx);
	}
}

/**
 * dp_tx_mon_ppdu_process - Deferred PPDU stats handler
 * @context: Opaque work context (PDEV)
 *
 * Work handler: moves the pending tx_ppdu_info list onto the deferred
 * list under the list lock, then processes and frees each entry.
 *
 * Return: none
 */
void dp_tx_mon_ppdu_process(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_tx_ppdu_info *defer_ppdu_info = NULL;
	struct dp_tx_ppdu_info *defer_ppdu_info_next = NULL;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	/* nothing to do when tx monitor and lite monitor are both off */
	if (qdf_unlikely(TX_MON_BE_DISABLE == tx_mon_be->mode &&
			 !dp_lite_mon_is_tx_enabled(mon_pdev)))
		return;

	/*
	 * splice the pending list onto the deferred list under the lock so
	 * the (unlocked) processing below never races with producers
	 */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_CONCAT(&tx_mon_be->defer_tx_ppdu_info_queue,
		      &tx_mon_be->tx_ppdu_info_queue);
	tx_mon_be->defer_ppdu_info_list_depth +=
		tx_mon_be->tx_ppdu_info_list_depth;
	tx_mon_be->tx_ppdu_info_list_depth = 0;
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	STAILQ_FOREACH_SAFE(defer_ppdu_info,
			    &tx_mon_be->defer_tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, defer_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue,
			      defer_ppdu_info,
			      dp_tx_ppdu_info,
			      tx_ppdu_info_queue_elem);
		tx_mon_be->defer_ppdu_info_list_depth--;

		dp_tx_mon_update_radiotap(pdev, defer_ppdu_info);

		/* free the ppdu_info */
		dp_tx_mon_free_ppdu_info(defer_ppdu_info, tx_mon_be);
		defer_ppdu_info = NULL;
	}
}

/**
 * dp_tx_ppdu_stats_attach_2_0 - Initialize Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Initializes both ppdu_info lists and their depth counters, creates the
 * list lock and sets up the deferred-processing work and workqueue.
 *
 * Return: none
 */
void dp_tx_ppdu_stats_attach_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	STAILQ_INIT(&tx_mon_be->tx_ppdu_info_queue);
	tx_mon_be->tx_ppdu_info_list_depth = 0;

	STAILQ_INIT(&tx_mon_be->defer_tx_ppdu_info_queue);
	tx_mon_be->defer_ppdu_info_list_depth = 0;

	qdf_spinlock_create(&tx_mon_be->tx_mon_list_lock);
	/* Work queue setup for TX MONITOR post handling */
	qdf_create_work(0, &tx_mon_be->post_ppdu_work,
			dp_tx_mon_ppdu_process, pdev);

	tx_mon_be->post_ppdu_workqueue =
			qdf_alloc_unbound_workqueue("tx_mon_ppdu_work_queue");
}

/**
 * dp_tx_ppdu_stats_detach_2_0 - Cleanup Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Disables tx capture, flushes and destroys the post-ppdu workqueue,
 * frees every queued ppdu_info from both lists and destroys the lock.
 *
 * Return: none
 */
void dp_tx_ppdu_stats_detach_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct dp_tx_ppdu_info *tx_ppdu_info = NULL;
	struct dp_tx_ppdu_info *tx_ppdu_info_next = NULL;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	/* TODO: disable tx_monitor, to avoid further packet from HW */
	dp_monitor_config_enh_tx_capture(pdev, TX_MON_BE_DISABLE);

	/* flush workqueue */
	qdf_flush_workqueue(0, tx_mon_be->post_ppdu_workqueue);
	qdf_destroy_workqueue(0, tx_mon_be->post_ppdu_workqueue);

	/*
	 * TODO: iterate both tx_ppdu_info and defer_ppdu_info_list
	 * free the tx_ppdu_info and decrement depth
	 */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->tx_ppdu_info_queue, tx_ppdu_info,
dp_tx_ppdu_info, tx_ppdu_info_queue_elem); 1226 /* decrement list length */ 1227 tx_mon_be->tx_ppdu_info_list_depth--; 1228 /* free tx_ppdu_info */ 1229 dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be); 1230 } 1231 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1232 1233 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1234 STAILQ_FOREACH_SAFE(tx_ppdu_info, 1235 &tx_mon_be->defer_tx_ppdu_info_queue, 1236 tx_ppdu_info_queue_elem, tx_ppdu_info_next) { 1237 /* remove dp_tx_ppdu_info from the list */ 1238 STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue, 1239 tx_ppdu_info, 1240 dp_tx_ppdu_info, tx_ppdu_info_queue_elem); 1241 /* decrement list length */ 1242 tx_mon_be->defer_ppdu_info_list_depth--; 1243 /* free tx_ppdu_info */ 1244 dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be); 1245 } 1246 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1247 1248 qdf_spinlock_destroy(&tx_mon_be->tx_mon_list_lock); 1249 } 1250 #endif /* WLAN_TX_PKT_CAPTURE_ENH_BE */ 1251 1252 #if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE)) 1253 /* 1254 * dp_config_enh_tx_core_monitor_2_0()- API to validate core framework 1255 * @pdev_handle: DP_PDEV handle 1256 * @val: user provided value 1257 * 1258 * Return: QDF_STATUS 1259 */ 1260 QDF_STATUS 1261 dp_config_enh_tx_core_monitor_2_0(struct dp_pdev *pdev, uint8_t val) 1262 { 1263 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; 1264 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1265 struct dp_mon_pdev_be *mon_pdev_be = 1266 dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1267 struct dp_pdev_tx_monitor_be *tx_mon_be = 1268 &mon_pdev_be->tx_monitor_be; 1269 struct dp_soc *soc = pdev->soc; 1270 uint16_t num_of_buffers; 1271 QDF_STATUS status; 1272 1273 soc_cfg_ctx = soc->wlan_cfg_ctx; 1274 switch (val) { 1275 case TX_MON_BE_FRM_WRK_DISABLE: 1276 { 1277 tx_mon_be->mode = val; 1278 mon_pdev_be->tx_mon_mode = 0; 1279 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B; 1280 break; 1281 } 1282 case 
TX_MON_BE_FRM_WRK_FULL_CAPTURE: 1283 { 1284 num_of_buffers = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx); 1285 status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev, 1286 num_of_buffers); 1287 if (status != QDF_STATUS_SUCCESS) { 1288 dp_mon_err("Tx monitor buffer allocation failed"); 1289 return status; 1290 } 1291 tx_mon_be->mode = val; 1292 qdf_mem_zero(&tx_mon_be->stats, 1293 sizeof(struct dp_tx_monitor_drop_stats)); 1294 tx_mon_be->mode = val; 1295 mon_pdev_be->tx_mon_mode = 1; 1296 mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH; 1297 break; 1298 } 1299 case TX_MON_BE_FRM_WRK_128B_CAPTURE: 1300 { 1301 status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev, 1302 DP_MON_RING_FILL_LEVEL_DEFAULT); 1303 if (status != QDF_STATUS_SUCCESS) { 1304 dp_mon_err("Tx monitor buffer allocation failed"); 1305 return status; 1306 } 1307 tx_mon_be->mode = val; 1308 mon_pdev_be->tx_mon_mode = 1; 1309 mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_128B; 1310 break; 1311 } 1312 default: 1313 { 1314 return QDF_STATUS_E_INVAL; 1315 } 1316 } 1317 1318 dp_mon_debug("Tx monitor mode:%d mon_mode_flag:%d config_length:%d", 1319 tx_mon_be->mode, mon_pdev_be->tx_mon_mode, 1320 mon_pdev_be->tx_mon_filter_length); 1321 1322 /* send HTT msg to configure TLV based on mode */ 1323 dp_mon_filter_setup_tx_mon_mode(pdev); 1324 dp_tx_mon_filter_update(pdev); 1325 1326 return QDF_STATUS_SUCCESS; 1327 } 1328 #endif 1329