/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_be_hw_headers.h"
#include "dp_types.h"
#include "hal_be_tx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "hal_be_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"	/* qdf_mem_malloc, qdf_mem_free */
#include "dp_mon.h"
#include <dp_mon_2.0.h>
#include <dp_tx_mon_2.0.h>
#include <dp_be.h>
#include <hal_be_api_mon.h>
#include <dp_mon_filter_2.0.h>
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#define MAX_TX_MONITOR_STUCK 50

#ifdef TXMON_DEBUG
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be: pointer to dp_pdev_tx_monitor_be
 * @work_done: tx monitor work done
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
		       uint32_t work_done)
{
	if (tx_mon_be->mode && !work_done)
		tx_mon_be->stats.tx_mon_stuck++;
	else if (tx_mon_be->mode && work_done)
		tx_mon_be->stats.tx_mon_stuck = 0;

	if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
		dp_mon_warn("Tx monitor block got stuck!");
		tx_mon_be->stats.tx_mon_stuck = 0;
		tx_mon_be->stats.total_tx_mon_stuck++;
	}

	dp_mon_debug_rl("tx_ppdu_info[%u :D %u] STATUS[R %llu: F %llu] PKT_BUF[R %llu: F %llu : P %llu : S %llu]",
			tx_mon_be->tx_ppdu_info_list_depth,
			tx_mon_be->defer_ppdu_info_list_depth,
			tx_mon_be->stats.status_buf_recv,
			tx_mon_be->stats.status_buf_free,
			tx_mon_be->stats.pkt_buf_recv,
			tx_mon_be->stats.pkt_buf_free,
			tx_mon_be->stats.pkt_buf_processed,
			tx_mon_be->stats.pkt_buf_to_stack);
}
#else
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be: pointer to dp_pdev_tx_monitor_be
 * @work_done: tx monitor work done
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
		       uint32_t work_done)
{
	if (tx_mon_be->mode && !work_done)
		tx_mon_be->stats.tx_mon_stuck++;
	else if (tx_mon_be->mode && work_done)
		tx_mon_be->stats.tx_mon_stuck = 0;

	if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
		dp_mon_warn("Tx monitor block got stuck!");
		tx_mon_be->stats.tx_mon_stuck = 0;
		tx_mon_be->stats.total_tx_mon_stuck++;
	}
}
#endif
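/**
 * dp_tx_mon_srng_process_2_0() - process the tx monitor destination ring
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_id: MAC id
 * @quota: maximum number of ring entries to reap in this call
 *
 * Reap status buffers posted by hardware on the tx monitor destination ring,
 * hand them to TLV processing and batch the consumed descriptors for
 * replenish.
 *
 * Return: number of ring entries processed
 */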
static inline uint32_t
dp_tx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
			   uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *tx_mon_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	uint32_t work_done = 0;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
	struct dp_mon_desc_pool *tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;
	struct dp_tx_mon_desc_list mon_desc_list;
	uint32_t replenish_cnt = 0;

	if (!pdev) {
		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_dst_srng = mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng;

	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
		dp_mon_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
			   soc, mon_dst_srng);
		return work_done;
	}

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return work_done;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc && pdev);

	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	mon_desc_list.desc_list = NULL;
	mon_desc_list.tail = NULL;
	mon_desc_list.tx_mon_reap_cnt = 0;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
			   __func__, __LINE__, mon_dst_srng);
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return work_done;
	}

	while (qdf_likely((tx_mon_dst_ring_desc =
			(void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
			  && quota--)) {
		struct hal_mon_desc hal_mon_tx_desc = {0};
		struct dp_mon_desc *mon_desc = NULL;
		qdf_frag_t status_frag = NULL;
		uint32_t end_offset = 0;

		hal_be_get_mon_dest_status(soc->hal_soc,
					   tx_mon_dst_ring_desc,
					   &hal_mon_tx_desc);

		if (hal_mon_tx_desc.empty_descriptor) {
			/* update drop stats counters */
			dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d DROP[PPDU:%d MPDU:%d TLV:%d] E_O_PPDU:%d",
				     hal_mon_tx_desc.ppdu_id,
				     hal_mon_tx_desc.initiator,
				     hal_mon_tx_desc.empty_descriptor,
				     hal_mon_tx_desc.ring_id,
				     hal_mon_tx_desc.looping_count,
				     hal_mon_tx_desc.ppdu_drop_count,
				     hal_mon_tx_desc.mpdu_drop_count,
				     hal_mon_tx_desc.tlv_drop_count,
				     hal_mon_tx_desc.end_of_ppdu_dropped);

			tx_mon_be->stats.ppdu_drop_cnt +=
				hal_mon_tx_desc.ppdu_drop_count;
			tx_mon_be->stats.mpdu_drop_cnt +=
				hal_mon_tx_desc.mpdu_drop_count;
			tx_mon_be->stats.tlv_drop_cnt +=
				hal_mon_tx_desc.tlv_drop_count;
			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
			     hal_mon_tx_desc.ppdu_id,
			     hal_mon_tx_desc.initiator,
			     hal_mon_tx_desc.empty_descriptor,
			     hal_mon_tx_desc.ring_id,
			     hal_mon_tx_desc.looping_count,
			     hal_mon_tx_desc.buf_addr,
			     hal_mon_tx_desc.end_offset,
			     hal_mon_tx_desc.end_reason);

		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_tx_desc.buf_addr);
		qdf_assert_always(mon_desc);

		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
					   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}

		if (mon_desc->magic != DP_MON_DESC_MAGIC) {
			dp_mon_err("Invalid monitor descriptor");
			qdf_assert_always(0);
		}

		end_offset = hal_mon_tx_desc.end_offset;

		status_frag = (qdf_frag_t)(mon_desc->buf_addr);
		mon_desc->buf_addr = NULL;
		/* increment reap count */
		++mon_desc_list.tx_mon_reap_cnt;

		/* add the mon_desc to free list */
		dp_mon_add_to_free_desc_list(&mon_desc_list.desc_list,
					     &mon_desc_list.tail, mon_desc);
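		/*
		 * At this point the status frag has been detached from the
		 * descriptor: the frag (and any packet buffers it references)
		 * is owned by the processing path below, while the descriptor
		 * itself sits on the local free list and can be replenished
		 * independently once the loop ends.
		 */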
		if (qdf_unlikely(!status_frag)) {
			dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
				     hal_mon_tx_desc.ppdu_id,
				     hal_mon_tx_desc.initiator,
				     hal_mon_tx_desc.empty_descriptor,
				     hal_mon_tx_desc.ring_id,
				     hal_mon_tx_desc.looping_count,
				     hal_mon_tx_desc.buf_addr,
				     hal_mon_tx_desc.end_offset,
				     hal_mon_tx_desc.end_reason);

			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		tx_mon_be->stats.status_buf_recv++;

		if ((hal_mon_tx_desc.end_reason == HAL_MON_FLUSH_DETECTED) ||
		    (hal_mon_tx_desc.end_reason == HAL_MON_PPDU_TRUNCATED)) {
			tx_mon_be->be_ppdu_id = hal_mon_tx_desc.ppdu_id;

			dp_tx_mon_update_end_reason(mon_pdev,
						    hal_mon_tx_desc.ppdu_id,
						    hal_mon_tx_desc.end_reason);
			/* check and free packet buffer from status buffer */
			dp_tx_mon_status_free_packet_buf(pdev, status_frag,
							 end_offset,
							 &mon_desc_list);

			tx_mon_be->stats.status_buf_free++;
			qdf_frag_free(status_frag);

			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		dp_tx_process_pktlog_be(soc, pdev,
					status_frag,
					end_offset);

		dp_tx_mon_process_status_tlv(soc, pdev,
					     &hal_mon_tx_desc,
					     status_frag,
					     end_offset,
					     &mon_desc_list);

		work_done++;
		hal_srng_dst_get_next(hal_soc, mon_dst_srng);
	}
	dp_srng_access_end(int_ctx, soc, mon_dst_srng);

	if (mon_desc_list.tx_mon_reap_cnt) {
		dp_mon_buffers_replenish(soc, &mon_soc_be->tx_mon_buf_ring,
					 tx_mon_desc_pool,
					 mon_desc_list.tx_mon_reap_cnt,
					 &mon_desc_list.desc_list,
					 &mon_desc_list.tail,
					 &replenish_cnt);
	}
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
	dp_mon_debug("mac_id: %d, work_done:%d tx_monitor_reap_cnt:%d",
		     mac_id, work_done, mon_desc_list.tx_mon_reap_cnt);

	tx_mon_be->stats.total_tx_mon_reap_cnt += mon_desc_list.tx_mon_reap_cnt;
	tx_mon_be->stats.totat_tx_mon_replenish_cnt += replenish_cnt;
	dp_tx_mon_debug_status(tx_mon_be, work_done);

	return work_done;
}

uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_tx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);

	return work_done;
}

void
dp_tx_mon_print_ring_stat_2_0(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	int lmac_id;

	lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, pdev->pdev_id);
	dp_print_ring_stat_from_hal(soc, &mon_soc_be->tx_mon_buf_ring,
				    TX_MONITOR_BUF);
	dp_print_ring_stat_from_hal(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
				    TX_MONITOR_DST);
}
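/*
 * Descriptor pool lifecycle (a sketch; the exact call sites live in the
 * common monitor soc attach/detach path, not in this file):
 *
 *   dp_tx_mon_buf_desc_pool_alloc(soc);	// reserve pool memory
 *   dp_tx_mon_buf_desc_pool_init(soc);		// carve descriptors, init freelist
 *   dp_tx_mon_buffers_alloc(soc, size);	// map frags, post to TX_MONITOR_BUF
 *   ...
 *   dp_tx_mon_buffers_free(soc);		// unmap and release frags
 *   dp_tx_mon_buf_desc_pool_deinit(soc);	// tear down pool bookkeeping
 *   dp_tx_mon_buf_desc_pool_free(soc);		// release pool memory
 */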
void
dp_tx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	dp_mon_desc_pool_deinit(&mon_soc_be->tx_desc_mon);
}

QDF_STATUS
dp_tx_mon_buf_desc_pool_init(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	uint32_t num_entries;

	num_entries =
		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc->wlan_cfg_ctx);

	return dp_mon_desc_pool_init(&mon_soc_be->tx_desc_mon, num_entries);
}

void dp_tx_mon_buf_desc_pool_free(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (mon_soc_be)
		dp_mon_desc_pool_free(&mon_soc_be->tx_desc_mon);
}

QDF_STATUS
dp_tx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
{
	struct dp_mon_desc_pool *tx_mon_desc_pool;
	int entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);

	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;

	qdf_print("%s:%d tx mon buf desc pool entries: %d",
		  __func__, __LINE__, entries);
	return dp_mon_desc_pool_alloc(entries, tx_mon_desc_pool);
}

void
dp_tx_mon_buffers_free(struct dp_soc *soc)
{
	struct dp_mon_desc_pool *tx_mon_desc_pool;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;

	dp_mon_pool_frag_unmap_and_free(soc, tx_mon_desc_pool);
}

QDF_STATUS
dp_tx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
{
	struct dp_srng *mon_buf_ring;
	struct dp_mon_desc_pool *tx_mon_desc_pool;
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	mon_buf_ring = &mon_soc_be->tx_mon_buf_ring;

	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;

	return dp_mon_buffers_replenish(soc, mon_buf_ring,
					tx_mon_desc_pool,
					size,
					&desc_list, &tail, NULL);
}
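/*
 * Everything below implements enhanced tx packet capture for the beryllium
 * (2.0) target family: PPDU reconstruction from status TLVs and delivery of
 * the resulting MPDUs to the stack. It is compiled only when
 * WLAN_TX_PKT_CAPTURE_ENH_BE is enabled.
 */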
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE

/*
 * dp_tx_mon_nbuf_get_num_frag() - get total number of fragments
 * @nbuf: network buffer instance
 *
 * Return: number of fragments
 */
static inline
uint32_t dp_tx_mon_nbuf_get_num_frag(qdf_nbuf_t nbuf)
{
	uint32_t num_frag = 0;

	if (qdf_unlikely(!nbuf))
		return num_frag;

	num_frag = qdf_nbuf_get_nr_frags_in_fraglist(nbuf);

	return num_frag;
}

/*
 * dp_tx_mon_free_usr_mpduq() - API to free user mpduq
 * @tx_ppdu_info: pointer to tx_ppdu_info
 * @usr_idx: user index
 * @tx_mon_be: pointer to tx capture be
 *
 * Return: void
 */
void dp_tx_mon_free_usr_mpduq(struct dp_tx_ppdu_info *tx_ppdu_info,
			      uint8_t usr_idx,
			      struct dp_pdev_tx_monitor_be *tx_mon_be)
{
	qdf_nbuf_queue_t *mpdu_q;
	uint32_t num_frag = 0;
	qdf_nbuf_t buf = NULL;

	if (qdf_unlikely(!tx_ppdu_info))
		return;

	mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);

	while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) {
		num_frag += dp_tx_mon_nbuf_get_num_frag(buf);
		qdf_nbuf_free(buf);
	}
	tx_mon_be->stats.pkt_buf_free += num_frag;
}

/*
 * dp_tx_mon_free_ppdu_info() - API to free dp_tx_ppdu_info
 * @tx_ppdu_info: pointer to tx_ppdu_info
 * @tx_mon_be: pointer to tx capture be
 *
 * Return: void
 */
void dp_tx_mon_free_ppdu_info(struct dp_tx_ppdu_info *tx_ppdu_info,
			      struct dp_pdev_tx_monitor_be *tx_mon_be)
{
	uint32_t user = 0;

	for (; user < TXMON_PPDU_HAL(tx_ppdu_info, num_users); user++) {
		qdf_nbuf_queue_t *mpdu_q;
		uint32_t num_frag = 0;
		qdf_nbuf_t buf = NULL;

		mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user, mpdu_q);

		while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) {
			num_frag += dp_tx_mon_nbuf_get_num_frag(buf);
			qdf_nbuf_free(buf);
		}
		tx_mon_be->stats.pkt_buf_free += num_frag;
	}

	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0;
	qdf_mem_free(tx_ppdu_info);
}

/*
 * dp_tx_mon_get_ppdu_info() - API to allocate dp_tx_ppdu_info
 * @pdev: pdev handle
 * @type: type of ppdu_info, data or protection
 * @num_user: number of users in a ppdu_info
 * @ppdu_id: ppdu_id number
 *
 * Return: pointer to dp_tx_ppdu_info
 */
struct dp_tx_ppdu_info *dp_tx_mon_get_ppdu_info(struct dp_pdev *pdev,
						enum tx_ppdu_info_type type,
						uint8_t num_user,
						uint32_t ppdu_id)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	struct dp_tx_ppdu_info *tx_ppdu_info;
	size_t sz_ppdu_info = 0;
	uint8_t i;
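	/*
	 * A single allocation carries the dp_tx_ppdu_info followed by one
	 * mon_rx_user_status per user. The trailing per-user region appears
	 * to back the hal_txmon.rx_user_status array indexed later in
	 * dp_tx_mon_send_per_usr_mpdu() (an inference from usage, not a
	 * documented contract).
	 */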
	/* allocate new tx_ppdu_info */
	sz_ppdu_info = (sizeof(struct dp_tx_ppdu_info) +
			(sizeof(struct mon_rx_user_status) * num_user));

	tx_ppdu_info = (struct dp_tx_ppdu_info *)qdf_mem_malloc(sz_ppdu_info);
	if (!tx_ppdu_info) {
		dp_mon_err("allocation of tx_ppdu_info type[%d] failed!",
			   type);
		return NULL;
	}

	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0;
	TXMON_PPDU_HAL(tx_ppdu_info, num_users) = num_user;
	TXMON_PPDU_HAL(tx_ppdu_info, ppdu_id) = ppdu_id;
	TXMON_PPDU(tx_ppdu_info, ppdu_id) = ppdu_id;

	for (i = 0; i < num_user; i++) {
		qdf_nbuf_queue_t *mpdu_q;

		mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, i, mpdu_q);
		qdf_nbuf_queue_init(mpdu_q);
	}

	/* assign tx_ppdu_info to monitor pdev for reference */
	if (type == TX_PROT_PPDU_INFO) {
		qdf_mem_zero(&tx_mon_be->prot_status_info,
			     sizeof(struct hal_tx_status_info));
		tx_mon_be->tx_prot_ppdu_info = tx_ppdu_info;
		TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 0;
	} else {
		qdf_mem_zero(&tx_mon_be->data_status_info,
			     sizeof(struct hal_tx_status_info));
		tx_mon_be->tx_data_ppdu_info = tx_ppdu_info;
		TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 1;
	}

	return tx_ppdu_info;
}

/*
 * dp_print_pdev_tx_monitor_stats_2_0() - print tx capture stats
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void dp_print_pdev_tx_monitor_stats_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	struct dp_tx_monitor_drop_stats stats = {0};

	qdf_mem_copy(&stats, &tx_mon_be->stats,
		     sizeof(struct dp_tx_monitor_drop_stats));

	/* TX monitor stats needed for beryllium */
	DP_PRINT_STATS("\n\tTX Capture BE stats mode[%d]:", tx_mon_be->mode);
	DP_PRINT_STATS("\tbuffer pending : %u", tx_mon_be->last_frag_q_idx);
	DP_PRINT_STATS("\treplenish count: %llu",
		       stats.totat_tx_mon_replenish_cnt);
	DP_PRINT_STATS("\treap count : %llu", stats.total_tx_mon_reap_cnt);
	DP_PRINT_STATS("\tmonitor stuck : %u", stats.total_tx_mon_stuck);
	DP_PRINT_STATS("\tStatus buffer");
	DP_PRINT_STATS("\t\treceived : %llu", stats.status_buf_recv);
	DP_PRINT_STATS("\t\tfree : %llu", stats.status_buf_free);
	DP_PRINT_STATS("\tPacket buffer");
	DP_PRINT_STATS("\t\treceived : %llu", stats.pkt_buf_recv);
	DP_PRINT_STATS("\t\tfree : %llu", stats.pkt_buf_free);
	DP_PRINT_STATS("\t\tprocessed : %llu", stats.pkt_buf_processed);
	DP_PRINT_STATS("\t\tto stack : %llu", stats.pkt_buf_to_stack);
	DP_PRINT_STATS("\tppdu info");
	DP_PRINT_STATS("\t\tthreshold : %llu", stats.ppdu_info_drop_th);
	DP_PRINT_STATS("\t\tflush : %llu", stats.ppdu_info_drop_flush);
	DP_PRINT_STATS("\t\ttruncated : %llu", stats.ppdu_info_drop_trunc);
	DP_PRINT_STATS("\tDrop stats");
	DP_PRINT_STATS("\t\tppdu drop : %llu", stats.ppdu_drop_cnt);
	DP_PRINT_STATS("\t\tmpdu drop : %llu", stats.mpdu_drop_cnt);
	DP_PRINT_STATS("\t\ttlv drop : %llu", stats.tlv_drop_cnt);
}

/*
 * dp_config_enh_tx_monitor_2_0() - API to enable/disable enhanced tx capture
 * @pdev: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_config_enh_tx_monitor_2_0(struct dp_pdev *pdev, uint8_t val)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	struct dp_soc *soc = pdev->soc;
	uint16_t num_of_buffers;
	QDF_STATUS status;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
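	/*
	 * Mode summary (from the cases below):
	 *   TX_MON_BE_DISABLE      - capture off, 64B status DMA length
	 *   TX_MON_BE_FULL_CAPTURE - full capture, default DMA length,
	 *                            stats reset, buffer ring sized from cfg
	 *   TX_MON_BE_PEER_FILTER  - peer-filtered capture, 256B DMA length
	 */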
	switch (val) {
	case TX_MON_BE_DISABLE:
	{
		tx_mon_be->mode = TX_MON_BE_DISABLE;
		mon_pdev_be->tx_mon_mode = 0;
		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B;
		break;
	}
	case TX_MON_BE_FULL_CAPTURE:
	{
		num_of_buffers =
			wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
								   num_of_buffers);
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("Tx monitor buffer allocation failed");
			return status;
		}
		qdf_mem_zero(&tx_mon_be->stats,
			     sizeof(struct dp_tx_monitor_drop_stats));
		tx_mon_be->last_tsft = 0;
		tx_mon_be->last_ppdu_timestamp = 0;
		tx_mon_be->mode = TX_MON_BE_FULL_CAPTURE;
		mon_pdev_be->tx_mon_mode = 1;
		mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH;
		break;
	}
	case TX_MON_BE_PEER_FILTER:
	{
		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
								   DP_MON_RING_FILL_LEVEL_DEFAULT);
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("Tx monitor buffer allocation failed");
			return status;
		}
		tx_mon_be->mode = TX_MON_BE_PEER_FILTER;
		mon_pdev_be->tx_mon_mode = 2;
		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_256B;
		break;
	}
	default:
	{
		return QDF_STATUS_E_INVAL;
	}
	}

	dp_mon_info("Tx monitor mode:%d mon_mode_flag:%d config_length:%d",
		    tx_mon_be->mode, mon_pdev_be->tx_mon_mode,
		    mon_pdev_be->tx_mon_filter_length);

	dp_mon_filter_setup_tx_mon_mode(pdev);
	dp_tx_mon_filter_update(pdev);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_set_tx_capture_enabled_2_0() - add tx monitor peer filter
 * @pdev_handle: Datapath PDEV handle
 * @peer_handle: Datapath PEER handle
 * @is_tx_pkt_cap_enable: flag for tx capture enable/disable
 * @peer_mac: peer mac address
 *
 * Return: status
 */
QDF_STATUS dp_peer_set_tx_capture_enabled_2_0(struct dp_pdev *pdev_handle,
					      struct dp_peer *peer_handle,
					      uint8_t is_tx_pkt_cap_enable,
					      uint8_t *peer_mac)
{
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_LITE_MONITOR
static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info,
				  struct dp_mon_pdev_be *mon_pdev_be)
{
	struct dp_lite_mon_config *config;
	struct dp_vdev *lite_mon_vdev;

	config = &mon_pdev_be->lite_mon_tx_config->tx_config;
	lite_mon_vdev = config->lite_mon_vdev;

	if (lite_mon_vdev)
		tx_cap_info->osif_vdev = lite_mon_vdev->osif_vdev;
}

/**
 * dp_lite_mon_filter_ppdu() - Filter frames at ppdu level
 * @mpdu_count: mpdu count in the nbuf queue
 * @level: Lite monitor filter level
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_lite_mon_filter_ppdu(uint8_t mpdu_count, uint8_t level)
{
	if (level == CDP_LITE_MON_LEVEL_PPDU && mpdu_count > 1)
		return QDF_STATUS_E_CANCELED;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_lite_mon_filter_peer() - filter frames with peer
 * @config: Lite monitor configuration
 * @wh: Pointer to ieee80211_frame
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_lite_mon_filter_peer(struct dp_lite_mon_tx_config *config,
			struct ieee80211_frame_min_one *wh)
{
	struct dp_lite_mon_peer *peer;

	/* Return here if sw peer filtering is not required or if peer count
	 * is zero
	 */
	if (!config->sw_peer_filtering || !config->tx_config.peer_count)
		return QDF_STATUS_SUCCESS;

	TAILQ_FOREACH(peer, &config->tx_config.peer_list, peer_list_elem) {
		if (!qdf_mem_cmp(&peer->peer_mac.raw[0],
				 &wh->i_addr1[0], QDF_MAC_ADDR_SIZE)) {
			return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_ABORTED;
}

/**
 * dp_lite_mon_filter_subtype() - filter frames with subtype
 * @config: Lite monitor configuration
 * @wh: Pointer to ieee80211_frame
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_lite_mon_filter_subtype(struct dp_lite_mon_tx_config *config,
			   struct ieee80211_frame_min_one *wh)
{
	uint16_t mgmt_filter, ctrl_filter, data_filter, type, subtype;
	uint8_t is_mcast = 0;

	/* Return here if subtype filtering is not required */
	if (!config->subtype_filtering)
		return QDF_STATUS_SUCCESS;

	mgmt_filter = config->tx_config.mgmt_filter[DP_MON_FRM_FILTER_MODE_FP];
	ctrl_filter = config->tx_config.ctrl_filter[DP_MON_FRM_FILTER_MODE_FP];
	data_filter = config->tx_config.data_filter[DP_MON_FRM_FILTER_MODE_FP];

	type = (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK);
	subtype = ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
		   IEEE80211_FC0_SUBTYPE_SHIFT);
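	/*
	 * Each filter word is a bitmap indexed by frame subtype: a frame
	 * passes when the bit at its subtype position is set. For example, a
	 * beacon (mgmt subtype 8) passes only if bit 8 of mgmt_filter is set.
	 */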
	switch (type) {
	case IEEE80211_FC0_TYPE_MGT:
		if (mgmt_filter >> subtype & 0x1)
			return QDF_STATUS_SUCCESS;
		else
			return QDF_STATUS_E_ABORTED;
	case IEEE80211_FC0_TYPE_CTL:
		if (ctrl_filter >> subtype & 0x1)
			return QDF_STATUS_SUCCESS;
		else
			return QDF_STATUS_E_ABORTED;
	case IEEE80211_FC0_TYPE_DATA:
		is_mcast = DP_FRAME_IS_MULTICAST(wh->i_addr1);
		if ((is_mcast && (data_filter & FILTER_DATA_MCAST)) ||
		    (!is_mcast && (data_filter & FILTER_DATA_UCAST)))
			return QDF_STATUS_SUCCESS;
		return QDF_STATUS_E_ABORTED;
	default:
		return QDF_STATUS_E_INVAL;
	}
}

/**
 * dp_lite_mon_filter_peer_subtype() - filter frames with subtype and peer
 * @config: Lite monitor configuration
 * @buf: Pointer to nbuf
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_lite_mon_filter_peer_subtype(struct dp_lite_mon_tx_config *config,
				qdf_nbuf_t buf)
{
	struct ieee80211_frame_min_one *wh;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;

	/* Return here if subtype and peer filtering is not required */
	if (!config->subtype_filtering && !config->sw_peer_filtering &&
	    !config->tx_config.peer_count)
		return QDF_STATUS_SUCCESS;
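	/*
	 * The 802.11 header lives either in the first fragment of the nbuf
	 * or, when there are no fragments, in the first buffer of the nbuf's
	 * extension list.
	 */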
	if (dp_tx_mon_nbuf_get_num_frag(buf)) {
		wh = (struct ieee80211_frame_min_one *)
			qdf_nbuf_get_frag_addr(buf, 0);
	} else {
		nbuf = qdf_nbuf_get_ext_list(buf);
		if (nbuf)
			wh = (struct ieee80211_frame_min_one *)
				qdf_nbuf_data(nbuf);
		else
			return QDF_STATUS_E_INVAL;
	}

	ret = dp_lite_mon_filter_subtype(config, wh);
	if (ret)
		return ret;

	ret = dp_lite_mon_filter_peer(config, wh);
	if (ret)
		return ret;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor
 * @pdev: Pointer to physical device
 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure
 * @buf: qdf nbuf structure of buffer
 * @mpdu_count: mpdu count in the nbuf queue
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_tx_lite_mon_filtering(struct dp_pdev *pdev,
			 struct dp_tx_ppdu_info *tx_ppdu_info,
			 qdf_nbuf_t buf, int mpdu_count)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_lite_mon_tx_config *config =
			mon_pdev_be->lite_mon_tx_config;
	QDF_STATUS ret;

	if (!dp_lite_mon_is_tx_enabled(mon_pdev) &&
	    !config->tx_config.peer_count)
		return QDF_STATUS_SUCCESS;

	/* PPDU level filtering */
	ret = dp_lite_mon_filter_ppdu(mpdu_count, config->tx_config.level);
	if (ret)
		return ret;

	/* Subtype and peer filtering */
	ret = dp_lite_mon_filter_peer_subtype(config, buf);
	if (ret)
		return ret;

	return QDF_STATUS_SUCCESS;
}

#else
static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info,
				  struct dp_mon_pdev_be *mon_pdev_be)
{
}

/**
 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor
 * @pdev: Pointer to physical device
 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure
 * @buf: qdf nbuf structure of buffer
 * @mpdu_count: mpdu count in the nbuf queue
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_tx_lite_mon_filtering(struct dp_pdev *pdev,
			 struct dp_tx_ppdu_info *tx_ppdu_info,
			 qdf_nbuf_t buf, int mpdu_count)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_tx_mon_send_to_stack() - API to send to stack
 * @pdev: pdev Handle
 * @mpdu: pointer to mpdu
 * @num_frag: number of frags in mpdu
 * @ppdu_id: ppdu id of the mpdu
 *
 * Return: void
 */
static void
dp_tx_mon_send_to_stack(struct dp_pdev *pdev, qdf_nbuf_t mpdu,
			uint32_t num_frag, uint32_t ppdu_id)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	struct cdp_tx_indication_info tx_capture_info = {0};

	tx_mon_be->stats.pkt_buf_to_stack += num_frag;

	tx_capture_info.radiotap_done = 1;
	tx_capture_info.mpdu_nbuf = mpdu;
	tx_capture_info.mpdu_info.ppdu_id = ppdu_id;
	if (!dp_lite_mon_is_tx_enabled(mon_pdev)) {
		dp_wdi_event_handler(WDI_EVENT_TX_PKT_CAPTURE,
				     pdev->soc,
				     &tx_capture_info,
				     HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	} else {
		dp_fill_lite_mon_vdev(&tx_capture_info, mon_pdev_be);
		dp_wdi_event_handler(WDI_EVENT_LITE_MON_TX,
				     pdev->soc,
				     &tx_capture_info,
				     HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	}
	if (tx_capture_info.mpdu_nbuf)
		qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
}

/**
 * dp_tx_mon_send_per_usr_mpdu() - API to send per usr mpdu to stack
 * @pdev: pdev Handle
 * @ppdu_info: pointer to dp_tx_ppdu_info
 * @user_idx: current user index
 *
 * Return: void
 */
static void
dp_tx_mon_send_per_usr_mpdu(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *ppdu_info,
			    uint8_t user_idx)
{
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
	qdf_nbuf_t buf = NULL;
	uint8_t mpdu_count = 0;

	usr_mpdu_q = &TXMON_PPDU_USR(ppdu_info, user_idx, mpdu_q);

	while ((buf = qdf_nbuf_queue_remove(usr_mpdu_q)) != NULL) {
		uint32_t num_frag = dp_tx_mon_nbuf_get_num_frag(buf);

		ppdu_info->hal_txmon.rx_status.rx_user_status =
				&ppdu_info->hal_txmon.rx_user_status[user_idx];

		if (dp_tx_lite_mon_filtering(pdev, ppdu_info, buf,
					     ++mpdu_count)) {
			qdf_nbuf_free(buf);
			continue;
		}

		qdf_nbuf_update_radiotap(&ppdu_info->hal_txmon.rx_status,
					 buf, qdf_nbuf_headroom(buf));

		dp_tx_mon_send_to_stack(pdev, buf, num_frag,
					TXMON_PPDU(ppdu_info, ppdu_id));
	}
}

#define PHY_MEDIUM_MHZ	960
#define PHY_TIMESTAMP_WRAP (0xFFFFFFFF / PHY_MEDIUM_MHZ)

/**
 * dp_populate_tsft_from_phy_timestamp() - API to get tsft from phy timestamp
 * @pdev: pdev Handle
 * @ppdu_info: ppdu_info Handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_populate_tsft_from_phy_timestamp(struct dp_pdev *pdev,
				    struct dp_tx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	uint64_t tsft = 0;
	uint32_t ppdu_timestamp = 0;

	tsft = TXMON_PPDU_COM(ppdu_info, tsft);
	ppdu_timestamp = TXMON_PPDU_COM(ppdu_info, ppdu_timestamp);

	if (tsft && ppdu_timestamp) {
		/* update tsft and ppdu timestamp */
		tx_mon_be->last_tsft = tsft;
		tx_mon_be->last_ppdu_timestamp = ppdu_timestamp;
	} else if (!tx_mon_be->last_ppdu_timestamp || !tx_mon_be->last_tsft) {
		return QDF_STATUS_E_EMPTY;
	}
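	/*
	 * The PHY timestamp is a 32-bit tick counter clocked at
	 * PHY_MEDIUM_MHZ (960 MHz): dividing by 960 converts ticks to
	 * microseconds, and the counter wraps every
	 * 0xFFFFFFFF / 960 ~= 4,473,924 us (about 4.5 s). When only the PPDU
	 * timestamp is available (a response frame), the TSFT is
	 * extrapolated from the last known tsft/timestamp pair, with the
	 * wrap-around handled explicitly below.
	 */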
	if (!tsft && ppdu_timestamp) {
		/* response window */
		uint32_t cur_usec = ppdu_timestamp / PHY_MEDIUM_MHZ;
		uint32_t last_usec = (tx_mon_be->last_ppdu_timestamp /
				      PHY_MEDIUM_MHZ);
		uint32_t diff = 0;

		if (last_usec < cur_usec) {
			diff = cur_usec - last_usec;
			tsft = tx_mon_be->last_tsft + diff;
		} else {
			diff = (PHY_TIMESTAMP_WRAP - last_usec) + cur_usec;
			tsft = tx_mon_be->last_tsft + diff;
		}
		TXMON_PPDU_COM(ppdu_info, tsft) = tsft;
		/* update tsft and ppdu timestamp */
		tx_mon_be->last_tsft = tsft;
		tx_mon_be->last_ppdu_timestamp = ppdu_timestamp;
	}

	if (!TXMON_PPDU_COM(ppdu_info, tsft) &&
	    !TXMON_PPDU_COM(ppdu_info, ppdu_timestamp))
		return QDF_STATUS_E_EMPTY;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_mon_update_channel_freq() - API to update channel frequency and number
 * @pdev: pdev Handle
 * @soc: soc Handle
 * @freq: Frequency
 *
 * Return: void
 */
static inline void
dp_tx_mon_update_channel_freq(struct dp_pdev *pdev, struct dp_soc *soc,
			      uint16_t freq)
{
	if (soc && soc->cdp_soc.ol_ops->freq_to_channel) {
		uint8_t c_num;

		c_num = soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
							     pdev->pdev_id,
							     freq);
		pdev->operating_channel.num = c_num;
	}

	if (soc && soc->cdp_soc.ol_ops->freq_to_band) {
		uint8_t band;

		band = soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
							 pdev->pdev_id,
							 freq);
		pdev->operating_channel.band = band;
	}
}

/**
 * dp_tx_mon_update_radiotap() - API to update radiotap information
 * @pdev: pdev Handle
 * @ppdu_info: pointer to dp_tx_ppdu_info
 *
 * Return: void
 */
static void
dp_tx_mon_update_radiotap(struct dp_pdev *pdev,
			  struct dp_tx_ppdu_info *ppdu_info)
{
	uint32_t usr_idx = 0;
	uint32_t num_users = 0;

	num_users = TXMON_PPDU_HAL(ppdu_info, num_users);

	if (qdf_unlikely(TXMON_PPDU_COM(ppdu_info, chan_freq) == 0 &&
			 TXMON_PPDU_COM(ppdu_info, chan_num) == 0)) {
		TXMON_PPDU_COM(ppdu_info, chan_freq) =
				pdev->operating_channel.freq;
		TXMON_PPDU_COM(ppdu_info, chan_num) =
				pdev->operating_channel.num;
	} else if (TXMON_PPDU_COM(ppdu_info, chan_freq) != 0 &&
		   TXMON_PPDU_COM(ppdu_info, chan_num) == 0) {
		uint16_t freq = TXMON_PPDU_COM(ppdu_info, chan_freq);

		if (qdf_unlikely(pdev->operating_channel.freq != freq)) {
			dp_tx_mon_update_channel_freq(pdev, pdev->soc, freq);
			pdev->operating_channel.freq = freq;
		}
		TXMON_PPDU_COM(ppdu_info,
			       chan_num) = pdev->operating_channel.num;
	}

	if (QDF_STATUS_SUCCESS !=
	    dp_populate_tsft_from_phy_timestamp(pdev, ppdu_info))
		return;

	for (usr_idx = 0; usr_idx < num_users; usr_idx++) {
		qdf_nbuf_queue_t *mpdu_q = NULL;

		/* set AMPDU flag if the number of mpdus is more than 1 */
		mpdu_q = &TXMON_PPDU_USR(ppdu_info, usr_idx, mpdu_q);
		if (mpdu_q && (qdf_nbuf_queue_len(mpdu_q) > 1)) {
			TXMON_PPDU_COM(ppdu_info,
				       rs_flags) |= IEEE80211_AMPDU_FLAG;
			TXMON_PPDU_USR(ppdu_info, usr_idx, is_ampdu) = 1;
		}

		if (qdf_unlikely(!TXMON_PPDU_COM(ppdu_info, rate))) {
			uint32_t rate = 0;
			uint32_t rix = 0;
			uint16_t ratecode = 0;

			rate = dp_getrateindex(TXMON_PPDU_COM(ppdu_info, sgi),
					       TXMON_PPDU_USR(ppdu_info,
							      usr_idx, mcs),
					       TXMON_PPDU_COM(ppdu_info, nss),
					       TXMON_PPDU_COM(ppdu_info,
							      preamble_type),
					       TXMON_PPDU_COM(ppdu_info, bw),
					       0,
					       &rix, &ratecode);

			/* update rate */
			TXMON_PPDU_COM(ppdu_info, rate) = rate;
		}

		dp_tx_mon_send_per_usr_mpdu(pdev, ppdu_info, usr_idx);
	}
}

/**
 * dp_tx_mon_ppdu_process() - Deferred PPDU stats handler
 * @context: Opaque work context (PDEV)
 *
 * Return: none
 */
static void dp_tx_mon_ppdu_process(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_tx_ppdu_info *defer_ppdu_info = NULL;
	struct dp_tx_ppdu_info *defer_ppdu_info_next = NULL;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	if (qdf_unlikely(TX_MON_BE_DISABLE == tx_mon_be->mode &&
			 !dp_lite_mon_is_tx_enabled(mon_pdev)))
		return;
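	/*
	 * Two-queue handoff: the ring-processing path queues completed PPDUs
	 * on tx_ppdu_info_queue; here the whole list is spliced onto the
	 * deferred queue under the list lock, so the (potentially slow)
	 * radiotap update and delivery below run without holding the lock.
	 */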
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_CONCAT(&tx_mon_be->defer_tx_ppdu_info_queue,
		      &tx_mon_be->tx_ppdu_info_queue);
	tx_mon_be->defer_ppdu_info_list_depth +=
		tx_mon_be->tx_ppdu_info_list_depth;
	tx_mon_be->tx_ppdu_info_list_depth = 0;
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	STAILQ_FOREACH_SAFE(defer_ppdu_info,
			    &tx_mon_be->defer_tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, defer_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue,
			      defer_ppdu_info,
			      dp_tx_ppdu_info,
			      tx_ppdu_info_queue_elem);
		tx_mon_be->defer_ppdu_info_list_depth--;

		dp_tx_mon_update_radiotap(pdev, defer_ppdu_info);

		/* free the ppdu_info */
		dp_tx_mon_free_ppdu_info(defer_ppdu_info, tx_mon_be);
		defer_ppdu_info = NULL;
	}
}

void dp_tx_ppdu_stats_attach_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	STAILQ_INIT(&tx_mon_be->tx_ppdu_info_queue);
	tx_mon_be->tx_ppdu_info_list_depth = 0;

	STAILQ_INIT(&tx_mon_be->defer_tx_ppdu_info_queue);
	tx_mon_be->defer_ppdu_info_list_depth = 0;

	qdf_spinlock_create(&tx_mon_be->tx_mon_list_lock);
	/* Work queue setup for TX MONITOR post handling */
	qdf_create_work(0, &tx_mon_be->post_ppdu_work,
			dp_tx_mon_ppdu_process, pdev);

	tx_mon_be->post_ppdu_workqueue =
			qdf_alloc_unbound_workqueue("tx_mon_ppdu_work_queue");
}

void dp_tx_ppdu_stats_detach_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct dp_tx_ppdu_info *tx_ppdu_info = NULL;
	struct dp_tx_ppdu_info *tx_ppdu_info_next = NULL;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	/* disable tx monitor so that HW stops posting further packets */
	dp_monitor_config_enh_tx_capture(pdev, TX_MON_BE_DISABLE);
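	/*
	 * Flush and destroy the workqueue before draining the lists so that
	 * no deferred worker can run concurrently with the teardown below.
	 */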
	qdf_flush_workqueue(0, tx_mon_be->post_ppdu_workqueue);
	qdf_destroy_workqueue(0, tx_mon_be->post_ppdu_workqueue);

	/*
	 * Drain both the pending and the deferred ppdu_info lists: free each
	 * tx_ppdu_info and decrement the corresponding list depth.
	 */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->tx_ppdu_info_queue, tx_ppdu_info,
			      dp_tx_ppdu_info, tx_ppdu_info_queue_elem);
		/* decrement list length */
		tx_mon_be->tx_ppdu_info_list_depth--;
		/* free tx_ppdu_info */
		dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be);
	}
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->defer_tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue,
			      tx_ppdu_info,
			      dp_tx_ppdu_info, tx_ppdu_info_queue_elem);
		/* decrement list length */
		tx_mon_be->defer_ppdu_info_list_depth--;
		/* free tx_ppdu_info */
		dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be);
	}
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	qdf_spinlock_destroy(&tx_mon_be->tx_mon_list_lock);
}
#endif /* WLAN_TX_PKT_CAPTURE_ENH_BE */

#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
/*
 * dp_config_enh_tx_core_monitor_2_0() - API to validate core framework
 * @pdev: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_config_enh_tx_core_monitor_2_0(struct dp_pdev *pdev, uint8_t val)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	struct dp_soc *soc = pdev->soc;
	uint16_t num_of_buffers;
	QDF_STATUS status;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
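	/*
	 * Core-framework validation modes (from the cases below):
	 *   TX_MON_BE_FRM_WRK_DISABLE      - capture off, 64B DMA length
	 *   TX_MON_BE_FRM_WRK_FULL_CAPTURE - full capture, default DMA length
	 *   TX_MON_BE_FRM_WRK_128B_CAPTURE - capture with 128B DMA length
	 */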
	switch (val) {
	case TX_MON_BE_FRM_WRK_DISABLE:
	{
		tx_mon_be->mode = val;
		mon_pdev_be->tx_mon_mode = 0;
		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B;
		break;
	}
	case TX_MON_BE_FRM_WRK_FULL_CAPTURE:
	{
		num_of_buffers =
			wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
								   num_of_buffers);
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("Tx monitor buffer allocation failed");
			return status;
		}
		qdf_mem_zero(&tx_mon_be->stats,
			     sizeof(struct dp_tx_monitor_drop_stats));
		tx_mon_be->mode = val;
		mon_pdev_be->tx_mon_mode = 1;
		mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH;
		break;
	}
	case TX_MON_BE_FRM_WRK_128B_CAPTURE:
	{
		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
								   DP_MON_RING_FILL_LEVEL_DEFAULT);
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("Tx monitor buffer allocation failed");
			return status;
		}
		tx_mon_be->mode = val;
		mon_pdev_be->tx_mon_mode = 1;
		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_128B;
		break;
	}
	default:
	{
		return QDF_STATUS_E_INVAL;
	}
	}

	dp_mon_debug("Tx monitor mode:%d mon_mode_flag:%d config_length:%d",
		     tx_mon_be->mode, mon_pdev_be->tx_mon_mode,
		     mon_pdev_be->tx_mon_filter_length);

	/* send HTT msg to configure TLV based on mode */
	dp_mon_filter_setup_tx_mon_mode(pdev);
	dp_tx_mon_filter_update(pdev);

	return QDF_STATUS_SUCCESS;
}
#endif