/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include <dp_be.h>
#include <qdf_nbuf_frag.h>
#include <hal_be_api_mon.h>
#include <dp_mon.h>
#include <dp_tx_mon_2.0.h>
#include <dp_mon_2.0.h>
#include <dp_lite_mon.h>

#define MAX_PPDU_INFO_LIST_DEPTH 64

/**
 * dp_tx_mon_status_free_packet_buf() - parse one tx status buffer, free every
 * packet buffer referenced from it and queue the matching mon descriptors on
 * the caller's free list
 * @pdev: pdev Handle
 * @status_frag: status buffer fragment to parse
 * @end_offset: offset bound within @status_frag up to which TLVs are parsed
 * @mon_desc_list_ref: caller-owned list collecting the reclaimed descriptors
 *
 * Return: void
 */
void
dp_tx_mon_status_free_packet_buf(struct dp_pdev *pdev,
				 qdf_frag_t status_frag, uint32_t end_offset,
				 struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_mon_packet_info packet_info = {0};
	uint8_t *tx_tlv;
	uint8_t *mon_buf_tx_tlv;
	uint8_t *tx_tlv_start;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_tlv = status_frag;
	tx_tlv_start = tx_tlv;
	/*
	 * parse each status buffer and find packet buffer in it
	 */
	do {
		if (hal_txmon_is_mon_buf_addr_tlv(pdev->soc->hal_soc, tx_tlv)) {
			struct dp_mon_desc *mon_desc = NULL;
			qdf_frag_t packet_buffer = NULL;

			/* packet info follows the 64-bit TLV header */
			mon_buf_tx_tlv = ((uint8_t *)tx_tlv +
					  HAL_RX_TLV64_HDR_SIZE);
			hal_txmon_populate_packet_info(pdev->soc->hal_soc,
						       mon_buf_tx_tlv,
						       &packet_info);

			/* sw_cookie carries the mon descriptor pointer */
			mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info.sw_cookie;

			qdf_assert_always(mon_desc);

			/* magic mismatch means a corrupted/stale descriptor */
			if (mon_desc->magic != DP_MON_DESC_MAGIC)
				qdf_assert_always(0);

			/* unmap the DMA buffer exactly once */
			if (!mon_desc->unmapped) {
				qdf_mem_unmap_page(pdev->soc->osdev,
						   (qdf_dma_addr_t)mon_desc->paddr,
						   DP_MON_DATA_BUFFER_SIZE,
						   QDF_DMA_FROM_DEVICE);
				mon_desc->unmapped = 1;
			}

			packet_buffer = (qdf_frag_t)(mon_desc->buf_addr);
			mon_desc->buf_addr = NULL;

			qdf_assert_always(packet_buffer);
			/* increment reap count */
			mon_desc_list_ref->tx_mon_reap_cnt++;

			/* add the mon_desc to free list */
			dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
						     &mon_desc_list_ref->tail,
						     mon_desc);

			tx_mon_be->stats.pkt_buf_recv++;
			tx_mon_be->stats.pkt_buf_free++;

			/* free buffer, mapped to descriptor */
			qdf_frag_free(packet_buffer);
		}

		/* need api definition for hal_tx_status_get_next_tlv */
		tx_tlv = hal_tx_status_get_next_tlv(tx_tlv);
		/*
		 * NOTE(review): signed ptrdiff compared against unsigned
		 * end_offset - confirm end_offset is always within the
		 * status buffer so the loop terminates.
		 */
	} while ((tx_tlv - tx_tlv_start) < end_offset);
}

#if defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(QCA_MONITOR_2_0_SUPPORT)
/**
 * dp_tx_mon_status_queue_free() - API to free status buffer
 * @pdev: pdev Handle
 * @tx_mon_be: pointer to tx_monitor_be
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Return: void
 */
static void
dp_tx_mon_status_queue_free(struct dp_pdev *pdev,
			    struct dp_pdev_tx_monitor_be *tx_mon_be,
			    struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	uint8_t last_frag_q_idx = tx_mon_be->last_frag_q_idx;
	qdf_frag_t status_frag = NULL;
	uint8_t i = tx_mon_be->cur_frag_q_idx;
	uint32_t end_offset = 0;

	/* drain every queued status fragment between cur and last index */
	for (; i < last_frag_q_idx; i++) {
		status_frag = tx_mon_be->frag_q_vec[i].frag_buf;

		if (qdf_unlikely(!status_frag))
			continue;

		end_offset = tx_mon_be->frag_q_vec[i].end_offset;
		/* release packet buffers referenced from this status buffer */
		dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
						 mon_desc_list_ref);
		tx_mon_be->stats.status_buf_free++;
		qdf_frag_free(status_frag);
		tx_mon_be->frag_q_vec[i].frag_buf = NULL;
		tx_mon_be->frag_q_vec[i].end_offset = 0;
	}
	/* queue fully drained - reset both indices */
	tx_mon_be->last_frag_q_idx = 0;
	tx_mon_be->cur_frag_q_idx = 0;
}

/**
 * dp_tx_mon_enqueue_mpdu_nbuf() - API to enqueue nbuf from per user mpdu queue
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @user_id: user index
 * @mpdu_nbuf: nbuf to be enqueue
 *
 * Takes ownership of @mpdu_nbuf: it is chained below a freshly allocated
 * radiotap header skb on success, or freed on allocation failure.
 *
 * Return: void
 */
static void
dp_tx_mon_enqueue_mpdu_nbuf(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info,
			    uint8_t user_id, qdf_nbuf_t mpdu_nbuf)
{
	qdf_nbuf_t radiotap = NULL;
	/* enqueue mpdu_nbuf to the per user mpdu_q */
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;

	/*
	 * NOTE(review): on builds where QDF_BUG() does not panic, execution
	 * continues past this check with invalid ppdu state - confirm.
	 */
	if (!TXMON_PPDU_HAL(tx_ppdu_info, rx_user_status) ||
	    !TXMON_PPDU_HAL(tx_ppdu_info, num_users))
		QDF_BUG(0);

	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user_id, mpdu_q);

	/* dedicated skb for the radiotap header; mpdu is chained below it */
	radiotap = qdf_nbuf_alloc(pdev->soc->osdev, MAX_MONITOR_HEADER,
				  MAX_MONITOR_HEADER,
				  4, FALSE);
	if (qdf_unlikely(!radiotap)) {
		qdf_err("Unable to allocate radiotap buffer\n");
		qdf_nbuf_free(mpdu_nbuf);
		return;
	}

	/* append ext list */
	qdf_nbuf_append_ext_list(radiotap, mpdu_nbuf, qdf_nbuf_len(mpdu_nbuf));
	qdf_nbuf_queue_add(usr_mpdu_q, radiotap);
}

/*
 * TX MONITOR
 *
 * frame format
 * -------------------------------------------------------------------------
 * FUNC    | ToDS | FromDS |
 ADDRESS 1 | ADDRESS 2 | ADDRESS 3 | ADDRESS 4 |
 * ------------------------------------------------------------------------
 * IBSS    |  0   |   0    |    DA     |    SA     |   BSSID   | NOT USED  |
 * TO AP   |  1   |   0    |   BSSID   |    SA     |    DA     | NOT USED  |
 * From AP |  0   |   1    |    DA     |   BSSID   |    SA     | NOT USED  |
 * WDS     |  1   |   1    |    RA     |    TA     |    DA     |    SA     |
 * ------------------------------------------------------------------------
 *
 * HOST GENERATED FRAME:
 * =====================
 * 1. RTS
 * 2. CTS
 * 3. ACK
 * 4. BA
 * 5. Multi STA BA
 *
 * control frame
 * ------------------------------------------------------------
 * | protocol 2b | Type 2b | subtype 4b | ToDS 1b | FromDS 1b |
 * | Morefrag 1b | Retry 1b | pwr_mgmt 1b | More data 1b |
 * | protected frm 1b | order 1b |
 * -----------------------------------------------------------
 * control frame originated from wireless station so ToDS = FromDS = 0,
 *
 * RTS
 * ---------------------------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | Transmit address 6 | FCS |
 * ---------------------------------------------------------------------------
 * subtype in FC is RTS - 1101
 * type in FC is control frame - 10
 *
 * CTS
 * --------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
 * --------------------------------------------------------
 * subtype in FC is CTS - 0011
 * type in FC is control frame - 10
 *
 * ACK
 * --------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
 * --------------------------------------------------------
 * subtype in FC is ACK - 1011
 * type in FC is control frame - 10
 *
 * Block ACK
 * --------------------------------------------------------------------------
 * | FC 2 | Dur 2 | RA 6 | TA 6 | BA CTRL 2 | BA Information variable | FCS |
 * --------------------------------------------------------------------------
 *
 * Block Ack control
 * ---------------------------------------------------------------
 * | BA ACK POLICY B0 | BA TYPE B1-B4 | Rsv B5-B11 | TID B12-B15 |
 * ---------------------------------------------------------------
 *
 * BA ack policy
 * 0 - Normal Ack
 * 1 - No Ack
 *
 * Block Ack Type
 * 0 - Reserved
 * 1 - extended compressed
 * 2 - compressed
 * 3 - Multi TID
 * 4-5 - Reserved
 * 6 - GCR
 * 7-9 - Reserved
 * 10 - GLK-GCR
 * 11 - Multi-STA
 * 12-15 - Reserved
 *
 * Block Ack information
 * ----------------------------------------------------------
 * | Block ack start seq ctrl 2 | Block ack bitmap variable |
 * ----------------------------------------------------------
 *
 * Multi STA Block Ack Information
 * -----------------------------------------------------------------
 * | Per STA TID info 2 | BA start seq ctrl 2 | BA bitmap variable |
 * -----------------------------------------------------------------
 *
 * Per STA TID info
 * ------------------------------------
 * | AID11 11b | Ack Type 1b | TID 4b |
 * ------------------------------------
 * AID11 - 2045 means unassociated STA, then ACK Type and TID 0, 15
 *
 * Mgmt/PS-POLL frame ack
 * Ack type - 1 and TID - 15, BA_seq_ctrl & BA_bitmap - not present
 *
 * All ack context - with no bitmap (all AMPDU success)
 * Ack type - 1 and TID - 14, BA_seq_ctrl & BA_bitmap - not present
 *
 * Block ack context
 * Ack type - 0 and TID - 0~7 BA_seq_ctrl & BA_bitmap - present
 *
 * Ack context
 * Ack type - 1 and TID - 0~7 BA_seq_ctrl & BA_bitmap - not present
 *
 *
 */

/**
 * dp_tx_mon_generate_cts2self_frm() - API to generate cts2self frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame
generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_cts2self_frm(struct dp_pdev *pdev,
				struct dp_tx_ppdu_info *tx_ppdu_info,
				uint8_t window_flag)
{
	/* allocate and populate CTS/ CTS2SELF frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	uint16_t duration_le = 0;
	struct ieee80211_frame_min_one *wh_min = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	/* initiator window uses protection status, else data status */
	if (window_flag == INITIATOR_WINDOW)
		tx_status_info = &tx_mon_be->prot_status_info;
	else
		tx_status_info = &tx_mon_be->data_status_info;

	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_min = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_CTS);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_min->i_fc[1] = 0;
	wh_min->i_fc[0] = frm_ctl;

	/* duration stored little-endian, byte-split into the octet array */
	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_min->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_min->i_dur[0] = (duration_le & 0xFF);

	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_rts_frm() - API to generate rts frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_rts_frm(struct dp_pdev *pdev,
			   struct dp_tx_ppdu_info *tx_ppdu_info,
			   uint8_t window_flag)
{
	/* allocate and populate RTS frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	uint16_t duration_le = 0;
	struct ieee80211_ctlframe_addr2 *wh_min = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->prot_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_min = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_RTS);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_min->i_fc[1] = 0;
	wh_min->i_fc[0] = frm_ctl;

	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_min->i_aidordur[1] = (duration_le & 0xFF00) >> 8;
	wh_min->i_aidordur[0] = (duration_le & 0xFF);

	/* fall back to data status when protection address is not populated */
	if (!tx_status_info->protection_addr)
		tx_status_info = &tx_mon_be->data_status_info;

	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_min->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	} else {
		/* response window: swap RA/TA */
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_min->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	}

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_ack_frm() - API to generate ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_ack_frm(struct dp_pdev *pdev,
			   struct dp_tx_ppdu_info *tx_ppdu_info,
			   uint8_t window_flag)
{
	/* allocate and populate ACK frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_frame_min_one *wh_addr1 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr1 = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_ACK);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr1->i_fc[1] = 0;
	wh_addr1->i_fc[0] = frm_ctl;

	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_addr1->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_addr1->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	/* set duration zero for ack frame */
	*(u_int16_t *)(&wh_addr1->i_dur) = qdf_cpu_to_le16(0x0000);

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr1));

	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, user_id, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_3addr_qos_null_frm() - API to generate
 * 3 address qosnull frame
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_3addr_qos_null_frm(struct dp_pdev *pdev,
				      struct dp_tx_ppdu_info *tx_ppdu_info)
{
	/* allocate and populate 3 address qos null frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info
 *tx_status_info;
	struct ieee80211_qosframe *wh_addr3 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint16_t duration_le = 0;
	uint8_t num_users = 0;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr3 = (struct ieee80211_qosframe *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr3, sizeof(struct ieee80211_qosframe));

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
		   IEEE80211_FC0_SUBTYPE_QOS_NULL);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr3->i_fc[1] = 0;
	wh_addr3->i_fc[0] = frm_ctl;

	/* duration stored little-endian, byte-split into the octet array */
	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_addr3->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_addr3->i_dur[0] = (duration_le & 0xFF);

	qdf_mem_copy(wh_addr3->i_addr1,
		     TXMON_STATUS_INFO(tx_status_info, addr1),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr3->i_addr2,
		     TXMON_STATUS_INFO(tx_status_info, addr2),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr3->i_addr3,
		     TXMON_STATUS_INFO(tx_status_info, addr3),
		     QDF_MAC_ADDR_SIZE);

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr3));
	/* num_users is 0 here: enqueue to the first user queue */
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_4addr_qos_null_frm() - API to generate
 * 4 address qos null frame
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_4addr_qos_null_frm(struct dp_pdev *pdev,
				      struct dp_tx_ppdu_info *tx_ppdu_info)
{
	/* allocate and populate 4 address qos null frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_qosframe_addr4 *wh_addr4 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint16_t duration_le = 0;
	uint8_t num_users = 0;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr4 = (struct ieee80211_qosframe_addr4 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr4, sizeof(struct ieee80211_qosframe_addr4));

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
		   IEEE80211_FC0_SUBTYPE_QOS_NULL);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr4->i_fc[1] = 0;
	wh_addr4->i_fc[0] = frm_ctl;

	/* duration stored little-endian, byte-split into the octet array */
	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_addr4->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_addr4->i_dur[0] = (duration_le & 0xFF);

	qdf_mem_copy(wh_addr4->i_addr1,
		     TXMON_STATUS_INFO(tx_status_info, addr1),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr4->i_addr2,
		     TXMON_STATUS_INFO(tx_status_info, addr2),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr4->i_addr3,
		     TXMON_STATUS_INFO(tx_status_info, addr3),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr4->i_addr4,
		     TXMON_STATUS_INFO(tx_status_info, addr4),
		     QDF_MAC_ADDR_SIZE);

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr4));
	/* num_users is 0 here: enqueue to the first user queue */
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

#define TXMON_BA_CTRL_SZ 2
#define TXMON_BA_INFO_SZ(bitmap_sz) ((4 * (bitmap_sz)) + 6)
#define TXMON_MU_BA_ACK_FRAME_SZ(bitmap_sz) \
	(sizeof(struct ieee80211_ctlframe_addr2) +\
	 TXMON_BA_CTRL_SZ + (bitmap_sz))

#define TXMON_BA_ACK_FRAME_SZ(bitmap_sz) \
	(sizeof(struct ieee80211_ctlframe_addr2) +\
	 TXMON_BA_CTRL_SZ + TXMON_BA_INFO_SZ(bitmap_sz))

/**
 * dp_tx_mon_generate_mu_block_ack_frm() - API to generate MU block ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_mu_block_ack_frm(struct dp_pdev *pdev,
				    struct dp_tx_ppdu_info *tx_ppdu_info,
				    uint8_t window_flag)
{
	/* allocate and populate MU block ack frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint16_t ba_control = 0;
	uint8_t *frm = NULL;
	uint32_t ba_sz = 0;
	uint8_t num_users = TXMON_PPDU_HAL(tx_ppdu_info, num_users);
	uint8_t i = 0;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be =
dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * NOTE(review): this accumulates 4 << (4 * sz + 6) bytes per user,
	 * while the fill loop below writes only 2 + 2 + (4 << sz) bytes per
	 * user - the allocation looks heavily over-sized; confirm the
	 * intended per-user size formula.
	 */
	for (i = 0; i < num_users; i++)
		ba_sz += (4 << TXMON_BA_INFO_SZ(TXMON_PPDU_USR(tx_ppdu_info,
							       i,
							       ba_bitmap_sz)));

	/*
	 * for multi sta block ack, do we need to increase the size
	 * or copy info on subsequent frame offset
	 *
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   TXMON_MU_BA_ACK_FRAME_SZ(ba_sz), 0, 4,
				   FALSE);
	if (!mpdu_nbuf) {
		/* TODO: update status and break */
		return;
	}

	wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_BLOCK_ACK);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr2->i_fc[1] = 0;
	wh_addr2->i_fc[0] = frm_ctl;

	/* zero duration for the generated multi-STA BA */
	*(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0000);

	if (window_flag == RESPONSE_WINDOW) {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
		/* multi-user response is addressed to broadcast */
		if (num_users > 1)
			qdf_mem_set(wh_addr2->i_addr1, QDF_MAC_ADDR_SIZE, 0xFF);
		else
			qdf_mem_copy(wh_addr2->i_addr1,
				     TXMON_STATUS_INFO(tx_status_info, addr1),
				     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_addr2->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	frm = (uint8_t *)&wh_addr2[1];

	/* BA control */
	ba_control = 0x0016;
	*((uint16_t *)frm) = qdf_cpu_to_le16(ba_control);
	frm += 2;

	for (i = 0; i < num_users; i++) {
		/* per STA TID info: TID in high bits, AID (11 bits) in low */
		*((uint16_t *)frm) =
		qdf_cpu_to_le16((TXMON_PPDU_USR(tx_ppdu_info, i, tid) <<
				 DP_IEEE80211_BAR_CTL_TID_S) |
				(TXMON_PPDU_USR(tx_ppdu_info, i,
						aid) & 0x7FF));
		frm += 2;
		/* BA starting sequence control */
		*((uint16_t *)frm) = qdf_cpu_to_le16(
				TXMON_PPDU_USR(tx_ppdu_info, i, start_seq));
		frm += 2;
		/* BA bitmap: 4 << ba_bitmap_sz bytes */
		qdf_mem_copy(frm,
			     TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap),
			     4 <<
			     TXMON_PPDU_USR(tx_ppdu_info,
					    i, ba_bitmap_sz));
		frm += 4 << TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap_sz);
	}

	qdf_nbuf_set_pktlen(mpdu_nbuf,
			    (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));

	/* always enqueue to first active user */
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
	/* HE MU fields not required for Multi Sta Block ack frame */
	TXMON_PPDU_COM(tx_ppdu_info, he_mu_flags) = 0;
}

/**
 * dp_tx_mon_generate_block_ack_frm() - API to generate block ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_block_ack_frm(struct dp_pdev *pdev,
				 struct dp_tx_ppdu_info *tx_ppdu_info,
				 uint8_t window_flag)
{
	/* allocate and populate block ack frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t *frm = NULL;
	uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	uint32_t ba_bitmap_sz = TXMON_PPDU_USR(tx_ppdu_info,
					       user_id, ba_bitmap_sz);
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for multi sta block ack, do we need to increase the size
	 * or copy info on subsequent frame offset
	 *
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	/*
	 * NOTE(review): allocation budgets TXMON_BA_INFO_SZ(sz) = 4*sz + 6
	 * bytes of BA information, while the fill below writes
	 * 2 + 2 + (4 << sz) bytes - confirm the two agree for every
	 * supported ba_bitmap_sz value.
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   TXMON_BA_ACK_FRAME_SZ(ba_bitmap_sz),
				   0, 4, FALSE);
	if (!mpdu_nbuf) {
		/* TODO: update status and break */
		return;
	}

	/*
	 * BA CONTROL
	 * fields required to construct block ack information
	 * B0 - BA ACK POLICY
	 *	0 - Normal Ack
	 *	1 - No Ack
	 * B1 - MULTI TID
	 * B2 - COMPRESSED BITMAP
	 *	B12
	 *	00 - Basic block ack
	 *	01 - Compressed block ack
	 *	10 - Reserved
	 *	11 - Multi tid block ack
	 * B3-B11 - Reserved
	 * B12-B15 - TID info
	 *
	 * BA INFORMATION
	 * Per sta tid info
	 *	AID: 11 bits
	 *	ACK type: 1 bit
	 *	TID: 4 bits
	 *
	 * BA SEQ CTRL
	 *
	 * BA bitmap
	 *
	 */

	wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
	/* NOTE(review): zeroes DP_BA_ACK_FRAME_SIZE bytes regardless of the
	 * allocated length above - confirm it never exceeds the allocation.
	 */
	qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_BLOCK_ACK);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr2->i_fc[1] = 0;
	wh_addr2->i_fc[0] = frm_ctl;

	/* duration */
	*(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0020);

	if (window_flag) {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_addr2->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_addr2->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	frm = (uint8_t *)&wh_addr2[1];
	/* BA control */
	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
							    user_id,
							    ba_control));
	frm += 2;
	/* BA starting sequence control */
	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
							    user_id,
							    start_seq));
	frm += 2;
	/* BA bitmap: 4 << ba_bitmap_sz bytes */
	qdf_mem_copy(frm,
		     TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap),
		     4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));
	frm += (4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));

	qdf_nbuf_set_pktlen(mpdu_nbuf,
			    (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));

	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);

	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_alloc_mpdu() - API to allocate mpdu and add that current
 * user index
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_alloc_mpdu(struct dp_pdev *pdev, struct dp_tx_ppdu_info *tx_ppdu_info)
{
	qdf_nbuf_t mpdu_nbuf = NULL;
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
	uint32_t usr_idx = 0;

	/*
	 * payload will be added as a frag to buffer
	 * and we allocate new skb for radiotap header
	 * we allocate a dummy buffer size
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_MONITOR_HEADER, MAX_MONITOR_HEADER,
				   4, FALSE);
	if (!mpdu_nbuf) {
		qdf_err("%s: %d No memory to allocate mpdu_nbuf!!!!!\n",
			__func__, __LINE__);
		return;
	}

	usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);

	qdf_nbuf_queue_add(usr_mpdu_q, mpdu_nbuf);
}

/**
 * dp_tx_mon_generate_data_frm() - API to generate data frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @take_ref: passed through to qdf_nbuf_add_frag()
 *
 * Return: void
 */
static void
dp_tx_mon_generate_data_frm(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info,
			    bool take_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	qdf_nbuf_t mpdu_nbuf = NULL;
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
	uint32_t usr_idx = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	tx_status_info = &tx_mon_be->data_status_info;
	usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
	/* payload frag is attached to the most recently queued mpdu */
	mpdu_nbuf = qdf_nbuf_queue_last(usr_mpdu_q);

	/*
	 * NOTE(review): on builds where QDF_BUG() does not panic, execution
	 * continues with a NULL mpdu_nbuf - confirm.
	 */
	if (!mpdu_nbuf)
		QDF_BUG(0);

	tx_mon_be->stats.pkt_buf_processed++;

	/* add function to either copy or add frag to frag_list */
	qdf_nbuf_add_frag(pdev->soc->osdev,
			  TXMON_STATUS_INFO(tx_status_info, buffer),
			  mpdu_nbuf,
			  TXMON_STATUS_INFO(tx_status_info, offset),
			  TXMON_STATUS_INFO(tx_status_info, length),
			  DP_MON_DATA_BUFFER_SIZE,
			  take_ref, TXMON_NO_BUFFER_SZ);
}

/**
 * dp_tx_mon_generate_prot_frm() - API to generate protection frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_prot_frm(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->prot_status_info;

	/* update medium prot type from data */
	TXMON_STATUS_INFO(tx_status_info, medium_prot_type) =
		tx_mon_be->data_status_info.medium_prot_type;

	/* dispatch to the matching frame generator */
	switch (TXMON_STATUS_INFO(tx_status_info, medium_prot_type)) {
	case TXMON_MEDIUM_NO_PROTECTION:
	{
		/* no protection frame - do nothing */
		break;
	}
	case TXMON_MEDIUM_RTS_LEGACY:
	case TXMON_MEDIUM_RTS_11AC_STATIC_BW:
	case TXMON_MEDIUM_RTS_11AC_DYNAMIC_BW:
	{
		dp_tx_mon_generate_rts_frm(pdev, tx_ppdu_info,
					   INITIATOR_WINDOW);
		break;
	}
	case TXMON_MEDIUM_CTS2SELF:
	{
		dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info,
						INITIATOR_WINDOW);
		break;
	}
	case TXMON_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
	{
		dp_tx_mon_generate_3addr_qos_null_frm(pdev, tx_ppdu_info);
		break;
	}
	case TXMON_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
	{
		dp_tx_mon_generate_4addr_qos_null_frm(pdev, tx_ppdu_info);
		break;
	}
	}
}

/**
 * dp_tx_mon_generated_response_frm() - API to handle generated response frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_tx_mon_generated_response_frm(struct dp_pdev *pdev,
				 struct dp_tx_ppdu_info *tx_ppdu_info)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t gen_response = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return
			QDF_STATUS_E_NOMEM;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return QDF_STATUS_E_NOMEM;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return QDF_STATUS_E_NOMEM;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	/* dispatch on the self-generated response type recorded by HAL */
	tx_status_info = &tx_mon_be->data_status_info;
	gen_response = TXMON_STATUS_INFO(tx_status_info, generated_response);

	switch (gen_response) {
	case TXMON_GEN_RESP_SELFGEN_ACK:
	{
		dp_tx_mon_generate_ack_frm(pdev, tx_ppdu_info, RESPONSE_WINDOW);
		break;
	}
	case TXMON_GEN_RESP_SELFGEN_CTS:
	{
		dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info,
						RESPONSE_WINDOW);
		break;
	}
	case TXMON_GEN_RESP_SELFGEN_BA:
	{
		dp_tx_mon_generate_block_ack_frm(pdev, tx_ppdu_info,
						 RESPONSE_WINDOW);
		break;
	}
	case TXMON_GEN_RESP_SELFGEN_MBA:
	{
		dp_tx_mon_generate_mu_block_ack_frm(pdev, tx_ppdu_info,
						    RESPONSE_WINDOW);
		break;
	}
	/* CBF/TRIG/NDP_LMR responses are intentionally not reconstructed */
	case TXMON_GEN_RESP_SELFGEN_CBF:
	{
		break;
	}
	case TXMON_GEN_RESP_SELFGEN_TRIG:
	{
		break;
	}
	case TXMON_GEN_RESP_SELFGEN_NDP_LMR:
	{
		break;
	}
	};

	/* status is always QDF_STATUS_SUCCESS here */
	return status;
}

/**
 * dp_tx_mon_update_ppdu_info_status() - API to update frame as information
 * is stored only for that processing
 *
 * @pdev: pdev Handle
 * @tx_data_ppdu_info: pointer to data tx ppdu info
 * @tx_prot_ppdu_info: pointer to protection tx ppdu info
 * @tx_tlv_hdr: pointer to tx_tlv_hdr
 * @status_frag: pointer to fragment
 * @tlv_status: tlv status return from hal api
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_tx_mon_update_ppdu_info_status(struct dp_pdev *pdev,
				  struct dp_tx_ppdu_info *tx_data_ppdu_info,
				  struct dp_tx_ppdu_info *tx_prot_ppdu_info,
				  void *tx_tlv_hdr,
				  qdf_frag_t status_frag,
				  uint32_t tlv_status,
				  struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* sanity check */
	/* NOTE(review): E_NOMEM is returned for invalid arguments here;
	 * E_INVAL would be more accurate - confirm no caller keys on it
	 */
	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_NOMEM;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return QDF_STATUS_E_NOMEM;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return QDF_STATUS_E_NOMEM;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	switch (tlv_status) {
	case HAL_MON_TX_FES_SETUP:
	{
		/*
		 * start of initiator window
		 *
		 * got number of user count from fes setup tlv
		 */
		break;
	}
	case HAL_MON_RX_RESPONSE_REQUIRED_INFO:
	{
		break;
	}
	case HAL_MON_TX_FES_STATUS_START_PROT:
	{
		/* update tsft to local */
		break;
	}
	case HAL_MON_TX_FES_STATUS_START_PPDU:
	{
		/* update tsft to local */
		break;
	}
	case HAL_MON_TX_FES_STATUS_PROT:
	{
		TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used) = 1;
		/* ppdu_timestamp is stored halved; restore it here */
		TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) =
			TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) << 1;

		/* based on medium protection type we need to generate frame */
		dp_tx_mon_generate_prot_frm(pdev, tx_prot_ppdu_info);
		break;
	}
	case HAL_MON_RX_FRAME_BITMAP_ACK:
	{
		break;
	}
	case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_256:
	case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_1K:
	{
		/*
		 * this comes for each user
		 * BlockAck is not same as ACK, single frame can hold
		 * multiple BlockAck info
		 */
		tx_status_info = &tx_mon_be->data_status_info;

		/* single-user PPDU -> BA; multi-user PPDU -> MU-BA */
		if (TXMON_PPDU_HAL(tx_data_ppdu_info, num_users))
			dp_tx_mon_generate_block_ack_frm(pdev,
							 tx_data_ppdu_info,
							 INITIATOR_WINDOW);
		else
			dp_tx_mon_generate_mu_block_ack_frm(pdev,
							    tx_data_ppdu_info,
							    INITIATOR_WINDOW);

		break;
	}
	case HAL_MON_TX_MPDU_START:
	{
		/* new mpdu: queue a fresh nbuf for the current user */
		dp_tx_mon_alloc_mpdu(pdev, tx_data_ppdu_info);
		TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1;
		break;
	}
	case HAL_MON_TX_MSDU_START:
	{
		break;
	}
	case HAL_MON_TX_DATA:
	{
		/* payload lives in the status frag itself - take a ref */
		TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1;
		dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, true);
		break;
	}
	case HAL_MON_TX_BUFFER_ADDR:
	{
		struct hal_mon_packet_info *packet_info = NULL;
		struct dp_mon_desc *mon_desc = NULL;
		qdf_frag_t packet_buffer = NULL;
		/* NOTE(review): end_offset stays 0 and is stored as the
		 * frag offset below - confirm the payload really starts
		 * at offset 0 of the reaped buffer
		 */
		uint32_t end_offset = 0;

		tx_status_info = &tx_mon_be->data_status_info;
		/* update buffer from packet info */
		packet_info = &TXMON_PPDU_HAL(tx_data_ppdu_info, packet_info);
		/* sw_cookie carries the dp_mon_desc pointer */
		mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info->sw_cookie;

		qdf_assert_always(mon_desc);

		if (mon_desc->magic != DP_MON_DESC_MAGIC)
			qdf_assert_always(0);

		qdf_assert_always(mon_desc->buf_addr);
		tx_mon_be->stats.pkt_buf_recv++;

		/* unmap DMA before the host touches the buffer */
		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(pdev->soc->osdev,
					   (qdf_dma_addr_t)mon_desc->paddr,
					   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}

		/* take ownership of the buffer; descriptor is recycled */
		packet_buffer = mon_desc->buf_addr;
		mon_desc->buf_addr = NULL;

		/* increment reap count */
		mon_desc_list_ref->tx_mon_reap_cnt++;

		/* add the mon_desc to free list */
		dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
					     &mon_desc_list_ref->tail,
					     mon_desc);

		TXMON_STATUS_INFO(tx_status_info, buffer) = packet_buffer;
		TXMON_STATUS_INFO(tx_status_info, offset) = end_offset;
		TXMON_STATUS_INFO(tx_status_info,
				  length) = packet_info->dma_length;
1361 1362 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1363 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, false); 1364 break; 1365 } 1366 case HAL_MON_TX_FES_STATUS_END: 1367 { 1368 break; 1369 } 1370 case HAL_MON_RESPONSE_END_STATUS_INFO: 1371 { 1372 dp_tx_mon_generated_response_frm(pdev, tx_data_ppdu_info); 1373 break; 1374 } 1375 case HAL_MON_TX_FES_STATUS_START: 1376 { 1377 /* update the medium protection type */ 1378 break; 1379 } 1380 case HAL_MON_TX_QUEUE_EXTENSION: 1381 { 1382 /* No action for Queue Extension TLV */ 1383 break; 1384 } 1385 case HAL_MON_TX_FW2SW: 1386 { 1387 /* update the frequency */ 1388 tx_status_info = &tx_mon_be->data_status_info; 1389 1390 TXMON_PPDU_COM(tx_data_ppdu_info, 1391 chan_freq) = TXMON_STATUS_INFO(tx_status_info, 1392 freq); 1393 TXMON_PPDU_COM(tx_prot_ppdu_info, 1394 chan_freq) = TXMON_STATUS_INFO(tx_status_info, 1395 freq); 1396 break; 1397 } 1398 default: 1399 { 1400 /* return or break in default case */ 1401 break; 1402 } 1403 }; 1404 1405 return status; 1406 } 1407 1408 QDF_STATUS 1409 dp_tx_process_pktlog_be(struct dp_soc *soc, struct dp_pdev *pdev, 1410 qdf_frag_t status_frag, uint32_t end_offset) 1411 { 1412 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1413 qdf_nbuf_t nbuf = NULL; 1414 enum WDI_EVENT pktlog_mode = WDI_NO_VAL; 1415 int frag_bytes; 1416 1417 if (!mon_pdev->pktlog_hybrid_mode) 1418 return QDF_STATUS_E_INVAL; 1419 1420 nbuf = qdf_nbuf_alloc(soc->osdev, MAX_DUMMY_FRM_BODY, 0, 4, FALSE); 1421 if (!nbuf) 1422 return QDF_STATUS_E_NOMEM; 1423 1424 qdf_nbuf_add_rx_frag(status_frag, nbuf, 0, 1425 (end_offset + 1), 1426 0, true); 1427 1428 if (mon_pdev->pktlog_hybrid_mode) 1429 pktlog_mode = WDI_EVENT_HYBRID_TX; 1430 1431 frag_bytes = qdf_nbuf_get_frag_len(nbuf, 0); 1432 if (pktlog_mode != WDI_NO_VAL) { 1433 dp_wdi_event_handler(pktlog_mode, soc, 1434 nbuf, HTT_INVALID_PEER, 1435 WDI_NO_VAL, pdev->pdev_id); 1436 } 1437 qdf_nbuf_free(nbuf); 1438 1439 return QDF_STATUS_SUCCESS; 1440 } 1441 1442 /** 
 * dp_tx_mon_process_tlv_2_0() - API to parse PPDU worth information
 * @pdev: DP_PDEV handle
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Return: status
 */
QDF_STATUS
dp_tx_mon_process_tlv_2_0(struct dp_pdev *pdev,
			  struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct dp_tx_ppdu_info *tx_prot_ppdu_info = NULL;
	struct dp_tx_ppdu_info *tx_data_ppdu_info = NULL;
	struct hal_tx_status_info *tx_status_prot;
	struct hal_tx_status_info *tx_status_data;
	qdf_frag_t status_frag = NULL;
	uint32_t end_offset = 0;
	uint32_t tlv_status;
	uint32_t status = QDF_STATUS_SUCCESS;
	uint8_t *tx_tlv;
	uint8_t *tx_tlv_start;
	uint8_t num_users = 0;
	/* local shadow of tx_mon_be->cur_frag_q_idx, kept in sync below */
	uint8_t cur_frag_q_idx;
	bool schedule_wrq = false;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_NOMEM;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return QDF_STATUS_E_NOMEM;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return QDF_STATUS_E_NOMEM;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	cur_frag_q_idx = tx_mon_be->cur_frag_q_idx;

	tx_status_prot = &tx_mon_be->prot_status_info;
	tx_status_data = &tx_mon_be->data_status_info;

	/* presumably get_ppdu_info also parks the pointer in
	 * tx_mon_be->tx_prot_ppdu_info (it is read back further down),
	 * so early returns below do not leak it - TODO confirm
	 */
	tx_prot_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_PROT_PPDU_INFO,
						    1, tx_mon_be->be_ppdu_id);

	if (!tx_prot_ppdu_info) {
		dp_mon_info("tx prot ppdu info alloc got failed!!");
		return QDF_STATUS_E_NOMEM;
	}

	status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf;
	end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset;
	tx_tlv = status_frag;
	dp_mon_debug("last_frag_q_idx: %d status_frag:%pK",
		     tx_mon_be->last_frag_q_idx, status_frag);

	/* get number of user from tlv window */
	tlv_status = hal_txmon_status_get_num_users(pdev->soc->hal_soc,
						    tx_tlv, &num_users);
	if (tlv_status == HAL_MON_TX_STATUS_PPDU_NOT_DONE || !num_users) {
		dp_mon_err("window open with tlv_tag[0x%x] num_users[%d]!\n",
			   hal_tx_status_get_tlv_tag(tx_tlv), num_users);
		return QDF_STATUS_E_INVAL;
	}

	/* allocate tx_data_ppdu_info based on num_users */
	tx_data_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_DATA_PPDU_INFO,
						    num_users,
						    tx_mon_be->be_ppdu_id);
	if (!tx_data_ppdu_info) {
		dp_mon_info("tx prot ppdu info alloc got failed!!");
		return QDF_STATUS_E_NOMEM;
	}

	/* iterate status buffer queue */
	while (tx_mon_be->cur_frag_q_idx < tx_mon_be->last_frag_q_idx) {
		/* get status buffer from frag_q_vec */
		status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf;
		end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset;
		if (qdf_unlikely(!status_frag)) {
			dp_mon_err("status frag is NULL\n");
			QDF_BUG(0);
		}

		tx_tlv = status_frag;
		tx_tlv_start = tx_tlv;
		/*
		 * parse each status buffer and populate the information to
		 * dp_tx_ppdu_info
		 */
		do {
			tlv_status = hal_txmon_status_parse_tlv(
					pdev->soc->hal_soc,
					&tx_data_ppdu_info->hal_txmon,
					&tx_prot_ppdu_info->hal_txmon,
					tx_status_data,
					tx_status_prot,
					tx_tlv, status_frag);

			status =
				dp_tx_mon_update_ppdu_info_status(
							pdev,
							tx_data_ppdu_info,
							tx_prot_ppdu_info,
							tx_tlv,
							status_frag,
							tlv_status,
							mon_desc_list_ref);

			/* need api definition for hal_tx_status_get_next_tlv */
			tx_tlv = hal_tx_status_get_next_tlv(tx_tlv);
			if ((tx_tlv - tx_tlv_start) >= end_offset)
				break;
		} while ((tx_tlv - tx_tlv_start) < end_offset);

		/*
		 * free status buffer after parsing
		 * is status_frag mapped to mpdu if so make sure
		 */
		tx_mon_be->stats.status_buf_free++;
		qdf_frag_free(status_frag);
		tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf = NULL;
		tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset = 0;
		/* advance both the shared index and the local shadow */
		cur_frag_q_idx = ++tx_mon_be->cur_frag_q_idx;
	}

	/* clear the unreleased frag array */
	dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref);

	if (TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used)) {
		if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info,
						 chan_num))) {
			/* update channel number, if not fetched properly */
			TXMON_PPDU_COM(tx_prot_ppdu_info,
				       chan_num) = mon_pdev->mon_chan_num;
		}

		if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info,
						 chan_freq))) {
			/* update channel frequency, if not fetched properly */
			TXMON_PPDU_COM(tx_prot_ppdu_info,
				       chan_freq) = mon_pdev->mon_chan_freq;
		}

		/*
		 * add dp_tx_ppdu_info to pdev queue
		 * for post processing
		 *
		 * TODO: add a threshold check and drop the ppdu info
		 */
		qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
		tx_mon_be->last_prot_ppdu_info =
			tx_mon_be->tx_prot_ppdu_info;
		STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue,
				   tx_prot_ppdu_info,
				   tx_ppdu_info_queue_elem);
		tx_mon_be->tx_ppdu_info_list_depth++;

		tx_mon_be->tx_prot_ppdu_info = NULL;
		qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);
		schedule_wrq = true;
	} else {
		dp_tx_mon_free_ppdu_info(tx_prot_ppdu_info, tx_mon_be);
		tx_mon_be->tx_prot_ppdu_info = NULL;
		tx_prot_ppdu_info = NULL;
	}

	if (TXMON_PPDU_HAL(tx_data_ppdu_info, is_used)) {
		if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info,
						 chan_num))) {
			/* update channel number, if not fetched properly */
			TXMON_PPDU_COM(tx_data_ppdu_info,
				       chan_num) = mon_pdev->mon_chan_num;
		}

		if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info,
						 chan_freq))) {
			/* update channel frequency, if not fetched properly */
			TXMON_PPDU_COM(tx_data_ppdu_info,
				       chan_freq) = mon_pdev->mon_chan_freq;
		}

		/*
		 * add dp_tx_ppdu_info to pdev queue
		 * for post processing
		 *
		 * TODO: add a threshold check and drop the ppdu info
		 */
		qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
		tx_mon_be->last_data_ppdu_info =
			tx_mon_be->tx_data_ppdu_info;
		STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue,
				   tx_data_ppdu_info,
				   tx_ppdu_info_queue_elem);
		tx_mon_be->tx_ppdu_info_list_depth++;

		tx_mon_be->tx_data_ppdu_info = NULL;
		qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);
		schedule_wrq = true;
	} else {
		dp_tx_mon_free_ppdu_info(tx_data_ppdu_info, tx_mon_be);
		tx_mon_be->tx_data_ppdu_info = NULL;
		tx_data_ppdu_info = NULL;
	}

	/* kick the post-ppdu worker only when something was queued */
	if (schedule_wrq)
		qdf_queue_work(NULL, tx_mon_be->post_ppdu_workqueue,
			       &tx_mon_be->post_ppdu_work);

	return QDF_STATUS_SUCCESS;
}

/* record the HW end reason for the current PPDU in the bitmap */
void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev,
				 int ppdu_id, int end_reason)
{
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	tx_mon_be->be_end_reason_bitmap |= (1 << end_reason);
}

QDF_STATUS
dp_tx_mon_process_status_tlv(struct dp_soc *soc,
			     struct dp_pdev *pdev,
			     struct hal_mon_desc *mon_ring_desc,
			     qdf_frag_t status_frag,
			     uint32_t end_offset,
			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
	uint8_t last_frag_q_idx = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		goto free_status_buffer;

	mon_pdev = pdev->monitor_pdev;
	if
(qdf_unlikely(!mon_pdev)) 1692 goto free_status_buffer; 1693 1694 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1695 if (qdf_unlikely(!mon_pdev_be)) 1696 goto free_status_buffer; 1697 1698 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1699 1700 if (qdf_unlikely(tx_mon_be->last_frag_q_idx > 1701 MAX_STATUS_BUFFER_IN_PPDU)) { 1702 dp_mon_err("status frag queue for a ppdu[%d] exceed %d\n", 1703 tx_mon_be->be_ppdu_id, 1704 MAX_STATUS_BUFFER_IN_PPDU); 1705 dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref); 1706 goto free_status_buffer; 1707 } 1708 1709 if (tx_mon_be->mode == TX_MON_BE_DISABLE && 1710 !dp_lite_mon_is_tx_enabled(mon_pdev)) { 1711 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1712 mon_desc_list_ref); 1713 goto free_status_buffer; 1714 } 1715 1716 if (tx_mon_be->be_ppdu_id != mon_ring_desc->ppdu_id && 1717 tx_mon_be->last_frag_q_idx) { 1718 if (tx_mon_be->be_end_reason_bitmap & 1719 (1 << HAL_MON_FLUSH_DETECTED)) { 1720 tx_mon_be->stats.ppdu_info_drop_flush++; 1721 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1722 mon_desc_list_ref); 1723 } else if (tx_mon_be->be_end_reason_bitmap & 1724 (1 << HAL_MON_PPDU_TRUNCATED)) { 1725 tx_mon_be->stats.ppdu_info_drop_trunc++; 1726 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1727 mon_desc_list_ref); 1728 } else { 1729 dp_mon_err("End of ppdu not seen PID:%d cur_pid:%d idx:%d", 1730 tx_mon_be->be_ppdu_id, 1731 mon_ring_desc->ppdu_id, 1732 tx_mon_be->last_frag_q_idx); 1733 /* schedule ppdu worth information */ 1734 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1735 mon_desc_list_ref); 1736 } 1737 1738 /* reset end reason bitmap */ 1739 tx_mon_be->be_end_reason_bitmap = 0; 1740 tx_mon_be->last_frag_q_idx = 0; 1741 tx_mon_be->cur_frag_q_idx = 0; 1742 } 1743 1744 tx_mon_be->be_ppdu_id = mon_ring_desc->ppdu_id; 1745 tx_mon_be->be_end_reason_bitmap |= (1 << mon_ring_desc->end_reason); 1746 1747 last_frag_q_idx = tx_mon_be->last_frag_q_idx; 1748 1749 tx_mon_be->frag_q_vec[last_frag_q_idx].frag_buf = 
status_frag; 1750 tx_mon_be->frag_q_vec[last_frag_q_idx].end_offset = end_offset; 1751 tx_mon_be->last_frag_q_idx++; 1752 1753 if (mon_ring_desc->end_reason == HAL_MON_END_OF_PPDU) { 1754 /* drop processing of tlv, if ppdu info list exceed threshold */ 1755 if ((tx_mon_be->defer_ppdu_info_list_depth + 1756 tx_mon_be->tx_ppdu_info_list_depth) > 1757 MAX_PPDU_INFO_LIST_DEPTH) { 1758 tx_mon_be->stats.ppdu_info_drop_th++; 1759 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1760 mon_desc_list_ref); 1761 return QDF_STATUS_E_PENDING; 1762 } 1763 1764 if (dp_tx_mon_process_tlv_2_0(pdev, 1765 mon_desc_list_ref) != 1766 QDF_STATUS_SUCCESS) 1767 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1768 mon_desc_list_ref); 1769 } 1770 1771 return QDF_STATUS_SUCCESS; 1772 1773 free_status_buffer: 1774 dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset, 1775 mon_desc_list_ref); 1776 if (qdf_likely(tx_mon_be)) 1777 tx_mon_be->stats.status_buf_free++; 1778 1779 qdf_frag_free(status_frag); 1780 1781 return QDF_STATUS_E_NOMEM; 1782 } 1783 1784 #else 1785 1786 QDF_STATUS 1787 dp_tx_mon_process_status_tlv(struct dp_soc *soc, 1788 struct dp_pdev *pdev, 1789 struct hal_mon_desc *mon_ring_desc, 1790 qdf_frag_t status_frag, 1791 uint32_t end_offset, 1792 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1793 { 1794 struct dp_mon_pdev *mon_pdev; 1795 struct dp_mon_pdev_be *mon_pdev_be; 1796 struct dp_pdev_tx_monitor_be *tx_mon_be; 1797 1798 /* sanity check */ 1799 if (qdf_unlikely(!pdev)) 1800 return QDF_STATUS_E_INVAL; 1801 1802 mon_pdev = pdev->monitor_pdev; 1803 if (qdf_unlikely(!mon_pdev)) 1804 return QDF_STATUS_E_INVAL; 1805 1806 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1807 if (qdf_unlikely(!mon_pdev_be)) 1808 return QDF_STATUS_E_INVAL; 1809 1810 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1811 1812 dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset, 1813 mon_desc_list_ref); 1814 tx_mon_be->stats.status_buf_free++; 1815 qdf_frag_free(status_frag); 1816 
1817 return QDF_STATUS_E_INVAL; 1818 } 1819 1820 void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev, 1821 int ppdu_id, int end_reason) 1822 { 1823 } 1824 #endif 1825