/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include <dp_be.h>
#include <qdf_nbuf_frag.h>
#include <hal_be_api_mon.h>
#include <dp_mon.h>
#include <dp_tx_mon_2.0.h>
#include <dp_mon_2.0.h>
#include <dp_lite_mon.h>

#define MAX_PPDU_INFO_LIST_DEPTH 64

/**
 * dp_tx_mon_status_free_packet_buf() - API to free packet buffer
 * @pdev: pdev Handle
 * @status_frag: status frag
 * @end_offset: status fragment end offset
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Walks the TLVs inside one status fragment; every packet-buffer TLV
 * found has its backing mon descriptor unmapped, recycled to the free
 * list and its payload fragment freed.
 *
 * Return: void
 */
void
dp_tx_mon_status_free_packet_buf(struct dp_pdev *pdev,
				 qdf_frag_t status_frag, uint32_t end_offset,
				 struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_mon_packet_info packet_info = {0};
	uint8_t *tx_tlv;
	uint8_t *mon_buf_tx_tlv;
	uint8_t *tx_tlv_start;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_tlv = status_frag;
	tx_tlv_start = tx_tlv;
	/*
	 * parse each status buffer and find packet buffer in it
	 */
	do {
		if (hal_txmon_is_mon_buf_addr_tlv(pdev->soc->hal_soc, tx_tlv)) {
			struct dp_mon_desc *mon_desc = NULL;
			qdf_frag_t packet_buffer = NULL;

			/* packet info follows the 64-bit TLV header */
			mon_buf_tx_tlv = ((uint8_t *)tx_tlv +
					  HAL_RX_TLV64_HDR_SIZE);
			hal_txmon_populate_packet_info(pdev->soc->hal_soc,
						       mon_buf_tx_tlv,
						       &packet_info);

			/* sw_cookie carries the dp_mon_desc pointer */
			mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info.sw_cookie;

			qdf_assert_always(mon_desc);

			if (mon_desc->magic != DP_MON_DESC_MAGIC)
				qdf_assert_always(0);

			/* unmap the DMA buffer once, before freeing it */
			if (!mon_desc->unmapped) {
				qdf_mem_unmap_page(pdev->soc->osdev,
						   (qdf_dma_addr_t)mon_desc->paddr,
						   DP_MON_DATA_BUFFER_SIZE,
						   QDF_DMA_FROM_DEVICE);
				mon_desc->unmapped = 1;
			}

			packet_buffer = (qdf_frag_t)(mon_desc->buf_addr);
			mon_desc->buf_addr = NULL;

			qdf_assert_always(packet_buffer);
			/* increment reap count */
			mon_desc_list_ref->tx_mon_reap_cnt++;

			/* add the mon_desc to free list */
			dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
						     &mon_desc_list_ref->tail,
						     mon_desc);

			tx_mon_be->stats.pkt_buf_recv++;
			tx_mon_be->stats.pkt_buf_free++;

			/* free buffer, mapped to descriptor */
			qdf_frag_free(packet_buffer);
		}

		/* need api definition for hal_tx_status_get_next_tlv */
		tx_tlv = hal_tx_status_get_next_tlv(tx_tlv);
	} while ((tx_tlv - tx_tlv_start) < end_offset);
}

#if defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(QCA_MONITOR_2_0_SUPPORT)
/**
 * dp_tx_mon_status_queue_free() - API to free status buffer
 * @pdev: pdev Handle
 * @tx_mon_be: pointer to tx_monitor_be
 * @mon_desc_list_ref: tx monitor
 *                     descriptor list reference
 *
 * Return: void
 */
static void
dp_tx_mon_status_queue_free(struct dp_pdev *pdev,
			    struct dp_pdev_tx_monitor_be *tx_mon_be,
			    struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	uint8_t last_frag_q_idx = tx_mon_be->last_frag_q_idx;
	qdf_frag_t status_frag = NULL;
	uint8_t i = tx_mon_be->cur_frag_q_idx;
	uint32_t end_offset = 0;

	/* release every queued status fragment between cur and last index */
	for (; i < last_frag_q_idx; i++) {
		status_frag = tx_mon_be->frag_q_vec[i].frag_buf;

		if (qdf_unlikely(!status_frag))
			continue;

		end_offset = tx_mon_be->frag_q_vec[i].end_offset;
		/* free the packet buffers referenced inside the fragment */
		dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
						 mon_desc_list_ref);
		tx_mon_be->stats.status_buf_free++;
		qdf_frag_free(status_frag);
		tx_mon_be->frag_q_vec[i].frag_buf = NULL;
		tx_mon_be->frag_q_vec[i].end_offset = 0;
	}
	/* queue is empty now; reset both indices */
	tx_mon_be->last_frag_q_idx = 0;
	tx_mon_be->cur_frag_q_idx = 0;
}

/**
 * dp_tx_mon_enqueue_mpdu_nbuf() - API to enqueue nbuf from per user mpdu queue
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @user_id: user index
 * @mpdu_nbuf: nbuf to be enqueue
 *
 * Return: void
 */
static void
dp_tx_mon_enqueue_mpdu_nbuf(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info,
			    uint8_t user_id, qdf_nbuf_t mpdu_nbuf)
{
	qdf_nbuf_t radiotap = NULL;
	/* enqueue mpdu_nbuf to the per user mpdu_q */
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;

	if (!TXMON_PPDU_HAL(tx_ppdu_info, rx_user_status) ||
	    !TXMON_PPDU_HAL(tx_ppdu_info, num_users))
		QDF_BUG(0);

	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user_id, mpdu_q);

	/* separate skb reserved for the radiotap header */
	radiotap = qdf_nbuf_alloc(pdev->soc->osdev, MAX_MONITOR_HEADER,
				  MAX_MONITOR_HEADER,
				  4, FALSE);
	if (qdf_unlikely(!radiotap)) {
		qdf_err("Unable to allocate radiotap buffer\n");
		qdf_nbuf_free(mpdu_nbuf);
		return;
	}

	/* append ext list */
	qdf_nbuf_append_ext_list(radiotap, mpdu_nbuf,
				 qdf_nbuf_len(mpdu_nbuf));
	qdf_nbuf_queue_add(usr_mpdu_q, radiotap);
}

/*
 * TX MONITOR
 *
 * frame format
 * -------------------------------------------------------------------------
 *  FUNC  | ToDS | FromDS | ADDRESS 1 | ADDRESS 2 | ADDRESS 3 | ADDRESS 4 |
 * ------------------------------------------------------------------------
 *  IBSS  |  0   |   0    |    DA     |    SA     |   BSSID   | NOT USED  |
 *  TO AP |  1   |   0    |   BSSID   |    SA     |    DA     | NOT USED  |
 * From AP|  0   |   1    |    DA     |   BSSID   |    SA     | NOT USED  |
 *  WDS   |  1   |   1    |    RA     |    TA     |    DA     |    SA     |
 * ------------------------------------------------------------------------
 *
 * HOST GENERATED FRAME:
 * =====================
 * 1. RTS
 * 2. CTS
 * 3. ACK
 * 4. BA
 * 5. Multi STA BA
 *
 * control frame
 * ------------------------------------------------------------
 * | protocol 2b | Type 2b | subtype 4b | ToDS 1b | FromDS 1b |
 * | Morefrag 1b | Retry 1b | pwr_mgmt 1b | More data 1b |
 * | protected frm 1b | order 1b |
 * -----------------------------------------------------------
 * control frame originated from wireless station so ToDS = FromDS = 0,
 *
 * RTS
 * ---------------------------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | Transmit address 6 | FCS |
 * ---------------------------------------------------------------------------
 * subtype in FC is RTS - 1101
 * type in FC is control frame - 10
 *
 * CTS
 * --------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
 * --------------------------------------------------------
 * subtype in FC is CTS - 0011
 * type in FC is control frame - 10
 *
 * ACK
 * --------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
 * --------------------------------------------------------
 *
 * subtype in FC is ACK - 1011
 * type in FC is control frame - 10
 *
 * Block ACK
 * --------------------------------------------------------------------------
 * | FC 2 | Dur 2 | RA 6 | TA 6 | BA CTRL 2 | BA Information variable | FCS |
 * --------------------------------------------------------------------------
 *
 * Block Ack control
 * ---------------------------------------------------------------
 * | BA ACK POLICY B0 | BA TYPE B1-B4 | Rsv B5-B11 | TID B12-B15 |
 * ---------------------------------------------------------------
 *
 * BA ack policy
 * 0 - Normal Ack
 * 1 - No Ack
 *
 * Block Ack Type
 * 0 - Reserved
 * 1 - extended compressed
 * 2 - compressed
 * 3 - Multi TID
 * 4-5 - Reserved
 * 6 - GCR
 * 7-9 - Reserved
 * 10 - GLK-GCR
 * 11 - Multi-STA
 * 12-15 - Reserved
 *
 * Block Ack information
 * ----------------------------------------------------------
 * | Block ack start seq ctrl 2 | Block ack bitmap variable |
 * ----------------------------------------------------------
 *
 * Multi STA Block Ack Information
 * -----------------------------------------------------------------
 * | Per STA TID info 2 | BA start seq ctrl 2 | BA bitmap variable |
 * -----------------------------------------------------------------
 *
 * Per STA TID info
 * ------------------------------------
 * | AID11 11b | Ack Type 1b | TID 4b |
 * ------------------------------------
 * AID11 - 2045 means unassociated STA, then ACK Type and TID 0, 15
 *
 * Mgmt/PS-POLL frame ack
 * Ack type - 1 and TID - 15, BA_seq_ctrl & BA_bitmap - not present
 *
 * All ack context - with no bitmap (all AMPDU success)
 * Ack type - 1 and TID - 14, BA_seq_ctrl & BA_bitmap - not present
 *
 * Block ack context
 * Ack type - 0 and TID - 0~7 BA_seq_ctrl & BA_bitmap - present
 *
 * Ack context
 * Ack type - 1 and TID - 0~7 BA_seq_ctrl &
 *             BA_bitmap - not present
 *
 *
 */

/**
 * dp_tx_mon_generate_cts2self_frm() - API to generate cts2self frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_cts2self_frm(struct dp_pdev *pdev,
				struct dp_tx_ppdu_info *tx_ppdu_info,
				uint8_t window_flag)
{
	/* allocate and populate CTS/ CTS2SELF frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	uint16_t duration_le = 0;
	struct ieee80211_frame_min_one *wh_min = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	/* initiator window uses protection status, responder uses data */
	if (window_flag == INITIATOR_WINDOW)
		tx_status_info = &tx_mon_be->prot_status_info;
	else
		tx_status_info = &tx_mon_be->data_status_info;

	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_min = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_CTS);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_min->i_fc[1] = 0;
	wh_min->i_fc[0] = frm_ctl;

	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info,
						     duration));
	wh_min->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_min->i_dur[0] = (duration_le & 0xFF);

	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_rts_frm() - API to generate rts frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_rts_frm(struct dp_pdev *pdev,
			   struct dp_tx_ppdu_info *tx_ppdu_info,
			   uint8_t window_flag)
{
	/* allocate and populate RTS frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	uint16_t duration_le = 0;
	struct ieee80211_ctlframe_addr2 *wh_min = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->prot_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_min = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_RTS);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_min->i_fc[1] = 0;
	wh_min->i_fc[0] = frm_ctl;

	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_min->i_aidordur[1] = (duration_le & 0xFF00) >> 8;
	wh_min->i_aidordur[0] = (duration_le & 0xFF);

	/* fall back to data status when no protection address recorded */
	if (!tx_status_info->protection_addr)
		tx_status_info = &tx_mon_be->data_status_info;

	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_min->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	} else {
		/* responder window: swap RA/TA */
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_min->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	}

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_ack_frm() - API to generate ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_ack_frm(struct dp_pdev *pdev,
			   struct dp_tx_ppdu_info *tx_ppdu_info,
			   uint8_t window_flag)
{
	/* allocate and populate ACK frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_frame_min_one *wh_addr1 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t
		user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr1 = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_ACK);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr1->i_fc[1] = 0;
	wh_addr1->i_fc[0] = frm_ctl;

	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_addr1->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_addr1->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	/* set duration zero for ack frame */
	*(u_int16_t *)(&wh_addr1->i_dur) = qdf_cpu_to_le16(0x0000);

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr1));

	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, user_id, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_3addr_qos_null_frm() - API to generate
 * 3 address qosnull frame
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_3addr_qos_null_frm(struct dp_pdev *pdev,
				      struct dp_tx_ppdu_info *tx_ppdu_info)
{
	/* allocate and populate 3 address qos null
	   frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_qosframe *wh_addr3 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint16_t duration_le = 0;
	uint8_t num_users = 0;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr3 = (struct ieee80211_qosframe *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr3, sizeof(struct ieee80211_qosframe));

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
		   IEEE80211_FC0_SUBTYPE_QOS_NULL);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr3->i_fc[1] = 0;
	wh_addr3->i_fc[0] = frm_ctl;

	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_addr3->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_addr3->i_dur[0] = (duration_le & 0xFF);

	qdf_mem_copy(wh_addr3->i_addr1,
		     TXMON_STATUS_INFO(tx_status_info, addr1),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr3->i_addr2,
		     TXMON_STATUS_INFO(tx_status_info, addr2),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr3->i_addr3,
		     TXMON_STATUS_INFO(tx_status_info, addr3),
		     QDF_MAC_ADDR_SIZE);

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr3));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev,
				    tx_ppdu_info, num_users, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_4addr_qos_null_frm() - API to generate
 * 4 address qos null frame
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_4addr_qos_null_frm(struct dp_pdev *pdev,
				      struct dp_tx_ppdu_info *tx_ppdu_info)
{
	/* allocate and populate 4 address qos null frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_qosframe_addr4 *wh_addr4 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint16_t duration_le = 0;
	uint8_t num_users = 0;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr4 = (struct ieee80211_qosframe_addr4 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr4, sizeof(struct ieee80211_qosframe_addr4));

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
		   IEEE80211_FC0_SUBTYPE_QOS_NULL);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr4->i_fc[1] = 0;
	wh_addr4->i_fc[0] = frm_ctl;

	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_addr4->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_addr4->i_dur[0] = (duration_le & 0xFF);

	qdf_mem_copy(wh_addr4->i_addr1,
		     TXMON_STATUS_INFO(tx_status_info, addr1),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr4->i_addr2,
		     TXMON_STATUS_INFO(tx_status_info, addr2),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr4->i_addr3,
		     TXMON_STATUS_INFO(tx_status_info, addr3),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr4->i_addr4,
		     TXMON_STATUS_INFO(tx_status_info, addr4),
		     QDF_MAC_ADDR_SIZE);

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr4));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

#define TXMON_BA_CTRL_SZ 2
#define TXMON_BA_INFO_SZ(bitmap_sz) ((4 * (bitmap_sz)) + 6)
#define TXMON_MU_BA_ACK_FRAME_SZ(bitmap_sz)	\
	(sizeof(struct ieee80211_ctlframe_addr2) +\
	 TXMON_BA_CTRL_SZ + (bitmap_sz))

#define TXMON_BA_ACK_FRAME_SZ(bitmap_sz)	\
	(sizeof(struct ieee80211_ctlframe_addr2) +\
	 TXMON_BA_CTRL_SZ + TXMON_BA_INFO_SZ(bitmap_sz))

/**
 * dp_tx_mon_generate_mu_block_ack_frm() - API to generate MU block ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_mu_block_ack_frm(struct dp_pdev *pdev,
				    struct dp_tx_ppdu_info *tx_ppdu_info,
				    uint8_t window_flag)
{
	/* allocate and populate MU block ack frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint16_t ba_control = 0;
	uint8_t *frm = NULL;
	uint32_t ba_sz = 0;
	uint8_t num_users = TXMON_PPDU_HAL(tx_ppdu_info, num_users);
	uint8_t i = 0;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * NOTE(review): the per-user BA info written by the loop further
	 * down occupies 2 + 2 + (4 << ba_bitmap_sz) bytes, while this
	 * accumulates (4 << TXMON_BA_INFO_SZ(...)) — a shift by a byte
	 * count that looks like an over-allocation; confirm the intended
	 * sizing against the writer loop below.
	 */
	for (i = 0; i < num_users; i++)
		ba_sz += (4 << TXMON_BA_INFO_SZ(TXMON_PPDU_USR(tx_ppdu_info,
							       i,
							       ba_bitmap_sz)));

	/*
	 * for multi sta block ack, do we need to increase the size
	 * or copy info on subsequent frame offset
	 *
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   TXMON_MU_BA_ACK_FRAME_SZ(ba_sz), 0, 4,
				   FALSE);
	if (!mpdu_nbuf) {
		/* TODO: update status and break */
		return;
	}

	wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_BLOCK_ACK);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr2->i_fc[1] = 0;
	wh_addr2->i_fc[0] = frm_ctl;

	*(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0000);

	if (window_flag == RESPONSE_WINDOW) {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
		/* multi-user response goes to the broadcast RA */
		if (num_users > 1)
			qdf_mem_set(wh_addr2->i_addr1, QDF_MAC_ADDR_SIZE, 0xFF);
		else
			qdf_mem_copy(wh_addr2->i_addr1,
				     TXMON_STATUS_INFO(tx_status_info, addr1),
				     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_addr2->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	frm = (uint8_t *)&wh_addr2[1];

	/* BA control */
	ba_control = 0x0016;
	*((uint16_t *)frm) = qdf_cpu_to_le16(ba_control);
	frm += 2;

	/* per-user: STA TID info, BA start seq ctrl, BA bitmap */
	for (i = 0; i < num_users; i++) {
		*((uint16_t *)frm) =
			qdf_cpu_to_le16((TXMON_PPDU_USR(tx_ppdu_info, i, tid) <<
					 DP_IEEE80211_BAR_CTL_TID_S) |
					(TXMON_PPDU_USR(tx_ppdu_info, i,
							aid) & 0x7FF));
		frm += 2;
		*((uint16_t *)frm) = qdf_cpu_to_le16(
				TXMON_PPDU_USR(tx_ppdu_info, i, start_seq));
		frm += 2;
		qdf_mem_copy(frm,
			     TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap),
			     4 <<
			     TXMON_PPDU_USR(tx_ppdu_info,
					    i, ba_bitmap_sz));
		frm += 4 << TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap_sz);
	}

	qdf_nbuf_set_pktlen(mpdu_nbuf,
			    (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));

	/* always enqueue to first active user */
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
	/* HE MU fields not required for Multi Sta Block ack frame */
	TXMON_PPDU_COM(tx_ppdu_info, he_mu_flags) = 0;
}

/**
 * dp_tx_mon_generate_block_ack_frm() - API to generate block ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_block_ack_frm(struct dp_pdev *pdev,
				 struct dp_tx_ppdu_info *tx_ppdu_info,
				 uint8_t window_flag)
{
	/* allocate and populate block ack frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t *frm = NULL;
	uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	uint32_t ba_bitmap_sz = TXMON_PPDU_USR(tx_ppdu_info,
					       user_id, ba_bitmap_sz);
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for multi sta block ack, do we need to increase the size
	 * or copy info on subsequent frame offset
	 *
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   TXMON_BA_ACK_FRAME_SZ(ba_bitmap_sz),
				   0, 4, FALSE);
	if (!mpdu_nbuf) {
		/* TODO: update status and break */
		return;
	}

	/*
	 * BA CONTROL
	 * fields required to construct block ack information
	 * B0 - BA ACK POLICY
	 *	0 - Normal ACK
	 *	1 - No ACK
	 * B1 - MULTI TID
	 * B2 - COMPRESSED BITMAP
	 *	B12
	 *	00 - Basic block ack
	 *	01 - Compressed block ack
	 *	10 - Reserved
	 *	11 - Multi tid block ack
	 * B3-B11 - Reserved
	 * B12-B15 - TID info
	 *
	 * BA INFORMATION
	 * Per sta tid info
	 *	AID: 11 bits
	 *	ACK type: 1 bit
	 *	TID: 4 bits
	 *
	 * BA SEQ CTRL
	 *
	 * BA bitmap
	 *
	 */

	wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_BLOCK_ACK);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr2->i_fc[1] = 0;
	wh_addr2->i_fc[0] = frm_ctl;

	/* duration */
	*(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0020);

	/*
	 * NOTE(review): the other generators compare window_flag against
	 * INITIATOR_WINDOW explicitly; here any non-zero value selects the
	 * first branch — confirm this truth-test is intentional.
	 */
	if (window_flag) {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_addr2->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_addr2->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	frm = (uint8_t *)&wh_addr2[1];
	/* BA control */
	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
							    user_id,
							    ba_control));
	frm += 2;
	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
							    user_id,
							    start_seq));
	frm += 2;
	qdf_mem_copy(frm,
		     TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap),
		     4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));
	frm += (4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));

	qdf_nbuf_set_pktlen(mpdu_nbuf,
			    (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));

	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);

	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_alloc_mpdu() - API to allocate mpdu and add that current
 * user index
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_alloc_mpdu(struct dp_pdev *pdev, struct dp_tx_ppdu_info *tx_ppdu_info)
{
	qdf_nbuf_t mpdu_nbuf = NULL;
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
	uint32_t usr_idx = 0;

	/*
	 * payload will be added as a frag to buffer
	 * and we allocate new skb for radiotap header
	 * we allocate a dummy buffer size
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_MONITOR_HEADER, MAX_MONITOR_HEADER,
				   4, FALSE);
	if (!mpdu_nbuf) {
		qdf_err("%s: %d No memory to allocate mpdu_nbuf!!!!!\n",
			__func__, __LINE__);
		return;
	}

	usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);

	qdf_nbuf_queue_add(usr_mpdu_q,
			   mpdu_nbuf);
}

/**
 * dp_tx_mon_generate_data_frm() - API to generate data frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @take_ref: whether to take a reference on the payload fragment when
 *	      attaching it to the mpdu nbuf
 *
 * Return: void
 */
static void
dp_tx_mon_generate_data_frm(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info,
			    bool take_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	qdf_nbuf_t mpdu_nbuf = NULL;
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
	uint32_t usr_idx = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	tx_status_info = &tx_mon_be->data_status_info;
	usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
	/* payload is attached to the most recently queued mpdu */
	mpdu_nbuf = qdf_nbuf_queue_last(usr_mpdu_q);

	if (!mpdu_nbuf)
		QDF_BUG(0);

	tx_mon_be->stats.pkt_buf_processed++;

	/* add function to either copy or add frag to frag_list */
	qdf_nbuf_add_frag(pdev->soc->osdev,
			  TXMON_STATUS_INFO(tx_status_info, buffer),
			  mpdu_nbuf,
			  TXMON_STATUS_INFO(tx_status_info, offset),
			  TXMON_STATUS_INFO(tx_status_info, length),
			  DP_MON_DATA_BUFFER_SIZE,
			  take_ref, TXMON_NO_BUFFER_SZ);
}

/**
 * dp_tx_mon_generate_prot_frm() - API to generate protection frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_prot_frm(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info)
{
	struct dp_mon_pdev *mon_pdev;
	struct
dp_mon_pdev_be *mon_pdev_be; 1076 struct dp_pdev_tx_monitor_be *tx_mon_be; 1077 struct hal_tx_status_info *tx_status_info; 1078 1079 /* sanity check */ 1080 if (qdf_unlikely(!pdev)) 1081 return; 1082 1083 mon_pdev = pdev->monitor_pdev; 1084 if (qdf_unlikely(!mon_pdev)) 1085 return; 1086 1087 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1088 if (qdf_unlikely(!mon_pdev_be)) 1089 return; 1090 1091 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1092 tx_status_info = &tx_mon_be->prot_status_info; 1093 1094 /* update medium prot type from data */ 1095 TXMON_STATUS_INFO(tx_status_info, medium_prot_type) = 1096 tx_mon_be->data_status_info.medium_prot_type; 1097 1098 switch (TXMON_STATUS_INFO(tx_status_info, medium_prot_type)) { 1099 case TXMON_MEDIUM_NO_PROTECTION: 1100 { 1101 /* no protection frame - do nothing */ 1102 break; 1103 } 1104 case TXMON_MEDIUM_RTS_LEGACY: 1105 case TXMON_MEDIUM_RTS_11AC_STATIC_BW: 1106 case TXMON_MEDIUM_RTS_11AC_DYNAMIC_BW: 1107 { 1108 dp_tx_mon_generate_rts_frm(pdev, tx_ppdu_info, 1109 INITIATOR_WINDOW); 1110 break; 1111 } 1112 case TXMON_MEDIUM_CTS2SELF: 1113 { 1114 dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info, 1115 INITIATOR_WINDOW); 1116 break; 1117 } 1118 case TXMON_MEDIUM_QOS_NULL_NO_ACK_3ADDR: 1119 { 1120 dp_tx_mon_generate_3addr_qos_null_frm(pdev, tx_ppdu_info); 1121 break; 1122 } 1123 case TXMON_MEDIUM_QOS_NULL_NO_ACK_4ADDR: 1124 { 1125 dp_tx_mon_generate_4addr_qos_null_frm(pdev, tx_ppdu_info); 1126 break; 1127 } 1128 } 1129 } 1130 1131 /** 1132 * dp_tx_mon_generated_response_frm() - API to handle generated response frame 1133 * @pdev: pdev Handle 1134 * @tx_ppdu_info: pointer to tx ppdu info structure 1135 * 1136 * Return: QDF_STATUS 1137 */ 1138 static QDF_STATUS 1139 dp_tx_mon_generated_response_frm(struct dp_pdev *pdev, 1140 struct dp_tx_ppdu_info *tx_ppdu_info) 1141 { 1142 struct dp_mon_pdev *mon_pdev; 1143 struct dp_mon_pdev_be *mon_pdev_be; 1144 struct dp_pdev_tx_monitor_be *tx_mon_be; 1145 struct 
hal_tx_status_info *tx_status_info; 1146 QDF_STATUS status = QDF_STATUS_SUCCESS; 1147 uint8_t gen_response = 0; 1148 1149 /* sanity check */ 1150 if (qdf_unlikely(!pdev)) 1151 return QDF_STATUS_E_NOMEM; 1152 1153 mon_pdev = pdev->monitor_pdev; 1154 if (qdf_unlikely(!mon_pdev)) 1155 return QDF_STATUS_E_NOMEM; 1156 1157 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1158 if (qdf_unlikely(!mon_pdev_be)) 1159 return QDF_STATUS_E_NOMEM; 1160 1161 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1162 1163 tx_status_info = &tx_mon_be->data_status_info; 1164 gen_response = TXMON_STATUS_INFO(tx_status_info, generated_response); 1165 1166 switch (gen_response) { 1167 case TXMON_GEN_RESP_SELFGEN_ACK: 1168 { 1169 dp_tx_mon_generate_ack_frm(pdev, tx_ppdu_info, RESPONSE_WINDOW); 1170 break; 1171 } 1172 case TXMON_GEN_RESP_SELFGEN_CTS: 1173 { 1174 dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info, 1175 RESPONSE_WINDOW); 1176 break; 1177 } 1178 case TXMON_GEN_RESP_SELFGEN_BA: 1179 { 1180 dp_tx_mon_generate_block_ack_frm(pdev, tx_ppdu_info, 1181 RESPONSE_WINDOW); 1182 break; 1183 } 1184 case TXMON_GEN_RESP_SELFGEN_MBA: 1185 { 1186 dp_tx_mon_generate_mu_block_ack_frm(pdev, tx_ppdu_info, 1187 RESPONSE_WINDOW); 1188 break; 1189 } 1190 case TXMON_GEN_RESP_SELFGEN_CBF: 1191 { 1192 break; 1193 } 1194 case TXMON_GEN_RESP_SELFGEN_TRIG: 1195 { 1196 break; 1197 } 1198 case TXMON_GEN_RESP_SELFGEN_NDP_LMR: 1199 { 1200 break; 1201 } 1202 }; 1203 1204 return status; 1205 } 1206 1207 /** 1208 * dp_tx_mon_update_ppdu_info_status() - API to update frame as information 1209 * is stored only for that processing 1210 * 1211 * @pdev: pdev Handle 1212 * @tx_data_ppdu_info: pointer to data tx ppdu info 1213 * @tx_prot_ppdu_info: pointer to protection tx ppdu info 1214 * @tx_tlv_hdr: pointer to tx_tlv_hdr 1215 * @status_frag: pointer to fragment 1216 * @tlv_status: tlv status return from hal api 1217 * @mon_desc_list_ref: tx monitor descriptor list reference 1218 * 1219 * Return: QDF_STATUS 1220 */ 
1221 static QDF_STATUS 1222 dp_tx_mon_update_ppdu_info_status(struct dp_pdev *pdev, 1223 struct dp_tx_ppdu_info *tx_data_ppdu_info, 1224 struct dp_tx_ppdu_info *tx_prot_ppdu_info, 1225 void *tx_tlv_hdr, 1226 qdf_frag_t status_frag, 1227 uint32_t tlv_status, 1228 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1229 { 1230 struct dp_mon_pdev *mon_pdev; 1231 struct dp_mon_pdev_be *mon_pdev_be; 1232 struct dp_pdev_tx_monitor_be *tx_mon_be; 1233 struct hal_tx_status_info *tx_status_info; 1234 QDF_STATUS status = QDF_STATUS_SUCCESS; 1235 1236 /* sanity check */ 1237 if (qdf_unlikely(!pdev)) 1238 return QDF_STATUS_E_NOMEM; 1239 1240 mon_pdev = pdev->monitor_pdev; 1241 if (qdf_unlikely(!mon_pdev)) 1242 return QDF_STATUS_E_NOMEM; 1243 1244 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1245 if (qdf_unlikely(!mon_pdev_be)) 1246 return QDF_STATUS_E_NOMEM; 1247 1248 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1249 1250 switch (tlv_status) { 1251 case HAL_MON_TX_FES_SETUP: 1252 { 1253 /* 1254 * start of initiator window 1255 * 1256 * got number of user count from fes setup tlv 1257 */ 1258 break; 1259 } 1260 case HAL_MON_RX_RESPONSE_REQUIRED_INFO: 1261 { 1262 break; 1263 } 1264 case HAL_MON_TX_FES_STATUS_START_PROT: 1265 { 1266 /* update tsft to local */ 1267 break; 1268 } 1269 case HAL_MON_TX_FES_STATUS_START_PPDU: 1270 { 1271 /* update tsft to local */ 1272 break; 1273 } 1274 case HAL_MON_TX_FES_STATUS_PROT: 1275 { 1276 TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used) = 1; 1277 TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) = 1278 TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) << 1; 1279 1280 /* based on medium protection type we need to generate frame */ 1281 dp_tx_mon_generate_prot_frm(pdev, tx_prot_ppdu_info); 1282 break; 1283 } 1284 case HAL_MON_RX_FRAME_BITMAP_ACK: 1285 { 1286 break; 1287 } 1288 case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_256: 1289 case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_1K: 1290 { 1291 /* 1292 * this comes for each user 1293 * BlockAck is not same 
as ACK, single frame can hold 1294 * multiple BlockAck info 1295 */ 1296 tx_status_info = &tx_mon_be->data_status_info; 1297 1298 if (TXMON_PPDU_HAL(tx_data_ppdu_info, num_users)) 1299 dp_tx_mon_generate_block_ack_frm(pdev, 1300 tx_data_ppdu_info, 1301 INITIATOR_WINDOW); 1302 else 1303 dp_tx_mon_generate_mu_block_ack_frm(pdev, 1304 tx_data_ppdu_info, 1305 INITIATOR_WINDOW); 1306 1307 break; 1308 } 1309 case HAL_MON_TX_MPDU_START: 1310 { 1311 dp_tx_mon_alloc_mpdu(pdev, tx_data_ppdu_info); 1312 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1313 break; 1314 } 1315 case HAL_MON_TX_MSDU_START: 1316 { 1317 break; 1318 } 1319 case HAL_MON_TX_DATA: 1320 { 1321 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1322 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, true); 1323 break; 1324 } 1325 case HAL_MON_TX_BUFFER_ADDR: 1326 { 1327 struct hal_mon_packet_info *packet_info = NULL; 1328 struct dp_mon_desc *mon_desc = NULL; 1329 qdf_frag_t packet_buffer = NULL; 1330 uint32_t end_offset = 0; 1331 1332 tx_status_info = &tx_mon_be->data_status_info; 1333 /* update buffer from packet info */ 1334 packet_info = &TXMON_PPDU_HAL(tx_data_ppdu_info, packet_info); 1335 mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info->sw_cookie; 1336 1337 qdf_assert_always(mon_desc); 1338 1339 if (mon_desc->magic != DP_MON_DESC_MAGIC) 1340 qdf_assert_always(0); 1341 1342 qdf_assert_always(mon_desc->buf_addr); 1343 tx_mon_be->stats.pkt_buf_recv++; 1344 1345 if (!mon_desc->unmapped) { 1346 qdf_mem_unmap_page(pdev->soc->osdev, 1347 (qdf_dma_addr_t)mon_desc->paddr, 1348 DP_MON_DATA_BUFFER_SIZE, 1349 QDF_DMA_FROM_DEVICE); 1350 mon_desc->unmapped = 1; 1351 } 1352 1353 packet_buffer = mon_desc->buf_addr; 1354 mon_desc->buf_addr = NULL; 1355 1356 /* increment reap count */ 1357 mon_desc_list_ref->tx_mon_reap_cnt++; 1358 1359 /* add the mon_desc to free list */ 1360 dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list, 1361 &mon_desc_list_ref->tail, 1362 mon_desc); 1363 1364 
TXMON_STATUS_INFO(tx_status_info, buffer) = packet_buffer; 1365 TXMON_STATUS_INFO(tx_status_info, offset) = end_offset; 1366 TXMON_STATUS_INFO(tx_status_info, 1367 length) = packet_info->dma_length; 1368 1369 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1370 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, false); 1371 break; 1372 } 1373 case HAL_MON_TX_FES_STATUS_END: 1374 { 1375 break; 1376 } 1377 case HAL_MON_RESPONSE_END_STATUS_INFO: 1378 { 1379 dp_tx_mon_generated_response_frm(pdev, tx_data_ppdu_info); 1380 break; 1381 } 1382 case HAL_MON_TX_FES_STATUS_START: 1383 { 1384 /* update the medium protection type */ 1385 break; 1386 } 1387 case HAL_MON_TX_QUEUE_EXTENSION: 1388 { 1389 /* No action for Queue Extension TLV */ 1390 break; 1391 } 1392 case HAL_MON_TX_FW2SW: 1393 { 1394 /* update the frequency */ 1395 tx_status_info = &tx_mon_be->data_status_info; 1396 1397 TXMON_PPDU_COM(tx_data_ppdu_info, 1398 chan_freq) = TXMON_STATUS_INFO(tx_status_info, 1399 freq); 1400 TXMON_PPDU_COM(tx_prot_ppdu_info, 1401 chan_freq) = TXMON_STATUS_INFO(tx_status_info, 1402 freq); 1403 break; 1404 } 1405 default: 1406 { 1407 /* return or break in default case */ 1408 break; 1409 } 1410 }; 1411 1412 return status; 1413 } 1414 1415 QDF_STATUS 1416 dp_tx_process_pktlog_be(struct dp_soc *soc, struct dp_pdev *pdev, 1417 qdf_frag_t status_frag, uint32_t end_offset) 1418 { 1419 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1420 qdf_nbuf_t nbuf = NULL; 1421 enum WDI_EVENT pktlog_mode = WDI_NO_VAL; 1422 int frag_bytes; 1423 1424 if (!mon_pdev->pktlog_hybrid_mode) 1425 return QDF_STATUS_E_INVAL; 1426 1427 nbuf = qdf_nbuf_alloc(soc->osdev, MAX_DUMMY_FRM_BODY, 0, 4, FALSE); 1428 if (!nbuf) 1429 return QDF_STATUS_E_NOMEM; 1430 1431 qdf_nbuf_add_rx_frag(status_frag, nbuf, 0, 1432 (end_offset + 1), 1433 0, true); 1434 1435 if (mon_pdev->pktlog_hybrid_mode) 1436 pktlog_mode = WDI_EVENT_HYBRID_TX; 1437 1438 frag_bytes = qdf_nbuf_get_frag_len(nbuf, 0); 1439 if (pktlog_mode != WDI_NO_VAL) 
{ 1440 dp_wdi_event_handler(pktlog_mode, soc, 1441 nbuf, HTT_INVALID_PEER, 1442 WDI_NO_VAL, pdev->pdev_id); 1443 } 1444 qdf_nbuf_free(nbuf); 1445 1446 return QDF_STATUS_SUCCESS; 1447 } 1448 1449 /* 1450 * dp_tx_mon_process_tlv_2_0() - API to parse PPDU worth information 1451 * @pdev_handle: DP_PDEV handle 1452 * @mon_desc_list_ref: tx monitor descriptor list reference 1453 * 1454 * Return: status 1455 */ 1456 QDF_STATUS 1457 dp_tx_mon_process_tlv_2_0(struct dp_pdev *pdev, 1458 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1459 { 1460 struct dp_mon_pdev *mon_pdev; 1461 struct dp_mon_pdev_be *mon_pdev_be; 1462 struct dp_pdev_tx_monitor_be *tx_mon_be; 1463 struct dp_tx_ppdu_info *tx_prot_ppdu_info = NULL; 1464 struct dp_tx_ppdu_info *tx_data_ppdu_info = NULL; 1465 struct hal_tx_status_info *tx_status_prot; 1466 struct hal_tx_status_info *tx_status_data; 1467 qdf_frag_t status_frag = NULL; 1468 uint32_t end_offset = 0; 1469 uint32_t tlv_status; 1470 uint32_t status = QDF_STATUS_SUCCESS; 1471 uint8_t *tx_tlv; 1472 uint8_t *tx_tlv_start; 1473 uint8_t num_users = 0; 1474 uint8_t cur_frag_q_idx; 1475 bool schedule_wrq = false; 1476 1477 /* sanity check */ 1478 if (qdf_unlikely(!pdev)) 1479 return QDF_STATUS_E_NOMEM; 1480 1481 mon_pdev = pdev->monitor_pdev; 1482 if (qdf_unlikely(!mon_pdev)) 1483 return QDF_STATUS_E_NOMEM; 1484 1485 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1486 if (qdf_unlikely(!mon_pdev_be)) 1487 return QDF_STATUS_E_NOMEM; 1488 1489 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1490 cur_frag_q_idx = tx_mon_be->cur_frag_q_idx; 1491 1492 tx_status_prot = &tx_mon_be->prot_status_info; 1493 tx_status_data = &tx_mon_be->data_status_info; 1494 1495 tx_prot_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_PROT_PPDU_INFO, 1496 1, tx_mon_be->be_ppdu_id); 1497 1498 if (!tx_prot_ppdu_info) { 1499 dp_mon_info("tx prot ppdu info alloc got failed!!"); 1500 return QDF_STATUS_E_NOMEM; 1501 } 1502 1503 status_frag = 
tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf; 1504 end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset; 1505 tx_tlv = status_frag; 1506 dp_mon_debug("last_frag_q_idx: %d status_frag:%pK", 1507 tx_mon_be->last_frag_q_idx, status_frag); 1508 1509 /* get number of user from tlv window */ 1510 tlv_status = hal_txmon_status_get_num_users(pdev->soc->hal_soc, 1511 tx_tlv, &num_users); 1512 if (tlv_status == HAL_MON_TX_STATUS_PPDU_NOT_DONE || !num_users) { 1513 dp_mon_err("window open with tlv_tag[0x%x] num_users[%d]!\n", 1514 hal_tx_status_get_tlv_tag(tx_tlv), num_users); 1515 return QDF_STATUS_E_INVAL; 1516 } 1517 1518 /* allocate tx_data_ppdu_info based on num_users */ 1519 tx_data_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_DATA_PPDU_INFO, 1520 num_users, 1521 tx_mon_be->be_ppdu_id); 1522 if (!tx_data_ppdu_info) { 1523 dp_mon_info("tx prot ppdu info alloc got failed!!"); 1524 return QDF_STATUS_E_NOMEM; 1525 } 1526 1527 /* iterate status buffer queue */ 1528 while (tx_mon_be->cur_frag_q_idx < tx_mon_be->last_frag_q_idx) { 1529 /* get status buffer from frag_q_vec */ 1530 status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf; 1531 end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset; 1532 if (qdf_unlikely(!status_frag)) { 1533 dp_mon_err("status frag is NULL\n"); 1534 QDF_BUG(0); 1535 } 1536 1537 tx_tlv = status_frag; 1538 tx_tlv_start = tx_tlv; 1539 /* 1540 * parse each status buffer and populate the information to 1541 * dp_tx_ppdu_info 1542 */ 1543 do { 1544 tlv_status = hal_txmon_status_parse_tlv( 1545 pdev->soc->hal_soc, 1546 &tx_data_ppdu_info->hal_txmon, 1547 &tx_prot_ppdu_info->hal_txmon, 1548 tx_status_data, 1549 tx_status_prot, 1550 tx_tlv, status_frag); 1551 1552 status = 1553 dp_tx_mon_update_ppdu_info_status( 1554 pdev, 1555 tx_data_ppdu_info, 1556 tx_prot_ppdu_info, 1557 tx_tlv, 1558 status_frag, 1559 tlv_status, 1560 mon_desc_list_ref); 1561 1562 /* need api definition for hal_tx_status_get_next_tlv */ 1563 tx_tlv = 
hal_tx_status_get_next_tlv(tx_tlv); 1564 if ((tx_tlv - tx_tlv_start) >= end_offset) 1565 break; 1566 } while ((tx_tlv - tx_tlv_start) < end_offset); 1567 1568 /* 1569 * free status buffer after parsing 1570 * is status_frag mapped to mpdu if so make sure 1571 */ 1572 tx_mon_be->stats.status_buf_free++; 1573 qdf_frag_free(status_frag); 1574 tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf = NULL; 1575 tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset = 0; 1576 cur_frag_q_idx = ++tx_mon_be->cur_frag_q_idx; 1577 } 1578 1579 /* clear the unreleased frag array */ 1580 dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref); 1581 1582 if (TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used)) { 1583 if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info, 1584 chan_num))) { 1585 /* update channel number, if not fetched properly */ 1586 TXMON_PPDU_COM(tx_prot_ppdu_info, 1587 chan_num) = mon_pdev->mon_chan_num; 1588 } 1589 1590 if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info, 1591 chan_freq))) { 1592 /* update channel frequency, if not fetched properly */ 1593 TXMON_PPDU_COM(tx_prot_ppdu_info, 1594 chan_freq) = mon_pdev->mon_chan_freq; 1595 } 1596 1597 /* 1598 * add dp_tx_ppdu_info to pdev queue 1599 * for post processing 1600 * 1601 * TODO: add a threshold check and drop the ppdu info 1602 */ 1603 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1604 tx_mon_be->last_prot_ppdu_info = 1605 tx_mon_be->tx_prot_ppdu_info; 1606 STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue, 1607 tx_prot_ppdu_info, 1608 tx_ppdu_info_queue_elem); 1609 tx_mon_be->tx_ppdu_info_list_depth++; 1610 1611 tx_mon_be->tx_prot_ppdu_info = NULL; 1612 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1613 schedule_wrq = true; 1614 } else { 1615 dp_tx_mon_free_ppdu_info(tx_prot_ppdu_info, tx_mon_be); 1616 tx_mon_be->tx_prot_ppdu_info = NULL; 1617 tx_prot_ppdu_info = NULL; 1618 } 1619 1620 if (TXMON_PPDU_HAL(tx_data_ppdu_info, is_used)) { 1621 if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info, 1622 chan_num))) { 
1623 /* update channel number, if not fetched properly */ 1624 TXMON_PPDU_COM(tx_data_ppdu_info, 1625 chan_num) = mon_pdev->mon_chan_num; 1626 } 1627 1628 if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info, 1629 chan_freq))) { 1630 /* update channel frequency, if not fetched properly */ 1631 TXMON_PPDU_COM(tx_data_ppdu_info, 1632 chan_freq) = mon_pdev->mon_chan_freq; 1633 } 1634 1635 /* 1636 * add dp_tx_ppdu_info to pdev queue 1637 * for post processing 1638 * 1639 * TODO: add a threshold check and drop the ppdu info 1640 */ 1641 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1642 tx_mon_be->last_data_ppdu_info = 1643 tx_mon_be->tx_data_ppdu_info; 1644 STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue, 1645 tx_data_ppdu_info, 1646 tx_ppdu_info_queue_elem); 1647 tx_mon_be->tx_ppdu_info_list_depth++; 1648 1649 tx_mon_be->tx_data_ppdu_info = NULL; 1650 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1651 schedule_wrq = true; 1652 } else { 1653 dp_tx_mon_free_ppdu_info(tx_data_ppdu_info, tx_mon_be); 1654 tx_mon_be->tx_data_ppdu_info = NULL; 1655 tx_data_ppdu_info = NULL; 1656 } 1657 1658 if (schedule_wrq) 1659 qdf_queue_work(NULL, tx_mon_be->post_ppdu_workqueue, 1660 &tx_mon_be->post_ppdu_work); 1661 1662 return QDF_STATUS_SUCCESS; 1663 } 1664 1665 /** 1666 * dp_tx_mon_update_end_reason() - API to update end reason 1667 * 1668 * @mon_pdev - DP_MON_PDEV handle 1669 * @ppdu_id - ppdu_id 1670 * @end_reason - monitor destination descriptor end reason 1671 * 1672 * Return: void 1673 */ 1674 void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev, 1675 int ppdu_id, int end_reason) 1676 { 1677 struct dp_mon_pdev_be *mon_pdev_be; 1678 struct dp_pdev_tx_monitor_be *tx_mon_be; 1679 1680 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1681 if (qdf_unlikely(!mon_pdev_be)) 1682 return; 1683 1684 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1685 1686 tx_mon_be->be_end_reason_bitmap |= (1 << end_reason); 1687 } 1688 1689 /* 1690 * dp_tx_mon_process_status_tlv() - 
API to process TLVs
 * invoked from interrupt handler
 *
 * @soc - DP_SOC handle
 * @pdev - DP_PDEV handle
 * @mon_ring_desc - descriptor status info
 * @status_frag - status buffer frag address
 * @end_offset - end offset of buffer that has valid buffer
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Queues @status_frag into the per-ppdu frag queue; when the descriptor
 * signals end-of-ppdu, kicks full TLV parsing via
 * dp_tx_mon_process_tlv_2_0(). On any sanity failure the frag (and any
 * packet buffers referenced from it) is reaped and freed here.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_mon_process_status_tlv(struct dp_soc *soc,
			     struct dp_pdev *pdev,
			     struct hal_mon_desc *mon_ring_desc,
			     qdf_frag_t status_frag,
			     uint32_t end_offset,
			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
	uint8_t last_frag_q_idx = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		goto free_status_buffer;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		goto free_status_buffer;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		goto free_status_buffer;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	/*
	 * NOTE(review): this admits last_frag_q_idx == MAX_STATUS_BUFFER_IN_PPDU
	 * and frag_q_vec[last_frag_q_idx] is written below - if frag_q_vec
	 * holds exactly MAX_STATUS_BUFFER_IN_PPDU entries this is an
	 * out-of-bounds store; confirm the array size (>= should likely be
	 * used here).
	 */
	if (qdf_unlikely(tx_mon_be->last_frag_q_idx >
			 MAX_STATUS_BUFFER_IN_PPDU)) {
		dp_mon_err("status frag queue for a ppdu[%d] exceed %d\n",
			   tx_mon_be->be_ppdu_id,
			   MAX_STATUS_BUFFER_IN_PPDU);
		dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref);
		goto free_status_buffer;
	}

	/* monitor disabled and no lite-mon tx consumer: drop everything */
	if (tx_mon_be->mode == TX_MON_BE_DISABLE &&
	    !dp_lite_mon_is_tx_enabled(mon_pdev)) {
		dp_tx_mon_status_queue_free(pdev, tx_mon_be,
					    mon_desc_list_ref);
		goto free_status_buffer;
	}

	/*
	 * A new ppdu_id arrived while the previous ppdu's frags are still
	 * queued: the previous window never completed (flush, truncation,
	 * or a missing end-of-ppdu), so discard its queued status buffers.
	 */
	if (tx_mon_be->be_ppdu_id != mon_ring_desc->ppdu_id &&
	    tx_mon_be->last_frag_q_idx) {
		if (tx_mon_be->be_end_reason_bitmap &
		    (1 << HAL_MON_FLUSH_DETECTED)) {
			tx_mon_be->stats.ppdu_info_drop_flush++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		} else if (tx_mon_be->be_end_reason_bitmap &
			   (1 << HAL_MON_PPDU_TRUNCATED)) {
			tx_mon_be->stats.ppdu_info_drop_trunc++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		} else {
			dp_mon_err("End of ppdu not seen PID:%d cur_pid:%d idx:%d",
				   tx_mon_be->be_ppdu_id,
				   mon_ring_desc->ppdu_id,
				   tx_mon_be->last_frag_q_idx);
			/* schedule ppdu worth information */
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		}

		/* reset end reason bitmap */
		tx_mon_be->be_end_reason_bitmap = 0;
		tx_mon_be->last_frag_q_idx = 0;
		tx_mon_be->cur_frag_q_idx = 0;
	}

	tx_mon_be->be_ppdu_id = mon_ring_desc->ppdu_id;
	tx_mon_be->be_end_reason_bitmap |= (1 << mon_ring_desc->end_reason);

	last_frag_q_idx = tx_mon_be->last_frag_q_idx;

	/* enqueue this frag; it is freed after parsing or on queue flush */
	tx_mon_be->frag_q_vec[last_frag_q_idx].frag_buf = status_frag;
	tx_mon_be->frag_q_vec[last_frag_q_idx].end_offset = end_offset;
	tx_mon_be->last_frag_q_idx++;

	if (mon_ring_desc->end_reason == HAL_MON_END_OF_PPDU) {
		/* drop processing of tlv, if ppdu info list exceed threshold */
		if ((tx_mon_be->defer_ppdu_info_list_depth +
		     tx_mon_be->tx_ppdu_info_list_depth) >
		    MAX_PPDU_INFO_LIST_DEPTH) {
			tx_mon_be->stats.ppdu_info_drop_th++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
			return QDF_STATUS_E_PENDING;
		}

		if (dp_tx_mon_process_tlv_2_0(pdev,
					      mon_desc_list_ref) !=
		    QDF_STATUS_SUCCESS)
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
	}

	return QDF_STATUS_SUCCESS;

free_status_buffer:
	/* reap packet buffers referenced by this frag, then free the frag */
	dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
					 mon_desc_list_ref);
	if (qdf_likely(tx_mon_be))
		tx_mon_be->stats.status_buf_free++;

	qdf_frag_free(status_frag);

	return QDF_STATUS_E_NOMEM;
}

#else

/**
 * dp_tx_mon_process_status_tlv() - API to process TLVs
 * invoked from interrupt handler
 *
 * @soc - DP_SOC handle
 * @pdev - DP_PDEV handle
 * @mon_ring_desc - descriptor status info
 * @status_frag - status buffer frag address
 * @end_offset - end offset of buffer that has valid buffer
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Stub used when WLAN_TX_PKT_CAPTURE_ENH_BE/QCA_MONITOR_2_0_SUPPORT is
 * not compiled in: reaps the packet buffers referenced by the status
 * frag and frees the frag without any ppdu processing.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_mon_process_status_tlv(struct dp_soc *soc,
			     struct dp_pdev *pdev,
			     struct hal_mon_desc *mon_ring_desc,
			     qdf_frag_t status_frag,
			     uint32_t end_offset,
			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_INVAL;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return QDF_STATUS_E_INVAL;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return QDF_STATUS_E_INVAL;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
					 mon_desc_list_ref);
	tx_mon_be->stats.status_buf_free++;
	qdf_frag_free(status_frag);

	return QDF_STATUS_E_INVAL;
}

/**
 * dp_tx_mon_update_end_reason() - API to update end reason
 *
 * @mon_pdev - DP_MON_PDEV handle
 * @ppdu_id - ppdu_id
 * @end_reason - monitor destination descriptor end reason
 *
 * No-op stub for builds without BE tx capture support.
 *
 * Return: void
 */
void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev,
				 int ppdu_id, int end_reason)
{
}
#endif