/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include <dp_be.h>
#include <qdf_nbuf_frag.h>
#include <hal_be_api_mon.h>
#include <dp_mon.h>
#include <dp_tx_mon_2.0.h>
#include <dp_mon_2.0.h>
#include <dp_lite_mon.h>

#define MAX_PPDU_INFO_LIST_DEPTH 64

/**
 * dp_tx_mon_status_free_packet_buf() - API to free packet buffer
 * @pdev: pdev Handle
 * @status_frag: status frag
 * @end_offset: status fragment end offset
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Walks every TLV in @status_frag up to @end_offset; for each
 * MON_BUF_ADDR TLV found, the packet buffer it references is unmapped
 * (if still mapped), freed, and its monitor descriptor is appended to
 * the caller's free-descriptor list.
 *
 * Return: void
 */
void
dp_tx_mon_status_free_packet_buf(struct dp_pdev *pdev,
                                 qdf_frag_t status_frag, uint32_t end_offset,
                                 struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_mon_packet_info packet_info = {0};
        uint8_t *tx_tlv;
        uint8_t *mon_buf_tx_tlv;
        uint8_t *tx_tlv_start;

        /* sanity checks: pdev and its monitor context must exist */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;
        tx_tlv = status_frag;
        tx_tlv_start = tx_tlv;
        /*
         * parse each status buffer and find packet buffer in it
         */
        do {
                if (hal_txmon_is_mon_buf_addr_tlv(pdev->soc->hal_soc, tx_tlv)) {
                        struct dp_mon_desc *mon_desc = NULL;
                        qdf_frag_t packet_buffer = NULL;

                        /* packet info starts right after the 64-bit TLV header */
                        mon_buf_tx_tlv = ((uint8_t *)tx_tlv +
                                          HAL_RX_TLV64_HDR_SIZE);
                        hal_txmon_populate_packet_info(pdev->soc->hal_soc,
                                                       mon_buf_tx_tlv,
                                                       &packet_info);

                        /* sw_cookie carries the monitor descriptor pointer */
                        mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info.sw_cookie;

                        qdf_assert_always(mon_desc);

                        /* catch corrupted/stale descriptors early */
                        if (mon_desc->magic != DP_MON_DESC_MAGIC)
                                qdf_assert_always(0);

                        if (!mon_desc->unmapped) {
                                qdf_mem_unmap_page(pdev->soc->osdev,
                                                   (qdf_dma_addr_t)mon_desc->paddr,
                                                   DP_MON_DATA_BUFFER_SIZE,
                                                   QDF_DMA_FROM_DEVICE);
                                mon_desc->unmapped = 1;
                        }

                        packet_buffer = (qdf_frag_t)(mon_desc->buf_addr);
                        mon_desc->buf_addr = NULL;

                        qdf_assert_always(packet_buffer);
                        /* increment reap count */
                        mon_desc_list_ref->tx_mon_reap_cnt++;

                        /* add the mon_desc to free list */
                        dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
                                                     &mon_desc_list_ref->tail,
                                                     mon_desc);

                        tx_mon_be->stats.pkt_buf_recv++;
                        tx_mon_be->stats.pkt_buf_free++;

                        /* free buffer, mapped to descriptor */
                        qdf_frag_free(packet_buffer);
                }

                /* need api definition for hal_tx_status_get_next_tlv */
                tx_tlv = hal_tx_status_get_next_tlv(tx_tlv);
                /* assumes end_offset never exceeds the status frag
                 * length, otherwise the walk reads past the buffer -
                 * TODO(review): confirm against the caller */
        } while ((tx_tlv - tx_tlv_start) < end_offset);
}

#if defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(QCA_MONITOR_2_0_SUPPORT)
/**
 * dp_tx_mon_status_queue_free() - API to free status buffer
 * @pdev: pdev Handle
 * @tx_mon_be: pointer to tx_monitor_be
 * @mon_desc_list_ref: tx monitor
descriptor list reference
 *
 * Return: void
 */
static void
dp_tx_mon_status_queue_free(struct dp_pdev *pdev,
                            struct dp_pdev_tx_monitor_be *tx_mon_be,
                            struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
        uint8_t last_frag_q_idx = tx_mon_be->last_frag_q_idx;
        qdf_frag_t status_frag = NULL;
        uint8_t i = tx_mon_be->cur_frag_q_idx;
        uint32_t end_offset = 0;

        /* release every queued status fragment from cur to last index */
        for (; i < last_frag_q_idx; i++) {
                status_frag = tx_mon_be->frag_q_vec[i].frag_buf;

                if (qdf_unlikely(!status_frag))
                        continue;

                end_offset = tx_mon_be->frag_q_vec[i].end_offset;
                /* first free any packet buffers referenced by this frag */
                dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
                                                 mon_desc_list_ref);
                tx_mon_be->stats.status_buf_free++;
                qdf_frag_free(status_frag);
                tx_mon_be->frag_q_vec[i].frag_buf = NULL;
                tx_mon_be->frag_q_vec[i].end_offset = 0;
        }
        /* queue is now empty; reset both indices */
        tx_mon_be->last_frag_q_idx = 0;
        tx_mon_be->cur_frag_q_idx = 0;
}

/**
 * dp_tx_mon_enqueue_mpdu_nbuf() - API to enqueue nbuf from per user mpdu queue
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @user_id: user index
 * @mpdu_nbuf: nbuf to be enqueue
 *
 * Return: void
 */
static void
dp_tx_mon_enqueue_mpdu_nbuf(struct dp_pdev *pdev,
                            struct dp_tx_ppdu_info *tx_ppdu_info,
                            uint8_t user_id, qdf_nbuf_t mpdu_nbuf)
{
        qdf_nbuf_t radiotap = NULL;
        /* enqueue mpdu_nbuf to the per user mpdu_q */
        qdf_nbuf_queue_t *usr_mpdu_q = NULL;

        /* NOTE(review): QDF_BUG() may be compiled out in production
         * builds, in which case execution continues with an invalid
         * user state below - confirm intended behavior. */
        if (!TXMON_PPDU_HAL(tx_ppdu_info, rx_user_status) ||
            !TXMON_PPDU_HAL(tx_ppdu_info, num_users))
                QDF_BUG(0);

        usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user_id, mpdu_q);

        /* radiotap header lives in its own nbuf; the mpdu is chained
         * to it as an ext list below */
        radiotap = qdf_nbuf_alloc(pdev->soc->osdev, MAX_MONITOR_HEADER,
                                  MAX_MONITOR_HEADER,
                                  4, FALSE);
        if (qdf_unlikely(!radiotap)) {
                qdf_err("Unable to allocate radiotap buffer\n");
                qdf_nbuf_free(mpdu_nbuf);
                return;
        }

        /* append ext list */
        qdf_nbuf_append_ext_list(radiotap, mpdu_nbuf,
                                 qdf_nbuf_len(mpdu_nbuf));
        qdf_nbuf_queue_add(usr_mpdu_q, radiotap);
}

/*
 * TX MONITOR
 *
 * frame format
 * -------------------------------------------------------------------------
 *  FUNC  | ToDS | FromDS | ADDRESS 1 | ADDRESS 2 | ADDRESS 3 | ADDRESS 4 |
 * ------------------------------------------------------------------------
 * IBSS   |  0   |   0    |    DA     |    SA     |   BSSID   | NOT USED  |
 * TO AP  |  1   |   0    |   BSSID   |    SA     |    DA     | NOT USED  |
 * From AP|  0   |   1    |    DA     |   BSSID   |    SA     | NOT USED  |
 * WDS    |  1   |   1    |    RA     |    TA     |    DA     |    SA     |
 * ------------------------------------------------------------------------
 *
 * HOST GENERATED FRAME:
 * =====================
 * 1. RTS
 * 2. CTS
 * 3. ACK
 * 4. BA
 * 5. Multi STA BA
 *
 * control frame
 * ------------------------------------------------------------
 * | protocol 2b | Type 2b | subtype 4b | ToDS 1b | FromDS 1b |
 * | Morefrag 1b | Retry 1b | pwr_mgmt 1b | More data 1b |
 * | protected frm 1b | order 1b |
 * -----------------------------------------------------------
 * control frame originated from wireless station so ToDS = FromDS = 0,
 *
 * RTS
 * ---------------------------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | Transmit address 6 | FCS |
 * ---------------------------------------------------------------------------
 * subtype in FC is RTS - 1101
 * type in FC is control frame - 10
 *
 * CTS
 * --------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
 * --------------------------------------------------------
 * subtype in FC is CTS - 0011
 * type in FC is control frame - 10
 *
 * ACK
 * --------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
 * --------------------------------------------------------
 * subtype in FC is ACK - 1011
 * type in FC is control frame - 10
 *
 * Block ACK
 * --------------------------------------------------------------------------
 * | FC 2 | Dur 2 | RA 6 | TA 6 | BA CTRL 2 | BA Information variable | FCS |
 * --------------------------------------------------------------------------
 *
 * Block Ack control
 * ---------------------------------------------------------------
 * | BA ACK POLICY B0 | BA TYPE B1-B4 | Rsv B5-B11 | TID B12-B15 |
 * ---------------------------------------------------------------
 *
 * BA ack policy
 * 0 - Normal Ack
 * 1 - No Ack
 *
 * Block Ack Type
 * 0 - Reserved
 * 1 - extended compressed
 * 2 - compressed
 * 3 - Multi TID
 * 4-5 - Reserved
 * 6 - GCR
 * 7-9 - Reserved
 * 10 - GLK-GCR
 * 11 - Multi-STA
 * 12-15 - Reserved
 *
 * Block Ack information
 * ----------------------------------------------------------
 * | Block ack start seq ctrl 2 | Block ack bitmap variable |
 * ----------------------------------------------------------
 *
 * Multi STA Block Ack Information
 * -----------------------------------------------------------------
 * | Per STA TID info 2 | BA start seq ctrl 2 | BA bitmap variable |
 * -----------------------------------------------------------------
 *
 * Per STA TID info
 * ------------------------------------
 * | AID11 11b | Ack Type 1b | TID 4b |
 * ------------------------------------
 * AID11 - 2045 means unassociated STA, then ACK Type and TID 0, 15
 *
 * Mgmt/PS-POLL frame ack
 * Ack type - 1 and TID - 15, BA_seq_ctrl & BA_bitmap - not present
 *
 * All ack context - with no bitmap (all AMPDU success)
 * Ack type - 1 and TID - 14, BA_seq_ctrl & BA_bitmap - not present
 *
 * Block ack context
 * Ack type - 0 and TID - 0~7 BA_seq_ctrl & BA_bitmap - present
 *
 * Ack context
 * Ack type - 1 and TID - 0~7 BA_seq_ctrl &
BA_bitmap - not present
 *
 *
 */

/**
 * dp_tx_mon_generate_cts2self_frm() - API to generate cts2self frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_cts2self_frm(struct dp_pdev *pdev,
                                struct dp_tx_ppdu_info *tx_ppdu_info,
                                uint8_t window_flag)
{
        /* allocate and populate CTS/ CTS2SELF frame */
        /* enqueue 802.11 payload to per user mpdu_q */
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_tx_status_info *tx_status_info;
        uint16_t duration_le = 0;
        struct ieee80211_frame_min_one *wh_min = NULL;
        qdf_nbuf_t mpdu_nbuf = NULL;
        uint8_t frm_ctl;

        /* sanity check */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;

        /* initiator window uses protection status; responder uses data */
        if (window_flag == INITIATOR_WINDOW)
                tx_status_info = &tx_mon_be->prot_status_info;
        else
                tx_status_info = &tx_mon_be->data_status_info;

        /*
         * for radiotap we allocate new skb,
         * so we don't need reserver skb header
         */
        mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
        if (!mpdu_nbuf)
                return;

        wh_min = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);
        qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);

        frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
                   IEEE80211_FC0_SUBTYPE_CTS);
        TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
        TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
        wh_min->i_fc[1] = 0;
        wh_min->i_fc[0] = frm_ctl;

        /* duration is stored byte-wise, little endian */
        duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info,
                                                     duration));
        wh_min->i_dur[1] = (duration_le & 0xFF00) >> 8;
        wh_min->i_dur[0] = (duration_le & 0xFF);

        if (window_flag == INITIATOR_WINDOW) {
                qdf_mem_copy(wh_min->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr1),
                             QDF_MAC_ADDR_SIZE);
        } else {
                qdf_mem_copy(wh_min->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr2),
                             QDF_MAC_ADDR_SIZE);
        }

        qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
        dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
        TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_rts_frm() - API to generate rts frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_rts_frm(struct dp_pdev *pdev,
                           struct dp_tx_ppdu_info *tx_ppdu_info,
                           uint8_t window_flag)
{
        /* allocate and populate RTS frame */
        /* enqueue 802.11 payload to per user mpdu_q */
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_tx_status_info *tx_status_info;
        uint16_t duration_le = 0;
        struct ieee80211_ctlframe_addr2 *wh_min = NULL;
        qdf_nbuf_t mpdu_nbuf = NULL;
        uint8_t frm_ctl;

        /* sanity check */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;
        tx_status_info = &tx_mon_be->prot_status_info;
        /*
         * for radiotap we allocate new skb,
         * so we don't need reserver skb header
         */
        mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
        if (!mpdu_nbuf)
                return;

        wh_min = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
        qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);

        frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
                   IEEE80211_FC0_SUBTYPE_RTS);
        TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
        TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
        wh_min->i_fc[1] = 0;
        wh_min->i_fc[0] = frm_ctl;

        duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
        wh_min->i_aidordur[1] = (duration_le & 0xFF00) >> 8;
        wh_min->i_aidordur[0] = (duration_le & 0xFF);

        /* fall back to data status when no protection address recorded */
        if (!tx_status_info->protection_addr)
                tx_status_info = &tx_mon_be->data_status_info;

        if (window_flag == INITIATOR_WINDOW) {
                qdf_mem_copy(wh_min->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr1),
                             QDF_MAC_ADDR_SIZE);
                qdf_mem_copy(wh_min->i_addr2,
                             TXMON_STATUS_INFO(tx_status_info, addr2),
                             QDF_MAC_ADDR_SIZE);
        } else {
                /* responder window: swap RA/TA */
                qdf_mem_copy(wh_min->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr2),
                             QDF_MAC_ADDR_SIZE);
                qdf_mem_copy(wh_min->i_addr2,
                             TXMON_STATUS_INFO(tx_status_info, addr1),
                             QDF_MAC_ADDR_SIZE);
        }

        qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
        dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
        TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_ack_frm() - API to generate ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_ack_frm(struct dp_pdev *pdev,
                           struct dp_tx_ppdu_info *tx_ppdu_info,
                           uint8_t window_flag)
{
        /* allocate and populate ACK frame */
        /* enqueue 802.11 payload to per user mpdu_q */
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_tx_status_info *tx_status_info;
        struct ieee80211_frame_min_one *wh_addr1 = NULL;
        qdf_nbuf_t mpdu_nbuf = NULL;
        uint8_t
user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
        uint8_t frm_ctl;

        /* sanity check */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;
        tx_status_info = &tx_mon_be->data_status_info;
        /*
         * for radiotap we allocate new skb,
         * so we don't need reserver skb header
         */
        mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
        if (!mpdu_nbuf)
                return;

        wh_addr1 = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);

        frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
                   IEEE80211_FC0_SUBTYPE_ACK);
        TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
        TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
        wh_addr1->i_fc[1] = 0;
        wh_addr1->i_fc[0] = frm_ctl;

        /* RA depends on which window this ack is generated for */
        if (window_flag == INITIATOR_WINDOW) {
                qdf_mem_copy(wh_addr1->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr1),
                             QDF_MAC_ADDR_SIZE);
        } else {
                qdf_mem_copy(wh_addr1->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr2),
                             QDF_MAC_ADDR_SIZE);
        }

        /* set duration zero for ack frame */
        *(u_int16_t *)(&wh_addr1->i_dur) = qdf_cpu_to_le16(0x0000);

        qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr1));

        dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, user_id, mpdu_nbuf);
        TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_3addr_qos_null_frm() - API to generate
 * 3 address qosnull frame
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_3addr_qos_null_frm(struct dp_pdev *pdev,
                                      struct dp_tx_ppdu_info *tx_ppdu_info)
{
        /* allocate and populate 3 address qos null frame */
        /* enqueue 802.11 payload to per user mpdu_q */
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_tx_status_info *tx_status_info;
        struct ieee80211_qosframe *wh_addr3 = NULL;
        qdf_nbuf_t mpdu_nbuf = NULL;
        uint16_t duration_le = 0;
        uint8_t num_users = 0;
        uint8_t frm_ctl;

        /* sanity check */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;
        tx_status_info = &tx_mon_be->data_status_info;
        /*
         * for radiotap we allocate new skb,
         * so we don't need reserver skb header
         */
        mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
        if (!mpdu_nbuf)
                return;

        wh_addr3 = (struct ieee80211_qosframe *)qdf_nbuf_data(mpdu_nbuf);
        qdf_mem_zero(wh_addr3, sizeof(struct ieee80211_qosframe));

        frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
                   IEEE80211_FC0_SUBTYPE_QOS_NULL);
        TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
        TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
        wh_addr3->i_fc[1] = 0;
        wh_addr3->i_fc[0] = frm_ctl;

        /* duration is stored byte-wise, little endian */
        duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
        wh_addr3->i_dur[1] = (duration_le & 0xFF00) >> 8;
        wh_addr3->i_dur[0] = (duration_le & 0xFF);

        qdf_mem_copy(wh_addr3->i_addr1,
                     TXMON_STATUS_INFO(tx_status_info, addr1),
                     QDF_MAC_ADDR_SIZE);
        qdf_mem_copy(wh_addr3->i_addr2,
                     TXMON_STATUS_INFO(tx_status_info, addr2),
                     QDF_MAC_ADDR_SIZE);
        qdf_mem_copy(wh_addr3->i_addr3,
                     TXMON_STATUS_INFO(tx_status_info, addr3),
                     QDF_MAC_ADDR_SIZE);

        qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr3));
        /* num_users is 0 here: enqueue to the first user queue */
        dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
        TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_4addr_qos_null_frm() - API to generate
 * 4 address qos null frame
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_4addr_qos_null_frm(struct dp_pdev *pdev,
                                      struct dp_tx_ppdu_info *tx_ppdu_info)
{
        /* allocate and populate 4 address qos null frame */
        /* enqueue 802.11 payload to per user mpdu_q */
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_tx_status_info *tx_status_info;
        struct ieee80211_qosframe_addr4 *wh_addr4 = NULL;
        qdf_nbuf_t mpdu_nbuf = NULL;
        uint16_t duration_le = 0;
        uint8_t num_users = 0;
        uint8_t frm_ctl;

        /* sanity check */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;
        tx_status_info = &tx_mon_be->data_status_info;
        /*
         * for radiotap we allocate new skb,
         * so we don't need reserver skb header
         */
        mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
        if (!mpdu_nbuf)
                return;

        wh_addr4 = (struct ieee80211_qosframe_addr4 *)qdf_nbuf_data(mpdu_nbuf);
        qdf_mem_zero(wh_addr4, sizeof(struct ieee80211_qosframe_addr4));

        frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
                   IEEE80211_FC0_SUBTYPE_QOS_NULL);
        TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
        TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
        wh_addr4->i_fc[1] = 0;
        wh_addr4->i_fc[0] = frm_ctl;

        /* duration is stored byte-wise, little endian */
        duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
wh_addr4->i_dur[1] = (duration_le & 0xFF00) >> 8;
        wh_addr4->i_dur[0] = (duration_le & 0xFF);

        qdf_mem_copy(wh_addr4->i_addr1,
                     TXMON_STATUS_INFO(tx_status_info, addr1),
                     QDF_MAC_ADDR_SIZE);
        qdf_mem_copy(wh_addr4->i_addr2,
                     TXMON_STATUS_INFO(tx_status_info, addr2),
                     QDF_MAC_ADDR_SIZE);
        qdf_mem_copy(wh_addr4->i_addr3,
                     TXMON_STATUS_INFO(tx_status_info, addr3),
                     QDF_MAC_ADDR_SIZE);
        qdf_mem_copy(wh_addr4->i_addr4,
                     TXMON_STATUS_INFO(tx_status_info, addr4),
                     QDF_MAC_ADDR_SIZE);

        qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr4));
        /* num_users is 0 here: enqueue to the first user queue */
        dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
        TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/* BA control field size in bytes */
#define TXMON_BA_CTRL_SZ 2
/* per-STA TID info (2) + BA start seq ctrl (2) + bitmap words */
#define TXMON_BA_INFO_SZ(bitmap_sz) ((4 * (bitmap_sz)) + 6)
#define TXMON_MU_BA_ACK_FRAME_SZ(bitmap_sz) \
        (sizeof(struct ieee80211_ctlframe_addr2) +\
         TXMON_BA_CTRL_SZ + (bitmap_sz))

#define TXMON_BA_ACK_FRAME_SZ(bitmap_sz) \
        (sizeof(struct ieee80211_ctlframe_addr2) +\
         TXMON_BA_CTRL_SZ + TXMON_BA_INFO_SZ(bitmap_sz))

/**
 * dp_tx_mon_generate_mu_block_ack_frm() - API to generate MU block ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_mu_block_ack_frm(struct dp_pdev *pdev,
                                    struct dp_tx_ppdu_info *tx_ppdu_info,
                                    uint8_t window_flag)
{
        /* allocate and populate MU block ack frame */
        /* enqueue 802.11 payload to per user mpdu_q */
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_tx_status_info *tx_status_info;
        struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
        qdf_nbuf_t mpdu_nbuf = NULL;
        uint16_t ba_control = 0;
        uint8_t *frm = NULL;
        uint32_t ba_sz = 0;
        uint8_t num_users = TXMON_PPDU_HAL(tx_ppdu_info, num_users);
        uint8_t i = 0;
        uint8_t frm_ctl;

        /* sanity check */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;
        tx_status_info = &tx_mon_be->data_status_info;
        /* NOTE(review): the write loop below emits 4 + (4 << bitmap_sz)
         * bytes per user, but the size reserved here is
         * 4 << TXMON_BA_INFO_SZ(bitmap_sz), which is far larger.
         * Looks like heavy over-allocation (safe but wasteful) -
         * confirm intent before changing. */
        for (i = 0; i < num_users; i++)
                ba_sz += (4 << TXMON_BA_INFO_SZ(TXMON_PPDU_USR(tx_ppdu_info,
                                                               i,
                                                               ba_bitmap_sz)));

        /*
         * for multi sta block ack, do we need to increase the size
         * or copy info on subsequent frame offset
         *
         * for radiotap we allocate new skb,
         * so we don't need reserver skb header
         */
        mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                   TXMON_MU_BA_ACK_FRAME_SZ(ba_sz), 0, 4,
                                   FALSE);
        if (!mpdu_nbuf) {
                /* TODO: update status and break */
                return;
        }

        wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
        qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);

        frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
                   IEEE80211_FC0_BLOCK_ACK);
        TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
        TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
        wh_addr2->i_fc[1] = 0;
        wh_addr2->i_fc[0] = frm_ctl;

        /* duration field is zero for the generated BA */
        *(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0000);

        if (window_flag == RESPONSE_WINDOW) {
                qdf_mem_copy(wh_addr2->i_addr2,
                             TXMON_STATUS_INFO(tx_status_info, addr2),
                             QDF_MAC_ADDR_SIZE);
                /* multi-user response goes to the broadcast RA */
                if (num_users > 1)
                        qdf_mem_set(wh_addr2->i_addr1, QDF_MAC_ADDR_SIZE, 0xFF);
                else
                        qdf_mem_copy(wh_addr2->i_addr1,
                                     TXMON_STATUS_INFO(tx_status_info, addr1),
                                     QDF_MAC_ADDR_SIZE);
        } else {
                qdf_mem_copy(wh_addr2->i_addr2,
                             TXMON_STATUS_INFO(tx_status_info, addr1),
                             QDF_MAC_ADDR_SIZE);
                qdf_mem_copy(wh_addr2->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr2),
                             QDF_MAC_ADDR_SIZE);
        }

        frm = (uint8_t *)&wh_addr2[1];

        /* BA control */
        ba_control = 0x0016;
        *((uint16_t *)frm) = qdf_cpu_to_le16(ba_control);
        frm += 2;

        /* per-user: TID/AID info, start seq ctrl, then the bitmap */
        for (i = 0; i < num_users; i++) {
                *((uint16_t *)frm) =
                        qdf_cpu_to_le16((TXMON_PPDU_USR(tx_ppdu_info, i, tid) <<
                                         DP_IEEE80211_BAR_CTL_TID_S) |
                                        (TXMON_PPDU_USR(tx_ppdu_info, i,
                                                        aid) & 0x7FF));
                frm += 2;
                *((uint16_t *)frm) = qdf_cpu_to_le16(
                                TXMON_PPDU_USR(tx_ppdu_info, i, start_seq));
                frm += 2;
                qdf_mem_copy(frm,
                             TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap),
                             4 <<
                             TXMON_PPDU_USR(tx_ppdu_info,
                                            i, ba_bitmap_sz));
                frm += 4 << TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap_sz);
        }

        qdf_nbuf_set_pktlen(mpdu_nbuf,
                            (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));

        /* always enqueue to first active user */
        dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
        TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
        /* HE MU fields not required for Multi Sta Block ack frame */
        TXMON_PPDU_COM(tx_ppdu_info, he_mu_flags) = 0;
}

/**
 * dp_tx_mon_generate_block_ack_frm() - API to generate block ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window
 *
 * Return: void
 */
static void
dp_tx_mon_generate_block_ack_frm(struct dp_pdev *pdev,
                                 struct dp_tx_ppdu_info *tx_ppdu_info,
                                 uint8_t window_flag)
{
        /* allocate and populate block ack frame */
        /* enqueue 802.11 payload to per user mpdu_q */
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_tx_status_info *tx_status_info;
        struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
        qdf_nbuf_t mpdu_nbuf = NULL;
        uint8_t *frm = NULL;
        uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
        uint32_t ba_bitmap_sz = TXMON_PPDU_USR(tx_ppdu_info,
                                               user_id, ba_bitmap_sz);
uint8_t frm_ctl;

        /* sanity check */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;
        tx_status_info = &tx_mon_be->data_status_info;
        /*
         * for multi sta block ack, do we need to increase the size
         * or copy info on subsequent frame offset
         *
         * for radiotap we allocate new skb,
         * so we don't need reserver skb header
         */
        mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                   TXMON_BA_ACK_FRAME_SZ(ba_bitmap_sz),
                                   0, 4, FALSE);
        if (!mpdu_nbuf) {
                /* TODO: update status and break */
                return;
        }

        /*
         * BA CONTROL
         * fields required to construct block ack information
         * B0 - BA ACK POLICY
         *    0 - Normal ACK
         *    1 - No ACK
         * B1 - MULTI TID
         * B2 - COMPRESSED BITMAP
         *    B12
         *    00 - Basic block ack
         *    01 - Compressed block ack
         *    10 - Reserved
         *    11 - Multi tid block ack
         * B3-B11 - Reserved
         * B12-B15 - TID info
         *
         * BA INFORMATION
         * Per sta tid info
         *    AID: 11 bits
         *    ACK type: 1 bit
         *    TID: 4 bits
         *
         * BA SEQ CTRL
         *
         * BA bitmap
         *
         */

        wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
        qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);

        frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
                   IEEE80211_FC0_BLOCK_ACK);
        TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
        TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
        wh_addr2->i_fc[1] = 0;
        wh_addr2->i_fc[0] = frm_ctl;

        /* duration */
        *(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0020);

        /* NOTE(review): sibling generators compare window_flag against
         * INITIATOR_WINDOW/RESPONSE_WINDOW explicitly; the truthiness
         * test here assumes one of those values is zero - confirm. */
        if (window_flag) {
                qdf_mem_copy(wh_addr2->i_addr2,
                             TXMON_STATUS_INFO(tx_status_info, addr2),
                             QDF_MAC_ADDR_SIZE);
                qdf_mem_copy(wh_addr2->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr1),
                             QDF_MAC_ADDR_SIZE);
        } else {
                qdf_mem_copy(wh_addr2->i_addr2,
                             TXMON_STATUS_INFO(tx_status_info, addr1),
                             QDF_MAC_ADDR_SIZE);
                qdf_mem_copy(wh_addr2->i_addr1,
                             TXMON_STATUS_INFO(tx_status_info, addr2),
                             QDF_MAC_ADDR_SIZE);
        }

        frm = (uint8_t *)&wh_addr2[1];
        /* BA control */
        *((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
                                                            user_id,
                                                            ba_control));
        frm += 2;
        *((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
                                                            user_id,
                                                            start_seq));
        frm += 2;
        /* BA bitmap is (4 << ba_bitmap_sz) bytes */
        qdf_mem_copy(frm,
                     TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap),
                     4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));
        frm += (4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));

        qdf_nbuf_set_pktlen(mpdu_nbuf,
                            (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));

        dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);

        TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_alloc_mpdu() - API to allocate mpdu and add that current
 * user index
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_alloc_mpdu(struct dp_pdev *pdev, struct dp_tx_ppdu_info *tx_ppdu_info)
{
        qdf_nbuf_t mpdu_nbuf = NULL;
        qdf_nbuf_queue_t *usr_mpdu_q = NULL;
        uint32_t usr_idx = 0;

        /*
         * payload will be added as a frag to buffer
         * and we allocate new skb for radiotap header
         * we allocate a dummy buffer size
         */
        mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                   MAX_MONITOR_HEADER, MAX_MONITOR_HEADER,
                                   4, FALSE);
        if (!mpdu_nbuf) {
                qdf_err("%s: %d No memory to allocate mpdu_nbuf!!!!!\n",
                        __func__, __LINE__);
                return;
        }

        usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
        usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);

        qdf_nbuf_queue_add(usr_mpdu_q, mpdu_nbuf);
}

/**
 * dp_tx_mon_generate_data_frm() - API to generate data frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @take_ref: whether qdf_nbuf_add_frag should take a reference on the
 *            payload buffer
 *
 * Return: void
 */
static void
dp_tx_mon_generate_data_frm(struct dp_pdev *pdev,
                            struct dp_tx_ppdu_info *tx_ppdu_info,
                            bool take_ref)
{
        struct dp_mon_pdev *mon_pdev;
        struct dp_mon_pdev_be *mon_pdev_be;
        struct dp_pdev_tx_monitor_be *tx_mon_be;
        struct hal_tx_status_info *tx_status_info;
        qdf_nbuf_t mpdu_nbuf = NULL;
        qdf_nbuf_queue_t *usr_mpdu_q = NULL;
        uint32_t usr_idx = 0;

        /* sanity check */
        if (qdf_unlikely(!pdev))
                return;

        mon_pdev = pdev->monitor_pdev;
        if (qdf_unlikely(!mon_pdev))
                return;

        mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
        if (qdf_unlikely(!mon_pdev_be))
                return;

        tx_mon_be = &mon_pdev_be->tx_monitor_be;

        tx_status_info = &tx_mon_be->data_status_info;
        usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
        usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
        mpdu_nbuf = qdf_nbuf_queue_last(usr_mpdu_q);

        /* NOTE(review): if QDF_BUG() is compiled out, execution falls
         * through and mpdu_nbuf is dereferenced below - confirm
         * QDF_BUG semantics for production builds. */
        if (!mpdu_nbuf)
                QDF_BUG(0);

        tx_mon_be->stats.pkt_buf_processed++;

        /* add function to either copy or add frag to frag_list */
        qdf_nbuf_add_frag(pdev->soc->osdev,
                          TXMON_STATUS_INFO(tx_status_info, buffer),
                          mpdu_nbuf,
                          TXMON_STATUS_INFO(tx_status_info, offset),
                          TXMON_STATUS_INFO(tx_status_info, length),
                          DP_MON_DATA_BUFFER_SIZE,
                          take_ref, TXMON_NO_BUFFER_SZ);
}

/**
 * dp_tx_mon_generate_prot_frm() - API to generate protection frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: void
 */
static void
dp_tx_mon_generate_prot_frm(struct dp_pdev *pdev,
                            struct dp_tx_ppdu_info *tx_ppdu_info)
{
        struct dp_mon_pdev *mon_pdev;
        struct
dp_mon_pdev_be *mon_pdev_be; 1076 struct dp_pdev_tx_monitor_be *tx_mon_be; 1077 struct hal_tx_status_info *tx_status_info; 1078 1079 /* sanity check */ 1080 if (qdf_unlikely(!pdev)) 1081 return; 1082 1083 mon_pdev = pdev->monitor_pdev; 1084 if (qdf_unlikely(!mon_pdev)) 1085 return; 1086 1087 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1088 if (qdf_unlikely(!mon_pdev_be)) 1089 return; 1090 1091 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1092 tx_status_info = &tx_mon_be->prot_status_info; 1093 1094 /* update medium prot type from data */ 1095 TXMON_STATUS_INFO(tx_status_info, medium_prot_type) = 1096 tx_mon_be->data_status_info.medium_prot_type; 1097 1098 switch (TXMON_STATUS_INFO(tx_status_info, medium_prot_type)) { 1099 case TXMON_MEDIUM_NO_PROTECTION: 1100 { 1101 /* no protection frame - do nothing */ 1102 break; 1103 } 1104 case TXMON_MEDIUM_RTS_LEGACY: 1105 case TXMON_MEDIUM_RTS_11AC_STATIC_BW: 1106 case TXMON_MEDIUM_RTS_11AC_DYNAMIC_BW: 1107 { 1108 dp_tx_mon_generate_rts_frm(pdev, tx_ppdu_info, 1109 INITIATOR_WINDOW); 1110 break; 1111 } 1112 case TXMON_MEDIUM_CTS2SELF: 1113 { 1114 dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info, 1115 INITIATOR_WINDOW); 1116 break; 1117 } 1118 case TXMON_MEDIUM_QOS_NULL_NO_ACK_3ADDR: 1119 { 1120 dp_tx_mon_generate_3addr_qos_null_frm(pdev, tx_ppdu_info); 1121 break; 1122 } 1123 case TXMON_MEDIUM_QOS_NULL_NO_ACK_4ADDR: 1124 { 1125 dp_tx_mon_generate_4addr_qos_null_frm(pdev, tx_ppdu_info); 1126 break; 1127 } 1128 } 1129 } 1130 1131 /** 1132 * dp_tx_mon_generated_response_frm() - API to handle generated response frame 1133 * @pdev: pdev Handle 1134 * @tx_ppdu_info: pointer to tx ppdu info structure 1135 * 1136 * Return: QDF_STATUS 1137 */ 1138 static QDF_STATUS 1139 dp_tx_mon_generated_response_frm(struct dp_pdev *pdev, 1140 struct dp_tx_ppdu_info *tx_ppdu_info) 1141 { 1142 struct dp_mon_pdev *mon_pdev; 1143 struct dp_mon_pdev_be *mon_pdev_be; 1144 struct dp_pdev_tx_monitor_be *tx_mon_be; 1145 struct 
hal_tx_status_info *tx_status_info; 1146 QDF_STATUS status = QDF_STATUS_SUCCESS; 1147 uint8_t gen_response = 0; 1148 1149 /* sanity check */ 1150 if (qdf_unlikely(!pdev)) 1151 return QDF_STATUS_E_NOMEM; 1152 1153 mon_pdev = pdev->monitor_pdev; 1154 if (qdf_unlikely(!mon_pdev)) 1155 return QDF_STATUS_E_NOMEM; 1156 1157 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1158 if (qdf_unlikely(!mon_pdev_be)) 1159 return QDF_STATUS_E_NOMEM; 1160 1161 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1162 1163 tx_status_info = &tx_mon_be->data_status_info; 1164 gen_response = TXMON_STATUS_INFO(tx_status_info, generated_response); 1165 1166 switch (gen_response) { 1167 case TXMON_GEN_RESP_SELFGEN_ACK: 1168 { 1169 dp_tx_mon_generate_ack_frm(pdev, tx_ppdu_info, RESPONSE_WINDOW); 1170 break; 1171 } 1172 case TXMON_GEN_RESP_SELFGEN_CTS: 1173 { 1174 dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info, 1175 RESPONSE_WINDOW); 1176 break; 1177 } 1178 case TXMON_GEN_RESP_SELFGEN_BA: 1179 { 1180 dp_tx_mon_generate_block_ack_frm(pdev, tx_ppdu_info, 1181 RESPONSE_WINDOW); 1182 break; 1183 } 1184 case TXMON_GEN_RESP_SELFGEN_MBA: 1185 { 1186 dp_tx_mon_generate_mu_block_ack_frm(pdev, tx_ppdu_info, 1187 RESPONSE_WINDOW); 1188 break; 1189 } 1190 case TXMON_GEN_RESP_SELFGEN_CBF: 1191 { 1192 break; 1193 } 1194 case TXMON_GEN_RESP_SELFGEN_TRIG: 1195 { 1196 break; 1197 } 1198 case TXMON_GEN_RESP_SELFGEN_NDP_LMR: 1199 { 1200 break; 1201 } 1202 }; 1203 1204 return status; 1205 } 1206 1207 /** 1208 * dp_tx_mon_update_ppdu_info_status() - API to update frame as information 1209 * is stored only for that processing 1210 * 1211 * @pdev: pdev Handle 1212 * @tx_data_ppdu_info: pointer to data tx ppdu info 1213 * @tx_prot_ppdu_info: pointer to protection tx ppdu info 1214 * @tx_tlv_hdr: pointer to tx_tlv_hdr 1215 * @status_frag: pointer to fragment 1216 * @tlv_status: tlv status return from hal api 1217 * @mon_desc_list_ref: tx monitor descriptor list reference 1218 * 1219 * Return: QDF_STATUS 1220 */ 
1221 static QDF_STATUS 1222 dp_tx_mon_update_ppdu_info_status(struct dp_pdev *pdev, 1223 struct dp_tx_ppdu_info *tx_data_ppdu_info, 1224 struct dp_tx_ppdu_info *tx_prot_ppdu_info, 1225 void *tx_tlv_hdr, 1226 qdf_frag_t status_frag, 1227 uint32_t tlv_status, 1228 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1229 { 1230 struct dp_mon_pdev *mon_pdev; 1231 struct dp_mon_pdev_be *mon_pdev_be; 1232 struct dp_pdev_tx_monitor_be *tx_mon_be; 1233 struct hal_tx_status_info *tx_status_info; 1234 QDF_STATUS status = QDF_STATUS_SUCCESS; 1235 1236 /* sanity check */ 1237 if (qdf_unlikely(!pdev)) 1238 return QDF_STATUS_E_NOMEM; 1239 1240 mon_pdev = pdev->monitor_pdev; 1241 if (qdf_unlikely(!mon_pdev)) 1242 return QDF_STATUS_E_NOMEM; 1243 1244 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1245 if (qdf_unlikely(!mon_pdev_be)) 1246 return QDF_STATUS_E_NOMEM; 1247 1248 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1249 1250 switch (tlv_status) { 1251 case HAL_MON_TX_FES_SETUP: 1252 { 1253 /* 1254 * start of initiator window 1255 * 1256 * got number of user count from fes setup tlv 1257 */ 1258 break; 1259 } 1260 case HAL_MON_RX_RESPONSE_REQUIRED_INFO: 1261 { 1262 break; 1263 } 1264 case HAL_MON_TX_FES_STATUS_START_PROT: 1265 { 1266 /* update tsft to local */ 1267 break; 1268 } 1269 case HAL_MON_TX_FES_STATUS_START_PPDU: 1270 { 1271 /* update tsft to local */ 1272 break; 1273 } 1274 case HAL_MON_TX_FES_STATUS_PROT: 1275 { 1276 TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used) = 1; 1277 TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) = 1278 TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) << 1; 1279 1280 /* based on medium protection type we need to generate frame */ 1281 dp_tx_mon_generate_prot_frm(pdev, tx_prot_ppdu_info); 1282 break; 1283 } 1284 case HAL_MON_RX_FRAME_BITMAP_ACK: 1285 { 1286 break; 1287 } 1288 case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_256: 1289 case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_1K: 1290 { 1291 /* 1292 * this comes for each user 1293 * BlockAck is not same 
as ACK, single frame can hold 1294 * multiple BlockAck info 1295 */ 1296 tx_status_info = &tx_mon_be->data_status_info; 1297 1298 if (TXMON_PPDU_HAL(tx_data_ppdu_info, num_users)) 1299 dp_tx_mon_generate_block_ack_frm(pdev, 1300 tx_data_ppdu_info, 1301 INITIATOR_WINDOW); 1302 else 1303 dp_tx_mon_generate_mu_block_ack_frm(pdev, 1304 tx_data_ppdu_info, 1305 INITIATOR_WINDOW); 1306 1307 break; 1308 } 1309 case HAL_MON_TX_MPDU_START: 1310 { 1311 dp_tx_mon_alloc_mpdu(pdev, tx_data_ppdu_info); 1312 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1313 break; 1314 } 1315 case HAL_MON_TX_MSDU_START: 1316 { 1317 break; 1318 } 1319 case HAL_MON_TX_DATA: 1320 { 1321 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1322 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, true); 1323 break; 1324 } 1325 case HAL_MON_TX_BUFFER_ADDR: 1326 { 1327 struct hal_mon_packet_info *packet_info = NULL; 1328 struct dp_mon_desc *mon_desc = NULL; 1329 qdf_frag_t packet_buffer = NULL; 1330 uint32_t end_offset = 0; 1331 1332 tx_status_info = &tx_mon_be->data_status_info; 1333 /* update buffer from packet info */ 1334 packet_info = &TXMON_PPDU_HAL(tx_data_ppdu_info, packet_info); 1335 mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info->sw_cookie; 1336 1337 qdf_assert_always(mon_desc); 1338 1339 if (mon_desc->magic != DP_MON_DESC_MAGIC) 1340 qdf_assert_always(0); 1341 1342 qdf_assert_always(mon_desc->buf_addr); 1343 tx_mon_be->stats.pkt_buf_recv++; 1344 1345 if (!mon_desc->unmapped) { 1346 qdf_mem_unmap_page(pdev->soc->osdev, 1347 (qdf_dma_addr_t)mon_desc->paddr, 1348 DP_MON_DATA_BUFFER_SIZE, 1349 QDF_DMA_FROM_DEVICE); 1350 mon_desc->unmapped = 1; 1351 } 1352 1353 packet_buffer = mon_desc->buf_addr; 1354 mon_desc->buf_addr = NULL; 1355 1356 /* increment reap count */ 1357 mon_desc_list_ref->tx_mon_reap_cnt++; 1358 1359 /* add the mon_desc to free list */ 1360 dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list, 1361 &mon_desc_list_ref->tail, 1362 mon_desc); 1363 1364 
TXMON_STATUS_INFO(tx_status_info, buffer) = packet_buffer; 1365 TXMON_STATUS_INFO(tx_status_info, offset) = end_offset; 1366 TXMON_STATUS_INFO(tx_status_info, 1367 length) = packet_info->dma_length; 1368 1369 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1370 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, false); 1371 break; 1372 } 1373 case HAL_MON_TX_FES_STATUS_END: 1374 { 1375 break; 1376 } 1377 case HAL_MON_RESPONSE_END_STATUS_INFO: 1378 { 1379 dp_tx_mon_generated_response_frm(pdev, tx_data_ppdu_info); 1380 break; 1381 } 1382 case HAL_MON_TX_FES_STATUS_START: 1383 { 1384 /* update the medium protection type */ 1385 break; 1386 } 1387 case HAL_MON_TX_QUEUE_EXTENSION: 1388 { 1389 /* No action for Queue Extension TLV */ 1390 break; 1391 } 1392 default: 1393 { 1394 /* return or break in default case */ 1395 break; 1396 } 1397 }; 1398 1399 return status; 1400 } 1401 1402 /* 1403 * dp_tx_mon_process_tlv_2_0() - API to parse PPDU worth information 1404 * @pdev_handle: DP_PDEV handle 1405 * @mon_desc_list_ref: tx monitor descriptor list reference 1406 * 1407 * Return: status 1408 */ 1409 QDF_STATUS 1410 dp_tx_mon_process_tlv_2_0(struct dp_pdev *pdev, 1411 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1412 { 1413 struct dp_mon_pdev *mon_pdev; 1414 struct dp_mon_pdev_be *mon_pdev_be; 1415 struct dp_pdev_tx_monitor_be *tx_mon_be; 1416 struct dp_tx_ppdu_info *tx_prot_ppdu_info = NULL; 1417 struct dp_tx_ppdu_info *tx_data_ppdu_info = NULL; 1418 struct hal_tx_status_info *tx_status_prot; 1419 struct hal_tx_status_info *tx_status_data; 1420 qdf_frag_t status_frag = NULL; 1421 uint32_t end_offset = 0; 1422 uint32_t tlv_status; 1423 uint32_t status = QDF_STATUS_SUCCESS; 1424 uint8_t *tx_tlv; 1425 uint8_t *tx_tlv_start; 1426 uint8_t num_users = 0; 1427 uint8_t cur_frag_q_idx; 1428 bool schedule_wrq = false; 1429 1430 /* sanity check */ 1431 if (qdf_unlikely(!pdev)) 1432 return QDF_STATUS_E_NOMEM; 1433 1434 mon_pdev = pdev->monitor_pdev; 1435 if (qdf_unlikely(!mon_pdev)) 
1436 return QDF_STATUS_E_NOMEM; 1437 1438 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1439 if (qdf_unlikely(!mon_pdev_be)) 1440 return QDF_STATUS_E_NOMEM; 1441 1442 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1443 cur_frag_q_idx = tx_mon_be->cur_frag_q_idx; 1444 1445 tx_status_prot = &tx_mon_be->prot_status_info; 1446 tx_status_data = &tx_mon_be->data_status_info; 1447 1448 tx_prot_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_PROT_PPDU_INFO, 1449 1, tx_mon_be->be_ppdu_id); 1450 1451 if (!tx_prot_ppdu_info) { 1452 dp_mon_info("tx prot ppdu info alloc got failed!!"); 1453 return QDF_STATUS_E_NOMEM; 1454 } 1455 1456 status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf; 1457 end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset; 1458 tx_tlv = status_frag; 1459 dp_mon_debug("last_frag_q_idx: %d status_frag:%pK", 1460 tx_mon_be->last_frag_q_idx, status_frag); 1461 1462 /* get number of user from tlv window */ 1463 tlv_status = hal_txmon_status_get_num_users(pdev->soc->hal_soc, 1464 tx_tlv, &num_users); 1465 if (tlv_status == HAL_MON_TX_STATUS_PPDU_NOT_DONE || !num_users) { 1466 dp_mon_err("window open with tlv_tag[0x%x] num_users[%d]!\n", 1467 hal_tx_status_get_tlv_tag(tx_tlv), num_users); 1468 return QDF_STATUS_E_INVAL; 1469 } 1470 1471 /* allocate tx_data_ppdu_info based on num_users */ 1472 tx_data_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_DATA_PPDU_INFO, 1473 num_users, 1474 tx_mon_be->be_ppdu_id); 1475 if (!tx_data_ppdu_info) { 1476 dp_mon_info("tx prot ppdu info alloc got failed!!"); 1477 return QDF_STATUS_E_NOMEM; 1478 } 1479 1480 /* iterate status buffer queue */ 1481 while (tx_mon_be->cur_frag_q_idx < tx_mon_be->last_frag_q_idx) { 1482 /* get status buffer from frag_q_vec */ 1483 status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf; 1484 end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset; 1485 if (qdf_unlikely(!status_frag)) { 1486 dp_mon_err("status frag is NULL\n"); 1487 QDF_BUG(0); 1488 } 1489 1490 tx_tlv = 
status_frag; 1491 tx_tlv_start = tx_tlv; 1492 /* 1493 * parse each status buffer and populate the information to 1494 * dp_tx_ppdu_info 1495 */ 1496 do { 1497 tlv_status = hal_txmon_status_parse_tlv( 1498 pdev->soc->hal_soc, 1499 &tx_data_ppdu_info->hal_txmon, 1500 &tx_prot_ppdu_info->hal_txmon, 1501 tx_status_data, 1502 tx_status_prot, 1503 tx_tlv, status_frag); 1504 1505 status = 1506 dp_tx_mon_update_ppdu_info_status( 1507 pdev, 1508 tx_data_ppdu_info, 1509 tx_prot_ppdu_info, 1510 tx_tlv, 1511 status_frag, 1512 tlv_status, 1513 mon_desc_list_ref); 1514 1515 /* need api definition for hal_tx_status_get_next_tlv */ 1516 tx_tlv = hal_tx_status_get_next_tlv(tx_tlv); 1517 if ((tx_tlv - tx_tlv_start) >= end_offset) 1518 break; 1519 } while ((tx_tlv - tx_tlv_start) < end_offset); 1520 1521 /* 1522 * free status buffer after parsing 1523 * is status_frag mapped to mpdu if so make sure 1524 */ 1525 tx_mon_be->stats.status_buf_free++; 1526 qdf_frag_free(status_frag); 1527 tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf = NULL; 1528 tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset = 0; 1529 cur_frag_q_idx = ++tx_mon_be->cur_frag_q_idx; 1530 } 1531 1532 /* clear the unreleased frag array */ 1533 dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref); 1534 1535 if (TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used)) { 1536 if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info, 1537 chan_num))) { 1538 /* update channel number, if not fetched properly */ 1539 TXMON_PPDU_COM(tx_prot_ppdu_info, 1540 chan_num) = mon_pdev->mon_chan_num; 1541 } 1542 1543 if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info, 1544 chan_freq))) { 1545 /* update channel frequency, if not fetched properly */ 1546 TXMON_PPDU_COM(tx_prot_ppdu_info, 1547 chan_freq) = mon_pdev->mon_chan_freq; 1548 } 1549 1550 /* 1551 * add dp_tx_ppdu_info to pdev queue 1552 * for post processing 1553 * 1554 * TODO: add a threshold check and drop the ppdu info 1555 */ 1556 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1557 
tx_mon_be->last_prot_ppdu_info = 1558 tx_mon_be->tx_prot_ppdu_info; 1559 STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue, 1560 tx_prot_ppdu_info, 1561 tx_ppdu_info_queue_elem); 1562 tx_mon_be->tx_ppdu_info_list_depth++; 1563 1564 tx_mon_be->tx_prot_ppdu_info = NULL; 1565 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1566 schedule_wrq = true; 1567 } else { 1568 dp_tx_mon_free_ppdu_info(tx_prot_ppdu_info, tx_mon_be); 1569 tx_mon_be->tx_prot_ppdu_info = NULL; 1570 tx_prot_ppdu_info = NULL; 1571 } 1572 1573 if (TXMON_PPDU_HAL(tx_data_ppdu_info, is_used)) { 1574 if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info, 1575 chan_num))) { 1576 /* update channel number, if not fetched properly */ 1577 TXMON_PPDU_COM(tx_data_ppdu_info, 1578 chan_num) = mon_pdev->mon_chan_num; 1579 } 1580 1581 if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info, 1582 chan_freq))) { 1583 /* update channel frequency, if not fetched properly */ 1584 TXMON_PPDU_COM(tx_data_ppdu_info, 1585 chan_freq) = mon_pdev->mon_chan_freq; 1586 } 1587 1588 /* 1589 * add dp_tx_ppdu_info to pdev queue 1590 * for post processing 1591 * 1592 * TODO: add a threshold check and drop the ppdu info 1593 */ 1594 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1595 tx_mon_be->last_data_ppdu_info = 1596 tx_mon_be->tx_data_ppdu_info; 1597 STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue, 1598 tx_data_ppdu_info, 1599 tx_ppdu_info_queue_elem); 1600 tx_mon_be->tx_ppdu_info_list_depth++; 1601 1602 tx_mon_be->tx_data_ppdu_info = NULL; 1603 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1604 schedule_wrq = true; 1605 } else { 1606 dp_tx_mon_free_ppdu_info(tx_data_ppdu_info, tx_mon_be); 1607 tx_mon_be->tx_data_ppdu_info = NULL; 1608 tx_data_ppdu_info = NULL; 1609 } 1610 1611 if (schedule_wrq) 1612 qdf_queue_work(NULL, tx_mon_be->post_ppdu_workqueue, 1613 &tx_mon_be->post_ppdu_work); 1614 1615 return QDF_STATUS_SUCCESS; 1616 } 1617 1618 /** 1619 * dp_tx_mon_update_end_reason() - API to update end reason 1620 * 1621 * 
 * @mon_pdev - DP_MON_PDEV handle
 * @ppdu_id - ppdu_id
 * @end_reason - monitor destination descriptor end reason
 *
 * Return: void
 */
void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev,
				 int ppdu_id, int end_reason)
{
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	/* record this destination-ring end reason as a bit for the PPDU */
	tx_mon_be->be_end_reason_bitmap |= (1 << end_reason);
}

/*
 * dp_tx_mon_process_status_tlv() - API to processed TLV
 * invoked from interrupt handler
 *
 * @soc - DP_SOC handle
 * @pdev - DP_PDEV handle
 * @mon_ring_desc - descriptor status info
 * @addr - status buffer frag address
 * @end_offset - end offset of buffer that has valid buffer
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Queues the status fragment for the current PPDU; when the end-of-PPDU
 * descriptor arrives, triggers full TLV processing. On any sanity
 * failure, reaps packet buffers referenced by the fragment and frees it.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_mon_process_status_tlv(struct dp_soc *soc,
			     struct dp_pdev *pdev,
			     struct hal_mon_desc *mon_ring_desc,
			     qdf_frag_t status_frag,
			     uint32_t end_offset,
			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
	uint8_t last_frag_q_idx = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		goto free_status_buffer;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		goto free_status_buffer;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		goto free_status_buffer;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	/*
	 * NOTE(review): bound check uses '>' against
	 * MAX_STATUS_BUFFER_IN_PPDU while the index is post-incremented
	 * below — confirm frag_q_vec is sized MAX_STATUS_BUFFER_IN_PPDU + 1,
	 * otherwise this looks off by one.
	 */
	if (qdf_unlikely(tx_mon_be->last_frag_q_idx >
			 MAX_STATUS_BUFFER_IN_PPDU)) {
		dp_mon_err("status frag queue for a ppdu[%d] exceed %d\n",
			   tx_mon_be->be_ppdu_id,
			   MAX_STATUS_BUFFER_IN_PPDU);
		dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref);
		goto free_status_buffer;
	}

	/* monitor disabled and no lite-mon tx: drop everything queued */
	if (tx_mon_be->mode == TX_MON_BE_DISABLE &&
	    !dp_lite_mon_is_tx_enabled(mon_pdev)) {
		dp_tx_mon_status_queue_free(pdev, tx_mon_be,
					    mon_desc_list_ref);
		goto free_status_buffer;
	}

	/*
	 * New ppdu_id while fragments of the previous PPDU are still
	 * queued: the previous PPDU ended abnormally (flush/truncation)
	 * or its end was never seen — account and discard it.
	 */
	if (tx_mon_be->be_ppdu_id != mon_ring_desc->ppdu_id &&
	    tx_mon_be->last_frag_q_idx) {
		if (tx_mon_be->be_end_reason_bitmap &
		    (1 << HAL_MON_FLUSH_DETECTED)) {
			tx_mon_be->stats.ppdu_info_drop_flush++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		} else if (tx_mon_be->be_end_reason_bitmap &
			   (1 << HAL_MON_PPDU_TRUNCATED)) {
			tx_mon_be->stats.ppdu_info_drop_trunc++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		} else {
			dp_mon_err("End of ppdu not seen PID:%d cur_pid:%d idx:%d",
				   tx_mon_be->be_ppdu_id,
				   mon_ring_desc->ppdu_id,
				   tx_mon_be->last_frag_q_idx);
			/* schedule ppdu worth information */
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		}

		/* reset end reason bitmap */
		tx_mon_be->be_end_reason_bitmap = 0;
		tx_mon_be->last_frag_q_idx = 0;
		tx_mon_be->cur_frag_q_idx = 0;
	}

	tx_mon_be->be_ppdu_id = mon_ring_desc->ppdu_id;
	tx_mon_be->be_end_reason_bitmap |= (1 << mon_ring_desc->end_reason);

	/* queue this status fragment; ownership passes to frag_q_vec */
	last_frag_q_idx = tx_mon_be->last_frag_q_idx;

	tx_mon_be->frag_q_vec[last_frag_q_idx].frag_buf = status_frag;
	tx_mon_be->frag_q_vec[last_frag_q_idx].end_offset = end_offset;
	tx_mon_be->last_frag_q_idx++;

	if (mon_ring_desc->end_reason == HAL_MON_END_OF_PPDU) {
		/* drop processing of tlv, if ppdu info list exceed threshold */
		if ((tx_mon_be->defer_ppdu_info_list_depth +
		     tx_mon_be->tx_ppdu_info_list_depth) >
		    MAX_PPDU_INFO_LIST_DEPTH) {
			tx_mon_be->stats.ppdu_info_drop_th++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
			return QDF_STATUS_E_PENDING;
		}

		if (dp_tx_mon_process_tlv_2_0(pdev,
					      mon_desc_list_ref) !=
		    QDF_STATUS_SUCCESS)
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
	}

	return QDF_STATUS_SUCCESS;

free_status_buffer:
	/* reap any packet buffers the fragment points at, then free it */
	dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
					 mon_desc_list_ref);
	if (qdf_likely(tx_mon_be))
		tx_mon_be->stats.status_buf_free++;

	qdf_frag_free(status_frag);

	return QDF_STATUS_E_NOMEM;
}

#else

/**
 * dp_tx_mon_process_status_tlv() - API to processed TLV
 * invoked from interrupt handler
 *
 * @soc - DP_SOC handle
 * @pdev - DP_PDEV handle
 * @mon_ring_desc - descriptor status info
 * @addr - status buffer frag address
 * @end_offset - end offset of buffer that has valid buffer
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Stub for builds without TX capture: reaps and frees the fragment.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_mon_process_status_tlv(struct dp_soc *soc,
			     struct dp_pdev *pdev,
			     struct hal_mon_desc *mon_ring_desc,
			     qdf_frag_t status_frag,
			     uint32_t end_offset,
			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_INVAL;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return QDF_STATUS_E_INVAL;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return QDF_STATUS_E_INVAL;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
					 mon_desc_list_ref);
	tx_mon_be->stats.status_buf_free++;
	qdf_frag_free(status_frag);

	return QDF_STATUS_E_INVAL;
}

/**
 * dp_tx_mon_update_end_reason() - API to update end reason
 *
 * @mon_pdev - DP_MON_PDEV handle
 * @ppdu_id - ppdu_id
 * @end_reason - monitor destination descriptor end reason
 *
 * Stub for builds without TX capture: no state to update.
 *
 * Return: void
 */
void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev,
				 int ppdu_id, int end_reason)
{
}
#endif