/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include <dp_be.h>
#include <qdf_nbuf_frag.h>
#include <hal_be_api_mon.h>
#include <dp_mon.h>
#include <dp_tx_mon_2.0.h>
#include <dp_mon_2.0.h>
#include <dp_lite_mon.h>

/* Maximum number of tx ppdu info entries kept in the pending list */
#define MAX_PPDU_INFO_LIST_DEPTH 64

/**
 * dp_tx_mon_status_free_packet_buf() - API to free packet buffer
 * @pdev: pdev Handle
 * @status_frag: status frag
 * @end_offset: status fragment end offset
 * @mon_desc_list_ref: tx monitor descriptor list reference
 *
 * Walks the TLVs inside @status_frag up to @end_offset. For every packet
 * buffer address TLV found, the referenced mon descriptor is validated
 * (magic check), DMA-unmapped if still mapped, linked onto the caller's
 * free-descriptor list, and its payload fragment is released.
 *
 * Return: void
 */
void
dp_tx_mon_status_free_packet_buf(struct dp_pdev *pdev,
				 qdf_frag_t status_frag, uint32_t end_offset,
				 struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_mon_packet_info packet_info = {0};
	uint8_t *tx_tlv;
	uint8_t *mon_buf_tx_tlv;
	uint8_t *tx_tlv_start;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_tlv = status_frag;
	tx_tlv_start = tx_tlv;
	/*
	 * parse each status buffer and find packet buffer in it
	 */
	do {
		if (hal_txmon_is_mon_buf_addr_tlv(pdev->soc->hal_soc, tx_tlv)) {
			struct dp_mon_desc *mon_desc = NULL;
			qdf_frag_t packet_buffer = NULL;

			/* packet info payload starts right after the TLV header */
			mon_buf_tx_tlv = ((uint8_t *)tx_tlv +
					  HAL_RX_TLV64_HDR_SIZE);
			hal_txmon_populate_packet_info(pdev->soc->hal_soc,
						       mon_buf_tx_tlv,
						       &packet_info);

			/* sw_cookie carries the host descriptor pointer */
			mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info.sw_cookie;

			qdf_assert_always(mon_desc);

			/* corruption check: descriptor must carry valid magic */
			if (mon_desc->magic != DP_MON_DESC_MAGIC)
				qdf_assert_always(0);

			/* unmap before the buffer is touched/freed by host */
			if (!mon_desc->unmapped) {
				qdf_mem_unmap_page(pdev->soc->osdev,
						   (qdf_dma_addr_t)mon_desc->paddr,
						   DP_MON_DATA_BUFFER_SIZE,
						   QDF_DMA_FROM_DEVICE);
				mon_desc->unmapped = 1;
			}

			packet_buffer = (qdf_frag_t)(mon_desc->buf_addr);
			mon_desc->buf_addr = NULL;

			qdf_assert_always(packet_buffer);
			/* increment reap count */
			mon_desc_list_ref->tx_mon_reap_cnt++;

			/* add the mon_desc to free list */
			dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
						     &mon_desc_list_ref->tail,
						     mon_desc);

			tx_mon_be->stats.pkt_buf_recv++;
			tx_mon_be->stats.pkt_buf_free++;

			/* free buffer, mapped to descriptor */
			qdf_frag_free(packet_buffer);
		}

		/* need api definition for hal_tx_status_get_next_tlv */
		tx_tlv = hal_tx_status_get_next_tlv(tx_tlv);
		/*
		 * NOTE(review): ptrdiff (signed) compared against uint32_t
		 * end_offset; relies on end_offset < INT_MAX — confirm HAL
		 * guarantees this for a single status buffer.
		 */
	} while ((tx_tlv - tx_tlv_start) < end_offset);
}

#if defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(QCA_MONITOR_2_0_SUPPORT)
/**
 * dp_tx_mon_status_queue_free() - API to free status buffer
 * @pdev: pdev Handle
 * @tx_mon_be: pointer to tx_monitor_be
 * @mon_desc_list_ref: tx monitor
 *                     descriptor list reference
 *
 * Return: void
 */
static void
dp_tx_mon_status_queue_free(struct dp_pdev *pdev,
			    struct dp_pdev_tx_monitor_be *tx_mon_be,
			    struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	uint8_t last_frag_q_idx = tx_mon_be->last_frag_q_idx;
	qdf_frag_t status_frag = NULL;
	uint8_t i = tx_mon_be->cur_frag_q_idx;
	uint32_t end_offset = 0;

	/* drain every queued status fragment from cur to last index */
	for (; i < last_frag_q_idx; i++) {
		status_frag = tx_mon_be->frag_q_vec[i].frag_buf;

		if (qdf_unlikely(!status_frag))
			continue;

		end_offset = tx_mon_be->frag_q_vec[i].end_offset;
		/* release packet buffers referenced inside this status frag */
		dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
						 mon_desc_list_ref);
		tx_mon_be->stats.status_buf_free++;
		qdf_frag_free(status_frag);
		tx_mon_be->frag_q_vec[i].frag_buf = NULL;
		tx_mon_be->frag_q_vec[i].end_offset = 0;
	}
	/* queue fully drained; reset both indices */
	tx_mon_be->last_frag_q_idx = 0;
	tx_mon_be->cur_frag_q_idx = 0;
}

/**
 * dp_tx_mon_enqueue_mpdu_nbuf() - API to enqueue nbuf from per user mpdu queue
 * @pdev: pdev Handle (used for nbuf allocation)
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @user_id: user index
 * @mpdu_nbuf: nbuf to be enqueue
 *
 * Allocates a fresh radiotap header nbuf, attaches @mpdu_nbuf to it as an
 * ext (frag) list and enqueues the result on the user's mpdu queue.
 * On radiotap allocation failure @mpdu_nbuf is freed (ownership is taken
 * either way).
 *
 * Return: void
 */
static void
dp_tx_mon_enqueue_mpdu_nbuf(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info,
			    uint8_t user_id, qdf_nbuf_t mpdu_nbuf)
{
	qdf_nbuf_t radiotap = NULL;
	/* enqueue mpdu_nbuf to the per user mpdu_q */
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;

	/*
	 * NOTE(review): if QDF_BUG is non-fatal in this build, execution
	 * continues past an empty user array — confirm QDF_BUG semantics.
	 */
	if (!TXMON_PPDU_HAL(tx_ppdu_info, rx_user_status) ||
	    !TXMON_PPDU_HAL(tx_ppdu_info, num_users))
		QDF_BUG(0);

	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user_id, mpdu_q);

	radiotap = qdf_nbuf_alloc(pdev->soc->osdev, MAX_MONITOR_HEADER,
				  MAX_MONITOR_HEADER,
				  4, FALSE);
	if (qdf_unlikely(!radiotap)) {
		qdf_err("Unable to allocate radiotap buffer\n");
		qdf_nbuf_free(mpdu_nbuf);
		return;
	}

	/* append ext list */
	qdf_nbuf_append_ext_list(radiotap, mpdu_nbuf, qdf_nbuf_len(mpdu_nbuf));
	qdf_nbuf_queue_add(usr_mpdu_q, radiotap);
}

/*
 * TX MONITOR
 *
 * frame format
 * -------------------------------------------------------------------------
 *  FUNC | ToDS | FromDS | ADDRESS 1 | ADDRESS 2 | ADDRESS 3 | ADDRESS 4 |
 * ------------------------------------------------------------------------
 *  IBSS | 0    | 0      | DA        | SA        | BSSID     | NOT USED  |
 *  TO AP| 1    | 0      | BSSID     | SA        | DA        | NOT USED  |
 * From AP| 0   | 1      | DA        | BSSID     | SA        | NOT USED  |
 *  WDS  | 1    | 1      | RA        | TA        | DA        | SA        |
 * ------------------------------------------------------------------------
 *
 * HOST GENERATED FRAME:
 * =====================
 * 1. RTS
 * 2. CTS
 * 3. ACK
 * 4. BA
 * 5. Multi STA BA
 *
 * control frame
 * ------------------------------------------------------------
 * | protocol 2b | Type 2b | subtype 4b | ToDS 1b | FromDS 1b |
 * | Morefrag 1b | Retry 1b | pwr_mgmt 1b | More data 1b |
 * | protected frm 1b | order 1b |
 * -----------------------------------------------------------
 * control frame originated from wireless station so ToDS = FromDS = 0,
 *
 * RTS
 * ---------------------------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | Transmit address 6 | FCS |
 * ---------------------------------------------------------------------------
 * subtype in FC is RTS - 1101
 * type in FC is control frame - 10
 *
 * CTS
 * --------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
 * --------------------------------------------------------
 * subtype in FC is CTS - 0011
 * type in FC is control frame - 10
 *
 * ACK
 * --------------------------------------------------------
 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
 * --------------------------------------------------------
 *
 * subtype in FC is ACK - 1011
 * type in FC is control frame - 10
 *
 * Block ACK
 * --------------------------------------------------------------------------
 * | FC 2 | Dur 2 | RA 6 | TA 6 | BA CTRL 2 | BA Information variable | FCS |
 * --------------------------------------------------------------------------
 *
 * Block Ack control
 * ---------------------------------------------------------------
 * | BA ACK POLICY B0 | BA TYPE B1-B4 | Rsv B5-B11 | TID B12-B15 |
 * ---------------------------------------------------------------
 *
 * BA ack policy
 * 0 - Normal Ack
 * 1 - No Ack
 *
 * Block Ack Type
 * 0     - Reserved
 * 1     - extended compressed
 * 2     - compressed
 * 3     - Multi TID
 * 4-5   - Reserved
 * 6     - GCR
 * 7-9   - Reserved
 * 10    - GLK-GCR
 * 11    - Multi-STA
 * 12-15 - Reserved
 *
 * Block Ack information
 * ----------------------------------------------------------
 * | Block ack start seq ctrl 2 | Block ack bitmap variable |
 * ----------------------------------------------------------
 *
 * Multi STA Block Ack Information
 * -----------------------------------------------------------------
 * | Per STA TID info 2 | BA start seq ctrl 2 | BA bitmap variable |
 * -----------------------------------------------------------------
 *
 * Per STA TID info
 * ------------------------------------
 * | AID11 11b | Ack Type 1b | TID 4b |
 * ------------------------------------
 * AID11 - 2045 means unassociated STA, then ACK Type and TID 0, 15
 *
 * Mgmt/PS-POLL frame ack
 * Ack type - 1 and TID - 15, BA_seq_ctrl & BA_bitmap - not present
 *
 * All ack context - with no bitmap (all AMPDU success)
 * Ack type - 1 and TID - 14, BA_seq_ctrl & BA_bitmap - not present
 *
 * Block ack context
 * Ack type - 0 and TID - 0~7 BA_seq_ctrl & BA_bitmap - present
 *
 * Ack context
 * Ack type - 1 and TID - 0~7 BA_seq_ctrl & BA_bitmap - not present
 *
 *
 */

/**
 * dp_tx_mon_generate_cts2self_frm() - API to generate cts2self frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window (INITIATOR_WINDOW/RESPONSE_WINDOW);
 *               selects whether RA comes from addr1 or addr2
 *
 * Builds a minimal CTS/CTS2SELF frame from the protection status info and
 * enqueues it (wrapped with a radiotap header) on user 0's mpdu queue.
 *
 * Return: void
 */
static void
dp_tx_mon_generate_cts2self_frm(struct dp_pdev *pdev,
				struct dp_tx_ppdu_info *tx_ppdu_info,
				uint8_t window_flag)
{
	/* allocate and populate CTS/ CTS2SELF frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	uint16_t duration_le = 0;
	struct ieee80211_frame_min_one *wh_min = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	/* CTS2SELF is a protection frame: use prot_status_info */
	tx_status_info = &tx_mon_be->prot_status_info;

	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_min = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_CTS);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_min->i_fc[1] = 0;
	wh_min->i_fc[0] = frm_ctl;

	/* duration is serialized byte-wise in little-endian order */
	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_min->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_min->i_dur[0] = (duration_le &
0xFF);

	/* RA selection depends on which window generated the frame */
	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_rts_frm() - API to generate rts frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window (INITIATOR_WINDOW/RESPONSE_WINDOW);
 *               selects the RA/TA address ordering
 *
 * Builds an RTS frame from the protection status info (falling back to the
 * data status info when no protection address is present) and enqueues it
 * on user 0's mpdu queue.
 *
 * Return: void
 */
static void
dp_tx_mon_generate_rts_frm(struct dp_pdev *pdev,
			   struct dp_tx_ppdu_info *tx_ppdu_info,
			   uint8_t window_flag)
{
	/* allocate and populate RTS frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	uint16_t duration_le = 0;
	struct ieee80211_ctlframe_addr2 *wh_min = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->prot_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_min = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_RTS);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_min->i_fc[1] = 0;
	wh_min->i_fc[0] = frm_ctl;

	/* duration is serialized byte-wise in little-endian order */
	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_min->i_aidordur[1] = (duration_le & 0xFF00) >> 8;
	wh_min->i_aidordur[0] = (duration_le & 0xFF);

	/* no protection address captured: fall back to data status info */
	if (!tx_status_info->protection_addr)
		tx_status_info = &tx_mon_be->data_status_info;

	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_min->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	} else {
		/* response window: swap RA/TA */
		qdf_mem_copy(wh_min->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_min->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	}

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_ack_frm() - API to generate ack frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @window_flag: frame generated window (INITIATOR_WINDOW/RESPONSE_WINDOW)
 *
 * Builds a minimal ACK frame (duration 0) from the data status info and
 * enqueues it on the current user's mpdu queue.
 *
 * Return: void
 */
static void
dp_tx_mon_generate_ack_frm(struct dp_pdev *pdev,
			   struct dp_tx_ppdu_info *tx_ppdu_info,
			   uint8_t window_flag)
{
	/* allocate and populate ACK frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_frame_min_one *wh_addr1 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	uint8_t frm_ctl;

	/* sanity
check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr1 = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_SUBTYPE_ACK);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr1->i_fc[1] = 0;
	wh_addr1->i_fc[0] = frm_ctl;

	/* RA comes from addr1 in the initiator window, addr2 otherwise */
	if (window_flag == INITIATOR_WINDOW) {
		qdf_mem_copy(wh_addr1->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_addr1->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	/* set duration zero for ack frame */
	*(u_int16_t *)(&wh_addr1->i_dur) = qdf_cpu_to_le16(0x0000);

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr1));

	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, user_id, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_3addr_qos_null_frm() - API to generate
 * 3 address qosnull frame
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Builds a 3-address QoS NULL frame from the data status info
 * (addr1/addr2/addr3) and enqueues it on user 0's mpdu queue.
 *
 * Return: void
 */
static void
dp_tx_mon_generate_3addr_qos_null_frm(struct dp_pdev *pdev,
				      struct dp_tx_ppdu_info *tx_ppdu_info)
{
	/* allocate and populate 3 address qos null frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_qosframe *wh_addr3 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint16_t duration_le = 0;
	uint8_t num_users = 0;	/* always user index 0 for this frame */
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr3 = (struct ieee80211_qosframe *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr3, sizeof(struct ieee80211_qosframe));

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
		   IEEE80211_FC0_SUBTYPE_QOS_NULL);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr3->i_fc[1] = 0;
	wh_addr3->i_fc[0] = frm_ctl;

	/* duration is serialized byte-wise in little-endian order */
	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_addr3->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_addr3->i_dur[0] = (duration_le & 0xFF);

	qdf_mem_copy(wh_addr3->i_addr1,
		     TXMON_STATUS_INFO(tx_status_info, addr1),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr3->i_addr2,
		     TXMON_STATUS_INFO(tx_status_info, addr2),
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(wh_addr3->i_addr3,
		     TXMON_STATUS_INFO(tx_status_info, addr3),
		     QDF_MAC_ADDR_SIZE);

	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr3));
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_generate_4addr_qos_null_frm() - API to generate
 * 4 address qos null frame
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Builds a 4-address (WDS) QoS NULL frame from the data status info and
 * enqueues it on user 0's mpdu queue.
 *
 * Return: void
 */
static void
dp_tx_mon_generate_4addr_qos_null_frm(struct dp_pdev *pdev,
				      struct dp_tx_ppdu_info *tx_ppdu_info)
{
	/* allocate and populate 4 address qos null frame */
	/* enqueue 802.11 payload to per user mpdu_q */
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	struct ieee80211_qosframe_addr4 *wh_addr4 = NULL;
	qdf_nbuf_t mpdu_nbuf = NULL;
	uint16_t duration_le = 0;
	uint8_t num_users = 0;	/* always user index 0 for this frame */
	uint8_t frm_ctl;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate new skb,
	 * so we don't need reserver skb header
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!mpdu_nbuf)
		return;

	wh_addr4 = (struct ieee80211_qosframe_addr4 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr4, sizeof(struct ieee80211_qosframe_addr4));

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
		   IEEE80211_FC0_SUBTYPE_QOS_NULL);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr4->i_fc[1] = 0;
	wh_addr4->i_fc[0] = frm_ctl;

	/* duration is serialized byte-wise in little-endian order */
	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
	wh_addr4->i_dur[1] = (duration_le & 0xFF00) >> 8;
	wh_addr4->i_dur[0] = (duration_le &
0xFF); 676 677 qdf_mem_copy(wh_addr4->i_addr1, 678 TXMON_STATUS_INFO(tx_status_info, addr1), 679 QDF_MAC_ADDR_SIZE); 680 qdf_mem_copy(wh_addr4->i_addr2, 681 TXMON_STATUS_INFO(tx_status_info, addr2), 682 QDF_MAC_ADDR_SIZE); 683 qdf_mem_copy(wh_addr4->i_addr3, 684 TXMON_STATUS_INFO(tx_status_info, addr3), 685 QDF_MAC_ADDR_SIZE); 686 qdf_mem_copy(wh_addr4->i_addr4, 687 TXMON_STATUS_INFO(tx_status_info, addr4), 688 QDF_MAC_ADDR_SIZE); 689 690 qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr4)); 691 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf); 692 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1; 693 } 694 695 #define TXMON_BA_CTRL_SZ 2 696 #define TXMON_BA_INFO_SZ(bitmap_sz) ((4 * (bitmap_sz)) + 6) 697 #define TXMON_MU_BA_ACK_FRAME_SZ(bitmap_sz) \ 698 (sizeof(struct ieee80211_ctlframe_addr2) +\ 699 TXMON_BA_CTRL_SZ + (bitmap_sz)) 700 701 #define TXMON_BA_ACK_FRAME_SZ(bitmap_sz) \ 702 (sizeof(struct ieee80211_ctlframe_addr2) +\ 703 TXMON_BA_CTRL_SZ + TXMON_BA_INFO_SZ(bitmap_sz)) 704 705 /** 706 * dp_tx_mon_generate_mu_block_ack_frm() - API to generate MU block ack frame 707 * @pdev: pdev Handle 708 * @tx_ppdu_info: pointer to tx ppdu info structure 709 * @window_flag: frame generated window 710 * 711 * Return: void 712 */ 713 static void 714 dp_tx_mon_generate_mu_block_ack_frm(struct dp_pdev *pdev, 715 struct dp_tx_ppdu_info *tx_ppdu_info, 716 uint8_t window_flag) 717 { 718 /* allocate and populate MU block ack frame */ 719 /* enqueue 802.11 payload to per user mpdu_q */ 720 struct dp_mon_pdev *mon_pdev; 721 struct dp_mon_pdev_be *mon_pdev_be; 722 struct dp_pdev_tx_monitor_be *tx_mon_be; 723 struct hal_tx_status_info *tx_status_info; 724 struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL; 725 qdf_nbuf_t mpdu_nbuf = NULL; 726 uint8_t *frm = NULL; 727 uint32_t ba_sz = 0; 728 uint8_t num_users = TXMON_PPDU_HAL(tx_ppdu_info, num_users); 729 uint8_t i = 0; 730 uint8_t frm_ctl; 731 732 /* sanity check */ 733 if (qdf_unlikely(!pdev)) 734 return; 735 736 mon_pdev 
= pdev->monitor_pdev; 737 if (qdf_unlikely(!mon_pdev)) 738 return; 739 740 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 741 if (qdf_unlikely(!mon_pdev_be)) 742 return; 743 744 tx_mon_be = &mon_pdev_be->tx_monitor_be; 745 tx_status_info = &tx_mon_be->data_status_info; 746 for (i = 0; i < num_users; i++) 747 ba_sz += (4 << TXMON_BA_INFO_SZ(TXMON_PPDU_USR(tx_ppdu_info, 748 i, 749 ba_bitmap_sz))); 750 751 /* 752 * for multi sta block ack, do we need to increase the size 753 * or copy info on subsequent frame offset 754 * 755 * for radiotap we allocate new skb, 756 * so we don't need reserver skb header 757 */ 758 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev, 759 TXMON_MU_BA_ACK_FRAME_SZ(ba_sz), 0, 4, 760 FALSE); 761 if (!mpdu_nbuf) { 762 /* TODO: update status and break */ 763 return; 764 } 765 766 wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf); 767 qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE); 768 769 frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | 770 IEEE80211_FC0_BLOCK_ACK); 771 TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl; 772 TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1; 773 wh_addr2->i_fc[1] = 0; 774 wh_addr2->i_fc[0] = frm_ctl; 775 776 *(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0000); 777 778 qdf_mem_copy(wh_addr2->i_addr2, 779 TXMON_STATUS_INFO(tx_status_info, addr2), 780 QDF_MAC_ADDR_SIZE); 781 qdf_mem_copy(wh_addr2->i_addr1, 782 TXMON_STATUS_INFO(tx_status_info, addr1), 783 QDF_MAC_ADDR_SIZE); 784 785 frm = (uint8_t *)&wh_addr2[1]; 786 787 /* BA control */ 788 *((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info, 789 0, ba_control)); 790 frm += 2; 791 792 for (i = 0; i < num_users; i++) { 793 *((uint16_t *)frm) = 794 qdf_cpu_to_le16((TXMON_PPDU_USR(tx_ppdu_info, i, tid) << 795 DP_IEEE80211_BAR_CTL_TID_S) | 796 (TXMON_PPDU_USR(tx_ppdu_info, i, 797 aid) & 0x7FF)); 798 frm += 2; 799 *((uint16_t *)frm) = TXMON_PPDU_USR(tx_ppdu_info, 800 i, start_seq) & 0xFFF; 
801 frm += 2; 802 qdf_mem_copy(frm, 803 TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap), 804 4 << 805 TXMON_PPDU_USR(tx_ppdu_info, 806 i, ba_bitmap_sz)); 807 frm += 4 << TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap_sz); 808 } 809 810 qdf_nbuf_set_pktlen(mpdu_nbuf, 811 (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf))); 812 813 /* always enqueue to first active user */ 814 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf); 815 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1; 816 } 817 818 /** 819 * dp_tx_mon_generate_block_ack_frm() - API to generate block ack frame 820 * @pdev: pdev Handle 821 * @tx_ppdu_info: pointer to tx ppdu info structure 822 * @window_flag: frame generated window 823 * 824 * Return: void 825 */ 826 static void 827 dp_tx_mon_generate_block_ack_frm(struct dp_pdev *pdev, 828 struct dp_tx_ppdu_info *tx_ppdu_info, 829 uint8_t window_flag) 830 { 831 /* allocate and populate block ack frame */ 832 /* enqueue 802.11 payload to per user mpdu_q */ 833 struct dp_mon_pdev *mon_pdev; 834 struct dp_mon_pdev_be *mon_pdev_be; 835 struct dp_pdev_tx_monitor_be *tx_mon_be; 836 struct hal_tx_status_info *tx_status_info; 837 struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL; 838 qdf_nbuf_t mpdu_nbuf = NULL; 839 uint8_t *frm = NULL; 840 uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx); 841 uint32_t ba_bitmap_sz = TXMON_PPDU_USR(tx_ppdu_info, 842 user_id, ba_bitmap_sz); 843 uint8_t frm_ctl; 844 845 /* sanity check */ 846 if (qdf_unlikely(!pdev)) 847 return; 848 849 mon_pdev = pdev->monitor_pdev; 850 if (qdf_unlikely(!mon_pdev)) 851 return; 852 853 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 854 if (qdf_unlikely(!mon_pdev_be)) 855 return; 856 857 tx_mon_be = &mon_pdev_be->tx_monitor_be; 858 tx_status_info = &tx_mon_be->data_status_info; 859 /* 860 * for multi sta block ack, do we need to increase the size 861 * or copy info on subsequent frame offset 862 * 863 * for radiotap we allocate new skb, 864 * so we don't need reserver skb header 865 */ 866 
mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   TXMON_BA_ACK_FRAME_SZ(ba_bitmap_sz),
				   0, 4, FALSE);
	if (!mpdu_nbuf) {
		/* TODO: update status and break */
		return;
	}

	/*
	 * BA CONTROL
	 * fields required to construct block ack information
	 * B0 - BA ACK POLICY
	 *	0 - Normal ACK
	 *	1 - No ACK
	 * B1 - MULTI TID
	 * B2 - COMPRESSED BITMAP
	 *	B12
	 *	00 - Basic block ack
	 *	01 - Compressed block ack
	 *	10 - Reserved
	 *	11 - Multi tid block ack
	 * B3-B11 - Reserved
	 * B12-B15 - TID info
	 *
	 * BA INFORMATION
	 * Per sta tid info
	 *	AID: 11 bits
	 *	ACK type: 1 bit
	 *	TID: 4 bits
	 *
	 * BA SEQ CTRL
	 *
	 * BA bitmap
	 *
	 */

	wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
	qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);

	frm_ctl = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
		   IEEE80211_FC0_BLOCK_ACK);
	TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
	TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
	wh_addr2->i_fc[1] = 0;
	wh_addr2->i_fc[0] = frm_ctl;

	/* duration */
	*(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0020);

	/* RA/TA ordering depends on the generating window */
	if (window_flag) {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_addr2->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
	} else {
		qdf_mem_copy(wh_addr2->i_addr2,
			     TXMON_STATUS_INFO(tx_status_info, addr1),
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(wh_addr2->i_addr1,
			     TXMON_STATUS_INFO(tx_status_info, addr2),
			     QDF_MAC_ADDR_SIZE);
	}

	/* BA payload begins immediately after the fixed header */
	frm = (uint8_t *)&wh_addr2[1];
	/* BA control */
	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
							    user_id,
							    ba_control));
	frm += 2;
	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
							    user_id,
							    start_seq) & 0xFFF);
	frm += 2;
	qdf_mem_copy(frm,
		     TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap),
		     4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));
	frm += (4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));

	qdf_nbuf_set_pktlen(mpdu_nbuf,
			    (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));

	/*
	 * NOTE(review): enqueued to user 0 although the BA content is for
	 * user_id — presumably intentional ("first active user", as in the
	 * MU variant); confirm.
	 */
	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);

	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
}

/**
 * dp_tx_mon_alloc_mpdu() - API to allocate mpdu and add that current
 * user index
 *
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Allocates an empty nbuf (payload is later attached as fragments) and
 * appends it to the current user's mpdu queue.
 *
 * Return: void
 */
static void
dp_tx_mon_alloc_mpdu(struct dp_pdev *pdev, struct dp_tx_ppdu_info *tx_ppdu_info)
{
	qdf_nbuf_t mpdu_nbuf = NULL;
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
	uint32_t usr_idx = 0;

	/*
	 * payload will be added as a frag to buffer
	 * and we allocate new skb for radiotap header
	 * we allocate a dummy bufffer size
	 */
	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				   MAX_MONITOR_HEADER, MAX_MONITOR_HEADER,
				   4, FALSE);
	if (!mpdu_nbuf) {
		qdf_err("%s: %d No memory to allocate mpdu_nbuf!!!!!\n",
			__func__, __LINE__);
		return;
	}

	usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);

	qdf_nbuf_queue_add(usr_mpdu_q, mpdu_nbuf);
}

/**
 * dp_tx_mon_generate_data_frm() - API to generate data frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @take_ref: whether qdf_nbuf_add_frag should take a reference on the
 *            payload buffer
 *
 * Return: void
 */
static void
dp_tx_mon_generate_data_frm(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info,
			    bool take_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	qdf_nbuf_t mpdu_nbuf = NULL;
	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
uint32_t usr_idx = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	tx_status_info = &tx_mon_be->data_status_info;
	usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
	/* payload is appended to the most recently allocated mpdu nbuf */
	mpdu_nbuf = qdf_nbuf_queue_last(usr_mpdu_q);

	/*
	 * NOTE(review): if QDF_BUG is non-fatal in this build,
	 * qdf_nbuf_add_frag below runs with a NULL nbuf — confirm.
	 */
	if (!mpdu_nbuf)
		QDF_BUG(0);

	tx_mon_be->stats.pkt_buf_processed++;

	/* add function to either copy or add frag to frag_list */
	qdf_nbuf_add_frag(pdev->soc->osdev,
			  TXMON_STATUS_INFO(tx_status_info, buffer),
			  mpdu_nbuf,
			  TXMON_STATUS_INFO(tx_status_info, offset),
			  TXMON_STATUS_INFO(tx_status_info, length),
			  DP_MON_DATA_BUFFER_SIZE,
			  take_ref, TXMON_NO_BUFFER_SZ);
}

/**
 * dp_tx_mon_generate_prot_frm() - API to generate protection frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Dispatches to the RTS / CTS2SELF / QoS NULL generator matching the
 * medium protection type recorded in the data status info.
 *
 * Return: void
 */
static void
dp_tx_mon_generate_prot_frm(struct dp_pdev *pdev,
			    struct dp_tx_ppdu_info *tx_ppdu_info)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	tx_status_info = &tx_mon_be->prot_status_info;

	/* update medium prot type from data */
	TXMON_STATUS_INFO(tx_status_info, medium_prot_type) =
		tx_mon_be->data_status_info.medium_prot_type;

	switch (TXMON_STATUS_INFO(tx_status_info, medium_prot_type)) {
	case TXMON_MEDIUM_NO_PROTECTION:
	{
		/* no protection frame - do nothing */
		break;
	}
	case TXMON_MEDIUM_RTS_LEGACY:
	case TXMON_MEDIUM_RTS_11AC_STATIC_BW:
	case TXMON_MEDIUM_RTS_11AC_DYNAMIC_BW:
	{
		dp_tx_mon_generate_rts_frm(pdev, tx_ppdu_info,
					   INITIATOR_WINDOW);
		break;
	}
	case TXMON_MEDIUM_CTS2SELF:
	{
		dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info,
						INITIATOR_WINDOW);
		break;
	}
	case TXMON_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
	{
		dp_tx_mon_generate_3addr_qos_null_frm(pdev, tx_ppdu_info);
		break;
	}
	case TXMON_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
	{
		dp_tx_mon_generate_4addr_qos_null_frm(pdev, tx_ppdu_info);
		break;
	}
	/* unknown protection types are silently ignored */
	}
}

/**
 * dp_tx_mon_generated_response_frm() - API to handle generated response frame
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_tx_mon_generated_response_frm(struct dp_pdev *pdev,
				 struct dp_tx_ppdu_info *tx_ppdu_info)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct hal_tx_status_info *tx_status_info;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t gen_response = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_NOMEM;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return QDF_STATUS_E_NOMEM;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return QDF_STATUS_E_NOMEM;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	tx_status_info = &tx_mon_be->data_status_info;
	gen_response =
TXMON_STATUS_INFO(tx_status_info, generated_response); 1146 1147 switch (gen_response) { 1148 case TXMON_GEN_RESP_SELFGEN_ACK: 1149 { 1150 dp_tx_mon_generate_ack_frm(pdev, tx_ppdu_info, RESPONSE_WINDOW); 1151 break; 1152 } 1153 case TXMON_GEN_RESP_SELFGEN_CTS: 1154 { 1155 dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info, 1156 RESPONSE_WINDOW); 1157 break; 1158 } 1159 case TXMON_GEN_RESP_SELFGEN_BA: 1160 { 1161 dp_tx_mon_generate_block_ack_frm(pdev, tx_ppdu_info, 1162 RESPONSE_WINDOW); 1163 break; 1164 } 1165 case TXMON_GEN_RESP_SELFGEN_MBA: 1166 { 1167 break; 1168 } 1169 case TXMON_GEN_RESP_SELFGEN_CBF: 1170 { 1171 break; 1172 } 1173 case TXMON_GEN_RESP_SELFGEN_TRIG: 1174 { 1175 break; 1176 } 1177 case TXMON_GEN_RESP_SELFGEN_NDP_LMR: 1178 { 1179 break; 1180 } 1181 }; 1182 1183 return status; 1184 } 1185 1186 /** 1187 * dp_tx_mon_update_ppdu_info_status() - API to update frame as information 1188 * is stored only for that processing 1189 * 1190 * @pdev: pdev Handle 1191 * @tx_data_ppdu_info: pointer to data tx ppdu info 1192 * @tx_prot_ppdu_info: pointer to protection tx ppdu info 1193 * @tx_tlv_hdr: pointer to tx_tlv_hdr 1194 * @status_frag: pointer to fragment 1195 * @tlv_status: tlv status return from hal api 1196 * @mon_desc_list_ref: tx monitor descriptor list reference 1197 * 1198 * Return: QDF_STATUS 1199 */ 1200 static QDF_STATUS 1201 dp_tx_mon_update_ppdu_info_status(struct dp_pdev *pdev, 1202 struct dp_tx_ppdu_info *tx_data_ppdu_info, 1203 struct dp_tx_ppdu_info *tx_prot_ppdu_info, 1204 void *tx_tlv_hdr, 1205 qdf_frag_t status_frag, 1206 uint32_t tlv_status, 1207 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1208 { 1209 struct dp_mon_pdev *mon_pdev; 1210 struct dp_mon_pdev_be *mon_pdev_be; 1211 struct dp_pdev_tx_monitor_be *tx_mon_be; 1212 struct hal_tx_status_info *tx_status_info; 1213 QDF_STATUS status = QDF_STATUS_SUCCESS; 1214 1215 /* sanity check */ 1216 if (qdf_unlikely(!pdev)) 1217 return QDF_STATUS_E_NOMEM; 1218 1219 mon_pdev = 
pdev->monitor_pdev; 1220 if (qdf_unlikely(!mon_pdev)) 1221 return QDF_STATUS_E_NOMEM; 1222 1223 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1224 if (qdf_unlikely(!mon_pdev_be)) 1225 return QDF_STATUS_E_NOMEM; 1226 1227 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1228 1229 switch (tlv_status) { 1230 case HAL_MON_TX_FES_SETUP: 1231 { 1232 /* 1233 * start of initiator window 1234 * 1235 * got number of user count from fes setup tlv 1236 */ 1237 break; 1238 } 1239 case HAL_MON_RX_RESPONSE_REQUIRED_INFO: 1240 { 1241 break; 1242 } 1243 case HAL_MON_TX_FES_STATUS_START_PROT: 1244 { 1245 /* update tsft to local */ 1246 break; 1247 } 1248 case HAL_MON_TX_FES_STATUS_START_PPDU: 1249 { 1250 /* update tsft to local */ 1251 break; 1252 } 1253 case HAL_MON_TX_FES_STATUS_PROT: 1254 { 1255 TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used) = 1; 1256 TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) = 1257 TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) << 1; 1258 1259 /* based on medium protection type we need to generate frame */ 1260 dp_tx_mon_generate_prot_frm(pdev, tx_prot_ppdu_info); 1261 break; 1262 } 1263 case HAL_MON_RX_FRAME_BITMAP_ACK: 1264 { 1265 /* this comes for each user */ 1266 dp_tx_mon_generate_ack_frm(pdev, tx_data_ppdu_info, 1267 INITIATOR_WINDOW); 1268 break; 1269 } 1270 case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_256: 1271 case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_1K: 1272 { 1273 /* 1274 * this comes for each user 1275 * BlockAck is not same as ACK, single frame can hold 1276 * multiple BlockAck info 1277 */ 1278 tx_status_info = &tx_mon_be->data_status_info; 1279 1280 if (TXMON_PPDU_HAL(tx_data_ppdu_info, num_users)) 1281 dp_tx_mon_generate_block_ack_frm(pdev, 1282 tx_data_ppdu_info, 1283 INITIATOR_WINDOW); 1284 else 1285 dp_tx_mon_generate_mu_block_ack_frm(pdev, 1286 tx_data_ppdu_info, 1287 INITIATOR_WINDOW); 1288 1289 break; 1290 } 1291 case HAL_MON_TX_MPDU_START: 1292 { 1293 dp_tx_mon_alloc_mpdu(pdev, tx_data_ppdu_info); 1294 
TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1295 break; 1296 } 1297 case HAL_MON_TX_MSDU_START: 1298 { 1299 break; 1300 } 1301 case HAL_MON_TX_DATA: 1302 { 1303 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1304 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, true); 1305 break; 1306 } 1307 case HAL_MON_TX_BUFFER_ADDR: 1308 { 1309 struct hal_mon_packet_info *packet_info = NULL; 1310 struct dp_mon_desc *mon_desc = NULL; 1311 qdf_frag_t packet_buffer = NULL; 1312 uint32_t end_offset = 0; 1313 1314 tx_status_info = &tx_mon_be->data_status_info; 1315 /* update buffer from packet info */ 1316 packet_info = &TXMON_PPDU_HAL(tx_data_ppdu_info, packet_info); 1317 mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info->sw_cookie; 1318 1319 qdf_assert_always(mon_desc); 1320 1321 if (mon_desc->magic != DP_MON_DESC_MAGIC) 1322 qdf_assert_always(0); 1323 1324 qdf_assert_always(mon_desc->buf_addr); 1325 tx_mon_be->stats.pkt_buf_recv++; 1326 1327 if (!mon_desc->unmapped) { 1328 qdf_mem_unmap_page(pdev->soc->osdev, 1329 (qdf_dma_addr_t)mon_desc->paddr, 1330 DP_MON_DATA_BUFFER_SIZE, 1331 QDF_DMA_FROM_DEVICE); 1332 mon_desc->unmapped = 1; 1333 } 1334 1335 packet_buffer = mon_desc->buf_addr; 1336 mon_desc->buf_addr = NULL; 1337 1338 /* increment reap count */ 1339 mon_desc_list_ref->tx_mon_reap_cnt++; 1340 1341 /* add the mon_desc to free list */ 1342 dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list, 1343 &mon_desc_list_ref->tail, 1344 mon_desc); 1345 1346 TXMON_STATUS_INFO(tx_status_info, buffer) = packet_buffer; 1347 TXMON_STATUS_INFO(tx_status_info, offset) = end_offset; 1348 TXMON_STATUS_INFO(tx_status_info, 1349 length) = packet_info->dma_length; 1350 1351 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1; 1352 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, false); 1353 break; 1354 } 1355 case HAL_MON_TX_FES_STATUS_END: 1356 { 1357 break; 1358 } 1359 case HAL_MON_RESPONSE_END_STATUS_INFO: 1360 { 1361 dp_tx_mon_generated_response_frm(pdev, 
tx_data_ppdu_info); 1362 break; 1363 } 1364 case HAL_MON_TX_FES_STATUS_START: 1365 { 1366 /* update the medium protection type */ 1367 break; 1368 } 1369 case HAL_MON_TX_QUEUE_EXTENSION: 1370 { 1371 /* No action for Queue Extension TLV */ 1372 break; 1373 } 1374 default: 1375 { 1376 /* return or break in default case */ 1377 break; 1378 } 1379 }; 1380 1381 return status; 1382 } 1383 1384 /* 1385 * dp_tx_mon_process_tlv_2_0() - API to parse PPDU worth information 1386 * @pdev_handle: DP_PDEV handle 1387 * @mon_desc_list_ref: tx monitor descriptor list reference 1388 * 1389 * Return: status 1390 */ 1391 QDF_STATUS 1392 dp_tx_mon_process_tlv_2_0(struct dp_pdev *pdev, 1393 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1394 { 1395 struct dp_mon_pdev *mon_pdev; 1396 struct dp_mon_pdev_be *mon_pdev_be; 1397 struct dp_pdev_tx_monitor_be *tx_mon_be; 1398 struct dp_tx_ppdu_info *tx_prot_ppdu_info = NULL; 1399 struct dp_tx_ppdu_info *tx_data_ppdu_info = NULL; 1400 struct hal_tx_status_info *tx_status_prot; 1401 struct hal_tx_status_info *tx_status_data; 1402 qdf_frag_t status_frag = NULL; 1403 uint32_t end_offset = 0; 1404 uint32_t tlv_status; 1405 uint32_t status = QDF_STATUS_SUCCESS; 1406 uint8_t *tx_tlv; 1407 uint8_t *tx_tlv_start; 1408 uint8_t num_users = 0; 1409 uint8_t cur_frag_q_idx; 1410 bool schedule_wrq = false; 1411 1412 /* sanity check */ 1413 if (qdf_unlikely(!pdev)) 1414 return QDF_STATUS_E_NOMEM; 1415 1416 mon_pdev = pdev->monitor_pdev; 1417 if (qdf_unlikely(!mon_pdev)) 1418 return QDF_STATUS_E_NOMEM; 1419 1420 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1421 if (qdf_unlikely(!mon_pdev_be)) 1422 return QDF_STATUS_E_NOMEM; 1423 1424 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1425 cur_frag_q_idx = tx_mon_be->cur_frag_q_idx; 1426 1427 tx_status_prot = &tx_mon_be->prot_status_info; 1428 tx_status_data = &tx_mon_be->data_status_info; 1429 1430 tx_prot_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_PROT_PPDU_INFO, 1431 1, tx_mon_be->be_ppdu_id); 1432 
1433 if (!tx_prot_ppdu_info) { 1434 dp_mon_info("tx prot ppdu info alloc got failed!!"); 1435 return QDF_STATUS_E_NOMEM; 1436 } 1437 1438 status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf; 1439 end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset; 1440 tx_tlv = status_frag; 1441 dp_mon_debug("last_frag_q_idx: %d status_frag:%pK", 1442 tx_mon_be->last_frag_q_idx, status_frag); 1443 1444 /* get number of user from tlv window */ 1445 tlv_status = hal_txmon_status_get_num_users(pdev->soc->hal_soc, 1446 tx_tlv, &num_users); 1447 if (tlv_status == HAL_MON_TX_STATUS_PPDU_NOT_DONE || !num_users) { 1448 dp_mon_err("window open with tlv_tag[0x%x] num_users[%d]!\n", 1449 hal_tx_status_get_tlv_tag(tx_tlv), num_users); 1450 return QDF_STATUS_E_INVAL; 1451 } 1452 1453 /* allocate tx_data_ppdu_info based on num_users */ 1454 tx_data_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_DATA_PPDU_INFO, 1455 num_users, 1456 tx_mon_be->be_ppdu_id); 1457 if (!tx_data_ppdu_info) { 1458 dp_mon_info("tx prot ppdu info alloc got failed!!"); 1459 return QDF_STATUS_E_NOMEM; 1460 } 1461 1462 /* iterate status buffer queue */ 1463 while (tx_mon_be->cur_frag_q_idx < tx_mon_be->last_frag_q_idx) { 1464 /* get status buffer from frag_q_vec */ 1465 status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf; 1466 end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset; 1467 if (qdf_unlikely(!status_frag)) { 1468 dp_mon_err("status frag is NULL\n"); 1469 QDF_BUG(0); 1470 } 1471 1472 tx_tlv = status_frag; 1473 tx_tlv_start = tx_tlv; 1474 /* 1475 * parse each status buffer and populate the information to 1476 * dp_tx_ppdu_info 1477 */ 1478 do { 1479 tlv_status = hal_txmon_status_parse_tlv( 1480 pdev->soc->hal_soc, 1481 &tx_data_ppdu_info->hal_txmon, 1482 &tx_prot_ppdu_info->hal_txmon, 1483 tx_status_data, 1484 tx_status_prot, 1485 tx_tlv, status_frag); 1486 1487 status = 1488 dp_tx_mon_update_ppdu_info_status( 1489 pdev, 1490 tx_data_ppdu_info, 1491 tx_prot_ppdu_info, 1492 tx_tlv, 
1493 status_frag, 1494 tlv_status, 1495 mon_desc_list_ref); 1496 1497 /* need api definition for hal_tx_status_get_next_tlv */ 1498 tx_tlv = hal_tx_status_get_next_tlv(tx_tlv); 1499 if ((tx_tlv - tx_tlv_start) >= end_offset) 1500 break; 1501 } while ((tx_tlv - tx_tlv_start) < end_offset); 1502 1503 /* 1504 * free status buffer after parsing 1505 * is status_frag mapped to mpdu if so make sure 1506 */ 1507 tx_mon_be->stats.status_buf_free++; 1508 qdf_frag_free(status_frag); 1509 tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf = NULL; 1510 tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset = 0; 1511 cur_frag_q_idx = ++tx_mon_be->cur_frag_q_idx; 1512 } 1513 1514 /* clear the unreleased frag array */ 1515 dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref); 1516 1517 if (TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used)) { 1518 if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info, 1519 chan_num))) { 1520 /* update channel number, if not fetched properly */ 1521 TXMON_PPDU_COM(tx_prot_ppdu_info, 1522 chan_num) = mon_pdev->mon_chan_num; 1523 } 1524 1525 if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info, 1526 chan_freq))) { 1527 /* update channel frequency, if not fetched properly */ 1528 TXMON_PPDU_COM(tx_prot_ppdu_info, 1529 chan_freq) = mon_pdev->mon_chan_freq; 1530 } 1531 1532 /* 1533 * add dp_tx_ppdu_info to pdev queue 1534 * for post processing 1535 * 1536 * TODO: add a threshold check and drop the ppdu info 1537 */ 1538 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1539 tx_mon_be->last_prot_ppdu_info = 1540 tx_mon_be->tx_prot_ppdu_info; 1541 STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue, 1542 tx_prot_ppdu_info, 1543 tx_ppdu_info_queue_elem); 1544 tx_mon_be->tx_ppdu_info_list_depth++; 1545 1546 tx_mon_be->tx_prot_ppdu_info = NULL; 1547 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1548 schedule_wrq = true; 1549 } else { 1550 dp_tx_mon_free_ppdu_info(tx_prot_ppdu_info, tx_mon_be); 1551 tx_mon_be->tx_prot_ppdu_info = NULL; 1552 tx_prot_ppdu_info = NULL; 
1553 } 1554 1555 if (TXMON_PPDU_HAL(tx_data_ppdu_info, is_used)) { 1556 if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info, 1557 chan_num))) { 1558 /* update channel number, if not fetched properly */ 1559 TXMON_PPDU_COM(tx_data_ppdu_info, 1560 chan_num) = mon_pdev->mon_chan_num; 1561 } 1562 1563 if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info, 1564 chan_freq))) { 1565 /* update channel frequency, if not fetched properly */ 1566 TXMON_PPDU_COM(tx_data_ppdu_info, 1567 chan_freq) = mon_pdev->mon_chan_freq; 1568 } 1569 1570 /* 1571 * add dp_tx_ppdu_info to pdev queue 1572 * for post processing 1573 * 1574 * TODO: add a threshold check and drop the ppdu info 1575 */ 1576 qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock); 1577 tx_mon_be->last_data_ppdu_info = 1578 tx_mon_be->tx_data_ppdu_info; 1579 STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue, 1580 tx_data_ppdu_info, 1581 tx_ppdu_info_queue_elem); 1582 tx_mon_be->tx_ppdu_info_list_depth++; 1583 1584 tx_mon_be->tx_data_ppdu_info = NULL; 1585 qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock); 1586 schedule_wrq = true; 1587 } else { 1588 dp_tx_mon_free_ppdu_info(tx_data_ppdu_info, tx_mon_be); 1589 tx_mon_be->tx_data_ppdu_info = NULL; 1590 tx_data_ppdu_info = NULL; 1591 } 1592 1593 if (schedule_wrq) 1594 qdf_queue_work(NULL, tx_mon_be->post_ppdu_workqueue, 1595 &tx_mon_be->post_ppdu_work); 1596 1597 return QDF_STATUS_SUCCESS; 1598 } 1599 1600 /** 1601 * dp_tx_mon_update_end_reason() - API to update end reason 1602 * 1603 * @mon_pdev - DP_MON_PDEV handle 1604 * @ppdu_id - ppdu_id 1605 * @end_reason - monitor destiantion descriptor end reason 1606 * 1607 * Return: void 1608 */ 1609 void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev, 1610 int ppdu_id, int end_reason) 1611 { 1612 struct dp_mon_pdev_be *mon_pdev_be; 1613 struct dp_pdev_tx_monitor_be *tx_mon_be; 1614 1615 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1616 if (qdf_unlikely(!mon_pdev_be)) 1617 return; 1618 1619 tx_mon_be = 
&mon_pdev_be->tx_monitor_be; 1620 1621 tx_mon_be->be_end_reason_bitmap |= (1 << end_reason); 1622 } 1623 1624 /* 1625 * dp_tx_mon_process_status_tlv() - API to processed TLV 1626 * invoked from interrupt handler 1627 * 1628 * @soc - DP_SOC handle 1629 * @pdev - DP_PDEV handle 1630 * @mon_ring_desc - descriptor status info 1631 * @addr - status buffer frag address 1632 * @end_offset - end offset of buffer that has valid buffer 1633 * @mon_desc_list_ref: tx monitor descriptor list reference 1634 * 1635 * Return: QDF_STATUS 1636 */ 1637 QDF_STATUS 1638 dp_tx_mon_process_status_tlv(struct dp_soc *soc, 1639 struct dp_pdev *pdev, 1640 struct hal_mon_desc *mon_ring_desc, 1641 qdf_frag_t status_frag, 1642 uint32_t end_offset, 1643 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1644 { 1645 struct dp_mon_pdev *mon_pdev; 1646 struct dp_mon_pdev_be *mon_pdev_be; 1647 struct dp_pdev_tx_monitor_be *tx_mon_be = NULL; 1648 uint8_t last_frag_q_idx = 0; 1649 1650 /* sanity check */ 1651 if (qdf_unlikely(!pdev)) 1652 goto free_status_buffer; 1653 1654 mon_pdev = pdev->monitor_pdev; 1655 if (qdf_unlikely(!mon_pdev)) 1656 goto free_status_buffer; 1657 1658 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1659 if (qdf_unlikely(!mon_pdev_be)) 1660 goto free_status_buffer; 1661 1662 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1663 1664 if (qdf_unlikely(tx_mon_be->last_frag_q_idx > 1665 MAX_STATUS_BUFFER_IN_PPDU)) { 1666 dp_mon_err("status frag queue for a ppdu[%d] exceed %d\n", 1667 tx_mon_be->be_ppdu_id, 1668 MAX_STATUS_BUFFER_IN_PPDU); 1669 dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref); 1670 goto free_status_buffer; 1671 } 1672 1673 if (tx_mon_be->mode == TX_MON_BE_DISABLE && 1674 !dp_lite_mon_is_tx_enabled(mon_pdev)) { 1675 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1676 mon_desc_list_ref); 1677 goto free_status_buffer; 1678 } 1679 1680 if (tx_mon_be->be_ppdu_id != mon_ring_desc->ppdu_id && 1681 tx_mon_be->last_frag_q_idx) { 1682 if 
(tx_mon_be->be_end_reason_bitmap & 1683 (1 << HAL_MON_FLUSH_DETECTED)) { 1684 tx_mon_be->stats.ppdu_info_drop_flush++; 1685 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1686 mon_desc_list_ref); 1687 } else if (tx_mon_be->be_end_reason_bitmap & 1688 (1 << HAL_MON_PPDU_TRUNCATED)) { 1689 tx_mon_be->stats.ppdu_info_drop_trunc++; 1690 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1691 mon_desc_list_ref); 1692 } else { 1693 dp_mon_err("End of ppdu not seen PID:%d cur_pid:%d idx:%d", 1694 tx_mon_be->be_ppdu_id, 1695 mon_ring_desc->ppdu_id, 1696 tx_mon_be->last_frag_q_idx); 1697 /* schedule ppdu worth information */ 1698 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1699 mon_desc_list_ref); 1700 } 1701 1702 /* reset end reason bitmap */ 1703 tx_mon_be->be_end_reason_bitmap = 0; 1704 tx_mon_be->last_frag_q_idx = 0; 1705 tx_mon_be->cur_frag_q_idx = 0; 1706 } 1707 1708 tx_mon_be->be_ppdu_id = mon_ring_desc->ppdu_id; 1709 tx_mon_be->be_end_reason_bitmap |= (1 << mon_ring_desc->end_reason); 1710 1711 last_frag_q_idx = tx_mon_be->last_frag_q_idx; 1712 1713 tx_mon_be->frag_q_vec[last_frag_q_idx].frag_buf = status_frag; 1714 tx_mon_be->frag_q_vec[last_frag_q_idx].end_offset = end_offset; 1715 tx_mon_be->last_frag_q_idx++; 1716 1717 if (mon_ring_desc->end_reason == HAL_MON_END_OF_PPDU) { 1718 /* drop processing of tlv, if ppdu info list exceed threshold */ 1719 if ((tx_mon_be->defer_ppdu_info_list_depth + 1720 tx_mon_be->tx_ppdu_info_list_depth) > 1721 MAX_PPDU_INFO_LIST_DEPTH) { 1722 tx_mon_be->stats.ppdu_info_drop_th++; 1723 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1724 mon_desc_list_ref); 1725 return QDF_STATUS_E_PENDING; 1726 } 1727 1728 if (dp_tx_mon_process_tlv_2_0(pdev, 1729 mon_desc_list_ref) != 1730 QDF_STATUS_SUCCESS) 1731 dp_tx_mon_status_queue_free(pdev, tx_mon_be, 1732 mon_desc_list_ref); 1733 } 1734 1735 return QDF_STATUS_SUCCESS; 1736 1737 free_status_buffer: 1738 dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset, 1739 mon_desc_list_ref); 1740 if 
(qdf_likely(tx_mon_be)) 1741 tx_mon_be->stats.status_buf_free++; 1742 1743 qdf_frag_free(status_frag); 1744 1745 return QDF_STATUS_E_NOMEM; 1746 } 1747 1748 #else 1749 1750 /** 1751 * dp_tx_mon_process_status_tlv() - API to processed TLV 1752 * invoked from interrupt handler 1753 * 1754 * @soc - DP_SOC handle 1755 * @pdev - DP_PDEV handle 1756 * @mon_ring_desc - descriptor status info 1757 * @addr - status buffer frag address 1758 * @end_offset - end offset of buffer that has valid buffer 1759 * @mon_desc_list_ref: tx monitor descriptor list reference 1760 * 1761 * Return: QDF_STATUS 1762 */ 1763 QDF_STATUS 1764 dp_tx_mon_process_status_tlv(struct dp_soc *soc, 1765 struct dp_pdev *pdev, 1766 struct hal_mon_desc *mon_ring_desc, 1767 qdf_frag_t status_frag, 1768 uint32_t end_offset, 1769 struct dp_tx_mon_desc_list *mon_desc_list_ref) 1770 { 1771 struct dp_mon_pdev *mon_pdev; 1772 struct dp_mon_pdev_be *mon_pdev_be; 1773 struct dp_pdev_tx_monitor_be *tx_mon_be; 1774 1775 /* sanity check */ 1776 if (qdf_unlikely(!pdev)) 1777 return QDF_STATUS_E_INVAL; 1778 1779 mon_pdev = pdev->monitor_pdev; 1780 if (qdf_unlikely(!mon_pdev)) 1781 return QDF_STATUS_E_INVAL; 1782 1783 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev); 1784 if (qdf_unlikely(!mon_pdev_be)) 1785 return QDF_STATUS_E_INVAL; 1786 1787 tx_mon_be = &mon_pdev_be->tx_monitor_be; 1788 1789 dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset, 1790 mon_desc_list_ref); 1791 tx_mon_be->stats.status_buf_free++; 1792 qdf_frag_free(status_frag); 1793 1794 return QDF_STATUS_E_INVAL; 1795 } 1796 1797 /** 1798 * dp_tx_mon_update_end_reason() - API to update end reason 1799 * 1800 * @mon_pdev - DP_MON_PDEV handle 1801 * @ppdu_id - ppdu_id 1802 * @end_reason - monitor destiantion descriptor end reason 1803 * 1804 * Return: void 1805 */ 1806 void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev, 1807 int ppdu_id, int end_reason) 1808 { 1809 } 1810 #endif 1811