/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <htt.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include "dp_peer.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "htt_ppdu_stats.h"
#include "dp_htt.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "cdp_txrx_cmn_struct.h"

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

#define HTT_HTC_PKT_POOL_INIT_SIZE 64

#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

#define HTT_PID_BIT_MASK 0x3

#define DP_EXT_MSG_LENGTH 2048
#define HTT_HEADER_LEN 16
#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16

#define HTT_SHIFT_UPPER_TIMESTAMP 32
#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000

#define HTT_HTC_PKT_STATUS_SUCCESS \
	((pkt->htc_pkt.Status != QDF_STATUS_E_CANCELED) && \
	 (pkt->htc_pkt.Status != QDF_STATUS_E_RESOURCES))

/*
 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
 * bitmap for sniffer mode
 * @bitmap: received bitmap
 *
 * Return: expected bitmap value, returns zero if it matches neither
 * the 64-bit Tx window nor the 256-bit window tlv bitmap
 */
int
dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
{
	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;

	return 0;
}

#ifdef FEATURE_PERPKT_INFO
/*
 * dp_peer_find_by_id_valid - check if peer exists for given id
 * @soc: core DP soc context
 * @peer_id: peer id by which the peer object can be retrieved
 *
 * Return: true if peer exists, false otherwise
 */
static
bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_HTT);

	if (peer) {
		/*
		 * Decrement the peer ref which is taken as part of
		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
		 */
		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

		return true;
	}

	return false;
}
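/*
 * Note that dp_peer_find_by_id_valid() drops the reference it takes
 * before returning, so a 'true' result only guarantees the peer existed
 * at the time of the lookup; callers that need to use the peer must take
 * their own reference via dp_peer_get_ref_by_id().
 */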
/*
 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
 * @peer: Datapath peer handle
 * @ppdu: User PPDU Descriptor
 * @cur_ppdu_id: PPDU_ID
 *
 * Return: None
 *
 * On a Tx data frame we may get the delayed-BA flag set in
 * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
 * only after we send a Block Ack Request (BAR). The successful msdu count
 * is received only after that Block Ack, and we need the successful msdus
 * (data frames) to populate peer stats, so we hold the Tx data stats in
 * delayed_ba_ppdu_stats until the stats update can run.
 */
static void
dp_peer_copy_delay_stats(struct dp_peer *peer,
			 struct cdp_tx_completion_ppdu_user *ppdu,
			 uint32_t cur_ppdu_id)
{
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	if (peer->last_delayed_ba) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
			  peer->last_delayed_ba_ppduid, cur_ppdu_id);
		vdev = peer->vdev;
		if (vdev) {
			pdev = vdev->pdev;
			pdev->stats.cdp_delayed_ba_not_recev++;
		}
	}

	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;

	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;

	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;

	peer->last_delayed_ba = true;

	ppdu->debug_copied = true;
}
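/*
 * Sketch of the delayed-BA flow implemented by the helper above and the
 * one below (illustrative PPDU ids only):
 *
 *   PPDU 10: data frame sent, BA not yet received
 *            -> dp_peer_copy_delay_stats() parks the rate/RU fields and
 *               sets peer->last_delayed_ba.
 *   PPDU 11: BAR sent, BA received with mpdu/msdu success counts
 *            -> dp_peer_copy_stats_to_bar() restores the parked fields
 *               into the BAR user descriptor and clears last_delayed_ba,
 *               so the stats update sees both the original Tx parameters
 *               and the final success counts.
 */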
/*
 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 *
 * For a Tx BAR, the PPDU stats TLV carries the Block Ack info. The PPDU
 * info from the Tx BAR frame itself is not needed to populate peer stats,
 * but its successful MPDU and MSDU counts are needed to update the
 * previously transmitted Tx data frame, so overwrite these ppdu stats
 * with the previously stored ones.
 */
static void
dp_peer_copy_stats_to_bar(struct dp_peer *peer,
			  struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;

	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;

	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;

	peer->last_delayed_ba = false;

	ppdu->debug_copied = true;
}
/*
 * dp_tx_rate_stats_update() - Update rate per-peer statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
static void
dp_tx_rate_stats_update(struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint32_t ratekbps = 0;
	uint64_t ppdu_tx_rate = 0;
	uint32_t rix;
	uint16_t ratecode = 0;

	if (!peer || !ppdu)
		return;

	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
		return;

	ratekbps = dp_getrateindex(ppdu->gi,
				   ppdu->mcs,
				   ppdu->nss,
				   ppdu->preamble,
				   ppdu->bw,
				   &rix,
				   &ratecode);

	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);

	if (!ratekbps)
		return;

	/* Calculate goodput in the non-training period.
	 * During the training period do nothing, as the pending
	 * packet is sent as goodput.
	 */
	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
				    (CDP_PERCENT_MACRO - ppdu->current_rate_per));
	}
	ppdu->rix = rix;
	ppdu->tx_ratekbps = ratekbps;
	ppdu->tx_ratecode = ratecode;
	peer->stats.tx.avg_tx_rate =
		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);

	if (peer->vdev) {
		/*
		 * In STA mode:
		 *	We get ucast stats as BSS peer stats.
		 *
		 * In AP mode:
		 *	We get mcast stats as BSS peer stats.
		 *	We get ucast stats as assoc peer stats.
		 */
		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
		} else {
			peer->vdev->stats.tx.last_tx_rate = ratekbps;
			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
		}
	}
}
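/*
 * Worked example for the sa_goodput computation above, assuming the cdp
 * definitions CDP_NUM_KB_IN_MB == 1000 and CDP_PERCENT_MACRO == 100:
 * a PHY rate of 144000 kbps with a current PER of 10% gives
 * (144000 / 1000) * (100 - 10) = 144 * 90 = 12960, i.e. goodput in
 * Mbps scaled by 100 (129.60 Mbps).
 */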
/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @pdev: Datapath pdev handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @ack_rssi: RSSI of last ack received
 *
 * Return: None
 */
static void
dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
		   struct cdp_tx_completion_ppdu_user *ppdu,
		   uint32_t ack_rssi)
{
	uint8_t preamble, mcs;
	uint16_t num_msdu;
	uint16_t num_mpdu;
	uint16_t mpdu_tried;
	uint16_t mpdu_failed;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;
	num_mpdu = ppdu->mpdu_success;
	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
	mpdu_failed = mpdu_tried - num_mpdu;

	/* If the peer statistics were already processed as part of the
	 * per-MSDU completion handler, do not process them again in the
	 * per-PPDU indications.
	 */
	if (pdev->soc->process_tx_status)
		return;

	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
		/*
		 * All failed MPDUs will be retried, so increment the
		 * retry count by the number of failed MPDUs. Even on
		 * ack failure, i.e. for long retries, mpdu_failed
		 * equals mpdu_tried.
		 */
		DP_STATS_INC(peer, tx.retries, mpdu_failed);
		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
		return;
	}

	if (ppdu->is_ppdu_cookie_valid)
		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);

	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "mu_group_id out of bounds!");
		else
			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
				     (ppdu->user_pos + 1));
	}

	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
		switch (ppdu->ru_tones) {
		case RU_26:
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_52:
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_106:
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_242:
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_484:
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_996:
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		}
	}

	/*
	 * All failed MPDUs will be retried, so increment the retry count
	 * by the number of failed MPDUs. Even on ack failure, i.e. for
	 * long retries, mpdu_failed equals mpdu_tried.
	 */
	DP_STATS_INC(peer, tx.retries, mpdu_failed);
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);

	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
		     num_msdu);
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
		     num_mpdu);
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
		     mpdu_tried);

	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			 num_msdu, (ppdu->success_bytes +
				    ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	if (ppdu->tid < CDP_DATA_TID_MAX)
		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
			     num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
	DP_STATS_INCC(peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);

	dp_peer_stats_notify(pdev, peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif
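/*
 * dp_rx_populate_cbf_hdr() below wraps a received CBF frame in a synthetic
 * HTT PPDU_STATS_IND header plus an RX_MGMTCTRL_PAYLOAD TLV before handing
 * it to the WDI layer. The repeated ((len + 3) >> 2) << 2 expressions round
 * a byte length up to the next 4-byte multiple, e.g. 13 -> 16 and 16 -> 16,
 * since the payload and TLV lengths are carried in 4-byte-aligned units.
 */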
QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
				  uint32_t mac_id,
				  uint32_t event,
				  qdf_nbuf_t mpdu,
				  uint32_t msdu_timestamp)
{
	uint32_t data_size, hdr_size, ppdu_id, align4byte;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t *msg_word;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	ppdu_id = pdev->ppdu_info.com_info.ppdu_id;

	hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
		+ qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);

	data_size = qdf_nbuf_len(mpdu);

	qdf_nbuf_push_head(mpdu, hdr_size);

	msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
	/*
	 * Populate the PPDU Stats Indication header
	 */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
	HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
	HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
	align4byte = ((data_size +
		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
		+ 3) >> 2) << 2;
	HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
	msg_word++;
	HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
	msg_word++;

	*msg_word = msdu_timestamp;
	msg_word++;
	/* Skip reserved field */
	msg_word++;
	/*
	 * Populate MGMT_CTRL Payload TLV first
	 */
	HTT_STATS_TLV_TAG_SET(*msg_word,
			      HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);

	align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
		+ 3) >> 2) << 2;
	HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
	msg_word++;

	HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
		*msg_word, data_size);
	msg_word++;

	dp_wdi_event_handler(event, soc, (void *)mpdu,
			     HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);

	qdf_nbuf_pull_head(mpdu, hdr_size);

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
#endif

/*
 * htt_htc_pkt_alloc() - Allocate HTC packet buffer
 * @htt_soc: HTT SOC handle
 *
 * Return: Pointer to htc packet buffer
 */
static struct dp_htt_htc_pkt *
htt_htc_pkt_alloc(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt = NULL;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	if (soc->htt_htc_pkt_freelist) {
		pkt = soc->htt_htc_pkt_freelist;
		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	if (!pkt)
		pkt = qdf_mem_malloc(sizeof(*pkt));

	if (!pkt)
		return NULL;

	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);

	return &pkt->u.pkt; /* not actually a dereference */
}

/*
 * htt_htc_pkt_free() - Free HTC packet buffer
 * @htt_soc: HTT SOC handle
 */
static void
htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
		(struct dp_htt_htc_pkt_union *)pkt;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
	u_pkt->u.next = soc->htt_htc_pkt_freelist;
	soc->htt_htc_pkt_freelist = u_pkt;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
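/*
 * The freelist above is a simple LIFO of dp_htt_htc_pkt_union nodes chained
 * through u.next and protected by htt_tx_mutex; htt_htc_pkt_alloc() pops
 * from it before falling back to qdf_mem_malloc(), and htt_htc_pkt_free()
 * pushes packets back rather than freeing them. htt_htc_pkt_pool_free()
 * below releases the whole list, which is intended for teardown paths.
 */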
/*
 * htt_htc_pkt_pool_free() - Free HTC packet pool
 * @htt_soc: HTT SOC handle
 */
void
htt_htc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;

	pkt = soc->htt_htc_pkt_freelist;
	while (pkt) {
		next = pkt->u.next;
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_freelist = NULL;
}

#ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST

static void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
}

#else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @htt_soc: HTT SOC handle
 * @level: max no. of pkts in list
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the outgrown list */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}

/*
 * htt_htc_misc_pkt_list_add() - Add pkt to misc list
 * @htt_soc: HTT SOC handle
 * @dp_htt_htc_pkt: pkt to be added to list
 */
static void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
		(struct dp_htt_htc_pkt_union *)pkt;
	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
							 pkt->htc_pkt.Endpoint)
				  + DP_HTT_HTC_PKT_MISCLIST_SIZE;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	if (soc->htt_htc_pkt_misclist) {
		u_pkt->u.next = soc->htt_htc_pkt_misclist;
		soc->htt_htc_pkt_misclist = u_pkt;
	} else {
		soc->htt_htc_pkt_misclist = u_pkt;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	/* only (CE pipe size + tx_queue_depth) packets can possibly be
	 * in use; free older packets in the misclist
	 */
	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
}

#endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/**
 * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
 * @soc: HTT SOC handle
 * @pkt: pkt to be sent
 * @cmd: command to be recorded in the dp htt logger
 * @buf: Pointer to the buffer to be recorded for the above cmd
 *
 * Return: QDF_STATUS of the send operation
 */
static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
					     struct dp_htt_htc_pkt *pkt,
					     uint8_t cmd, uint8_t *buf)
{
	QDF_STATUS status;

	htt_command_record(soc->htt_logger_handle, cmd, buf);

	status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
	if (status == QDF_STATUS_SUCCESS && HTT_HTC_PKT_STATUS_SUCCESS)
		htt_htc_misc_pkt_list_add(soc, pkt);
	else
		soc->stats.fail_count++;
	return status;
}
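/*
 * Note the success test above: HTT_HTC_PKT_STATUS_SUCCESS expands to a
 * check that pkt->htc_pkt.Status is neither QDF_STATUS_E_CANCELED nor
 * QDF_STATUS_E_RESOURCES, so a packet is parked on the misclist (for a
 * later unmap/free by the trim or pool-free routines) whenever
 * htc_send_pkt() accepted it and the completion status does not indicate
 * cancellation or resource exhaustion.
 */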
/*
 * htt_htc_misc_pkt_pool_free() - free pkts in misc list
 * @htt_soc: HTT SOC handle
 */
static void
htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;

	while (pkt) {
		next = pkt->u.next;
		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
		    HTC_PACKET_MAGIC_COOKIE) {
			pkt = next;
			soc->stats.skip_count++;
			continue;
		}
		netbuf = (qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

		soc->stats.htc_pkt_free++;
		dp_htt_info("%pK: Pkt free count %d",
			    soc->dp_soc, soc->stats.htc_pkt_free);

		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
	dp_info("HTC Packets, fail count = %d, skip count = %d",
		soc->stats.fail_count, soc->stats.skip_count);
}

/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs
 * @tgt_mac_addr: Target MAC
 * @buffer: Output buffer
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone.
	 */
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}

/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc: SOC handle
 * @status: Completion status
 * @netbuf: HTT buffer
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
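/*
 * Two variants of the H2T send-complete handler follow. With
 * ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST the netbuf that carried the
 * message is freed directly in the handler, since no misclist bookkeeping
 * is done. In the default build the handler instead invokes an optional
 * part-2 completion callback stashed in htc_pkt->pPktContext (for example
 * dp_htt_h2t_send_complete_free_netbuf(), passed as the context in
 * SET_HTC_PACKET_INFO_TX below) and only recycles the htt_htc_pkt wrapper.
 */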
#ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context: Opaque context (HTT SOC handle)
 * @htc_pkt: HTC packet
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	struct htt_soc *soc = (struct htt_soc *)context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t)htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	/* free the htt_htc_pkt / HTC_PACKET object */
	qdf_nbuf_free(netbuf);
	htt_htc_pkt_free(soc, htt_pkt);
}

#else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context: Opaque context (HTT SOC handle)
 * @htc_pkt: HTC packet
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc = (struct htt_soc *)context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t)htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}

#endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/*
 * htt_h2t_ver_req_msg() - Send HTT version request message to target
 * @htt_soc: HTT SOC handle
 *
 * Return: 0 on success; error code on failure
 */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
			  __func__);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg), soc->htc_endpoint,
			       HTC_TX_PACKET_TAG_RTPM_PUT_RC);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
				     NULL);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
/*
 * htt_srng_setup() - Send SRNG setup message to target
 * @htt_soc: HTT SOC handle
 * @mac_id: MAC Id
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 * @hal_ring_type: SRNG ring type
 *
 * Return: 0 on success; error code on failure
 */
int htt_srng_setup(struct htt_soc *soc, int mac_id,
		   hal_ring_handle_t hal_ring_hdl,
		   int hal_ring_type)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr, tp_addr;
	uint32_t ring_entry_size =
		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
	int htt_ring_type, htt_ring_id;
	uint8_t *htt_logger_bufp;
	int target_pdev_id;
	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
	QDF_STATUS status;

	/* Sizes should be set in 4-byte words */
	ring_entry_size = ring_entry_size >> 2;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
				 HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
				 /* reserve room for the HTC header */
				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
				 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);

	switch (hal_ring_type) {
	case RXDMA_BUF:
#ifdef QCA_HOST2FW_RXBUF_RING
		if (srng_params.ring_id ==
		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
			htt_ring_type = HTT_SW_TO_SW_RING;
#ifdef IPA_OFFLOAD
		} else if (srng_params.ring_id ==
			   (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
			htt_ring_type = HTT_SW_TO_SW_RING;
#endif
#else
		if (srng_params.ring_id ==
		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
		     (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			htt_ring_type = HTT_SW_TO_HW_RING;
#endif
		} else if (srng_params.ring_id ==
#ifdef IPA_OFFLOAD
			   (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
#else
			   (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
#endif
			    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: Ring %d currently not supported",
				  __func__, srng_params.ring_id);
			goto fail1;
		}

		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
			hal_ring_type, srng_params.ring_id, htt_ring_id,
			(uint64_t)hp_addr,
			(uint64_t)tp_addr);
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Ring currently not supported", __func__);
		goto fail1;
	}
	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for SRING_SETUP msg",
			  __func__);
		goto fail1;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	*msg_word = 0;
	htt_logger_bufp = (uint8_t *)msg_word;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
	target_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);

	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
	    (htt_ring_type == HTT_HW_TO_SW_RING))
		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
	else
		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);

	dp_info("mac_id %d", mac_id);
	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
		srng_params.ring_base_paddr & 0xffffffff);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)srng_params.ring_base_paddr >> 32);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
		(ring_entry_size * srng_params.num_entries));
	dp_info("entry_size %d", ring_entry_size);
	dp_info("num_entries %d", srng_params.num_entries);
	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
			*msg_word, 1);
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
		hp_addr & 0xffffffff);

	/* word 5 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)hp_addr >> 32);

	/* word 6 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
		tp_addr & 0xffffffff);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)tp_addr >> 32);

	/* word 8 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
		srng_params.msi_addr & 0xffffffff);

	/* word 9 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
		(uint64_t)(srng_params.msi_addr) >> 32);

	/* word 10 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
		qdf_cpu_to_le32(srng_params.msi_data));
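	/*
	 * Word 11 below carries the interrupt moderation thresholds. The
	 * batch counter threshold is converted from ring entries to 4-byte
	 * words by multiplying with ring_entry_size (already expressed in
	 * words), and the timer threshold is right-shifted by 3, which
	 * suggests the target consumes it in units of 8 microseconds.
	 */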
	/* word 11 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
		srng_params.intr_batch_cntr_thres_entries *
		ring_entry_size);
	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
		srng_params.intr_timer_thres_us >> 3);

	/* word 12 */
	msg_word++;
	*msg_word = 0;
	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
		/* TODO: Setting low threshold to 1/8th of ring size - see
		 * if this needs to be configurable
		 */
		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
			srng_params.low_threshold);
	}
	/* "response_required" field should be set if a HTT response message is
	 * required after setting up the ring.
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt)
		goto fail1;

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}
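/*
 * Illustrative call, as made from a pdev attach path (the ring handle
 * name below is a placeholder, not an actual dp_soc field):
 *
 *	htt_srng_setup(htt_soc, mac_id, rx_refill_ring_hdl, RXDMA_BUF);
 *
 * i.e. one SRING_SETUP message is sent per ring so the target learns the
 * ring's base address, entry size, head/tail pointer addresses and MSI
 * configuration.
 */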
#ifdef QCA_SUPPORT_FULL_MON
/**
 * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
 *
 * @htt_soc: HTT Soc handle
 * @pdev_id: Radio id
 * @config: enable/disable configuration
 *
 * Return: Success when HTT message is sent, error on failure
 */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
				 HTT_MSG_BUF_SIZE(
					HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
				 /* reserve room for the HTC header */
				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
				 4,
				 TRUE);
	if (!htt_msg)
		return QDF_STATUS_E_FAILURE;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for RX full monitor mode msg",
			  __func__);
		goto fail1;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	*msg_word = 0;
	htt_logger_bufp = (uint8_t *)msg_word;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
		*msg_word, DP_SW2HW_MACID(pdev_id));

	msg_word++;
	*msg_word = 0;
	/* word 1 */
	if (config == DP_FULL_MON_ENABLE) {
		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
	} else if (config == DP_FULL_MON_DISABLE) {
		/* As per the MAC team's suggestion, while disabling full
		 * monitor mode, set the 'en' bit to true in the full
		 * monitor mode register.
		 */
		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("HTC packet allocation failed");
		goto fail1;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	qdf_debug("config: %d", config);
	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
			    htt_logger_bufp);
	return QDF_STATUS_SUCCESS;
fail1:
	qdf_nbuf_free(htt_msg);
	return QDF_STATUS_E_FAILURE;
}
#else
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}
#endif
/*
 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
 * config message to target
 * @htt_soc: HTT SOC handle
 * @pdev_id: WIN- PDEV Id, MCL- mac id
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 * @hal_ring_type: SRNG ring type
 * @ring_buf_size: SRNG buffer size
 * @htt_tlv_filter: Rx SRNG TLV and filter setting
 *
 * Return: 0 on success; error code on failure
 */
int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
			hal_ring_handle_t hal_ring_hdl,
			int hal_ring_type, int ring_buf_size,
			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	uint32_t htt_ring_type, htt_ring_id;
	uint32_t tlv_filter;
	uint8_t *htt_logger_bufp;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
	int target_pdev_id;
	QDF_STATUS status;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
				 HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
				 /* reserve room for the HTC header */
				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
				 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);

	switch (hal_ring_type) {
	case RXDMA_BUF:
		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Ring currently not supported", __func__);
		goto fail1;
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for RX Ring Cfg msg",
			  __func__);
		goto fail1; /* failure */
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);

	/*
	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
	 */
	target_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);

	if (htt_ring_type == HTT_SW_TO_SW_RING ||
	    htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
						      target_pdev_id);

	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);

	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));

	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
		htt_tlv_filter->offset_valid);

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   1);
	else
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   0);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
						       ring_buf_size);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
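	/*
	 * The filter words below are programmed per monitor filter
	 * category: FP (filter pass), MD (monitor direct) and MO
	 * (monitor other). For each enabled category one enable bit is
	 * set per frame type/subtype; the subtype encoding follows
	 * 802.11, e.g. MGMT subtype 0000 is association request and
	 * 1000 is beacon, matching the FILTER_MGMT_* flags used below.
	 */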
	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0111,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}
	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	/* word 3 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
	}
	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	/* word 4 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 0000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 0001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 0010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 0011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 0100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 0101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 0110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 0111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 1000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			FP, CTRL, 1001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}
	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 0000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 0001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 0010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 0011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 0100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 0101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 0110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 0111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 1000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MD, CTRL, 1001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 0000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 0001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 0010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 0011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 0100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 0101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 0110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 0111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 1000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2,
			MO, CTRL, 1001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}
1 : 0); 1829 /* TYPE: DATA */ 1830 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1831 DATA, MCAST, 1832 (htt_tlv_filter->fp_data_filter & 1833 FILTER_DATA_MCAST) ? 1 : 0); 1834 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1835 DATA, UCAST, 1836 (htt_tlv_filter->fp_data_filter & 1837 FILTER_DATA_UCAST) ? 1 : 0); 1838 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1839 DATA, NULL, 1840 (htt_tlv_filter->fp_data_filter & 1841 FILTER_DATA_NULL) ? 1 : 0); 1842 } 1843 1844 if (htt_tlv_filter->enable_md) { 1845 /* TYPE: CTRL */ 1846 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1847 CTRL, 1010, 1848 (htt_tlv_filter->md_ctrl_filter & 1849 FILTER_CTRL_PSPOLL) ? 1 : 0); 1850 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1851 CTRL, 1011, 1852 (htt_tlv_filter->md_ctrl_filter & 1853 FILTER_CTRL_RTS) ? 1 : 0); 1854 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1855 CTRL, 1100, 1856 (htt_tlv_filter->md_ctrl_filter & 1857 FILTER_CTRL_CTS) ? 1 : 0); 1858 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1859 CTRL, 1101, 1860 (htt_tlv_filter->md_ctrl_filter & 1861 FILTER_CTRL_ACK) ? 1 : 0); 1862 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1863 CTRL, 1110, 1864 (htt_tlv_filter->md_ctrl_filter & 1865 FILTER_CTRL_CFEND) ? 1 : 0); 1866 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1867 CTRL, 1111, 1868 (htt_tlv_filter->md_ctrl_filter & 1869 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1870 /* TYPE: DATA */ 1871 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1872 DATA, MCAST, 1873 (htt_tlv_filter->md_data_filter & 1874 FILTER_DATA_MCAST) ? 1 : 0); 1875 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1876 DATA, UCAST, 1877 (htt_tlv_filter->md_data_filter & 1878 FILTER_DATA_UCAST) ? 1 : 0); 1879 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1880 DATA, NULL, 1881 (htt_tlv_filter->md_data_filter & 1882 FILTER_DATA_NULL) ? 1 : 0); 1883 } 1884 1885 if (htt_tlv_filter->enable_mo) { 1886 /* TYPE: CTRL */ 1887 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1888 CTRL, 1010, 1889 (htt_tlv_filter->mo_ctrl_filter & 1890 FILTER_CTRL_PSPOLL) ? 1 : 0); 1891 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1892 CTRL, 1011, 1893 (htt_tlv_filter->mo_ctrl_filter & 1894 FILTER_CTRL_RTS) ? 1 : 0); 1895 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1896 CTRL, 1100, 1897 (htt_tlv_filter->mo_ctrl_filter & 1898 FILTER_CTRL_CTS) ? 1 : 0); 1899 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1900 CTRL, 1101, 1901 (htt_tlv_filter->mo_ctrl_filter & 1902 FILTER_CTRL_ACK) ? 1 : 0); 1903 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1904 CTRL, 1110, 1905 (htt_tlv_filter->mo_ctrl_filter & 1906 FILTER_CTRL_CFEND) ? 1 : 0); 1907 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1908 CTRL, 1111, 1909 (htt_tlv_filter->mo_ctrl_filter & 1910 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1911 /* TYPE: DATA */ 1912 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1913 DATA, MCAST, 1914 (htt_tlv_filter->mo_data_filter & 1915 FILTER_DATA_MCAST) ? 1 : 0); 1916 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1917 DATA, UCAST, 1918 (htt_tlv_filter->mo_data_filter & 1919 FILTER_DATA_UCAST) ? 1 : 0); 1920 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1921 DATA, NULL, 1922 (htt_tlv_filter->mo_data_filter & 1923 FILTER_DATA_NULL) ? 
1 : 0); 1924 } 1925 1926 /* word 6 */ 1927 msg_word++; 1928 *msg_word = 0; 1929 tlv_filter = 0; 1930 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START, 1931 htt_tlv_filter->mpdu_start); 1932 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START, 1933 htt_tlv_filter->msdu_start); 1934 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET, 1935 htt_tlv_filter->packet); 1936 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END, 1937 htt_tlv_filter->msdu_end); 1938 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END, 1939 htt_tlv_filter->mpdu_end); 1940 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER, 1941 htt_tlv_filter->packet_header); 1942 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION, 1943 htt_tlv_filter->attention); 1944 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START, 1945 htt_tlv_filter->ppdu_start); 1946 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END, 1947 htt_tlv_filter->ppdu_end); 1948 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS, 1949 htt_tlv_filter->ppdu_end_user_stats); 1950 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, 1951 PPDU_END_USER_STATS_EXT, 1952 htt_tlv_filter->ppdu_end_user_stats_ext); 1953 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE, 1954 htt_tlv_filter->ppdu_end_status_done); 1955 /* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/ 1956 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED, 1957 htt_tlv_filter->header_per_msdu); 1958 1959 HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter); 1960 1961 msg_word++; 1962 *msg_word = 0; 1963 if (htt_tlv_filter->offset_valid) { 1964 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word, 1965 htt_tlv_filter->rx_packet_offset); 1966 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word, 1967 htt_tlv_filter->rx_header_offset); 1968 1969 msg_word++; 1970 *msg_word = 0; 1971 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word, 1972 htt_tlv_filter->rx_mpdu_end_offset); 1973 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word, 1974 htt_tlv_filter->rx_mpdu_start_offset); 1975 1976 msg_word++; 1977 *msg_word = 0; 1978 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word, 1979 htt_tlv_filter->rx_msdu_end_offset); 1980 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word, 1981 htt_tlv_filter->rx_msdu_start_offset); 1982 1983 msg_word++; 1984 *msg_word = 0; 1985 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word, 1986 htt_tlv_filter->rx_attn_offset); 1987 msg_word++; 1988 *msg_word = 0; 1989 } else { 1990 msg_word += 4; 1991 *msg_word = 0; 1992 } 1993 1994 if (mon_drop_th > 0) 1995 HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word, 1996 mon_drop_th); 1997 1998 /* "response_required" field should be set if a HTT response message is 1999 * required after setting up the ring. 
2000  */
2001 	pkt = htt_htc_pkt_alloc(soc);
2002 	if (!pkt)
2003 		goto fail1;
2004 
2005 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
2006 
2007 	SET_HTC_PACKET_INFO_TX(
2008 		&pkt->htc_pkt,
2009 		dp_htt_h2t_send_complete_free_netbuf,
2010 		qdf_nbuf_data(htt_msg),
2011 		qdf_nbuf_len(htt_msg),
2012 		soc->htc_endpoint,
2013 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
2014 
2015 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
2016 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
2017 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
2018 				     htt_logger_bufp);
2019 
2020 	if (status != QDF_STATUS_SUCCESS) {
2021 		qdf_nbuf_free(htt_msg);
2022 		htt_htc_pkt_free(soc, pkt);
2023 	}
2024 
2025 	return status;
2026 
2027 fail1:
2028 	qdf_nbuf_free(htt_msg);
2029 fail0:
2030 	return QDF_STATUS_E_FAILURE;
2031 }
2032 
2033 #if defined(HTT_STATS_ENABLE)
2034 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
2035 					       struct dp_soc *soc, qdf_nbuf_t htt_msg)
2036 
2037 {
2038 	uint32_t pdev_id;
2039 	uint32_t *msg_word = NULL;
2040 	uint32_t msg_remain_len = 0;
2041 
2042 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2043 
2044 	/* COOKIE MSB */
2045 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
2046 
2047 	/* stats message length + HTT header size (16 bytes) */
2048 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
2049 				 (uint32_t)DP_EXT_MSG_LENGTH);
2050 
2051 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
2052 			     msg_word, msg_remain_len,
2053 			     WDI_NO_VAL, pdev_id);
2054 
2055 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2056 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2057 	}
2058 	/* Needs to be freed here, as the WDI handler will
2059 	 * make a copy of the pkt to send the data to the application
2060 	 */
2061 	qdf_nbuf_free(htt_msg);
2062 	return QDF_STATUS_SUCCESS;
2063 }
2064 #else
2065 static inline QDF_STATUS
2066 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
2067 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
2068 {
2069 	return QDF_STATUS_E_NOSUPPORT;
2070 }
2071 #endif
2072 
2073 #ifdef HTT_STATS_DEBUGFS_SUPPORT
2074 /* dp_htt_stats_dbgfs_send_msg() - Function to send htt data to the upper layer
2075  * @pdev: dp pdev handle
2076  * @msg_word: HTT msg
2077  * @msg_len: Length of HTT msg sent
2078  *
2079  * Return: none
2080  */
2081 static inline void
2082 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
2083 			    uint32_t msg_len)
2084 {
2085 	struct htt_dbgfs_cfg dbgfs_cfg;
2086 	int done = 0;
2087 
2088 	/* send 5th word of HTT msg to upper layer */
2089 	dbgfs_cfg.msg_word = (msg_word + 4);
2090 	dbgfs_cfg.m = pdev->dbgfs_cfg->m;
2091 
2092 	/* stats message length + HTT header size (16 bytes) */
2093 	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
2094 
2095 	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
2096 		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
2097 							     (msg_len - HTT_HEADER_LEN));
2098 
2099 	/* Get TLV Done bit from 4th msg word */
2100 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
2101 	if (done) {
2102 		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
2103 			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
2104 				   , pdev->soc);
2105 	}
2106 }
2107 #else
2108 static inline void
2109 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
2110 			    uint32_t msg_len)
2111 {
2112 }
2113 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
2114 
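/*
 * Illustrative walk-through (assumed numbers, not from the driver): the FW
 * streams EXT stats in DP_EXT_MSG_LENGTH (2048 byte) chunks, and
 * dp_send_htt_stat_resp() above forwards
 * qdf_min(msg_len + HTT_HEADER_LEN, DP_EXT_MSG_LENGTH) bytes per buffer.
 * For a hypothetical msg_len of 5000 bytes:
 *
 *   buffer 1: forward min(5016, 2048) = 2048 bytes, msg_len -> 2952
 *   buffer 2: forward min(2968, 2048) = 2048 bytes, msg_len -> 904
 *   buffer 3: forward min(920, 2048)  = 920 bytes, stream complete
 *
 * dp_process_htt_stat_msg() below walks the same chunked stream TLV by TLV.
 */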
2115 /**
2116  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
2117  * @htt_stats: htt stats info
2118  *
2119  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
2120  * contains sub messages which are identified by a TLV header.
2121  * This function processes the stream of T2H messages and reads all the
2122  * TLVs contained in each message.
2123  *
2124  * The following cases are handled:
2125  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer.
2126  *         In this case the buffer will contain multiple tlvs.
2127  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
2128  *         Only one tlv will be contained in the HTT message and this tag
2129  *         will extend onto the next buffer.
2130  * Case 3: When the buffer is the continuation of the previous message.
2131  * Case 4: When the TLV length is 0, which indicates the end of the message.
2132  *
2133  * return: void
2134  */
2135 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
2136 					   struct dp_soc *soc)
2137 {
2138 	htt_tlv_tag_t tlv_type = 0xff;
2139 	qdf_nbuf_t htt_msg = NULL;
2140 	uint32_t *msg_word;
2141 	uint8_t *tlv_buf_head = NULL;
2142 	uint8_t *tlv_buf_tail = NULL;
2143 	uint32_t msg_remain_len = 0;
2144 	uint32_t tlv_remain_len = 0;
2145 	uint32_t *tlv_start;
2146 	int cookie_val = 0;
2147 	int cookie_msb = 0;
2148 	int pdev_id;
2149 	bool copy_stats = false;
2150 	struct dp_pdev *pdev;
2151 
2152 	/* Process node in the HTT message queue */
2153 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2154 	       != NULL) {
2155 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2156 		cookie_val = *(msg_word + 1);
2157 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
2158 					*(msg_word +
2159 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
2160 
2161 		if (cookie_val) {
2162 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
2163 			    == QDF_STATUS_SUCCESS) {
2164 				continue;
2165 			}
2166 		}
2167 
2168 		cookie_msb = *(msg_word + 2);
2169 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
2170 		pdev = soc->pdev_list[pdev_id];
2171 
2172 		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
2173 			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
2174 						    htt_stats->msg_len);
2175 			qdf_nbuf_free(htt_msg);
2176 			continue;
2177 		}
2178 
2179 		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
2180 			copy_stats = true;
2181 
2182 		/* read 5th word */
2183 		msg_word = msg_word + 4;
2184 		msg_remain_len = qdf_min(htt_stats->msg_len,
2185 					 (uint32_t) DP_EXT_MSG_LENGTH);
2186 		/* Keep processing the node till node length is 0 */
2187 		while (msg_remain_len) {
2188 			/*
2189 			 * If the message is not a continuation of a previous
2190 			 * message, read the tlv type and tlv length.
2191 			 */
2192 			if (!tlv_buf_head) {
2193 				tlv_type = HTT_STATS_TLV_TAG_GET(
2194 						*msg_word);
2195 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
2196 						*msg_word);
2197 			}
2198 
2199 			if (tlv_remain_len == 0) {
2200 				msg_remain_len = 0;
2201 
2202 				if (tlv_buf_head) {
2203 					qdf_mem_free(tlv_buf_head);
2204 					tlv_buf_head = NULL;
2205 					tlv_buf_tail = NULL;
2206 				}
2207 
2208 				goto error;
2209 			}
2210 
2211 			if (!tlv_buf_head)
2212 				tlv_remain_len += HTT_TLV_HDR_LEN;
2213 
2214 			if ((tlv_remain_len <= msg_remain_len)) {
2215 				/* Case 3 */
2216 				if (tlv_buf_head) {
2217 					qdf_mem_copy(tlv_buf_tail,
2218 						     (uint8_t *)msg_word,
2219 						     tlv_remain_len);
2220 					tlv_start = (uint32_t *)tlv_buf_head;
2221 				} else {
2222 					/* Case 1 */
2223 					tlv_start = msg_word;
2224 				}
2225 
2226 				if (copy_stats)
2227 					dp_htt_stats_copy_tag(pdev,
2228 							      tlv_type,
2229 							      tlv_start);
2230 				else
2231 					dp_htt_stats_print_tag(pdev,
2232 							       tlv_type,
2233 							       tlv_start);
2234 
2235 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
2236 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
2237 					dp_peer_update_inactive_time(pdev,
2238 tlv_type, 2239 tlv_start); 2240 2241 msg_remain_len -= tlv_remain_len; 2242 2243 msg_word = (uint32_t *) 2244 (((uint8_t *)msg_word) + 2245 tlv_remain_len); 2246 2247 tlv_remain_len = 0; 2248 2249 if (tlv_buf_head) { 2250 qdf_mem_free(tlv_buf_head); 2251 tlv_buf_head = NULL; 2252 tlv_buf_tail = NULL; 2253 } 2254 2255 } else { /* tlv_remain_len > msg_remain_len */ 2256 /* Case 2 & 3 */ 2257 if (!tlv_buf_head) { 2258 tlv_buf_head = qdf_mem_malloc( 2259 tlv_remain_len); 2260 2261 if (!tlv_buf_head) { 2262 QDF_TRACE(QDF_MODULE_ID_TXRX, 2263 QDF_TRACE_LEVEL_ERROR, 2264 "Alloc failed"); 2265 goto error; 2266 } 2267 2268 tlv_buf_tail = tlv_buf_head; 2269 } 2270 2271 qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word, 2272 msg_remain_len); 2273 tlv_remain_len -= msg_remain_len; 2274 tlv_buf_tail += msg_remain_len; 2275 } 2276 } 2277 2278 if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) { 2279 htt_stats->msg_len -= DP_EXT_MSG_LENGTH; 2280 } 2281 2282 qdf_nbuf_free(htt_msg); 2283 } 2284 return; 2285 2286 error: 2287 qdf_nbuf_free(htt_msg); 2288 while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg)) 2289 != NULL) 2290 qdf_nbuf_free(htt_msg); 2291 } 2292 2293 void htt_t2h_stats_handler(void *context) 2294 { 2295 struct dp_soc *soc = (struct dp_soc *)context; 2296 struct htt_stats_context htt_stats; 2297 uint32_t *msg_word; 2298 qdf_nbuf_t htt_msg = NULL; 2299 uint8_t done; 2300 uint32_t rem_stats; 2301 2302 if (!soc) { 2303 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2304 "soc is NULL"); 2305 return; 2306 } 2307 2308 if (!qdf_atomic_read(&soc->cmn_init_done)) { 2309 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2310 "soc: 0x%pK, init_done: %d", soc, 2311 qdf_atomic_read(&soc->cmn_init_done)); 2312 return; 2313 } 2314 2315 qdf_mem_zero(&htt_stats, sizeof(htt_stats)); 2316 qdf_nbuf_queue_init(&htt_stats.msg); 2317 2318 /* pull one completed stats from soc->htt_stats_msg and process */ 2319 qdf_spin_lock_bh(&soc->htt_stats.lock); 2320 if (!soc->htt_stats.num_stats) { 2321 qdf_spin_unlock_bh(&soc->htt_stats.lock); 2322 return; 2323 } 2324 while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) { 2325 msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); 2326 msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET; 2327 done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); 2328 qdf_nbuf_queue_add(&htt_stats.msg, htt_msg); 2329 /* 2330 * Done bit signifies that this is the last T2H buffer in the 2331 * stream of HTT EXT STATS message 2332 */ 2333 if (done) 2334 break; 2335 } 2336 rem_stats = --soc->htt_stats.num_stats; 2337 qdf_spin_unlock_bh(&soc->htt_stats.lock); 2338 2339 /* If there are more stats to process, schedule stats work again. 
2340 	 * Scheduling prior to processing htt_stats queues the work at the
2341 	 * earlier index.
2342 	 */
2343 	if (rem_stats)
2344 		qdf_sched_work(0, &soc->htt_stats.work);
2345 
2346 	dp_process_htt_stat_msg(&htt_stats, soc);
2347 }
2348 
2349 /*
2350  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a
2351  * PPDU, if a new peer id arrives in the PPDU
2352  * @pdev: DP pdev handle
2353  * @peer_id: peer unique identifier
2354  * @ppdu_info: per ppdu tlv structure
2355  *
2356  * return: user index to be populated
2357  */
2358 #ifdef FEATURE_PERPKT_INFO
2359 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
2360 					   uint16_t peer_id,
2361 					   struct ppdu_info *ppdu_info)
2362 {
2363 	uint8_t user_index = 0;
2364 	struct cdp_tx_completion_ppdu *ppdu_desc;
2365 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2366 
2367 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2368 
2369 	while ((user_index + 1) <= ppdu_info->last_user) {
2370 		ppdu_user_desc = &ppdu_desc->user[user_index];
2371 		if (ppdu_user_desc->peer_id != peer_id) {
2372 			user_index++;
2373 			continue;
2374 		} else {
2375 			/* Max users possible is 8 so user array index should
2376 			 * not exceed 7
2377 			 */
2378 			qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
2379 			return user_index;
2380 		}
2381 	}
2382 
2383 	ppdu_info->last_user++;
2384 	/* Max users possible is 8 so last user should not exceed 8 */
2385 	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
2386 	return ppdu_info->last_user - 1;
2387 }
2388 
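/*
 * Illustrative example for dp_get_ppdu_info_user_index() (assumed peer ids,
 * not from the driver): if per-user TLVs arrive for peer_id 7, 12 and then
 * 7 again within one PPDU, the function returns user indices 0, 1 and 0
 * respectively, and ppdu_info->last_user ends up at 2. A repeated peer_id
 * therefore always maps back to the user slot first allocated for it.
 */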
2389 /*
2390  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2391  * @pdev: DP pdev handle
2392  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
2393  * @ppdu_info: per ppdu tlv structure
2394  *
2395  * return: void
2396  */
2397 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2398 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2399 {
2400 	uint16_t frame_type;
2401 	uint16_t frame_ctrl;
2402 	uint16_t freq;
2403 	struct dp_soc *soc = NULL;
2404 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2405 	uint64_t ppdu_start_timestamp;
2406 	uint32_t *start_tag_buf;
2407 
2408 	start_tag_buf = tag_buf;
2409 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2410 
2411 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2412 
2413 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
2414 	ppdu_info->sched_cmdid =
2415 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2416 	ppdu_desc->num_users =
2417 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2418 
2419 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
2420 
2421 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
2422 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2423 	ppdu_desc->htt_frame_type = frame_type;
2424 
2425 	frame_ctrl = ppdu_desc->frame_ctrl;
2426 
2427 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
2428 
2429 	switch (frame_type) {
2430 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2431 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
2432 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2433 		/*
2434 		 * For management packets the frame type comes as DATA_SU,
2435 		 * so frame_ctrl needs to be checked before setting frame_type.
2436 		 */
2437 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2438 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2439 		else
2440 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2441 		break;
2442 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2443 	case HTT_STATS_FTYPE_SGEN_BAR:
2444 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2445 		break;
2446 	default:
2447 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2448 		break;
2449 	}
2450 
2451 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
2452 	ppdu_desc->tx_duration = *tag_buf;
2453 
2454 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
2455 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2456 
2457 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
2458 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2459 	if (freq != ppdu_desc->channel) {
2460 		soc = pdev->soc;
2461 		ppdu_desc->channel = freq;
2462 		pdev->operating_channel.freq = freq;
2463 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2464 			pdev->operating_channel.num =
2465 				soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2466 								     pdev->pdev_id,
2467 								     freq);
2468 
2469 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
2470 			pdev->operating_channel.band =
2471 				soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
2472 								  pdev->pdev_id,
2473 								  freq);
2474 	}
2475 
2476 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2477 
2478 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2479 	ppdu_desc->phy_ppdu_tx_time_us =
2480 		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
2481 	ppdu_desc->beam_change =
2482 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2483 	ppdu_desc->doppler =
2484 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
2485 	ppdu_desc->spatial_reuse =
2486 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
2487 
2488 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
2489 
2490 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2491 	ppdu_start_timestamp = *tag_buf;
2492 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2493 					     HTT_SHIFT_UPPER_TIMESTAMP) &
2494 					    HTT_MASK_UPPER_TIMESTAMP);
2495 
2496 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2497 					ppdu_desc->tx_duration;
2498 	/* Ack timestamp is the same as the end timestamp */
2499 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2500 
2504 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2505 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2506 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2507 
2511 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
2512 	ppdu_desc->bss_color =
2513 		HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
2514 }
2515 
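/*
 * Illustrative example (assumed values, not from the driver): the common
 * TLV carries the PPDU start timestamp as two 32-bit halves. With a lower
 * half of 0x9abcdef0 and an upper half of 0x12345678, the handler above
 * assembles
 *
 *   0x9abcdef0 | ((0x12345678 << HTT_SHIFT_UPPER_TIMESTAMP) &
 *                 HTT_MASK_UPPER_TIMESTAMP) = 0x123456789abcdef0
 *
 * and then derives ppdu_end_timestamp by adding tx_duration to it.
 */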
2516 /*
2517  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
2518  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2519  * @ppdu_info: per ppdu tlv structure
2520  *
2521  * return: void
2522  */
2523 static void dp_process_ppdu_stats_user_common_tlv(
2524 		struct dp_pdev *pdev, uint32_t *tag_buf,
2525 		struct ppdu_info *ppdu_info)
2526 {
2527 	uint16_t peer_id;
2528 	struct cdp_tx_completion_ppdu *ppdu_desc;
2529 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2530 	uint8_t curr_user_index = 0;
2531 	struct dp_peer *peer;
2532 	struct dp_vdev *vdev;
2533 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2534 
2535 	ppdu_desc =
2536 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2537 
2538 	tag_buf++;
2539 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2540 
2541 	curr_user_index =
2542 		dp_get_ppdu_info_user_index(pdev,
2543 					    peer_id, ppdu_info);
2544 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2545 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2546 
2547 	ppdu_desc->vdev_id =
2548 		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
2549 
2550 	ppdu_user_desc->peer_id = peer_id;
2551 
2552 	tag_buf++;
2553 
2554 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
2555 		ppdu_user_desc->delayed_ba = 1;
2556 		ppdu_desc->delayed_ba = 1;
2557 	}
2558 
2559 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
2560 		ppdu_user_desc->is_mcast = true;
2561 		ppdu_user_desc->mpdu_tried_mcast =
2562 			HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2563 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
2564 	} else {
2565 		ppdu_user_desc->mpdu_tried_ucast =
2566 			HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2567 	}
2568 
2569 	ppdu_user_desc->is_seq_num_valid =
2570 		HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
2571 	tag_buf++;
2572 
2573 	ppdu_user_desc->qos_ctrl =
2574 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
2575 	ppdu_user_desc->frame_ctrl =
2576 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
2577 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
2578 
2579 	if (ppdu_user_desc->delayed_ba)
2580 		ppdu_user_desc->mpdu_success = 0;
2581 
2582 	tag_buf += 3;
2583 
2584 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
2585 		ppdu_user_desc->ppdu_cookie =
2586 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
2587 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
2588 	}
2589 
2590 	/* returning earlier would leave the other fields unpopulated */
2591 	if (peer_id == DP_SCAN_PEER_ID) {
2592 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2593 					     DP_MOD_ID_TX_PPDU_STATS);
2594 		if (!vdev)
2595 			return;
2596 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2597 			     QDF_MAC_ADDR_SIZE);
2598 		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
2599 	} else {
2600 		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
2601 					     DP_MOD_ID_TX_PPDU_STATS);
2602 		if (!peer) {
2603 			/*
2604 			 * FW sends a peer_id which is about to be removed,
2605 			 * but it was already removed on the host,
2606 			 * e.g. for disassoc, FW sends ppdu stats with a
2607 			 * peer_id equal to the previously associated
2608 			 * peer's peer_id, but that peer was already removed.
2609 			 */
2610 			vdev = dp_vdev_get_ref_by_id(pdev->soc,
2611 						     ppdu_desc->vdev_id,
2612 						     DP_MOD_ID_TX_PPDU_STATS);
2613 			if (!vdev)
2614 				return;
2615 			qdf_mem_copy(ppdu_user_desc->mac_addr,
2616 				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2617 			dp_vdev_unref_delete(pdev->soc, vdev,
2618 					     DP_MOD_ID_TX_PPDU_STATS);
2619 			return;
2620 		}
2621 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2622 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2623 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
2624 	}
2625 }
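/*
 * Note on the reference pattern used throughout these TLV handlers (a
 * descriptive sketch of the existing discipline, not new logic): every
 * lookup that can race with peer or vdev deletion follows
 * acquire -> use -> release, e.g.
 *
 *   peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_PPDU_STATS);
 *   if (!peer)
 *           return;           // peer already gone, nothing to update
 *   // ... use peer ...
 *   dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
 *
 * so a peer freed between TLVs is never dereferenced here.
 */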
2626 
2627 
2628 /**
2629  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2630  * @pdev: DP pdev handle
2631  * @tag_buf: T2H message buffer carrying the user rate TLV
2632  * @ppdu_info: per ppdu tlv structure
2633  *
2634  * return: void
2635  */
2636 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
2637 						uint32_t *tag_buf,
2638 						struct ppdu_info *ppdu_info)
2639 {
2640 	uint16_t peer_id;
2641 	struct cdp_tx_completion_ppdu *ppdu_desc;
2642 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2643 	uint8_t curr_user_index = 0;
2644 	struct dp_vdev *vdev;
2645 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2646 
2647 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2648 
2649 	tag_buf++;
2650 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2651 
2652 	curr_user_index =
2653 		dp_get_ppdu_info_user_index(pdev,
2654 					    peer_id, ppdu_info);
2655 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2656 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2657 	if (peer_id == DP_SCAN_PEER_ID) {
2658 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2659 					     DP_MOD_ID_TX_PPDU_STATS);
2660 		if (!vdev)
2661 			return;
2662 		dp_vdev_unref_delete(pdev->soc, vdev,
2663 				     DP_MOD_ID_TX_PPDU_STATS);
2664 	}
2665 	ppdu_user_desc->peer_id = peer_id;
2666 
2667 	ppdu_user_desc->tid =
2668 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
2669 
2670 	tag_buf += 1;
2671 
2672 	ppdu_user_desc->user_pos =
2673 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2674 	ppdu_user_desc->mu_group_id =
2675 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2676 
2677 	tag_buf += 1;
2678 
2679 	ppdu_user_desc->ru_start =
2680 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
2681 	ppdu_user_desc->ru_tones =
2682 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2683 		 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
2684 	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
2685 
2686 	tag_buf += 2;
2687 
2688 	ppdu_user_desc->ppdu_type =
2689 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2690 
2691 	tag_buf++;
2692 	ppdu_user_desc->tx_rate = *tag_buf;
2693 
2694 	ppdu_user_desc->ltf_size =
2695 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2696 	ppdu_user_desc->stbc =
2697 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2698 	ppdu_user_desc->he_re =
2699 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2700 	ppdu_user_desc->txbf =
2701 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2702 	ppdu_user_desc->bw =
2703 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2704 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2705 	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
2706 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2707 	ppdu_user_desc->preamble =
2708 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2709 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
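/*
 * Illustrative note on the RU fields read above (assumed values, not from
 * the driver): ru_tones is derived as (ru_end - ru_start) + 1, so a user
 * rate TLV reporting ru_start = 0 and ru_end = 36 yields 37 tones for that
 * user; the per-user values are accumulated into usr_ru_tones_sum for the
 * ATF phy-tx-time split performed later.
 */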
ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf); 2711 ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf); 2712 } 2713 2714 /* 2715 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process 2716 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv 2717 * pdev: DP PDEV handle 2718 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv 2719 * @ppdu_info: per ppdu tlv structure 2720 * 2721 * return:void 2722 */ 2723 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( 2724 struct dp_pdev *pdev, uint32_t *tag_buf, 2725 struct ppdu_info *ppdu_info) 2726 { 2727 htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf = 2728 (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf; 2729 2730 struct cdp_tx_completion_ppdu *ppdu_desc; 2731 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 2732 uint8_t curr_user_index = 0; 2733 uint16_t peer_id; 2734 uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS; 2735 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 2736 2737 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2738 2739 tag_buf++; 2740 2741 peer_id = 2742 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 2743 2744 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 2745 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 2746 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 2747 ppdu_user_desc->peer_id = peer_id; 2748 2749 ppdu_user_desc->start_seq = dp_stats_buf->start_seq; 2750 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, 2751 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); 2752 2753 dp_process_ppdu_stats_update_failed_bitmap(pdev, 2754 (void *)ppdu_user_desc, 2755 ppdu_info->ppdu_id, 2756 size); 2757 } 2758 2759 /* 2760 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process 2761 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv 2762 * soc: DP SOC handle 2763 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv 2764 * @ppdu_info: per ppdu tlv structure 2765 * 2766 * return:void 2767 */ 2768 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( 2769 struct dp_pdev *pdev, uint32_t *tag_buf, 2770 struct ppdu_info *ppdu_info) 2771 { 2772 htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf = 2773 (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf; 2774 2775 struct cdp_tx_completion_ppdu *ppdu_desc; 2776 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 2777 uint8_t curr_user_index = 0; 2778 uint16_t peer_id; 2779 uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS; 2780 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 2781 2782 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2783 2784 tag_buf++; 2785 2786 peer_id = 2787 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 2788 2789 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 2790 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 2791 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 2792 ppdu_user_desc->peer_id = peer_id; 2793 2794 ppdu_user_desc->start_seq = dp_stats_buf->start_seq; 2795 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, 2796 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); 2797 2798 dp_process_ppdu_stats_update_failed_bitmap(pdev, 2799 (void *)ppdu_user_desc, 2800 ppdu_info->ppdu_id, 2801 size); 2802 } 2803 2804 /* 2805 * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process 2806 * htt_ppdu_stats_user_cmpltn_common_tlv 2807 * soc: DP SOC handle 2808 * @tag_buf: buffer containing the tlv 
htt_ppdu_stats_user_cmpltn_common_tlv 2809 * @ppdu_info: per ppdu tlv structure 2810 * 2811 * return:void 2812 */ 2813 static void dp_process_ppdu_stats_user_cmpltn_common_tlv( 2814 struct dp_pdev *pdev, uint32_t *tag_buf, 2815 struct ppdu_info *ppdu_info) 2816 { 2817 uint16_t peer_id; 2818 struct cdp_tx_completion_ppdu *ppdu_desc; 2819 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 2820 uint8_t curr_user_index = 0; 2821 uint8_t bw_iter; 2822 htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf = 2823 (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf; 2824 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 2825 2826 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2827 2828 tag_buf++; 2829 peer_id = 2830 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); 2831 2832 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 2833 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 2834 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 2835 ppdu_user_desc->peer_id = peer_id; 2836 2837 ppdu_user_desc->completion_status = 2838 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET( 2839 *tag_buf); 2840 2841 ppdu_user_desc->tid = 2842 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf); 2843 2844 2845 tag_buf++; 2846 if (qdf_likely(ppdu_user_desc->completion_status == 2847 HTT_PPDU_STATS_USER_STATUS_OK)) { 2848 ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi; 2849 ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi; 2850 ppdu_user_desc->ack_rssi_valid = 1; 2851 } else { 2852 ppdu_user_desc->ack_rssi_valid = 0; 2853 } 2854 2855 tag_buf++; 2856 2857 ppdu_user_desc->mpdu_success = 2858 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf); 2859 2860 ppdu_user_desc->mpdu_failed = 2861 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) - 2862 ppdu_user_desc->mpdu_success; 2863 2864 tag_buf++; 2865 2866 ppdu_user_desc->long_retries = 2867 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf); 2868 2869 ppdu_user_desc->short_retries = 2870 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf); 2871 ppdu_user_desc->retry_msdus = 2872 ppdu_user_desc->long_retries + ppdu_user_desc->short_retries; 2873 2874 ppdu_user_desc->is_ampdu = 2875 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf); 2876 ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu; 2877 2878 ppdu_desc->resp_type = 2879 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf); 2880 ppdu_desc->mprot_type = 2881 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf); 2882 ppdu_desc->rts_success = 2883 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf); 2884 ppdu_desc->rts_failure = 2885 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf); 2886 ppdu_user_desc->pream_punct = 2887 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf); 2888 2889 ppdu_info->compltn_common_tlv++; 2890 2891 /* 2892 * MU BAR may send request to n users but we may received ack only from 2893 * m users. 
To have count of number of users respond back, we have a 2894 * separate counter bar_num_users per PPDU that get increment for every 2895 * htt_ppdu_stats_user_cmpltn_common_tlv 2896 */ 2897 ppdu_desc->bar_num_users++; 2898 2899 tag_buf++; 2900 for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) { 2901 ppdu_user_desc->rssi_chain[bw_iter] = 2902 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf); 2903 tag_buf++; 2904 } 2905 2906 ppdu_user_desc->sa_tx_antenna = 2907 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf); 2908 2909 tag_buf++; 2910 ppdu_user_desc->sa_is_training = 2911 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf); 2912 if (ppdu_user_desc->sa_is_training) { 2913 ppdu_user_desc->sa_goodput = 2914 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf); 2915 } 2916 2917 tag_buf++; 2918 for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) { 2919 ppdu_user_desc->sa_max_rates[bw_iter] = 2920 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]); 2921 } 2922 2923 tag_buf += CDP_NUM_SA_BW; 2924 ppdu_user_desc->current_rate_per = 2925 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf); 2926 } 2927 2928 /* 2929 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process 2930 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv 2931 * pdev: DP PDEV handle 2932 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv 2933 * @ppdu_info: per ppdu tlv structure 2934 * 2935 * return:void 2936 */ 2937 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( 2938 struct dp_pdev *pdev, uint32_t *tag_buf, 2939 struct ppdu_info *ppdu_info) 2940 { 2941 htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf = 2942 (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf; 2943 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 2944 struct cdp_tx_completion_ppdu *ppdu_desc; 2945 uint8_t curr_user_index = 0; 2946 uint16_t peer_id; 2947 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 2948 2949 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2950 2951 tag_buf++; 2952 2953 peer_id = 2954 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 2955 2956 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 2957 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 2958 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 2959 ppdu_user_desc->peer_id = peer_id; 2960 2961 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; 2962 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, 2963 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); 2964 ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32; 2965 } 2966 2967 /* 2968 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process 2969 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv 2970 * pdev: DP PDEV handle 2971 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv 2972 * @ppdu_info: per ppdu tlv structure 2973 * 2974 * return:void 2975 */ 2976 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( 2977 struct dp_pdev *pdev, uint32_t *tag_buf, 2978 struct ppdu_info *ppdu_info) 2979 { 2980 htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf = 2981 (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf; 2982 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 2983 struct cdp_tx_completion_ppdu *ppdu_desc; 2984 uint8_t curr_user_index = 0; 2985 uint16_t peer_id; 2986 uint32_t tlv_type = 
HTT_STATS_TLV_TAG_GET(*tag_buf);
2987 
2988 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2989 
2990 	tag_buf++;
2991 
2992 	peer_id =
2993 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2994 
2995 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2996 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2997 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2998 	ppdu_user_desc->peer_id = peer_id;
2999 
3000 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
3001 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
3002 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
3003 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
3004 }
3005 
3006 /*
3007  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
3008  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
3009  * @pdev: DP PDEV handle
3010  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
3011  * @ppdu_info: per ppdu tlv structure
3012  *
3013  * return: void
3014  */
3015 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
3016 		struct dp_pdev *pdev, uint32_t *tag_buf,
3017 		struct ppdu_info *ppdu_info)
3018 {
3019 	uint16_t peer_id;
3020 	struct cdp_tx_completion_ppdu *ppdu_desc;
3021 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3022 	uint8_t curr_user_index = 0;
3023 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3024 
3025 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3026 
3027 	tag_buf += 2;
3028 	peer_id =
3029 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
3030 
3031 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3032 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3033 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3034 	if (!ppdu_user_desc->ack_ba_tlv) {
3035 		ppdu_user_desc->ack_ba_tlv = 1;
3036 	} else {
3037 		pdev->stats.ack_ba_comes_twice++;
3038 		return;
3039 	}
3040 
3041 	ppdu_user_desc->peer_id = peer_id;
3042 
3043 	tag_buf++;
3044 	/* do not update ppdu_desc->tid from this TLV */
3045 	ppdu_user_desc->num_mpdu =
3046 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
3047 
3048 	ppdu_user_desc->num_msdu =
3049 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
3050 
3051 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
3052 
3053 	tag_buf++;
3054 	ppdu_user_desc->start_seq =
3055 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
3056 			*tag_buf);
3057 
3058 	tag_buf++;
3059 	ppdu_user_desc->success_bytes = *tag_buf;
3060 
3061 	/* increase the ack ba tlv counter on a successful mpdu */
3062 	if (ppdu_user_desc->num_mpdu)
3063 		ppdu_info->ack_ba_tlv++;
3064 
3065 	if (ppdu_user_desc->ba_size == 0) {
3066 		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
3067 		ppdu_user_desc->ba_bitmap[0] = 1;
3068 		ppdu_user_desc->ba_size = 1;
3069 	}
3070 }
3071 
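/*
 * Illustrative example for the ba_size == 0 fallback above (assumed
 * values, not from the driver): if the ack/BA status TLV carries
 * start_seq = 100 but reports no BA window, the handler synthesizes a
 * minimal single-frame window:
 *
 *   ba_seq_no    = 100  (taken from start_seq)
 *   ba_bitmap[0] = 1    (single MPDU acked)
 *   ba_size      = 1
 *
 * so that later per-user accounting can treat acked and block-acked MPDUs
 * uniformly.
 */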
3072 /*
3073  * dp_process_ppdu_stats_user_common_array_tlv: Process
3074  * htt_ppdu_stats_user_common_array_tlv
3075  * @pdev: DP PDEV handle
3076  * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
3077  * @ppdu_info: per ppdu tlv structure
3078  *
3079  * return: void
3080  */
3081 static void dp_process_ppdu_stats_user_common_array_tlv(
3082 		struct dp_pdev *pdev, uint32_t *tag_buf,
3083 		struct ppdu_info *ppdu_info)
3084 {
3085 	uint32_t peer_id;
3086 	struct cdp_tx_completion_ppdu *ppdu_desc;
3087 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3088 	uint8_t curr_user_index = 0;
3089 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
3090 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3091 
3092 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3093 
3094 	tag_buf++;
3095 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
3096 	tag_buf += 3;
3097 	peer_id =
3098 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
3099 
3100 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
3101 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3102 			  "Invalid peer");
3103 		return;
3104 	}
3105 
3106 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3107 
3108 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3109 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3110 
3111 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
3112 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
3113 
3114 	tag_buf++;
3115 
3116 	ppdu_user_desc->success_msdus =
3117 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
3118 	ppdu_user_desc->retry_bytes =
3119 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
3120 	tag_buf++;
3121 	ppdu_user_desc->failed_msdus =
3122 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
3123 }
3124 
3125 /*
3126  * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
3127  * htt_ppdu_stats_flush_tlv
3128  * @pdev: DP PDEV handle
3129  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
3130  * @ppdu_info: per ppdu tlv structure
3131  *
3132  * return: void
3133  */
3134 static void
3135 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
3136 					     uint32_t *tag_buf,
3137 					     struct ppdu_info *ppdu_info)
3138 {
3139 	struct cdp_tx_completion_ppdu *ppdu_desc;
3140 	uint32_t peer_id;
3141 	uint8_t tid;
3142 	struct dp_peer *peer;
3143 
3144 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3145 				qdf_nbuf_data(ppdu_info->nbuf);
3146 	ppdu_desc->is_flush = 1;
3147 
3148 	tag_buf++;
3149 	ppdu_desc->drop_reason = *tag_buf;
3150 
3151 	tag_buf++;
3152 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
3153 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
3154 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
3155 
3156 	tag_buf++;
3157 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
3158 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
3159 
3160 	ppdu_desc->num_users = 1;
3161 	ppdu_desc->user[0].peer_id = peer_id;
3162 	ppdu_desc->user[0].tid = tid;
3163 
3164 	ppdu_desc->queue_type =
3165 		HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
3166 
3167 	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
3168 				     DP_MOD_ID_TX_PPDU_STATS);
3169 	if (!peer)
3170 		goto add_ppdu_to_sched_list;
3171 
3172 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
3173 		DP_STATS_INC(peer,
3174 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
3175 			     ppdu_desc->num_msdu);
3176 	}
3177 
3178 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3179 
3180 add_ppdu_to_sched_list:
3181 	ppdu_info->done = 1;
3182 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3183 	pdev->list_depth--;
3184 	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
3185 			  ppdu_info_list_elem);
3186 	pdev->sched_comp_list_depth++;
3187 }
3188 
3189 /**
3190  * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv
3191  * Here the buffer itself is not processed.
3192 * @pdev: DP PDEV handle 3193 * @ppdu_info: per ppdu tlv structure 3194 * 3195 * return:void 3196 */ 3197 static void 3198 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev, 3199 struct ppdu_info *ppdu_info) 3200 { 3201 struct cdp_tx_completion_ppdu *ppdu_desc; 3202 struct dp_peer *peer; 3203 uint8_t num_users; 3204 uint8_t i; 3205 3206 ppdu_desc = (struct cdp_tx_completion_ppdu *) 3207 qdf_nbuf_data(ppdu_info->nbuf); 3208 3209 num_users = ppdu_desc->bar_num_users; 3210 3211 for (i = 0; i < num_users; i++) { 3212 if (ppdu_desc->user[i].user_pos == 0) { 3213 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 3214 /* update phy mode for bar frame */ 3215 ppdu_desc->phy_mode = 3216 ppdu_desc->user[i].preamble; 3217 ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs; 3218 break; 3219 } 3220 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) { 3221 ppdu_desc->frame_ctrl = 3222 ppdu_desc->user[i].frame_ctrl; 3223 break; 3224 } 3225 } 3226 } 3227 3228 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && 3229 ppdu_desc->delayed_ba) { 3230 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 3231 3232 for (i = 0; i < ppdu_desc->num_users; i++) { 3233 struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; 3234 uint64_t start_tsf; 3235 uint64_t end_tsf; 3236 uint32_t ppdu_id; 3237 3238 ppdu_id = ppdu_desc->ppdu_id; 3239 peer = dp_peer_get_ref_by_id 3240 (pdev->soc, ppdu_desc->user[i].peer_id, 3241 DP_MOD_ID_TX_PPDU_STATS); 3242 /** 3243 * This check is to make sure peer is not deleted 3244 * after processing the TLVs. 3245 */ 3246 if (!peer) 3247 continue; 3248 3249 delay_ppdu = &peer->delayed_ba_ppdu_stats; 3250 start_tsf = ppdu_desc->ppdu_start_timestamp; 3251 end_tsf = ppdu_desc->ppdu_end_timestamp; 3252 /** 3253 * save delayed ba user info 3254 */ 3255 if (ppdu_desc->user[i].delayed_ba) { 3256 dp_peer_copy_delay_stats(peer, 3257 &ppdu_desc->user[i], 3258 ppdu_id); 3259 peer->last_delayed_ba_ppduid = ppdu_id; 3260 delay_ppdu->ppdu_start_timestamp = start_tsf; 3261 delay_ppdu->ppdu_end_timestamp = end_tsf; 3262 } 3263 ppdu_desc->user[i].peer_last_delayed_ba = 3264 peer->last_delayed_ba; 3265 3266 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 3267 3268 if (ppdu_desc->user[i].delayed_ba && 3269 !ppdu_desc->user[i].debug_copied) { 3270 QDF_TRACE(QDF_MODULE_ID_TXRX, 3271 QDF_TRACE_LEVEL_INFO_MED, 3272 "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n", 3273 __func__, __LINE__, 3274 ppdu_desc->ppdu_id, 3275 ppdu_desc->bar_ppdu_id, 3276 ppdu_desc->num_users, 3277 i, 3278 ppdu_desc->htt_frame_type); 3279 } 3280 } 3281 } 3282 3283 /* 3284 * when frame type is BAR and STATS_COMMON_TLV is set 3285 * copy the store peer delayed info to BAR status 3286 */ 3287 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 3288 for (i = 0; i < ppdu_desc->bar_num_users; i++) { 3289 struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; 3290 uint64_t start_tsf; 3291 uint64_t end_tsf; 3292 3293 peer = dp_peer_get_ref_by_id 3294 (pdev->soc, 3295 ppdu_desc->user[i].peer_id, 3296 DP_MOD_ID_TX_PPDU_STATS); 3297 /** 3298 * This check is to make sure peer is not deleted 3299 * after processing the TLVs. 
3300 			 */
3301 			if (!peer)
3302 				continue;
3303 
3304 			if (ppdu_desc->user[i].completion_status !=
3305 			    HTT_PPDU_STATS_USER_STATUS_OK) {
3306 				dp_peer_unref_delete(peer,
3307 						     DP_MOD_ID_TX_PPDU_STATS);
3308 				continue;
3309 			}
3310 
3311 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3312 			start_tsf = delay_ppdu->ppdu_start_timestamp;
3313 			end_tsf = delay_ppdu->ppdu_end_timestamp;
3314 
3315 			if (peer->last_delayed_ba) {
3316 				dp_peer_copy_stats_to_bar(peer,
3317 							  &ppdu_desc->user[i]);
3318 				ppdu_desc->ppdu_id =
3319 					peer->last_delayed_ba_ppduid;
3320 				ppdu_desc->ppdu_start_timestamp = start_tsf;
3321 				ppdu_desc->ppdu_end_timestamp = end_tsf;
3322 			}
3323 			ppdu_desc->user[i].peer_last_delayed_ba =
3324 				peer->last_delayed_ba;
3325 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3326 		}
3327 	}
3328 
3329 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3330 	pdev->list_depth--;
3331 	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
3332 			  ppdu_info_list_elem);
3333 	pdev->sched_comp_list_depth++;
3334 }
3335 
3336 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3337 /*
3338  * dp_deliver_mgmt_frm: Deliver a Tx management frame to the upper layer
3339  * @pdev: DP PDEV handle
3340  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3341  *
3342  * return: void
3343  */
3344 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
3345 {
3346 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3347 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
3348 				     nbuf, HTT_INVALID_PEER,
3349 				     WDI_NO_VAL, pdev->pdev_id);
3350 	} else {
3351 		if (!pdev->bpr_enable)
3352 			qdf_nbuf_free(nbuf);
3353 	}
3354 }
3355 #endif
3356 
3357 /*
3358  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
3359  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3360  * @pdev: DP PDEV handle
3361  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3362  * @ppdu_id: PPDU id of the Tx management frame
3363  *
3364  * return: QDF_STATUS_SUCCESS if the nbuf has to be freed by the caller
3365  */
3366 static QDF_STATUS
3367 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
3368 					      qdf_nbuf_t tag_buf,
3369 					      uint32_t ppdu_id)
3370 {
3371 	uint32_t *nbuf_ptr;
3372 	uint8_t trim_size;
3373 	size_t head_size;
3374 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
3375 	uint32_t *msg_word;
3376 	uint32_t tsf_hdr;
3377 
3378 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
3379 	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
3380 		return QDF_STATUS_SUCCESS;
3381 
3382 	/*
3383 	 * get the timestamp from htt_t2h_ppdu_stats_ind_hdr_t
3384 	 */
3385 	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
3386 	msg_word = msg_word + 2;
3387 	tsf_hdr = *msg_word;
3388 
3389 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
3390 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
3391 		     qdf_nbuf_data(tag_buf));
3392 
3393 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
3394 		return QDF_STATUS_SUCCESS;
3395 
3396 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
3397 			   pdev->mgmtctrl_frm_info.mgmt_buf_len);
3398 
3399 	if (pdev->tx_capture_enabled) {
3400 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
3401 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
3402 			qdf_err("Failed to get headroom h_sz %zu h_avail %d\n",
3403 				head_size, qdf_nbuf_headroom(tag_buf));
3404 			qdf_assert_always(0);
3405 			return QDF_STATUS_E_NOMEM;
3406 		}
3407 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
3408 					qdf_nbuf_push_head(tag_buf, head_size);
3409 		qdf_assert_always(ptr_mgmt_comp_info);
3410 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
3411 		ptr_mgmt_comp_info->is_sgen_pkt = true;
3412 		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
3413 	} else {
3414 		head_size = sizeof(ppdu_id);
3415 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
3416 		*nbuf_ptr = ppdu_id;
3417 	}
3418 
3419 	if (pdev->bpr_enable) {
3420 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
3421 				     tag_buf, HTT_INVALID_PEER,
3422 				     WDI_NO_VAL, pdev->pdev_id);
3423 	}
3424 
3425 	dp_deliver_mgmt_frm(pdev, tag_buf);
3426 
3427 	return QDF_STATUS_E_ALREADY;
3428 }
3429 
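/*
 * Illustrative sketch of the headroom layout produced above (describing
 * the existing code, with the consumer behaviour assumed): before the
 * management payload is handed to dp_deliver_mgmt_frm(), metadata is
 * pushed into the nbuf headroom. With tx capture enabled the header is a
 * full struct cdp_tx_mgmt_comp_info (ppdu_id, is_sgen_pkt, tx_tsf);
 * otherwise it is just the 4-byte ppdu_id:
 *
 *   nbuf: [ ppdu_id ][ 802.11 mgmt frame ... ]
 *
 * A consumer is expected to pop this header to recover the ppdu_id before
 * parsing the frame.
 */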
3430 /**
3431  * dp_validate_fix_ppdu_tlv(): Function to validate the length of a PPDU TLV
3432  *
3433  * If the TLV length sent as part of the PPDU TLV is less than the expected
3434  * size, i.e. the size of the corresponding data structure, pad the remaining
3435  * bytes with zeros and continue processing the TLVs.
3436  *
3437  * @pdev: DP pdev handle
3438  * @tag_buf: TLV buffer
3439  * @tlv_expected_size: Expected size of Tag
3440  * @tlv_len: TLV length received from FW
3441  *
3442  * Return: Pointer to updated TLV
3443  */
3444 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
3445 						 uint32_t *tag_buf,
3446 						 uint16_t tlv_expected_size,
3447 						 uint16_t tlv_len)
3448 {
3449 	uint32_t *tlv_desc = tag_buf;
3450 
3451 	qdf_assert_always(tlv_len != 0);
3452 
3453 	if (tlv_len < tlv_expected_size) {
3454 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
3455 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
3456 		tlv_desc = pdev->ppdu_tlv_buf;
3457 	}
3458 
3459 	return tlv_desc;
3460 }
3461 
3462 /**
3463  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
3464  * @pdev: DP pdev handle
3465  * @tag_buf: TLV buffer
3466  * @tlv_len: length of tlv
3467  * @ppdu_info: per ppdu tlv structure
3468  *
3469  * return: void
3470  */
3471 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
3472 				uint32_t tlv_len, struct ppdu_info *ppdu_info)
3473 {
3474 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3475 	uint16_t tlv_expected_size;
3476 	uint32_t *tlv_desc;
3477 
3478 	switch (tlv_type) {
3479 	case HTT_PPDU_STATS_COMMON_TLV:
3480 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
3481 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3482 						    tlv_expected_size, tlv_len);
3483 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
3484 		break;
3485 	case HTT_PPDU_STATS_USR_COMMON_TLV:
3486 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
3487 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3488 						    tlv_expected_size, tlv_len);
3489 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
3490 						      ppdu_info);
3491 		break;
3492 	case HTT_PPDU_STATS_USR_RATE_TLV:
3493 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
3494 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3495 						    tlv_expected_size, tlv_len);
3496 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
3497 						    ppdu_info);
3498 		break;
3499 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
3500 		tlv_expected_size =
3501 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
3502 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3503 						    tlv_expected_size, tlv_len);
3504 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
3505 				pdev, tlv_desc, ppdu_info);
3506 		break;
3507 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
3508 		tlv_expected_size =
3509 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
3510 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3511 						    tlv_expected_size, tlv_len);
3512 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
3513 				pdev, tlv_desc, ppdu_info);
3514 		break;
3515 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
3516 		tlv_expected_size =
3517 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
3518 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3519
tlv_expected_size, tlv_len); 3520 dp_process_ppdu_stats_user_cmpltn_common_tlv( 3521 pdev, tlv_desc, ppdu_info); 3522 break; 3523 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV: 3524 tlv_expected_size = 3525 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv); 3526 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3527 tlv_expected_size, tlv_len); 3528 dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( 3529 pdev, tlv_desc, ppdu_info); 3530 break; 3531 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV: 3532 tlv_expected_size = 3533 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv); 3534 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3535 tlv_expected_size, tlv_len); 3536 dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( 3537 pdev, tlv_desc, ppdu_info); 3538 break; 3539 case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV: 3540 tlv_expected_size = 3541 sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv); 3542 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3543 tlv_expected_size, tlv_len); 3544 dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( 3545 pdev, tlv_desc, ppdu_info); 3546 break; 3547 case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV: 3548 tlv_expected_size = 3549 sizeof(htt_ppdu_stats_usr_common_array_tlv_v); 3550 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3551 tlv_expected_size, tlv_len); 3552 dp_process_ppdu_stats_user_common_array_tlv( 3553 pdev, tlv_desc, ppdu_info); 3554 break; 3555 case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV: 3556 tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv); 3557 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3558 tlv_expected_size, tlv_len); 3559 dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc, 3560 ppdu_info); 3561 break; 3562 case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV: 3563 dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info); 3564 break; 3565 default: 3566 break; 3567 } 3568 } 3569 3570 #ifdef WLAN_ATF_ENABLE 3571 static void 3572 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, 3573 struct cdp_tx_completion_ppdu *ppdu_desc, 3574 struct cdp_tx_completion_ppdu_user *user) 3575 { 3576 uint32_t nss_ru_width_sum = 0; 3577 3578 if (!pdev || !ppdu_desc || !user) 3579 return; 3580 3581 if (!pdev->dp_atf_stats_enable) 3582 return; 3583 3584 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA) 3585 return; 3586 3587 nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum; 3588 if (!nss_ru_width_sum) 3589 nss_ru_width_sum = 1; 3590 3591 /** 3592 * For SU-MIMO PPDU phy Tx time is same for the single user. 
3593 * For MU-MIMO phy Tx time is calculated per user as below
3594 * user phy tx time =
3595 * Entire PPDU duration * MU Ratio * OFDMA Ratio
3596 * MU Ratio = usr_nss / Sum_of_nss_of_all_users
3597 * OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users
3598 * usr_ru_width = ru_end - ru_start + 1
3599 */
3600 if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
3601 user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
3602 } else {
3603 user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
3604 user->nss * user->ru_tones) / nss_ru_width_sum;
3605 }
3606 }
3607 #else
3608 static void
3609 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
3610 struct cdp_tx_completion_ppdu *ppdu_desc,
3611 struct cdp_tx_completion_ppdu_user *user)
3612 {
3613 }
3614 #endif
3615
3616 /**
3617 * dp_ppdu_desc_user_stats_update(): Function to update Tx user stats
3618 * @pdev: DP pdev handle
3619 * @ppdu_info: per PPDU TLV descriptor
3620 *
3621 * return: void
3622 */
3623 void
3624 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
3625 struct ppdu_info *ppdu_info)
3626 {
3627 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3628 struct dp_peer *peer = NULL;
3629 uint32_t tlv_bitmap_expected;
3630 uint32_t tlv_bitmap_default;
3631 uint16_t i;
3632 uint32_t num_users;
3633
3634 ppdu_desc = (struct cdp_tx_completion_ppdu *)
3635 qdf_nbuf_data(ppdu_info->nbuf);
3636
3637 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
3638 ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3639
3640 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3641 if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3642 pdev->tx_capture_enabled) {
3643 if (ppdu_info->is_ampdu)
3644 tlv_bitmap_expected =
3645 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3646 ppdu_info->tlv_bitmap);
3647 }
3648
3649 tlv_bitmap_default = tlv_bitmap_expected;
3650
3651 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3652 num_users = ppdu_desc->bar_num_users;
3653 ppdu_desc->num_users = ppdu_desc->bar_num_users;
3654 } else {
3655 num_users = ppdu_desc->num_users;
3656 }
3657 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3658
3659 for (i = 0; i < num_users; i++) {
3660 ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
3661 ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
3662
3663 peer = dp_peer_get_ref_by_id(pdev->soc,
3664 ppdu_desc->user[i].peer_id,
3665 DP_MOD_ID_TX_PPDU_STATS);
3666 /**
3667 * This check is to make sure peer is not deleted
3668 * after processing the TLVs.
3669 */
3670 if (!peer)
3671 continue;
3672
3673 ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
3674 /*
3675 * Different frame types (DATA, BAR, CTRL) expect different
3676 * TLV bitmaps. Apart from the ACK_BA_STATUS TLV, the
3677 * remaining TLVs arrive in order from the FW.
3678 * The ACK_BA_STATUS TLV is generated by hardware and
3679 * arrives asynchronously, so another TLV is needed to
3680 * confirm that all TLVs for a PPDU have been received.
3681 * We therefore depend on both SCHED_CMD_STATUS_TLV and
3682 * ACK_BA_STATUS_TLV; for failed packets we won't get an
3683 * ACK_BA_STATUS_TLV.
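 *
 * A sketch of the resulting skip condition (illustrative, restating
 * the check below):
 *
 *   sched_done  = tlv_bitmap & (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV);
 *   ack_ba_done = tlv_bitmap &
 *                 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV);
 *   skip user i if !sched_done ||
 *                  (!ack_ba_done && completion_status == OK);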
3684 */ 3685 if (!(ppdu_info->tlv_bitmap & 3686 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) || 3687 (!(ppdu_info->tlv_bitmap & 3688 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) && 3689 (ppdu_desc->user[i].completion_status == 3690 HTT_PPDU_STATS_USER_STATUS_OK))) { 3691 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 3692 continue; 3693 } 3694 3695 /** 3696 * Update tx stats for data frames having Qos as well as 3697 * non-Qos data tid 3698 */ 3699 3700 if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX || 3701 (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) || 3702 (ppdu_desc->htt_frame_type == 3703 HTT_STATS_FTYPE_SGEN_QOS_NULL) || 3704 ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) && 3705 (ppdu_desc->num_mpdu > 1))) && 3706 (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) { 3707 3708 dp_tx_stats_update(pdev, peer, 3709 &ppdu_desc->user[i], 3710 ppdu_desc->ack_rssi); 3711 dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]); 3712 } 3713 3714 dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc, 3715 &ppdu_desc->user[i]); 3716 3717 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 3718 tlv_bitmap_expected = tlv_bitmap_default; 3719 } 3720 } 3721 3722 #ifndef WLAN_TX_PKT_CAPTURE_ENH 3723 3724 /** 3725 * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor 3726 * to upper layer 3727 * @pdev: DP pdev handle 3728 * @ppdu_info: per PPDU TLV descriptor 3729 * 3730 * return: void 3731 */ 3732 static 3733 void dp_ppdu_desc_deliver(struct dp_pdev *pdev, 3734 struct ppdu_info *ppdu_info) 3735 { 3736 struct ppdu_info *s_ppdu_info = NULL; 3737 struct ppdu_info *ppdu_info_next = NULL; 3738 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 3739 qdf_nbuf_t nbuf; 3740 uint32_t time_delta = 0; 3741 bool starved = 0; 3742 bool matched = 0; 3743 bool recv_ack_ba_done = 0; 3744 3745 if (ppdu_info->tlv_bitmap & 3746 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && 3747 ppdu_info->done) 3748 recv_ack_ba_done = 1; 3749 3750 pdev->last_sched_cmdid = ppdu_info->sched_cmdid; 3751 3752 s_ppdu_info = TAILQ_FIRST(&pdev->sched_comp_ppdu_list); 3753 3754 TAILQ_FOREACH_SAFE(s_ppdu_info, &pdev->sched_comp_ppdu_list, 3755 ppdu_info_list_elem, ppdu_info_next) { 3756 if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32) 3757 time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) + 3758 ppdu_info->tsf_l32; 3759 else 3760 time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32; 3761 3762 if (!s_ppdu_info->done && !recv_ack_ba_done) { 3763 if (time_delta < MAX_SCHED_STARVE) { 3764 dp_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]", 3765 pdev->pdev_id, 3766 s_ppdu_info->ppdu_id, 3767 s_ppdu_info->sched_cmdid, 3768 s_ppdu_info->tlv_bitmap, 3769 s_ppdu_info->tsf_l32, 3770 s_ppdu_info->done); 3771 break; 3772 } 3773 starved = 1; 3774 } 3775 3776 pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid; 3777 TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, s_ppdu_info, 3778 ppdu_info_list_elem); 3779 pdev->sched_comp_list_depth--; 3780 3781 nbuf = s_ppdu_info->nbuf; 3782 qdf_assert_always(nbuf); 3783 ppdu_desc = (struct cdp_tx_completion_ppdu *) 3784 qdf_nbuf_data(nbuf); 3785 ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap; 3786 3787 if (starved) { 3788 dp_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n", 3789 ppdu_desc->frame_ctrl, 3790 ppdu_desc->htt_frame_type, 3791 ppdu_desc->tlv_bitmap, 3792 ppdu_desc->user[0].completion_status); 3793 starved = 0; 3794 } 3795 3796 if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id && 3797 ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid) 3798 
matched = 1; 3799 3800 dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info); 3801 3802 qdf_mem_free(s_ppdu_info); 3803 3804 /** 3805 * Deliver PPDU stats only for valid (acked) data 3806 * frames if sniffer mode is not enabled. 3807 * If sniffer mode is enabled, PPDU stats 3808 * for all frames including mgmt/control 3809 * frames should be delivered to upper layer 3810 */ 3811 if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { 3812 dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, 3813 pdev->soc, 3814 nbuf, HTT_INVALID_PEER, 3815 WDI_NO_VAL, 3816 pdev->pdev_id); 3817 } else { 3818 if (ppdu_desc->num_mpdu != 0 && 3819 ppdu_desc->num_users != 0 && 3820 ppdu_desc->frame_ctrl & 3821 HTT_FRAMECTRL_DATATYPE) { 3822 dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, 3823 pdev->soc, 3824 nbuf, HTT_INVALID_PEER, 3825 WDI_NO_VAL, 3826 pdev->pdev_id); 3827 } else { 3828 qdf_nbuf_free(nbuf); 3829 } 3830 } 3831 3832 if (matched) 3833 break; 3834 } 3835 return; 3836 } 3837 3838 #endif 3839 3840 /** 3841 * dp_get_ppdu_desc(): Function to allocate new PPDU status 3842 * desc for new ppdu id 3843 * @pdev: DP pdev handle 3844 * @ppdu_id: PPDU unique identifier 3845 * @tlv_type: TLV type received 3846 * @tsf_l32: timestamp received along with ppdu stats indication header 3847 * @max_users: Maximum user for that particular ppdu 3848 * 3849 * return: ppdu_info per ppdu tlv structure 3850 */ 3851 static 3852 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id, 3853 uint8_t tlv_type, uint32_t tsf_l32, 3854 uint8_t max_users) 3855 { 3856 struct ppdu_info *ppdu_info = NULL; 3857 struct ppdu_info *s_ppdu_info = NULL; 3858 struct ppdu_info *ppdu_info_next = NULL; 3859 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 3860 uint32_t size = 0; 3861 struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL; 3862 struct cdp_tx_completion_ppdu_user *tmp_user; 3863 uint32_t time_delta; 3864 3865 /* 3866 * Find ppdu_id node exists or not 3867 */ 3868 TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list, 3869 ppdu_info_list_elem, ppdu_info_next) { 3870 if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) { 3871 if (ppdu_info->tsf_l32 > tsf_l32) 3872 time_delta = (MAX_TSF_32 - 3873 ppdu_info->tsf_l32) + tsf_l32; 3874 else 3875 time_delta = tsf_l32 - ppdu_info->tsf_l32; 3876 3877 if (time_delta > WRAP_DROP_TSF_DELTA) { 3878 TAILQ_REMOVE(&pdev->ppdu_info_list, 3879 ppdu_info, ppdu_info_list_elem); 3880 pdev->list_depth--; 3881 pdev->stats.ppdu_wrap_drop++; 3882 tmp_ppdu_desc = 3883 (struct cdp_tx_completion_ppdu *) 3884 qdf_nbuf_data(ppdu_info->nbuf); 3885 tmp_user = &tmp_ppdu_desc->user[0]; 3886 dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n", 3887 ppdu_info->ppdu_id, 3888 ppdu_info->tsf_l32, 3889 ppdu_info->tlv_bitmap, 3890 tmp_user->completion_status, 3891 ppdu_info->compltn_common_tlv, 3892 ppdu_info->ack_ba_tlv, 3893 ppdu_id, tsf_l32, tlv_type); 3894 qdf_nbuf_free(ppdu_info->nbuf); 3895 ppdu_info->nbuf = NULL; 3896 qdf_mem_free(ppdu_info); 3897 } else { 3898 break; 3899 } 3900 } 3901 } 3902 3903 /* 3904 * check if it is ack ba tlv and if it is not there in ppdu info 3905 * list then check it in sched completion ppdu list 3906 */ 3907 if (!ppdu_info && 3908 tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) { 3909 TAILQ_FOREACH(s_ppdu_info, 3910 &pdev->sched_comp_ppdu_list, 3911 ppdu_info_list_elem) { 3912 if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) { 3913 if (s_ppdu_info->tsf_l32 > tsf_l32) 3914 time_delta = (MAX_TSF_32 - 
3915 s_ppdu_info->tsf_l32) + 3916 tsf_l32; 3917 else 3918 time_delta = tsf_l32 - 3919 s_ppdu_info->tsf_l32; 3920 if (time_delta < WRAP_DROP_TSF_DELTA) { 3921 ppdu_info = s_ppdu_info; 3922 break; 3923 } 3924 } else { 3925 /* 3926 * ACK BA STATUS TLV comes sequential order 3927 * if we received ack ba status tlv for second 3928 * ppdu and first ppdu is still waiting for 3929 * ACK BA STATUS TLV. Based on fw comment 3930 * we won't receive it tlv later. So we can 3931 * set ppdu info done. 3932 */ 3933 if (s_ppdu_info) 3934 s_ppdu_info->done = 1; 3935 } 3936 } 3937 } 3938 3939 if (ppdu_info) { 3940 if (ppdu_info->tlv_bitmap & (1 << tlv_type)) { 3941 /** 3942 * if we get tlv_type that is already been processed 3943 * for ppdu, that means we got a new ppdu with same 3944 * ppdu id. Hence Flush the older ppdu 3945 * for MUMIMO and OFDMA, In a PPDU we have 3946 * multiple user with same tlv types. tlv bitmap is 3947 * used to check whether SU or MU_MIMO/OFDMA 3948 */ 3949 if (!(ppdu_info->tlv_bitmap & 3950 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) 3951 return ppdu_info; 3952 3953 ppdu_desc = (struct cdp_tx_completion_ppdu *) 3954 qdf_nbuf_data(ppdu_info->nbuf); 3955 3956 /** 3957 * apart from ACK BA STATUS TLV rest all comes in order 3958 * so if tlv type not ACK BA STATUS TLV we can deliver 3959 * ppdu_info 3960 */ 3961 if ((tlv_type == 3962 HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && 3963 (ppdu_desc->htt_frame_type == 3964 HTT_STATS_FTYPE_SGEN_MU_BAR)) 3965 return ppdu_info; 3966 3967 dp_ppdu_desc_deliver(pdev, ppdu_info); 3968 } else { 3969 return ppdu_info; 3970 } 3971 } 3972 3973 /** 3974 * Flush the head ppdu descriptor if ppdu desc list reaches max 3975 * threshold 3976 */ 3977 if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 3978 ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list); 3979 TAILQ_REMOVE(&pdev->ppdu_info_list, 3980 ppdu_info, ppdu_info_list_elem); 3981 pdev->list_depth--; 3982 pdev->stats.ppdu_drop++; 3983 qdf_nbuf_free(ppdu_info->nbuf); 3984 ppdu_info->nbuf = NULL; 3985 qdf_mem_free(ppdu_info); 3986 } 3987 3988 size = sizeof(struct cdp_tx_completion_ppdu) + 3989 (max_users * sizeof(struct cdp_tx_completion_ppdu_user)); 3990 3991 /* 3992 * Allocate new ppdu_info node 3993 */ 3994 ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info)); 3995 if (!ppdu_info) 3996 return NULL; 3997 3998 ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size, 3999 0, 4, TRUE); 4000 if (!ppdu_info->nbuf) { 4001 qdf_mem_free(ppdu_info); 4002 return NULL; 4003 } 4004 4005 ppdu_info->ppdu_desc = 4006 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 4007 qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size); 4008 4009 if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) { 4010 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 4011 "No tailroom for HTT PPDU"); 4012 qdf_nbuf_free(ppdu_info->nbuf); 4013 ppdu_info->nbuf = NULL; 4014 ppdu_info->last_user = 0; 4015 qdf_mem_free(ppdu_info); 4016 return NULL; 4017 } 4018 4019 ppdu_info->ppdu_desc->max_users = max_users; 4020 ppdu_info->tsf_l32 = tsf_l32; 4021 /** 4022 * No lock is needed because all PPDU TLVs are processed in 4023 * same context and this list is updated in same context 4024 */ 4025 TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info, 4026 ppdu_info_list_elem); 4027 pdev->list_depth++; 4028 return ppdu_info; 4029 } 4030 4031 /** 4032 * dp_htt_process_tlv(): Function to process each PPDU TLVs 4033 * @pdev: DP pdev handle 4034 * @htt_t2h_msg: HTT target to host message 4035 * 4036 * return: ppdu_info per ppdu tlv structure 4037 */ 
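/*
 * Layout of the PPDU stats indication parsed below, as implied by the
 * word-by-word reads in this function (field names here are
 * illustrative; the authoritative definitions live in htt.h):
 *
 *   word 0: msg type and payload size
 *   word 1: ppdu_id
 *   word 2: lower 32 bits of the TSF
 *   word 3: reserved
 *   word 4 onward: stream of TLVs, each led by a header word that
 *                  encodes the TLV tag and length
 */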
4038 4039 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev, 4040 qdf_nbuf_t htt_t2h_msg) 4041 { 4042 uint32_t length; 4043 uint32_t ppdu_id; 4044 uint8_t tlv_type; 4045 uint32_t tlv_length, tlv_bitmap_expected; 4046 uint8_t *tlv_buf; 4047 struct ppdu_info *ppdu_info = NULL; 4048 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4049 uint8_t max_users = CDP_MU_MAX_USERS; 4050 uint32_t tsf_l32; 4051 4052 uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); 4053 4054 length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); 4055 4056 msg_word = msg_word + 1; 4057 ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word); 4058 4059 msg_word = msg_word + 1; 4060 tsf_l32 = (uint32_t)(*msg_word); 4061 4062 msg_word = msg_word + 2; 4063 while (length > 0) { 4064 tlv_buf = (uint8_t *)msg_word; 4065 tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); 4066 tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word); 4067 if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG)) 4068 pdev->stats.ppdu_stats_counter[tlv_type]++; 4069 4070 if (tlv_length == 0) 4071 break; 4072 4073 tlv_length += HTT_TLV_HDR_LEN; 4074 4075 /** 4076 * Not allocating separate ppdu descriptor for MGMT Payload 4077 * TLV as this is sent as separate WDI indication and it 4078 * doesn't contain any ppdu information 4079 */ 4080 if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) { 4081 pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf; 4082 pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id; 4083 pdev->mgmtctrl_frm_info.mgmt_buf_len = 4084 HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET 4085 (*(msg_word + 1)); 4086 msg_word = 4087 (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 4088 length -= (tlv_length); 4089 continue; 4090 } 4091 4092 /* 4093 * retrieve max_users if it's USERS_INFO, 4094 * else, it's 1 for COMPLTN_FLUSH, 4095 * else, use CDP_MU_MAX_USERS 4096 */ 4097 if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) { 4098 max_users = 4099 HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1)); 4100 } else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) { 4101 max_users = 1; 4102 } 4103 4104 ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type, 4105 tsf_l32, max_users); 4106 if (!ppdu_info) 4107 return NULL; 4108 4109 ppdu_info->ppdu_id = ppdu_id; 4110 ppdu_info->tlv_bitmap |= (1 << tlv_type); 4111 4112 dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info); 4113 4114 /** 4115 * Increment pdev level tlv count to monitor 4116 * missing TLVs 4117 */ 4118 pdev->tlv_count++; 4119 ppdu_info->last_tlv_cnt = pdev->tlv_count; 4120 msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 4121 length -= (tlv_length); 4122 } 4123 4124 if (!ppdu_info) 4125 return NULL; 4126 4127 pdev->last_ppdu_id = ppdu_id; 4128 4129 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; 4130 4131 if (pdev->tx_sniffer_enable || pdev->mcopy_mode || 4132 pdev->tx_capture_enabled) { 4133 if (ppdu_info->is_ampdu) 4134 tlv_bitmap_expected = 4135 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( 4136 ppdu_info->tlv_bitmap); 4137 } 4138 4139 ppdu_desc = ppdu_info->ppdu_desc; 4140 4141 if (!ppdu_desc) 4142 return NULL; 4143 4144 if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status != 4145 HTT_PPDU_STATS_USER_STATUS_OK) { 4146 tlv_bitmap_expected = tlv_bitmap_expected & 0xFF; 4147 } 4148 4149 /* 4150 * for frame type DATA and BAR, we update stats based on MSDU, 4151 * successful msdu and mpdu are populate from ACK BA STATUS TLV 4152 * which comes out of order. successful mpdu also populated from 4153 * COMPLTN COMMON TLV which comes in order. 
for every ppdu_info
4154 * we store the successful mpdu count from both TLVs and compare them
4155 * before delivering, to make sure the ACK BA STATUS TLV was received.
4156 * Some self-generated frames never get an ack ba status tlv, so there
4157 * is no need to wait for it.
4158 */
4159 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
4160 ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
4161 /*
4162 * most of the time a BAR frame will carry a duplicate ack ba
4163 * status tlv
4164 */
4165 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
4166 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
4167 return NULL;
4168 /*
4169 * For data frames, the compltn common tlv must match the ack ba
4170 * status tlv and the completion status. We check only user 0:
4171 * for OFDMA, later users complete at the next MU BAR frame; for
4172 * MIMO, only the first user's completion is immediate.
4173 */
4174 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
4175 (ppdu_desc->user[0].completion_status == 0 &&
4176 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
4177 return NULL;
4178 }
4179
4180 /**
4181 * Once all the TLVs for a given PPDU have been processed,
4182 * return the PPDU status to be delivered to the higher layer.
4183 * tlv_bitmap_expected is not available for every frame type,
4184 * but the SCHED CMD STATUS TLV is the last TLV from the FW for a
4185 * ppdu and, apart from the ACK BA TLV, the FW sends TLVs in
4186 * sequential order. The flush tlv arrives separately.
4187 */
4188 if ((ppdu_info->tlv_bitmap != 0 &&
4189 (ppdu_info->tlv_bitmap &
4190 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
4191 (ppdu_info->tlv_bitmap &
4192 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
4193 ppdu_info->done = 1;
4194 return ppdu_info;
4195 }
4196
4197 return NULL;
4198 }
4199 #endif /* FEATURE_PERPKT_INFO */
4200
4201 /**
4202 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
4203 * @soc: DP SOC handle
4204 * @pdev_id: pdev id
4205 * @htt_t2h_msg: HTT message nbuf
4206 *
4207 * Return: true if the nbuf should be freed by the caller
4208 */
4209 #if defined(WDI_EVENT_ENABLE)
4210 #ifdef FEATURE_PERPKT_INFO
4211 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4212 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4213 {
4214 struct dp_pdev *pdev;
4215 struct ppdu_info *ppdu_info = NULL;
4216 bool free_buf = true;
4217
4218 if (pdev_id >= MAX_PDEV_CNT)
4219 return true;
4220
4221 pdev = soc->pdev_list[pdev_id];
4222 if (!pdev)
4223 return true;
4224
4225 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
4226 !pdev->mcopy_mode && !pdev->bpr_enable)
4227 return free_buf;
4228
4229 qdf_spin_lock_bh(&pdev->ppdu_stats_lock);
4230 ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
4231
4232 if (pdev->mgmtctrl_frm_info.mgmt_buf) {
4233 if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
4234 (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
4235 QDF_STATUS_SUCCESS)
4236 free_buf = false;
4237 }
4238
4239 if (ppdu_info)
4240 dp_ppdu_desc_deliver(pdev, ppdu_info);
4241
4242 pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
4243 pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
4244 pdev->mgmtctrl_frm_info.ppdu_id = 0;
4245
4246 qdf_spin_unlock_bh(&pdev->ppdu_stats_lock);
4247
4248 return free_buf;
4249 }
4250 #else
4251 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4252 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4253 {
4254 return true;
4255 }
4256 #endif
4257 #endif
4258
4259 /**
4260 * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
4261 * @soc: DP SOC handle
4262 * @htt_t2h_msg: HTT
message nbuf 4263 * 4264 * return:void 4265 */ 4266 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc, 4267 qdf_nbuf_t htt_t2h_msg) 4268 { 4269 uint8_t done; 4270 qdf_nbuf_t msg_copy; 4271 uint32_t *msg_word; 4272 4273 msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); 4274 msg_word = msg_word + 3; 4275 done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); 4276 4277 /* 4278 * HTT EXT stats response comes as stream of TLVs which span over 4279 * multiple T2H messages. 4280 * The first message will carry length of the response. 4281 * For rest of the messages length will be zero. 4282 * 4283 * Clone the T2H message buffer and store it in a list to process 4284 * it later. 4285 * 4286 * The original T2H message buffers gets freed in the T2H HTT event 4287 * handler 4288 */ 4289 msg_copy = qdf_nbuf_clone(htt_t2h_msg); 4290 4291 if (!msg_copy) { 4292 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 4293 "T2H messge clone failed for HTT EXT STATS"); 4294 goto error; 4295 } 4296 4297 qdf_spin_lock_bh(&soc->htt_stats.lock); 4298 qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy); 4299 /* 4300 * Done bit signifies that this is the last T2H buffer in the stream of 4301 * HTT EXT STATS message 4302 */ 4303 if (done) { 4304 soc->htt_stats.num_stats++; 4305 qdf_sched_work(0, &soc->htt_stats.work); 4306 } 4307 qdf_spin_unlock_bh(&soc->htt_stats.lock); 4308 4309 return; 4310 4311 error: 4312 qdf_spin_lock_bh(&soc->htt_stats.lock); 4313 while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) 4314 != NULL) { 4315 qdf_nbuf_free(msg_copy); 4316 } 4317 soc->htt_stats.num_stats = 0; 4318 qdf_spin_unlock_bh(&soc->htt_stats.lock); 4319 return; 4320 } 4321 4322 /* 4323 * htt_soc_attach_target() - SOC level HTT setup 4324 * @htt_soc: HTT SOC handle 4325 * 4326 * Return: 0 on success; error code on failure 4327 */ 4328 int htt_soc_attach_target(struct htt_soc *htt_soc) 4329 { 4330 struct htt_soc *soc = (struct htt_soc *)htt_soc; 4331 4332 return htt_h2t_ver_req_msg(soc); 4333 } 4334 4335 void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc) 4336 { 4337 htt_soc->htc_soc = htc_soc; 4338 } 4339 4340 HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc) 4341 { 4342 return htt_soc->htc_soc; 4343 } 4344 4345 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle) 4346 { 4347 int i; 4348 int j; 4349 int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long); 4350 struct htt_soc *htt_soc = NULL; 4351 4352 htt_soc = qdf_mem_malloc(sizeof(*htt_soc)); 4353 if (!htt_soc) { 4354 dp_err("HTT attach failed"); 4355 return NULL; 4356 } 4357 4358 for (i = 0; i < MAX_PDEV_CNT; i++) { 4359 htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size); 4360 if (!htt_soc->pdevid_tt[i].umac_ttt) 4361 break; 4362 qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1); 4363 htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size); 4364 if (!htt_soc->pdevid_tt[i].lmac_ttt) { 4365 qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt); 4366 break; 4367 } 4368 qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1); 4369 } 4370 if (i != MAX_PDEV_CNT) { 4371 for (j = 0; j < i; j++) { 4372 qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt); 4373 qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt); 4374 } 4375 qdf_mem_free(htt_soc); 4376 return NULL; 4377 } 4378 4379 htt_soc->dp_soc = soc; 4380 htt_soc->htc_soc = htc_handle; 4381 HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex); 4382 4383 return htt_soc; 4384 } 4385 4386 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) 4387 /* 4388 * 
dp_ppdu_stats_ind_handler() - PPDU stats msg handler
4389 * @htt_soc: HTT SOC handle
4390 * @msg_word: Pointer to payload
4391 * @htt_t2h_msg: HTT msg nbuf
4392 *
4393 * Return: True if buffer should be freed by caller.
4394 */
4395 static bool
4396 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4397 uint32_t *msg_word,
4398 qdf_nbuf_t htt_t2h_msg)
4399 {
4400 u_int8_t pdev_id;
4401 u_int8_t target_pdev_id;
4402 bool free_buf;
4403
4404 target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
4405 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4406 target_pdev_id);
4407 dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
4408 htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
4409 pdev_id);
4410
4411 free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
4412 htt_t2h_msg);
4413
4414 return free_buf;
4415 }
4416 #else
4417 static bool
4418 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4419 uint32_t *msg_word,
4420 qdf_nbuf_t htt_t2h_msg)
4421 {
4422 return true;
4423 }
4424 #endif
4425
4426 #if defined(WDI_EVENT_ENABLE) && \
4427 !defined(REMOVE_PKT_LOG)
4428 /*
4429 * dp_pktlog_msg_handler() - Pktlog msg handler
4430 * @htt_soc: HTT SOC handle
4431 * @msg_word: Pointer to payload
4432 *
4433 * Return: None
4434 */
4435 static void
4436 dp_pktlog_msg_handler(struct htt_soc *soc,
4437 uint32_t *msg_word)
4438 {
4439 uint8_t pdev_id;
4440 uint8_t target_pdev_id;
4441 uint32_t *pl_hdr;
4442
4443 target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
4444 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4445 target_pdev_id);
4446 pl_hdr = (msg_word + 1);
4447 dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
4448 pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
4449 pdev_id);
4450 }
4451 #else
4452 static void
4453 dp_pktlog_msg_handler(struct htt_soc *soc,
4454 uint32_t *msg_word)
4455 {
4456 }
4457 #endif
4458
4459 /*
4460 * time_allow_print() - check whether a backpressure alert may be printed
4461 * @htt_ring_tt: ring_id-indexed array of timestamps
4462 * @ring_id: ring_id (index)
4463 *
4464 * Return: 1 if the timestamp was saved in the array (printing allowed)
4465 * and 0 if the timestamp falls within 2 seconds of the last one
4466 */
4467 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
4468 {
4469 unsigned long tstamp;
4470 unsigned long delta;
4471
4472 tstamp = qdf_get_system_timestamp();
4473
4474 if (!htt_ring_tt)
4475 return 0; /* unable to print backpressure messages */
4476
4477 if (htt_ring_tt[ring_id] == -1) {
4478 htt_ring_tt[ring_id] = tstamp;
4479 return 1;
4480 }
4481 delta = tstamp - htt_ring_tt[ring_id];
4482 if (delta >= 2000) {
4483 htt_ring_tt[ring_id] = tstamp;
4484 return 1;
4485 }
4486
4487 return 0;
4488 }
4489
4490 static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
4491 struct dp_pdev *pdev, u_int8_t ring_id,
4492 u_int16_t hp_idx, u_int16_t tp_idx,
4493 u_int32_t bkp_time, char *ring_stype)
4494 {
4495 dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
4496 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
4497 dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
4498 ring_id, hp_idx, tp_idx, bkp_time);
4499 }
4500
4501 /**
4502 * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
4503 * @soc: DP_SOC handle
4504 * @srng: DP_SRNG handle
4505 * @ring_type: srng src/dst ring
4506 * @state: ring state to be filled from hal
4507 * Return: QDF_STATUS
4508 */
4509 static QDF_STATUS
4510 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
4511 struct dp_pdev *pdev,
4512 struct dp_srng *srng,
4513 enum hal_ring_type ring_type,
4514 struct dp_srng_ring_state
*state) 4515 { 4516 struct hal_soc *hal_soc; 4517 4518 if (!soc || !srng || !srng->hal_srng || !state) 4519 return QDF_STATUS_E_INVAL; 4520 4521 hal_soc = (struct hal_soc *)soc->hal_soc; 4522 4523 hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail, 4524 &state->sw_head); 4525 4526 hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head, 4527 &state->hw_tail, ring_type); 4528 4529 state->ring_type = ring_type; 4530 4531 return QDF_STATUS_SUCCESS; 4532 } 4533 4534 /** 4535 * dp_queue_srng_ring_stats(): Print pdev hal level ring stats 4536 * @pdev: DP_pdev handle 4537 * 4538 * Return: void 4539 */ 4540 static void dp_queue_ring_stats(struct dp_pdev *pdev) 4541 { 4542 uint32_t i; 4543 int mac_id; 4544 int lmac_id; 4545 uint32_t j = 0; 4546 struct dp_soc_srngs_state * soc_srngs_state = NULL; 4547 QDF_STATUS status; 4548 4549 soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state)); 4550 if (!soc_srngs_state) { 4551 dp_htt_alert("Memory alloc failed for back pressure event"); 4552 return; 4553 } 4554 4555 status = dp_get_srng_ring_state_from_hal 4556 (pdev->soc, pdev, 4557 &pdev->soc->reo_exception_ring, 4558 REO_EXCEPTION, 4559 &soc_srngs_state->ring_state[j]); 4560 4561 if (status == QDF_STATUS_SUCCESS) 4562 qdf_assert_always(++j < DP_MAX_SRNGS); 4563 4564 status = dp_get_srng_ring_state_from_hal 4565 (pdev->soc, pdev, 4566 &pdev->soc->reo_reinject_ring, 4567 REO_REINJECT, 4568 &soc_srngs_state->ring_state[j]); 4569 4570 if (status == QDF_STATUS_SUCCESS) 4571 qdf_assert_always(++j < DP_MAX_SRNGS); 4572 4573 status = dp_get_srng_ring_state_from_hal 4574 (pdev->soc, pdev, 4575 &pdev->soc->reo_cmd_ring, 4576 REO_CMD, 4577 &soc_srngs_state->ring_state[j]); 4578 4579 if (status == QDF_STATUS_SUCCESS) 4580 qdf_assert_always(++j < DP_MAX_SRNGS); 4581 4582 status = dp_get_srng_ring_state_from_hal 4583 (pdev->soc, pdev, 4584 &pdev->soc->reo_status_ring, 4585 REO_STATUS, 4586 &soc_srngs_state->ring_state[j]); 4587 4588 if (status == QDF_STATUS_SUCCESS) 4589 qdf_assert_always(++j < DP_MAX_SRNGS); 4590 4591 status = dp_get_srng_ring_state_from_hal 4592 (pdev->soc, pdev, 4593 &pdev->soc->rx_rel_ring, 4594 WBM2SW_RELEASE, 4595 &soc_srngs_state->ring_state[j]); 4596 4597 if (status == QDF_STATUS_SUCCESS) 4598 qdf_assert_always(++j < DP_MAX_SRNGS); 4599 4600 status = dp_get_srng_ring_state_from_hal 4601 (pdev->soc, pdev, 4602 &pdev->soc->tcl_cmd_credit_ring, 4603 TCL_CMD_CREDIT, 4604 &soc_srngs_state->ring_state[j]); 4605 4606 if (status == QDF_STATUS_SUCCESS) 4607 qdf_assert_always(++j < DP_MAX_SRNGS); 4608 4609 status = dp_get_srng_ring_state_from_hal 4610 (pdev->soc, pdev, 4611 &pdev->soc->tcl_status_ring, 4612 TCL_STATUS, 4613 &soc_srngs_state->ring_state[j]); 4614 4615 if (status == QDF_STATUS_SUCCESS) 4616 qdf_assert_always(++j < DP_MAX_SRNGS); 4617 4618 status = dp_get_srng_ring_state_from_hal 4619 (pdev->soc, pdev, 4620 &pdev->soc->wbm_desc_rel_ring, 4621 SW2WBM_RELEASE, 4622 &soc_srngs_state->ring_state[j]); 4623 4624 if (status == QDF_STATUS_SUCCESS) 4625 qdf_assert_always(++j < DP_MAX_SRNGS); 4626 4627 for (i = 0; i < MAX_REO_DEST_RINGS; i++) { 4628 status = dp_get_srng_ring_state_from_hal 4629 (pdev->soc, pdev, 4630 &pdev->soc->reo_dest_ring[i], 4631 REO_DST, 4632 &soc_srngs_state->ring_state[j]); 4633 4634 if (status == QDF_STATUS_SUCCESS) 4635 qdf_assert_always(++j < DP_MAX_SRNGS); 4636 } 4637 4638 for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) { 4639 status = dp_get_srng_ring_state_from_hal 4640 (pdev->soc, pdev, 4641 &pdev->soc->tcl_data_ring[i], 4642 TCL_DATA, 
4643 &soc_srngs_state->ring_state[j]);
4644
4645 if (status == QDF_STATUS_SUCCESS)
4646 qdf_assert_always(++j < DP_MAX_SRNGS);
4647 }
4648
4649 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
4650 status = dp_get_srng_ring_state_from_hal
4651 (pdev->soc, pdev,
4652 &pdev->soc->tx_comp_ring[i],
4653 WBM2SW_RELEASE,
4654 &soc_srngs_state->ring_state[j]);
4655
4656 if (status == QDF_STATUS_SUCCESS)
4657 qdf_assert_always(++j < DP_MAX_SRNGS);
4658 }
4659
4660 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
4661 status = dp_get_srng_ring_state_from_hal
4662 (pdev->soc, pdev,
4663 &pdev->soc->rx_refill_buf_ring
4664 [lmac_id],
4665 RXDMA_BUF,
4666 &soc_srngs_state->ring_state[j]);
4667
4668 if (status == QDF_STATUS_SUCCESS)
4669 qdf_assert_always(++j < DP_MAX_SRNGS);
4670
4671 status = dp_get_srng_ring_state_from_hal
4672 (pdev->soc, pdev,
4673 &pdev->rx_refill_buf_ring2,
4674 RXDMA_BUF,
4675 &soc_srngs_state->ring_state[j]);
4676
4677 if (status == QDF_STATUS_SUCCESS)
4678 qdf_assert_always(++j < DP_MAX_SRNGS);
4679
4680
4681 for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
4682 status = dp_get_srng_ring_state_from_hal
4683 (pdev->soc, pdev,
4684 &pdev->rx_mac_buf_ring[i],
4685 RXDMA_BUF,
4686 &soc_srngs_state->ring_state[j]);
4687
4688 if (status == QDF_STATUS_SUCCESS)
4689 qdf_assert_always(++j < DP_MAX_SRNGS);
4690 }
4691
4692 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4693 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
4694 mac_id, pdev->pdev_id);
4695
4696 if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
4697 status = dp_get_srng_ring_state_from_hal
4698 (pdev->soc, pdev,
4699 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
4700 RXDMA_MONITOR_BUF,
4701 &soc_srngs_state->ring_state[j]);
4702
4703 if (status == QDF_STATUS_SUCCESS)
4704 qdf_assert_always(++j < DP_MAX_SRNGS);
4705
4706 status = dp_get_srng_ring_state_from_hal
4707 (pdev->soc, pdev,
4708 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
4709 RXDMA_MONITOR_DST,
4710 &soc_srngs_state->ring_state[j]);
4711
4712 if (status == QDF_STATUS_SUCCESS)
4713 qdf_assert_always(++j < DP_MAX_SRNGS);
4714
4715 status = dp_get_srng_ring_state_from_hal
4716 (pdev->soc, pdev,
4717 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
4718 RXDMA_MONITOR_DESC,
4719 &soc_srngs_state->ring_state[j]);
4720
4721 if (status == QDF_STATUS_SUCCESS)
4722 qdf_assert_always(++j < DP_MAX_SRNGS);
4723 }
4724
4725 status = dp_get_srng_ring_state_from_hal
4726 (pdev->soc, pdev,
4727 &pdev->soc->rxdma_mon_status_ring[lmac_id],
4728 RXDMA_MONITOR_STATUS,
4729 &soc_srngs_state->ring_state[j]);
4730
4731 if (status == QDF_STATUS_SUCCESS)
4732 qdf_assert_always(++j < DP_MAX_SRNGS);
4733 }
4734
4735 for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
4736 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
4737 i, pdev->pdev_id);
4738
4739 status = dp_get_srng_ring_state_from_hal
4740 (pdev->soc, pdev,
4741 &pdev->soc->rxdma_err_dst_ring
4742 [lmac_id],
4743 RXDMA_DST,
4744 &soc_srngs_state->ring_state[j]);
4745
4746 if (status == QDF_STATUS_SUCCESS)
4747 qdf_assert_always(++j < DP_MAX_SRNGS);
4748 }
4749 soc_srngs_state->max_ring_id = j;
4750
4751 qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4752
4753 soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
4754 TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
4755 list_elem);
4756 pdev->bkp_stats.seq_num++;
4757 qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4758
4759 qdf_queue_work(0, pdev->bkp_stats.work_queue,
4760 &pdev->bkp_stats.work);
4761 }
4762
4763 /*
4764 * dp_htt_bkp_event_alert() - htt backpressure event alert
4765
* @msg_word: htt packet context 4766 * @htt_soc: HTT SOC handle 4767 * 4768 * Return: after attempting to print stats 4769 */ 4770 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc) 4771 { 4772 u_int8_t ring_type; 4773 u_int8_t pdev_id; 4774 uint8_t target_pdev_id; 4775 u_int8_t ring_id; 4776 u_int16_t hp_idx; 4777 u_int16_t tp_idx; 4778 u_int32_t bkp_time; 4779 enum htt_t2h_msg_type msg_type; 4780 struct dp_soc *dpsoc; 4781 struct dp_pdev *pdev; 4782 struct dp_htt_timestamp *radio_tt; 4783 4784 if (!soc) 4785 return; 4786 4787 dpsoc = (struct dp_soc *)soc->dp_soc; 4788 msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word); 4789 ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word); 4790 target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word); 4791 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, 4792 target_pdev_id); 4793 if (pdev_id >= MAX_PDEV_CNT) { 4794 dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id); 4795 return; 4796 } 4797 4798 pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id]; 4799 ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word); 4800 hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1)); 4801 tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1)); 4802 bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2)); 4803 radio_tt = &soc->pdevid_tt[pdev_id]; 4804 4805 switch (ring_type) { 4806 case HTT_SW_RING_TYPE_UMAC: 4807 if (!time_allow_print(radio_tt->umac_ttt, ring_id)) 4808 return; 4809 dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx, 4810 bkp_time, "HTT_SW_RING_TYPE_UMAC"); 4811 break; 4812 case HTT_SW_RING_TYPE_LMAC: 4813 if (!time_allow_print(radio_tt->lmac_ttt, ring_id)) 4814 return; 4815 dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx, 4816 bkp_time, "HTT_SW_RING_TYPE_LMAC"); 4817 break; 4818 default: 4819 dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx, 4820 bkp_time, "UNKNOWN"); 4821 break; 4822 } 4823 4824 dp_queue_ring_stats(pdev); 4825 } 4826 4827 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 4828 /* 4829 * dp_offload_ind_handler() - offload msg handler 4830 * @htt_soc: HTT SOC handle 4831 * @msg_word: Pointer to payload 4832 * 4833 * Return: None 4834 */ 4835 static void 4836 dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word) 4837 { 4838 u_int8_t pdev_id; 4839 u_int8_t target_pdev_id; 4840 4841 target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); 4842 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, 4843 target_pdev_id); 4844 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc, 4845 msg_word, HTT_INVALID_VDEV, WDI_NO_VAL, 4846 pdev_id); 4847 } 4848 #else 4849 static void 4850 dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word) 4851 { 4852 } 4853 #endif 4854 4855 /* 4856 * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler 4857 * @context: Opaque context (HTT SOC handle) 4858 * @pkt: HTC packet 4859 */ 4860 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt) 4861 { 4862 struct htt_soc *soc = (struct htt_soc *) context; 4863 qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext; 4864 u_int32_t *msg_word; 4865 enum htt_t2h_msg_type msg_type; 4866 bool free_buf = true; 4867 4868 /* check for successful message reception */ 4869 if (pkt->Status != QDF_STATUS_SUCCESS) { 4870 if (pkt->Status != QDF_STATUS_E_CANCELED) 4871 soc->stats.htc_err_cnt++; 4872 4873 qdf_nbuf_free(htt_t2h_msg); 4874 return; 4875 } 4876 4877 /* TODO: Check if we should pop the HTC/HTT header alignment padding */ 
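	/*
	 * The first word of every T2H message carries the message type;
	 * HTT_T2H_MSG_TYPE_GET() below extracts it and drives the switch.
	 * Handlers that take ownership of the nbuf (e.g. the PPDU stats
	 * indication) may clear free_buf so the buffer is not freed twice
	 * at the end of this function.
	 */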
4878 4879 msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg); 4880 msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word); 4881 htt_event_record(soc->htt_logger_handle, 4882 msg_type, (uint8_t *)msg_word); 4883 switch (msg_type) { 4884 case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: 4885 { 4886 dp_htt_bkp_event_alert(msg_word, soc); 4887 break; 4888 } 4889 case HTT_T2H_MSG_TYPE_PEER_MAP: 4890 { 4891 u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE]; 4892 u_int8_t *peer_mac_addr; 4893 u_int16_t peer_id; 4894 u_int16_t hw_peer_id; 4895 u_int8_t vdev_id; 4896 u_int8_t is_wds; 4897 struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc; 4898 4899 peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word); 4900 hw_peer_id = 4901 HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2)); 4902 vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word); 4903 peer_mac_addr = htt_t2h_mac_addr_deswizzle( 4904 (u_int8_t *) (msg_word+1), 4905 &mac_addr_deswizzle_buf[0]); 4906 QDF_TRACE(QDF_MODULE_ID_TXRX, 4907 QDF_TRACE_LEVEL_INFO, 4908 "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n", 4909 peer_id, vdev_id); 4910 4911 /* 4912 * check if peer already exists for this peer_id, if so 4913 * this peer map event is in response for a wds peer add 4914 * wmi command sent during wds source port learning. 4915 * in this case just add the ast entry to the existing 4916 * peer ast_list. 4917 */ 4918 is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]); 4919 dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id, 4920 vdev_id, peer_mac_addr, 0, 4921 is_wds); 4922 break; 4923 } 4924 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 4925 { 4926 u_int16_t peer_id; 4927 u_int8_t vdev_id; 4928 u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0}; 4929 peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word); 4930 vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word); 4931 4932 dp_rx_peer_unmap_handler(soc->dp_soc, peer_id, 4933 vdev_id, mac_addr, 0, 4934 DP_PEER_WDS_COUNT_INVALID); 4935 break; 4936 } 4937 case HTT_T2H_MSG_TYPE_SEC_IND: 4938 { 4939 u_int16_t peer_id; 4940 enum cdp_sec_type sec_type; 4941 int is_unicast; 4942 4943 peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word); 4944 sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word); 4945 is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word); 4946 /* point to the first part of the Michael key */ 4947 msg_word++; 4948 dp_rx_sec_ind_handler( 4949 soc->dp_soc, peer_id, sec_type, is_unicast, 4950 msg_word, msg_word + 2); 4951 break; 4952 } 4953 4954 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 4955 { 4956 free_buf = dp_ppdu_stats_ind_handler(soc, msg_word, 4957 htt_t2h_msg); 4958 break; 4959 } 4960 4961 case HTT_T2H_MSG_TYPE_PKTLOG: 4962 { 4963 dp_pktlog_msg_handler(soc, msg_word); 4964 break; 4965 } 4966 4967 case HTT_T2H_MSG_TYPE_VERSION_CONF: 4968 { 4969 /* 4970 * HTC maintains runtime pm count for H2T messages that 4971 * have a response msg from FW. This count ensures that 4972 * in the case FW does not sent out the response or host 4973 * did not process this indication runtime_put happens 4974 * properly in the cleanup path. 
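 *
 * htc_dec_return_runtime_cnt(), as the name suggests, decrements the
 * runtime count and hands back the resulting value, so a negative
 * return below means no put is owed (it was already done in the
 * cleanup path); in that case the put is skipped and recorded in
 * htt_ver_req_put_skip.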
4975 */ 4976 if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0) 4977 htc_pm_runtime_put(soc->htc_soc); 4978 else 4979 soc->stats.htt_ver_req_put_skip++; 4980 soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word); 4981 soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word); 4982 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, 4983 "target uses HTT version %d.%d; host uses %d.%d", 4984 soc->tgt_ver.major, soc->tgt_ver.minor, 4985 HTT_CURRENT_VERSION_MAJOR, 4986 HTT_CURRENT_VERSION_MINOR); 4987 if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) { 4988 QDF_TRACE(QDF_MODULE_ID_TXRX, 4989 QDF_TRACE_LEVEL_WARN, 4990 "*** Incompatible host/target HTT versions!"); 4991 } 4992 /* abort if the target is incompatible with the host */ 4993 qdf_assert(soc->tgt_ver.major == 4994 HTT_CURRENT_VERSION_MAJOR); 4995 if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) { 4996 QDF_TRACE(QDF_MODULE_ID_TXRX, 4997 QDF_TRACE_LEVEL_INFO_LOW, 4998 "*** Warning: host/target HTT versions" 4999 " are different, though compatible!"); 5000 } 5001 break; 5002 } 5003 case HTT_T2H_MSG_TYPE_RX_ADDBA: 5004 { 5005 uint16_t peer_id; 5006 uint8_t tid; 5007 uint8_t win_sz; 5008 uint16_t status; 5009 struct dp_peer *peer; 5010 5011 /* 5012 * Update REO Queue Desc with new values 5013 */ 5014 peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word); 5015 tid = HTT_RX_ADDBA_TID_GET(*msg_word); 5016 win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word); 5017 peer = dp_peer_get_ref_by_id(soc->dp_soc, peer_id, 5018 DP_MOD_ID_HTT); 5019 5020 /* 5021 * Window size needs to be incremented by 1 5022 * since fw needs to represent a value of 256 5023 * using just 8 bits 5024 */ 5025 if (peer) { 5026 status = dp_addba_requestprocess_wifi3( 5027 (struct cdp_soc_t *)soc->dp_soc, 5028 peer->mac_addr.raw, peer->vdev->vdev_id, 5029 0, tid, 0, win_sz + 1, 0xffff); 5030 5031 /* 5032 * If PEER_LOCK_REF_PROTECT enbled dec ref 5033 * which is inc by dp_peer_get_ref_by_id 5034 */ 5035 dp_peer_unref_delete(peer, DP_MOD_ID_HTT); 5036 5037 QDF_TRACE(QDF_MODULE_ID_TXRX, 5038 QDF_TRACE_LEVEL_INFO, 5039 FL("PeerID %d BAW %d TID %d stat %d"), 5040 peer_id, win_sz, tid, status); 5041 5042 } else { 5043 QDF_TRACE(QDF_MODULE_ID_TXRX, 5044 QDF_TRACE_LEVEL_ERROR, 5045 FL("Peer not found peer id %d"), 5046 peer_id); 5047 } 5048 break; 5049 } 5050 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 5051 { 5052 dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg); 5053 break; 5054 } 5055 case HTT_T2H_MSG_TYPE_PEER_MAP_V2: 5056 { 5057 u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE]; 5058 u_int8_t *peer_mac_addr; 5059 u_int16_t peer_id; 5060 u_int16_t hw_peer_id; 5061 u_int8_t vdev_id; 5062 bool is_wds; 5063 u_int16_t ast_hash; 5064 struct dp_ast_flow_override_info ast_flow_info; 5065 5066 qdf_mem_set(&ast_flow_info, 0, 5067 sizeof(struct dp_ast_flow_override_info)); 5068 5069 peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word); 5070 hw_peer_id = 5071 HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2)); 5072 vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word); 5073 peer_mac_addr = 5074 htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1), 5075 &mac_addr_deswizzle_buf[0]); 5076 is_wds = 5077 HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3)); 5078 ast_hash = 5079 HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3)); 5080 /* 5081 * Update 4 ast_index per peer, ast valid mask 5082 * and TID flow valid mask. 5083 * AST valid mask is 3 bit field corresponds to 5084 * ast_index[3:1]. ast_index 0 is always valid. 
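 *
 * Example decoding (illustrative values): with ast_valid_mask = 0b011,
 * bits 0 and 1 correspond to ast_index[1] and ast_index[2], so
 * ast_idx[1] and ast_idx[2] below hold valid indices while ast_idx[3]
 * should be ignored; ast_idx[0] is taken from hw_peer_id and is
 * always valid.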
5085 */ 5086 ast_flow_info.ast_valid_mask = 5087 HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3)); 5088 ast_flow_info.ast_idx[0] = hw_peer_id; 5089 ast_flow_info.ast_flow_mask[0] = 5090 HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4)); 5091 ast_flow_info.ast_idx[1] = 5092 HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4)); 5093 ast_flow_info.ast_flow_mask[1] = 5094 HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4)); 5095 ast_flow_info.ast_idx[2] = 5096 HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5)); 5097 ast_flow_info.ast_flow_mask[2] = 5098 HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4)); 5099 ast_flow_info.ast_idx[3] = 5100 HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6)); 5101 ast_flow_info.ast_flow_mask[3] = 5102 HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4)); 5103 /* 5104 * TID valid mask is applicable only 5105 * for HI and LOW priority flows. 5106 * tid_valid_mas is 8 bit field corresponds 5107 * to TID[7:0] 5108 */ 5109 ast_flow_info.tid_valid_low_pri_mask = 5110 HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5)); 5111 ast_flow_info.tid_valid_hi_pri_mask = 5112 HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5)); 5113 5114 QDF_TRACE(QDF_MODULE_ID_TXRX, 5115 QDF_TRACE_LEVEL_INFO, 5116 "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n", 5117 peer_id, vdev_id); 5118 5119 dp_rx_peer_map_handler(soc->dp_soc, peer_id, 5120 hw_peer_id, vdev_id, 5121 peer_mac_addr, ast_hash, 5122 is_wds); 5123 5124 /* 5125 * Update ast indexes for flow override support 5126 * Applicable only for non wds peers 5127 */ 5128 dp_peer_ast_index_flow_queue_map_create( 5129 soc->dp_soc, is_wds, 5130 peer_id, peer_mac_addr, 5131 &ast_flow_info); 5132 5133 break; 5134 } 5135 case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2: 5136 { 5137 u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE]; 5138 u_int8_t *mac_addr; 5139 u_int16_t peer_id; 5140 u_int8_t vdev_id; 5141 u_int8_t is_wds; 5142 u_int32_t free_wds_count; 5143 5144 peer_id = 5145 HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word); 5146 vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word); 5147 mac_addr = 5148 htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1), 5149 &mac_addr_deswizzle_buf[0]); 5150 is_wds = 5151 HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2)); 5152 free_wds_count = 5153 HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4)); 5154 5155 QDF_TRACE(QDF_MODULE_ID_TXRX, 5156 QDF_TRACE_LEVEL_INFO, 5157 "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n", 5158 peer_id, vdev_id); 5159 5160 dp_rx_peer_unmap_handler(soc->dp_soc, peer_id, 5161 vdev_id, mac_addr, 5162 is_wds, free_wds_count); 5163 break; 5164 } 5165 case HTT_T2H_MSG_TYPE_RX_DELBA: 5166 { 5167 uint16_t peer_id; 5168 uint8_t tid; 5169 uint8_t win_sz; 5170 QDF_STATUS status; 5171 5172 peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word); 5173 tid = HTT_RX_DELBA_TID_GET(*msg_word); 5174 win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word); 5175 5176 status = dp_rx_delba_ind_handler( 5177 soc->dp_soc, 5178 peer_id, tid, win_sz); 5179 5180 QDF_TRACE(QDF_MODULE_ID_TXRX, 5181 QDF_TRACE_LEVEL_INFO, 5182 FL("DELBA PeerID %d BAW %d TID %d stat %d"), 5183 peer_id, win_sz, tid, status); 5184 break; 5185 } 5186 case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND: 5187 { 5188 uint16_t num_entries; 5189 uint32_t cmem_ba_lo; 5190 uint32_t cmem_ba_hi; 5191 5192 num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word); 5193 cmem_ba_lo = *(msg_word + 1); 5194 cmem_ba_hi = *(msg_word + 2); 5195 5196 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 5197 
FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"), 5198 num_entries, cmem_ba_lo, cmem_ba_hi); 5199 5200 dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries, 5201 cmem_ba_lo, cmem_ba_hi); 5202 break; 5203 } 5204 case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND: 5205 { 5206 dp_offload_ind_handler(soc, msg_word); 5207 break; 5208 } 5209 default: 5210 break; 5211 }; 5212 5213 /* Free the indication buffer */ 5214 if (free_buf) 5215 qdf_nbuf_free(htt_t2h_msg); 5216 } 5217 5218 /* 5219 * dp_htt_h2t_full() - Send full handler (called from HTC) 5220 * @context: Opaque context (HTT SOC handle) 5221 * @pkt: HTC packet 5222 * 5223 * Return: enum htc_send_full_action 5224 */ 5225 static enum htc_send_full_action 5226 dp_htt_h2t_full(void *context, HTC_PACKET *pkt) 5227 { 5228 return HTC_SEND_FULL_KEEP; 5229 } 5230 5231 /* 5232 * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages 5233 * @context: Opaque context (HTT SOC handle) 5234 * @nbuf: nbuf containing T2H message 5235 * @pipe_id: HIF pipe ID 5236 * 5237 * Return: QDF_STATUS 5238 * 5239 * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which 5240 * will be used for packet log and other high-priority HTT messages. Proper 5241 * HTC connection to be added later once required FW changes are available 5242 */ 5243 static QDF_STATUS 5244 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id) 5245 { 5246 QDF_STATUS rc = QDF_STATUS_SUCCESS; 5247 HTC_PACKET htc_pkt; 5248 5249 qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE); 5250 qdf_mem_zero(&htc_pkt, sizeof(htc_pkt)); 5251 htc_pkt.Status = QDF_STATUS_SUCCESS; 5252 htc_pkt.pPktContext = (void *)nbuf; 5253 dp_htt_t2h_msg_handler(context, &htc_pkt); 5254 5255 return rc; 5256 } 5257 5258 /* 5259 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC 5260 * @htt_soc: HTT SOC handle 5261 * 5262 * Return: QDF_STATUS 5263 */ 5264 static QDF_STATUS 5265 htt_htc_soc_attach(struct htt_soc *soc) 5266 { 5267 struct htc_service_connect_req connect; 5268 struct htc_service_connect_resp response; 5269 QDF_STATUS status; 5270 struct dp_soc *dpsoc = soc->dp_soc; 5271 5272 qdf_mem_zero(&connect, sizeof(connect)); 5273 qdf_mem_zero(&response, sizeof(response)); 5274 5275 connect.pMetaData = NULL; 5276 connect.MetaDataLength = 0; 5277 connect.EpCallbacks.pContext = soc; 5278 connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete; 5279 connect.EpCallbacks.EpTxCompleteMultiple = NULL; 5280 connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler; 5281 5282 /* rx buffers currently are provided by HIF, not by EpRecvRefill */ 5283 connect.EpCallbacks.EpRecvRefill = NULL; 5284 5285 /* N/A, fill is done by HIF */ 5286 connect.EpCallbacks.RecvRefillWaterMark = 1; 5287 5288 connect.EpCallbacks.EpSendFull = dp_htt_h2t_full; 5289 /* 5290 * Specify how deep to let a queue get before htc_send_pkt will 5291 * call the EpSendFull function due to excessive send queue depth. 
5292 */ 5293 connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH; 5294 5295 /* disable flow control for HTT data message service */ 5296 connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; 5297 5298 /* connect to control service */ 5299 connect.service_id = HTT_DATA_MSG_SVC; 5300 5301 status = htc_connect_service(soc->htc_soc, &connect, &response); 5302 5303 if (status != QDF_STATUS_SUCCESS) 5304 return status; 5305 5306 soc->htc_endpoint = response.Endpoint; 5307 5308 hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint); 5309 5310 htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc); 5311 dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc, 5312 dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE); 5313 5314 return QDF_STATUS_SUCCESS; /* success */ 5315 } 5316 5317 /* 5318 * htt_soc_initialize() - SOC level HTT initialization 5319 * @htt_soc: Opaque htt SOC handle 5320 * @ctrl_psoc: Opaque ctrl SOC handle 5321 * @htc_soc: SOC level HTC handle 5322 * @hal_soc: Opaque HAL SOC handle 5323 * @osdev: QDF device 5324 * 5325 * Return: HTT handle on success; NULL on failure 5326 */ 5327 void * 5328 htt_soc_initialize(struct htt_soc *htt_soc, 5329 struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 5330 HTC_HANDLE htc_soc, 5331 hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev) 5332 { 5333 struct htt_soc *soc = (struct htt_soc *)htt_soc; 5334 5335 soc->osdev = osdev; 5336 soc->ctrl_psoc = ctrl_psoc; 5337 soc->htc_soc = htc_soc; 5338 soc->hal_soc = hal_soc_hdl; 5339 5340 if (htt_htc_soc_attach(soc)) 5341 goto fail2; 5342 5343 return soc; 5344 5345 fail2: 5346 return NULL; 5347 } 5348 5349 void htt_soc_htc_dealloc(struct htt_soc *htt_handle) 5350 { 5351 htt_interface_logging_deinit(htt_handle->htt_logger_handle); 5352 htt_htc_misc_pkt_pool_free(htt_handle); 5353 htt_htc_pkt_pool_free(htt_handle); 5354 } 5355 5356 /* 5357 * htt_soc_htc_prealloc() - HTC memory prealloc 5358 * @htt_soc: SOC level HTT handle 5359 * 5360 * Return: QDF_STATUS_SUCCESS on Success or 5361 * QDF_STATUS_E_NOMEM on allocation failure 5362 */ 5363 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc) 5364 { 5365 int i; 5366 5367 soc->htt_htc_pkt_freelist = NULL; 5368 /* pre-allocate some HTC_PACKET objects */ 5369 for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) { 5370 struct dp_htt_htc_pkt_union *pkt; 5371 pkt = qdf_mem_malloc(sizeof(*pkt)); 5372 if (!pkt) 5373 return QDF_STATUS_E_NOMEM; 5374 5375 htt_htc_pkt_free(soc, &pkt->u.pkt); 5376 } 5377 return QDF_STATUS_SUCCESS; 5378 } 5379 5380 /* 5381 * htt_soc_detach() - Free SOC level HTT handle 5382 * @htt_hdl: HTT SOC handle 5383 */ 5384 void htt_soc_detach(struct htt_soc *htt_hdl) 5385 { 5386 int i; 5387 struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl; 5388 5389 for (i = 0; i < MAX_PDEV_CNT; i++) { 5390 qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt); 5391 qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt); 5392 } 5393 5394 HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex); 5395 qdf_mem_free(htt_handle); 5396 5397 } 5398 5399 /** 5400 * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW 5401 * @pdev: DP PDEV handle 5402 * @stats_type_upload_mask: stats type requested by user 5403 * @config_param_0: extra configuration parameters 5404 * @config_param_1: extra configuration parameters 5405 * @config_param_2: extra configuration parameters 5406 * @config_param_3: extra configuration parameters 5407 * @mac_id: mac number 5408 * 5409 * return: QDF STATUS 5410 */ 5411 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev 
*pdev, 5412 uint32_t stats_type_upload_mask, uint32_t config_param_0, 5413 uint32_t config_param_1, uint32_t config_param_2, 5414 uint32_t config_param_3, int cookie_val, int cookie_msb, 5415 uint8_t mac_id) 5416 { 5417 struct htt_soc *soc = pdev->soc->htt_handle; 5418 struct dp_htt_htc_pkt *pkt; 5419 qdf_nbuf_t msg; 5420 uint32_t *msg_word; 5421 uint8_t pdev_mask = 0; 5422 uint8_t *htt_logger_bufp; 5423 int mac_for_pdev; 5424 int target_pdev_id; 5425 QDF_STATUS status; 5426 5427 msg = qdf_nbuf_alloc( 5428 soc->osdev, 5429 HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ), 5430 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 5431 5432 if (!msg) 5433 return QDF_STATUS_E_NOMEM; 5434 5435 /*TODO:Add support for SOC stats 5436 * Bit 0: SOC Stats 5437 * Bit 1: Pdev stats for pdev id 0 5438 * Bit 2: Pdev stats for pdev id 1 5439 * Bit 3: Pdev stats for pdev id 2 5440 */ 5441 mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id); 5442 target_pdev_id = 5443 dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev); 5444 5445 pdev_mask = 1 << target_pdev_id; 5446 5447 /* 5448 * Set the length of the message. 5449 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 5450 * separately during the below call to qdf_nbuf_push_head. 5451 * The contribution from the HTC header is added separately inside HTC. 5452 */ 5453 if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) { 5454 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 5455 "Failed to expand head for HTT_EXT_STATS"); 5456 qdf_nbuf_free(msg); 5457 return QDF_STATUS_E_FAILURE; 5458 } 5459 5460 dp_htt_tx_stats_info("%pK: cookie <-> %d\n config_param_0 %u\n" 5461 "config_param_1 %u\n config_param_2 %u\n" 5462 "config_param_4 %u\n -------------", 5463 pdev->soc, cookie_val, 5464 config_param_0, 5465 config_param_1, config_param_2, config_param_3); 5466 5467 msg_word = (uint32_t *) qdf_nbuf_data(msg); 5468 5469 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 5470 htt_logger_bufp = (uint8_t *)msg_word; 5471 *msg_word = 0; 5472 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ); 5473 HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask); 5474 HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask); 5475 5476 /* word 1 */ 5477 msg_word++; 5478 *msg_word = 0; 5479 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0); 5480 5481 /* word 2 */ 5482 msg_word++; 5483 *msg_word = 0; 5484 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1); 5485 5486 /* word 3 */ 5487 msg_word++; 5488 *msg_word = 0; 5489 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2); 5490 5491 /* word 4 */ 5492 msg_word++; 5493 *msg_word = 0; 5494 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3); 5495 5496 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0); 5497 5498 /* word 5 */ 5499 msg_word++; 5500 5501 /* word 6 */ 5502 msg_word++; 5503 *msg_word = 0; 5504 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val); 5505 5506 /* word 7 */ 5507 msg_word++; 5508 *msg_word = 0; 5509 /* Currently Using last 2 bits for pdev_id 5510 * For future reference, reserving 3 bits in cookie_msb for pdev_id 5511 */ 5512 cookie_msb = (cookie_msb | pdev->pdev_id); 5513 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb); 5514 5515 pkt = htt_htc_pkt_alloc(soc); 5516 if (!pkt) { 5517 qdf_nbuf_free(msg); 5518 return QDF_STATUS_E_NOMEM; 5519 } 5520 5521 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 5522 5523 
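	/*
	 * Bind the HTT message to the HTC packet below: send-complete
	 * callback, payload buffer, destination endpoint, and the
	 * RUNTIME_PUT tag (a FW response to this request is not
	 * guaranteed, as the tag comment notes).
	 */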
	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for FW response msg not guaranteed */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
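/*
 * Usage sketch (illustrative only): issuing an extended stats request
 * through the function above. The stats mask and config params are
 * firmware-defined; the values below are placeholders.
 *
 *	QDF_STATUS status;
 *
 *	status = dp_h2t_ext_stats_msg_send(pdev,
 *					   stats_mask,	// requested stats type(s)
 *					   0, 0, 0, 0,	// config_param_0..3
 *					   cookie_lsb,	// echoed in FW response
 *					   cookie_msb,	// low bits get pdev_id OR'd in
 *					   mac_id);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		...	// msg and pkt were already freed above
 */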
/**
 * dp_h2t_3tuple_config_send(): function to construct the 3-tuple configuration
 * HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @tuple_mask: tuple configuration to report 3-tuple hash value in either
 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
 * @mac_id: mac id number
 *
 * tuple_mask[1:0]:
 * 00 - Do not report 3-tuple hash value
 * 10 - Report 3-tuple hash value in toeplitz_2_or_4
 * 01 - Report 3-tuple hash value in flow_id_toeplitz
 * 11 - Report 3-tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
				     uint32_t tuple_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
						       mac_for_pdev);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to expand head for HTT_3TUPLE_CONFIG");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
		    pdev->soc, tuple_mask, target_pdev_id);

	msg_word = (uint32_t *)qdf_nbuf_data(msg);
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);

	msg_word++;
	*msg_word = 0;
	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
			&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg),
			qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
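/*
 * Usage sketch (illustrative only): per the tuple_mask bit definitions
 * above, a mask of 0x3 asks the target to report the 3-tuple hash in
 * both toeplitz_2_or_4 and flow_id_toeplitz of the MSDU START TLV.
 *
 *	QDF_STATUS status = dp_h2t_3tuple_config_send(pdev, 0x3, mac_id);
 */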
/* This macro will be reverted once a proper HTT header is defined for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the htt.h file.
 */
#if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
				     uint32_t stats_type_upload_mask,
				     uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer",
			   pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* TODO: Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS",
			   pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
					       stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
#endif

void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf)
{
	struct dp_peer *peer = NULL;

	switch (tag_type) {
	case HTT_STATS_PEER_DETAILS_TAG:
	{
		htt_peer_details_tlv *dp_stats_buf =
			(htt_peer_details_tlv *)tag_buf;

		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
	}
	break;
	case HTT_STATS_PEER_STATS_CMN_TAG:
	{
		htt_peer_stats_cmn_tlv *dp_stats_buf =
			(htt_peer_stats_cmn_tlv *)tag_buf;

		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
					     DP_MOD_ID_HTT);

		if (peer && !peer->bss_peer) {
			peer->stats.tx.inactive_time =
				dp_stats_buf->inactive_time;
			qdf_event_set(&pdev->fw_peer_stats_event);
		}
		if (peer)
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
	}
	break;
	default:
		qdf_err("Invalid tag_type");
	}
}
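/*
 * Note on the two-TLV flow above: HTT_STATS_PEER_DETAILS_TAG arrives
 * first and caches the sw peer id in pdev->fw_stats_peer_id; the later
 * HTT_STATS_PEER_STATS_CMN_TAG uses that id to record the peer's Tx
 * inactive time and signals fw_peer_stats_event. A hypothetical waiter
 * on the requesting side would look roughly like:
 *
 *	qdf_event_reset(&pdev->fw_peer_stats_event);
 *	// ... send the HTT ext-stats request for peer stats ...
 *	qdf_wait_single_event(&pdev->fw_peer_stats_event, timeout_ms);
 */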
/**
 * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
 * @pdev: DP pdev handle
 * @fse_setup_info: FST setup parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
	uint8_t *htt_logger_bufp;
	u_int32_t *key;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);

	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;

	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);

	msg_word++;
	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
					     fse_setup_info->ip_da_sa_prefix);

	msg_word++;
	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
					  fse_setup_info->base_addr_lo);
	msg_word++;
	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
					  fse_setup_info->base_addr_hi);

	key = (u_int32_t *)fse_setup_info->hash_key;
	fse_setup->toeplitz31_0 = *key++;
	fse_setup->toeplitz63_32 = *key++;
	fse_setup->toeplitz95_64 = *key++;
	fse_setup->toeplitz127_96 = *key++;
	fse_setup->toeplitz159_128 = *key++;
	fse_setup->toeplitz191_160 = *key++;
	fse_setup->toeplitz223_192 = *key++;
	fse_setup->toeplitz255_224 = *key++;
	fse_setup->toeplitz287_256 = *key++;
	fse_setup->toeplitz314_288 = *key;

	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
					  fse_setup->toeplitz314_288);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
			&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg),
			qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
				     htt_logger_bufp);
	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
			fse_setup_info->pdev_id);
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
				   (void *)fse_setup_info->hash_key,
				   fse_setup_info->hash_key_len);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
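/*
 * Usage sketch (illustrative only): a minimal FST setup request with
 * placeholder sizes; fst_paddr is assumed to be a DMA address the
 * caller has already mapped for the flow search table, and
 * toeplitz_key an appropriately sized key buffer.
 *
 *	struct dp_htt_rx_flow_fst_setup info = {0};
 *	QDF_STATUS status;
 *
 *	info.pdev_id = pdev->pdev_id;
 *	info.max_entries = 1024;
 *	info.max_search = 8;
 *	info.base_addr_lo = (uint32_t)(fst_paddr & 0xffffffff);
 *	info.base_addr_hi = (uint32_t)(fst_paddr >> 32);
 *	info.hash_key = toeplitz_key;
 *	info.hash_key_len = 40;	// ten 32-bit words, as packed above
 *	status = dp_htt_rx_flow_fst_setup(pdev, &info);
 */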
/**
 * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
 * add/del a flow in HW
 * @pdev: DP pdev handle
 * @fse_op_info: Flow entry parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(*msg_word,
			qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(*msg_word,
			qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(*msg_word,
			qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(*msg_word,
			qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(*msg_word,
			qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(*msg_word,
			qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(*msg_word,
			qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(*msg_word,
			qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
			&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg),
			qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
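/*
 * Usage sketch (illustrative only): invalidating one cached flow entry.
 * For DP_HTT_FST_CACHE_INVALIDATE_ENTRY the function dereferences
 * op.rx_flow for the flow tuple, so it must point at a valid flow; for
 * the _FULL, _DISABLE and _ENABLE op codes it is not read.
 *
 *	struct dp_htt_rx_flow_fst_operation op = {0};
 *	QDF_STATUS status;
 *
 *	op.pdev_id = pdev->pdev_id;
 *	op.op_code = DP_HTT_FST_CACHE_INVALIDATE_ENTRY;
 *	op.rx_flow = &flow_info;	// supplies flow_tuple_info
 *	status = dp_htt_rx_flow_fse_operation(pdev, &op);
 */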
/**
 * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
 * @pdev: DP pdev handle
 * @fisa_config: FISA configuration parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_fisa_config(struct dp_pdev *pdev,
		      struct dp_htt_rx_fisa_cfg *fisa_config)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
	uint8_t *htt_logger_bufp;
	uint32_t len;
	QDF_STATUS status;

	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
		qdf_err("Failed to expand head for HTT_RX_FISA_CFG msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);

	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fisa_config->pdev_id);

	msg_word++;
	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);

	msg_word++;
	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
			fisa_config->pdev_id);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
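/*
 * Usage sketch (illustrative only): enabling FISA with a placeholder
 * timeout. Only pdev_id and fisa_timeout are consumed by the function
 * above; the aggregation limit is fixed at 0xf in the message itself.
 *
 *	struct dp_htt_rx_fisa_cfg cfg = {0};
 *	QDF_STATUS status;
 *
 *	cfg.pdev_id = pdev->pdev_id;
 *	cfg.fisa_timeout = fisa_timeout;	// units are target-defined
 *	status = dp_htt_rx_fisa_config(pdev, &cfg);
 */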
/**
 * dp_bk_pressure_stats_handler(): worker function to print back pressure
 * stats
 * @context: argument to work function
 */
static void dp_bk_pressure_stats_handler(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_soc_srngs_state *soc_srngs_state, *soc_srngs_state_next;
	const char *ring_name;
	int i;
	struct dp_srng_ring_state *ring_state;
	TAILQ_HEAD(, dp_soc_srngs_state) soc_srngs_state_list;

	TAILQ_INIT(&soc_srngs_state_list);
	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
	TAILQ_CONCAT(&soc_srngs_state_list, &pdev->bkp_stats.list,
		     list_elem);
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);

	TAILQ_FOREACH_SAFE(soc_srngs_state, &soc_srngs_state_list,
			   list_elem, soc_srngs_state_next) {
		TAILQ_REMOVE(&soc_srngs_state_list, soc_srngs_state,
			     list_elem);

		DP_PRINT_STATS("### START BKP stats for seq_num %u ###",
			       soc_srngs_state->seq_num);
		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
			ring_state = &soc_srngs_state->ring_state[i];
			ring_name = dp_srng_get_str_from_hal_ring_type
					(ring_state->ring_type);
			DP_PRINT_STATS("%s: SW: Head pointer = %d Tail pointer = %d\n",
				       ring_name,
				       ring_state->sw_head,
				       ring_state->sw_tail);

			DP_PRINT_STATS("%s: HW: Head pointer = %d Tail pointer = %d\n",
				       ring_name,
				       ring_state->hw_head,
				       ring_state->hw_tail);
		}

		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
			       soc_srngs_state->seq_num);
		qdf_mem_free(soc_srngs_state);
	}
	dp_print_napi_stats(pdev->soc);
}

/*
 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
 * processing
 * @pdev: Datapath PDEV handle
 *
 * Return: None
 */
void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
{
	struct dp_soc_srngs_state *ring_state, *ring_state_next;

	if (!pdev->bkp_stats.work_queue)
		return;

	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
	qdf_flush_work(&pdev->bkp_stats.work);
	qdf_disable_work(&pdev->bkp_stats.work);
	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
			   list_elem, ring_state_next) {
		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
			     list_elem);
		qdf_mem_free(ring_state);
	}
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
}

/*
 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
 * processing
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
{
	TAILQ_INIT(&pdev->bkp_stats.list);
	pdev->bkp_stats.seq_num = 0;

	qdf_create_work(0, &pdev->bkp_stats.work,
			dp_bk_pressure_stats_handler, pdev);

	pdev->bkp_stats.work_queue =
		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
	if (!pdev->bkp_stats.work_queue)
		goto fail;

	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
	return QDF_STATUS_SUCCESS;

fail:
	dp_htt_alert("BKP stats attach failed");
	qdf_flush_work(&pdev->bkp_stats.work);
	qdf_disable_work(&pdev->bkp_stats.work);
	return QDF_STATUS_E_FAILURE;
}
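/*
 * Usage sketch (illustrative only): how a pdev attach/detach path might
 * wire up the back-pressure stats machinery above. Attach must complete
 * before any ring-state snapshot is queued, and detach drains both the
 * workqueue and any snapshots still on the list.
 *
 *	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS)
 *		goto fail;
 *	...
 *	dp_pdev_bkp_stats_detach(pdev);
 */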