1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <htt.h> 20 #include <hal_api.h> 21 #include "dp_htt.h" 22 #include "dp_peer.h" 23 #include "dp_types.h" 24 #include "dp_internal.h" 25 #include "dp_rx_mon.h" 26 #include "htt_stats.h" 27 #include "htt_ppdu_stats.h" 28 #include "qdf_mem.h" /* qdf_mem_malloc,free */ 29 #include "cdp_txrx_cmn_struct.h" 30 31 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE 32 33 #define HTT_HTC_PKT_POOL_INIT_SIZE 64 34 #define HTT_T2H_MAX_MSG_SIZE 2048 35 36 #define HTT_MSG_BUF_SIZE(msg_bytes) \ 37 ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING) 38 39 #define HTT_PID_BIT_MASK 0x3 40 41 #define DP_EXT_MSG_LENGTH 2048 42 #define DP_HTT_SEND_HTC_PKT(soc, pkt) \ 43 do { \ 44 if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) == \ 45 QDF_STATUS_SUCCESS) \ 46 htt_htc_misc_pkt_list_add(soc, pkt); \ 47 } while (0) 48 49 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16 50 51 /** 52 * Bitmap of HTT PPDU TLV types for Default mode 53 */ 54 #define HTT_PPDU_DEFAULT_TLV_BITMAP \ 55 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 56 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 57 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 58 (1 << 
HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 59 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 60 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) 61 62 /** 63 * Bitmap of HTT PPDU TLV types for Sniffer mode 64 */ 65 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \ 66 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 67 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 68 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 69 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 70 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 71 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 72 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \ 73 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \ 74 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) 75 76 #define HTT_FRAMECTRL_DATATYPE 0x08 77 #define HTT_PPDU_DESC_MAX_DEPTH 16 78 79 /* 80 * dp_tx_stats_update() - Update per-peer statistics 81 * @soc: Datapath soc handle 82 * @peer: Datapath peer handle 83 * @ppdu: PPDU Descriptor 84 * @ack_rssi: RSSI of last ack received 85 * 86 * Return: None 87 */ 88 #ifdef FEATURE_PERPKT_INFO 89 static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer, 90 struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi) 91 { 92 struct dp_pdev *pdev = peer->vdev->pdev; 93 uint8_t preamble, mcs; 94 uint16_t num_msdu; 95 96 preamble = ppdu->preamble; 97 mcs = ppdu->mcs; 98 num_msdu = ppdu->num_msdu; 99 100 /* If the peer statistics are already processed as part of 101 * per-MSDU completion handler, do not process these again in per-PPDU 102 * indications */ 103 if (soc->process_tx_status) 104 return; 105 106 DP_STATS_INC_PKT(peer, tx.comp_pkt, 107 num_msdu, (ppdu->success_bytes + 108 ppdu->retry_bytes + ppdu->failed_bytes)); 109 DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus); 110 DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate); 111 DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu); 112 DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu); 113 DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu); 114 DP_STATS_INC(peer, 
tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu); 115 DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc); 116 DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc); 117 if (!(ppdu->is_mcast)) 118 DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi); 119 120 DP_STATS_INC(peer, tx.retries, 121 (ppdu->long_retries + ppdu->short_retries)); 122 DP_STATS_INCC(peer, 123 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, 124 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); 125 DP_STATS_INCC(peer, 126 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 127 ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); 128 DP_STATS_INCC(peer, 129 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, 130 ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); 131 DP_STATS_INCC(peer, 132 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 133 ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B))); 134 DP_STATS_INCC(peer, 135 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, 136 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); 137 DP_STATS_INCC(peer, 138 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 139 ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); 140 DP_STATS_INCC(peer, 141 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, 142 ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); 143 DP_STATS_INCC(peer, 144 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 145 ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); 146 DP_STATS_INCC(peer, 147 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, 148 ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX))); 149 DP_STATS_INCC(peer, 150 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 151 ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX))); 152 153 if (soc->cdp_soc.ol_ops->update_dp_stats) { 154 soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev, 155 &peer->stats, ppdu->peer_id, 156 UPDATE_PEER_STATS); 157 158 } 159 } 160 #endif 161 162 /* 163 * htt_htc_pkt_alloc() - Allocate HTC packet buffer 164 * @htt_soc: HTT SOC handle 165 * 166 * Return: Pointer to htc 
packet buffer 167 */ 168 static struct dp_htt_htc_pkt * 169 htt_htc_pkt_alloc(struct htt_soc *soc) 170 { 171 struct dp_htt_htc_pkt_union *pkt = NULL; 172 173 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 174 if (soc->htt_htc_pkt_freelist) { 175 pkt = soc->htt_htc_pkt_freelist; 176 soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next; 177 } 178 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 179 180 if (pkt == NULL) 181 pkt = qdf_mem_malloc(sizeof(*pkt)); 182 return &pkt->u.pkt; /* not actually a dereference */ 183 } 184 185 /* 186 * htt_htc_pkt_free() - Free HTC packet buffer 187 * @htt_soc: HTT SOC handle 188 */ 189 static void 190 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) 191 { 192 struct dp_htt_htc_pkt_union *u_pkt = 193 (struct dp_htt_htc_pkt_union *)pkt; 194 195 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 196 u_pkt->u.next = soc->htt_htc_pkt_freelist; 197 soc->htt_htc_pkt_freelist = u_pkt; 198 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 199 } 200 201 /* 202 * htt_htc_pkt_pool_free() - Free HTC packet pool 203 * @htt_soc: HTT SOC handle 204 */ 205 static void 206 htt_htc_pkt_pool_free(struct htt_soc *soc) 207 { 208 struct dp_htt_htc_pkt_union *pkt, *next; 209 pkt = soc->htt_htc_pkt_freelist; 210 while (pkt) { 211 next = pkt->u.next; 212 qdf_mem_free(pkt); 213 pkt = next; 214 } 215 soc->htt_htc_pkt_freelist = NULL; 216 } 217 218 /* 219 * htt_htc_misc_pkt_list_trim() - trim misc list 220 * @htt_soc: HTT SOC handle 221 * @level: max no. 
of pkts in list 222 */ 223 static void 224 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level) 225 { 226 struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL; 227 int i = 0; 228 qdf_nbuf_t netbuf; 229 230 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 231 pkt = soc->htt_htc_pkt_misclist; 232 while (pkt) { 233 next = pkt->u.next; 234 /* trim the out grown list*/ 235 if (++i > level) { 236 netbuf = 237 (qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext); 238 qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE); 239 qdf_nbuf_free(netbuf); 240 qdf_mem_free(pkt); 241 pkt = NULL; 242 if (prev) 243 prev->u.next = NULL; 244 } 245 prev = pkt; 246 pkt = next; 247 } 248 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 249 } 250 251 /* 252 * htt_htc_misc_pkt_list_add() - Add pkt to misc list 253 * @htt_soc: HTT SOC handle 254 * @dp_htt_htc_pkt: pkt to be added to list 255 */ 256 static void 257 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) 258 { 259 struct dp_htt_htc_pkt_union *u_pkt = 260 (struct dp_htt_htc_pkt_union *)pkt; 261 int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc, 262 pkt->htc_pkt.Endpoint) 263 + DP_HTT_HTC_PKT_MISCLIST_SIZE; 264 265 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 266 if (soc->htt_htc_pkt_misclist) { 267 u_pkt->u.next = soc->htt_htc_pkt_misclist; 268 soc->htt_htc_pkt_misclist = u_pkt; 269 } else { 270 soc->htt_htc_pkt_misclist = u_pkt; 271 } 272 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 273 274 /* only ce pipe size + tx_queue_depth could possibly be in use 275 * free older packets in the misclist 276 */ 277 htt_htc_misc_pkt_list_trim(soc, misclist_trim_level); 278 } 279 280 /* 281 * htt_htc_misc_pkt_pool_free() - free pkts in misc list 282 * @htt_soc: HTT SOC handle 283 */ 284 static void 285 htt_htc_misc_pkt_pool_free(struct htt_soc *soc) 286 { 287 struct dp_htt_htc_pkt_union *pkt, *next; 288 qdf_nbuf_t netbuf; 289 290 pkt = soc->htt_htc_pkt_misclist; 291 292 while (pkt) { 293 next = pkt->u.next; 294 netbuf = 
(qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext); 295 qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE); 296 297 soc->stats.htc_pkt_free++; 298 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, 299 "%s: Pkt free count %d\n", 300 __func__, soc->stats.htc_pkt_free); 301 302 qdf_nbuf_free(netbuf); 303 qdf_mem_free(pkt); 304 pkt = next; 305 } 306 soc->htt_htc_pkt_misclist = NULL; 307 } 308 309 /* 310 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ 311 * @tgt_mac_addr: Target MAC 312 * @buffer: Output buffer 313 */ 314 static u_int8_t * 315 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer) 316 { 317 #ifdef BIG_ENDIAN_HOST 318 /* 319 * The host endianness is opposite of the target endianness. 320 * To make u_int32_t elements come out correctly, the target->host 321 * upload has swizzled the bytes in each u_int32_t element of the 322 * message. 323 * For byte-array message fields like the MAC address, this 324 * upload swizzling puts the bytes in the wrong order, and needs 325 * to be undone. 326 */ 327 buffer[0] = tgt_mac_addr[3]; 328 buffer[1] = tgt_mac_addr[2]; 329 buffer[2] = tgt_mac_addr[1]; 330 buffer[3] = tgt_mac_addr[0]; 331 buffer[4] = tgt_mac_addr[7]; 332 buffer[5] = tgt_mac_addr[6]; 333 return buffer; 334 #else 335 /* 336 * The host endianness matches the target endianness - 337 * we can use the mac addr directly from the message buffer. 
338 */ 339 return tgt_mac_addr; 340 #endif 341 } 342 343 /* 344 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer 345 * @soc: SOC handle 346 * @status: Completion status 347 * @netbuf: HTT buffer 348 */ 349 static void 350 dp_htt_h2t_send_complete_free_netbuf( 351 void *soc, A_STATUS status, qdf_nbuf_t netbuf) 352 { 353 qdf_nbuf_free(netbuf); 354 } 355 356 /* 357 * dp_htt_h2t_send_complete() - H2T completion handler 358 * @context: Opaque context (HTT SOC handle) 359 * @htc_pkt: HTC packet 360 */ 361 static void 362 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) 363 { 364 void (*send_complete_part2)( 365 void *soc, A_STATUS status, qdf_nbuf_t msdu); 366 struct htt_soc *soc = (struct htt_soc *) context; 367 struct dp_htt_htc_pkt *htt_pkt; 368 qdf_nbuf_t netbuf; 369 370 send_complete_part2 = htc_pkt->pPktContext; 371 372 htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt); 373 374 /* process (free or keep) the netbuf that held the message */ 375 netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext; 376 /* 377 * adf sendcomplete is required for windows only 378 */ 379 /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */ 380 if (send_complete_part2 != NULL) { 381 send_complete_part2( 382 htt_pkt->soc_ctxt, htc_pkt->Status, netbuf); 383 } 384 /* free the htt_htc_pkt / HTC_PACKET object */ 385 htt_htc_pkt_free(soc, htt_pkt); 386 } 387 388 /* 389 * htt_h2t_ver_req_msg() - Send HTT version request message to target 390 * @htt_soc: HTT SOC handle 391 * 392 * Return: 0 on success; error code on failure 393 */ 394 static int htt_h2t_ver_req_msg(struct htt_soc *soc) 395 { 396 struct dp_htt_htc_pkt *pkt; 397 qdf_nbuf_t msg; 398 uint32_t *msg_word; 399 400 msg = qdf_nbuf_alloc( 401 soc->osdev, 402 HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES), 403 /* reserve room for the HTC header */ 404 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 405 if (!msg) 406 return QDF_STATUS_E_NOMEM; 407 408 /* 409 * Set the length of the message. 
410 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 411 * separately during the below call to qdf_nbuf_push_head. 412 * The contribution from the HTC header is added separately inside HTC. 413 */ 414 if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) { 415 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 416 "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg\n", 417 __func__); 418 return QDF_STATUS_E_FAILURE; 419 } 420 421 /* fill in the message contents */ 422 msg_word = (u_int32_t *) qdf_nbuf_data(msg); 423 424 /* rewind beyond alignment pad to get to the HTC header reserved area */ 425 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 426 427 *msg_word = 0; 428 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ); 429 430 pkt = htt_htc_pkt_alloc(soc); 431 if (!pkt) { 432 qdf_nbuf_free(msg); 433 return QDF_STATUS_E_FAILURE; 434 } 435 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 436 437 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 438 dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg), 439 qdf_nbuf_len(msg), soc->htc_endpoint, 440 1); /* tag - not relevant here */ 441 442 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 443 DP_HTT_SEND_HTC_PKT(soc, pkt); 444 return 0; 445 } 446 447 /* 448 * htt_srng_setup() - Send SRNG setup message to target 449 * @htt_soc: HTT SOC handle 450 * @mac_id: MAC Id 451 * @hal_srng: Opaque HAL SRNG pointer 452 * @hal_ring_type: SRNG ring type 453 * 454 * Return: 0 on success; error code on failure 455 */ 456 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng, 457 int hal_ring_type) 458 { 459 struct htt_soc *soc = (struct htt_soc *)htt_soc; 460 struct dp_htt_htc_pkt *pkt; 461 qdf_nbuf_t htt_msg; 462 uint32_t *msg_word; 463 struct hal_srng_params srng_params; 464 qdf_dma_addr_t hp_addr, tp_addr; 465 uint32_t ring_entry_size = 466 hal_srng_get_entrysize(soc->hal_soc, hal_ring_type); 467 int htt_ring_type, htt_ring_id; 468 469 /* Sizes should be set in 4-byte 
words */ 470 ring_entry_size = ring_entry_size >> 2; 471 472 htt_msg = qdf_nbuf_alloc(soc->osdev, 473 HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ), 474 /* reserve room for the HTC header */ 475 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 476 if (!htt_msg) 477 goto fail0; 478 479 hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params); 480 hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng); 481 tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng); 482 483 switch (hal_ring_type) { 484 case RXDMA_BUF: 485 #ifdef QCA_HOST2FW_RXBUF_RING 486 if (srng_params.ring_id == 487 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) { 488 htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; 489 htt_ring_type = HTT_SW_TO_SW_RING; 490 #ifdef IPA_OFFLOAD 491 } else if (srng_params.ring_id == 492 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) { 493 htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING; 494 htt_ring_type = HTT_SW_TO_SW_RING; 495 #endif 496 #else 497 if (srng_params.ring_id == 498 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + 499 (mac_id * HAL_MAX_RINGS_PER_LMAC))) { 500 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 501 htt_ring_type = HTT_SW_TO_HW_RING; 502 #endif 503 } else if (srng_params.ring_id == 504 #ifdef IPA_OFFLOAD 505 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 + 506 #else 507 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF + 508 #endif 509 (mac_id * HAL_MAX_RINGS_PER_LMAC))) { 510 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 511 htt_ring_type = HTT_SW_TO_HW_RING; 512 } else { 513 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 514 "%s: Ring %d currently not supported\n", 515 __func__, srng_params.ring_id); 516 goto fail1; 517 } 518 519 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 520 "%s: ring_type %d ring_id %d\n", 521 __func__, hal_ring_type, srng_params.ring_id); 522 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 523 "%s: hp_addr 0x%llx tp_addr 0x%llx\n", 524 __func__, (uint64_t)hp_addr, (uint64_t)tp_addr); 525 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 526 "%s: htt_ring_id %d\n", __func__, htt_ring_id); 527 break; 528 case 
RXDMA_MONITOR_BUF: 529 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; 530 htt_ring_type = HTT_SW_TO_HW_RING; 531 break; 532 case RXDMA_MONITOR_STATUS: 533 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; 534 htt_ring_type = HTT_SW_TO_HW_RING; 535 break; 536 case RXDMA_MONITOR_DST: 537 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; 538 htt_ring_type = HTT_HW_TO_SW_RING; 539 break; 540 case RXDMA_MONITOR_DESC: 541 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 542 htt_ring_type = HTT_SW_TO_HW_RING; 543 break; 544 case RXDMA_DST: 545 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; 546 htt_ring_type = HTT_HW_TO_SW_RING; 547 break; 548 549 default: 550 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 551 "%s: Ring currently not supported\n", __func__); 552 goto fail1; 553 } 554 555 /* 556 * Set the length of the message. 557 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 558 * separately during the below call to qdf_nbuf_push_head. 559 * The contribution from the HTC header is added separately inside HTC. 
560 */ 561 if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) { 562 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 563 "%s: Failed to expand head for SRING_SETUP msg\n", 564 __func__); 565 return QDF_STATUS_E_FAILURE; 566 } 567 568 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 569 570 /* rewind beyond alignment pad to get to the HTC header reserved area */ 571 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 572 573 /* word 0 */ 574 *msg_word = 0; 575 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP); 576 577 if ((htt_ring_type == HTT_SW_TO_HW_RING) || 578 (htt_ring_type == HTT_HW_TO_SW_RING)) 579 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, 580 DP_SW2HW_MACID(mac_id)); 581 else 582 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id); 583 584 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 585 "%s: mac_id %d\n", __func__, mac_id); 586 HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type); 587 /* TODO: Discuss with FW on changing this to unique ID and using 588 * htt_ring_type to send the type of ring 589 */ 590 HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id); 591 592 /* word 1 */ 593 msg_word++; 594 *msg_word = 0; 595 HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word, 596 srng_params.ring_base_paddr & 0xffffffff); 597 598 /* word 2 */ 599 msg_word++; 600 *msg_word = 0; 601 HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word, 602 (uint64_t)srng_params.ring_base_paddr >> 32); 603 604 /* word 3 */ 605 msg_word++; 606 *msg_word = 0; 607 HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size); 608 HTT_SRING_SETUP_RING_SIZE_SET(*msg_word, 609 (ring_entry_size * srng_params.num_entries)); 610 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 611 "%s: entry_size %d\n", __func__, 612 ring_entry_size); 613 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 614 "%s: num_entries %d\n", __func__, 615 srng_params.num_entries); 616 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 617 "%s: ring_size %d\n", __func__, 618 (ring_entry_size * 
srng_params.num_entries)); 619 if (htt_ring_type == HTT_SW_TO_HW_RING) 620 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET( 621 *msg_word, 1); 622 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word, 623 !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); 624 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word, 625 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); 626 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word, 627 !!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP)); 628 629 /* word 4 */ 630 msg_word++; 631 *msg_word = 0; 632 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, 633 hp_addr & 0xffffffff); 634 635 /* word 5 */ 636 msg_word++; 637 *msg_word = 0; 638 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, 639 (uint64_t)hp_addr >> 32); 640 641 /* word 6 */ 642 msg_word++; 643 *msg_word = 0; 644 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, 645 tp_addr & 0xffffffff); 646 647 /* word 7 */ 648 msg_word++; 649 *msg_word = 0; 650 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, 651 (uint64_t)tp_addr >> 32); 652 653 /* word 8 */ 654 msg_word++; 655 *msg_word = 0; 656 HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word, 657 srng_params.msi_addr & 0xffffffff); 658 659 /* word 9 */ 660 msg_word++; 661 *msg_word = 0; 662 HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word, 663 (uint64_t)(srng_params.msi_addr) >> 32); 664 665 /* word 10 */ 666 msg_word++; 667 *msg_word = 0; 668 HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word, 669 srng_params.msi_data); 670 671 /* word 11 */ 672 msg_word++; 673 *msg_word = 0; 674 HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word, 675 srng_params.intr_batch_cntr_thres_entries * 676 ring_entry_size); 677 HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word, 678 srng_params.intr_timer_thres_us >> 3); 679 680 /* word 12 */ 681 msg_word++; 682 *msg_word = 0; 683 if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) { 684 /* TODO: Setting low threshold to 1/8th of ring size 
- see 685 * if this needs to be configurable 686 */ 687 HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word, 688 srng_params.low_threshold); 689 } 690 /* "response_required" field should be set if a HTT response message is 691 * required after setting up the ring. 692 */ 693 pkt = htt_htc_pkt_alloc(soc); 694 if (!pkt) 695 goto fail1; 696 697 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 698 699 SET_HTC_PACKET_INFO_TX( 700 &pkt->htc_pkt, 701 dp_htt_h2t_send_complete_free_netbuf, 702 qdf_nbuf_data(htt_msg), 703 qdf_nbuf_len(htt_msg), 704 soc->htc_endpoint, 705 HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */ 706 707 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); 708 DP_HTT_SEND_HTC_PKT(soc, pkt); 709 710 return QDF_STATUS_SUCCESS; 711 712 fail1: 713 qdf_nbuf_free(htt_msg); 714 fail0: 715 return QDF_STATUS_E_FAILURE; 716 } 717 718 /* 719 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter 720 * config message to target 721 * @htt_soc: HTT SOC handle 722 * @pdev_id: PDEV Id 723 * @hal_srng: Opaque HAL SRNG pointer 724 * @hal_ring_type: SRNG ring type 725 * @ring_buf_size: SRNG buffer size 726 * @htt_tlv_filter: Rx SRNG TLV and filter setting 727 * Return: 0 on success; error code on failure 728 */ 729 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng, 730 int hal_ring_type, int ring_buf_size, 731 struct htt_rx_ring_tlv_filter *htt_tlv_filter) 732 { 733 struct htt_soc *soc = (struct htt_soc *)htt_soc; 734 struct dp_htt_htc_pkt *pkt; 735 qdf_nbuf_t htt_msg; 736 uint32_t *msg_word; 737 struct hal_srng_params srng_params; 738 uint32_t htt_ring_type, htt_ring_id; 739 uint32_t tlv_filter; 740 741 htt_msg = qdf_nbuf_alloc(soc->osdev, 742 HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ), 743 /* reserve room for the HTC header */ 744 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 745 if (!htt_msg) 746 goto fail0; 747 748 hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params); 749 750 switch (hal_ring_type) { 751 
case RXDMA_BUF: 752 #if QCA_HOST2FW_RXBUF_RING 753 htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; 754 htt_ring_type = HTT_SW_TO_SW_RING; 755 #else 756 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 757 htt_ring_type = HTT_SW_TO_HW_RING; 758 #endif 759 break; 760 case RXDMA_MONITOR_BUF: 761 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; 762 htt_ring_type = HTT_SW_TO_HW_RING; 763 break; 764 case RXDMA_MONITOR_STATUS: 765 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; 766 htt_ring_type = HTT_SW_TO_HW_RING; 767 break; 768 case RXDMA_MONITOR_DST: 769 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; 770 htt_ring_type = HTT_HW_TO_SW_RING; 771 break; 772 case RXDMA_MONITOR_DESC: 773 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 774 htt_ring_type = HTT_SW_TO_HW_RING; 775 break; 776 case RXDMA_DST: 777 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; 778 htt_ring_type = HTT_HW_TO_SW_RING; 779 break; 780 781 default: 782 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 783 "%s: Ring currently not supported\n", __func__); 784 goto fail1; 785 } 786 787 /* 788 * Set the length of the message. 789 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 790 * separately during the below call to qdf_nbuf_push_head. 791 * The contribution from the HTC header is added separately inside HTC. 
792 */ 793 if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) { 794 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 795 "%s: Failed to expand head for RX Ring Cfg msg\n", 796 __func__); 797 goto fail1; /* failure */ 798 } 799 800 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 801 802 /* rewind beyond alignment pad to get to the HTC header reserved area */ 803 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 804 805 /* word 0 */ 806 *msg_word = 0; 807 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG); 808 809 /* 810 * pdev_id is indexed from 0 whereas mac_id is indexed from 1 811 * SW_TO_SW and SW_TO_HW rings are unaffected by this 812 */ 813 if (htt_ring_type == HTT_SW_TO_SW_RING || 814 htt_ring_type == HTT_SW_TO_HW_RING) 815 HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word, 816 DP_SW2HW_MACID(pdev_id)); 817 818 /* TODO: Discuss with FW on changing this to unique ID and using 819 * htt_ring_type to send the type of ring 820 */ 821 HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id); 822 823 HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word, 824 !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); 825 826 HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word, 827 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); 828 829 /* word 1 */ 830 msg_word++; 831 *msg_word = 0; 832 HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word, 833 ring_buf_size); 834 835 /* word 2 */ 836 msg_word++; 837 *msg_word = 0; 838 839 if (htt_tlv_filter->enable_fp) { 840 /* TYPE: MGMT */ 841 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 842 FP, MGMT, 0000, 843 (htt_tlv_filter->fp_mgmt_filter & 844 FILTER_MGMT_ASSOC_REQ) ? 1 : 0); 845 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 846 FP, MGMT, 0001, 847 (htt_tlv_filter->fp_mgmt_filter & 848 FILTER_MGMT_ASSOC_RES) ? 1 : 0); 849 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 850 FP, MGMT, 0010, 851 (htt_tlv_filter->fp_mgmt_filter & 852 FILTER_MGMT_REASSOC_REQ) ? 
1 : 0); 853 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 854 FP, MGMT, 0011, 855 (htt_tlv_filter->fp_mgmt_filter & 856 FILTER_MGMT_REASSOC_RES) ? 1 : 0); 857 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 858 FP, MGMT, 0100, 859 (htt_tlv_filter->fp_mgmt_filter & 860 FILTER_MGMT_PROBE_REQ) ? 1 : 0); 861 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 862 FP, MGMT, 0101, 863 (htt_tlv_filter->fp_mgmt_filter & 864 FILTER_MGMT_PROBE_RES) ? 1 : 0); 865 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 866 FP, MGMT, 0110, 867 (htt_tlv_filter->fp_mgmt_filter & 868 FILTER_MGMT_TIM_ADVT) ? 1 : 0); 869 /* reserved */ 870 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 871 MGMT, 0111, 872 (htt_tlv_filter->fp_mgmt_filter & 873 FILTER_MGMT_RESERVED_7) ? 1 : 0); 874 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 875 FP, MGMT, 1000, 876 (htt_tlv_filter->fp_mgmt_filter & 877 FILTER_MGMT_BEACON) ? 1 : 0); 878 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 879 FP, MGMT, 1001, 880 (htt_tlv_filter->fp_mgmt_filter & 881 FILTER_MGMT_ATIM) ? 
1 : 0); 882 } 883 884 if (htt_tlv_filter->enable_md) { 885 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 886 MGMT, 0000, 1); 887 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 888 MGMT, 0001, 1); 889 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 890 MGMT, 0010, 1); 891 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 892 MGMT, 0011, 1); 893 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 894 MGMT, 0100, 1); 895 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 896 MGMT, 0101, 1); 897 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 898 MGMT, 0110, 1); 899 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 900 MGMT, 0111, 1); 901 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 902 MGMT, 1000, 1); 903 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 904 MGMT, 1001, 1); 905 } 906 907 if (htt_tlv_filter->enable_mo) { 908 /* TYPE: MGMT */ 909 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 910 MO, MGMT, 0000, 911 (htt_tlv_filter->mo_mgmt_filter & 912 FILTER_MGMT_ASSOC_REQ) ? 1 : 0); 913 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 914 MO, MGMT, 0001, 915 (htt_tlv_filter->mo_mgmt_filter & 916 FILTER_MGMT_ASSOC_RES) ? 1 : 0); 917 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 918 MO, MGMT, 0010, 919 (htt_tlv_filter->mo_mgmt_filter & 920 FILTER_MGMT_REASSOC_REQ) ? 1 : 0); 921 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 922 MO, MGMT, 0011, 923 (htt_tlv_filter->mo_mgmt_filter & 924 FILTER_MGMT_REASSOC_RES) ? 1 : 0); 925 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 926 MO, MGMT, 0100, 927 (htt_tlv_filter->mo_mgmt_filter & 928 FILTER_MGMT_PROBE_REQ) ? 1 : 0); 929 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 930 MO, MGMT, 0101, 931 (htt_tlv_filter->mo_mgmt_filter & 932 FILTER_MGMT_PROBE_RES) ? 1 : 0); 933 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 934 MO, MGMT, 0110, 935 (htt_tlv_filter->mo_mgmt_filter & 936 FILTER_MGMT_TIM_ADVT) ? 
1 : 0); 937 /* reserved */ 938 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 939 MGMT, 0111, 940 (htt_tlv_filter->mo_mgmt_filter & 941 FILTER_MGMT_RESERVED_7) ? 1 : 0); 942 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 943 MO, MGMT, 1000, 944 (htt_tlv_filter->mo_mgmt_filter & 945 FILTER_MGMT_BEACON) ? 1 : 0); 946 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 947 MO, MGMT, 1001, 948 (htt_tlv_filter->mo_mgmt_filter & 949 FILTER_MGMT_ATIM) ? 1 : 0); 950 } 951 952 /* word 3 */ 953 msg_word++; 954 *msg_word = 0; 955 956 if (htt_tlv_filter->enable_fp) { 957 /* TYPE: MGMT */ 958 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 959 FP, MGMT, 1010, 960 (htt_tlv_filter->fp_mgmt_filter & 961 FILTER_MGMT_DISASSOC) ? 1 : 0); 962 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 963 FP, MGMT, 1011, 964 (htt_tlv_filter->fp_mgmt_filter & 965 FILTER_MGMT_AUTH) ? 1 : 0); 966 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 967 FP, MGMT, 1100, 968 (htt_tlv_filter->fp_mgmt_filter & 969 FILTER_MGMT_DEAUTH) ? 1 : 0); 970 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 971 FP, MGMT, 1101, 972 (htt_tlv_filter->fp_mgmt_filter & 973 FILTER_MGMT_ACTION) ? 1 : 0); 974 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 975 FP, MGMT, 1110, 976 (htt_tlv_filter->fp_mgmt_filter & 977 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); 978 /* reserved*/ 979 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, 980 MGMT, 1111, 981 (htt_tlv_filter->fp_mgmt_filter & 982 FILTER_MGMT_RESERVED_15) ? 
1 : 0); 983 } 984 985 if (htt_tlv_filter->enable_md) { 986 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 987 MGMT, 1010, 1); 988 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 989 MGMT, 1011, 1); 990 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 991 MGMT, 1100, 1); 992 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 993 MGMT, 1101, 1); 994 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 995 MGMT, 1110, 1); 996 } 997 998 if (htt_tlv_filter->enable_mo) { 999 /* TYPE: MGMT */ 1000 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1001 MO, MGMT, 1010, 1002 (htt_tlv_filter->mo_mgmt_filter & 1003 FILTER_MGMT_DISASSOC) ? 1 : 0); 1004 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1005 MO, MGMT, 1011, 1006 (htt_tlv_filter->mo_mgmt_filter & 1007 FILTER_MGMT_AUTH) ? 1 : 0); 1008 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1009 MO, MGMT, 1100, 1010 (htt_tlv_filter->mo_mgmt_filter & 1011 FILTER_MGMT_DEAUTH) ? 1 : 0); 1012 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1013 MO, MGMT, 1101, 1014 (htt_tlv_filter->mo_mgmt_filter & 1015 FILTER_MGMT_ACTION) ? 1 : 0); 1016 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1017 MO, MGMT, 1110, 1018 (htt_tlv_filter->mo_mgmt_filter & 1019 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); 1020 /* reserved*/ 1021 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, 1022 MGMT, 1111, 1023 (htt_tlv_filter->mo_mgmt_filter & 1024 FILTER_MGMT_RESERVED_15) ? 1 : 0); 1025 } 1026 1027 /* word 4 */ 1028 msg_word++; 1029 *msg_word = 0; 1030 1031 if (htt_tlv_filter->enable_fp) { 1032 /* TYPE: CTRL */ 1033 /* reserved */ 1034 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1035 CTRL, 0000, 1036 (htt_tlv_filter->fp_ctrl_filter & 1037 FILTER_CTRL_RESERVED_1) ? 1 : 0); 1038 /* reserved */ 1039 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1040 CTRL, 0001, 1041 (htt_tlv_filter->fp_ctrl_filter & 1042 FILTER_CTRL_RESERVED_2) ? 
1 : 0); 1043 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1044 CTRL, 0010, 1045 (htt_tlv_filter->fp_ctrl_filter & 1046 FILTER_CTRL_TRIGGER) ? 1 : 0); 1047 /* reserved */ 1048 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1049 CTRL, 0011, 1050 (htt_tlv_filter->fp_ctrl_filter & 1051 FILTER_CTRL_RESERVED_4) ? 1 : 0); 1052 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1053 CTRL, 0100, 1054 (htt_tlv_filter->fp_ctrl_filter & 1055 FILTER_CTRL_BF_REP_POLL) ? 1 : 0); 1056 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1057 CTRL, 0101, 1058 (htt_tlv_filter->fp_ctrl_filter & 1059 FILTER_CTRL_VHT_NDP) ? 1 : 0); 1060 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1061 CTRL, 0110, 1062 (htt_tlv_filter->fp_ctrl_filter & 1063 FILTER_CTRL_FRAME_EXT) ? 1 : 0); 1064 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1065 CTRL, 0111, 1066 (htt_tlv_filter->fp_ctrl_filter & 1067 FILTER_CTRL_CTRLWRAP) ? 1 : 0); 1068 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1069 CTRL, 1000, 1070 (htt_tlv_filter->fp_ctrl_filter & 1071 FILTER_CTRL_BA_REQ) ? 1 : 0); 1072 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1073 CTRL, 1001, 1074 (htt_tlv_filter->fp_ctrl_filter & 1075 FILTER_CTRL_BA) ? 
1 : 0); 1076 } 1077 1078 if (htt_tlv_filter->enable_md) { 1079 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1080 CTRL, 0000, 1); 1081 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1082 CTRL, 0001, 1); 1083 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1084 CTRL, 0010, 1); 1085 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1086 CTRL, 0011, 1); 1087 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1088 CTRL, 0100, 1); 1089 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1090 CTRL, 0101, 1); 1091 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1092 CTRL, 0110, 1); 1093 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1094 CTRL, 0111, 1); 1095 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1096 CTRL, 1000, 1); 1097 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1098 CTRL, 1001, 1); 1099 } 1100 1101 if (htt_tlv_filter->enable_mo) { 1102 /* TYPE: CTRL */ 1103 /* reserved */ 1104 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1105 CTRL, 0000, 1106 (htt_tlv_filter->mo_ctrl_filter & 1107 FILTER_CTRL_RESERVED_1) ? 1 : 0); 1108 /* reserved */ 1109 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1110 CTRL, 0001, 1111 (htt_tlv_filter->mo_ctrl_filter & 1112 FILTER_CTRL_RESERVED_2) ? 1 : 0); 1113 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1114 CTRL, 0010, 1115 (htt_tlv_filter->mo_ctrl_filter & 1116 FILTER_CTRL_TRIGGER) ? 1 : 0); 1117 /* reserved */ 1118 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1119 CTRL, 0011, 1120 (htt_tlv_filter->mo_ctrl_filter & 1121 FILTER_CTRL_RESERVED_4) ? 1 : 0); 1122 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1123 CTRL, 0100, 1124 (htt_tlv_filter->mo_ctrl_filter & 1125 FILTER_CTRL_BF_REP_POLL) ? 1 : 0); 1126 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1127 CTRL, 0101, 1128 (htt_tlv_filter->mo_ctrl_filter & 1129 FILTER_CTRL_VHT_NDP) ? 
1 : 0); 1130 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1131 CTRL, 0110, 1132 (htt_tlv_filter->mo_ctrl_filter & 1133 FILTER_CTRL_FRAME_EXT) ? 1 : 0); 1134 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1135 CTRL, 0111, 1136 (htt_tlv_filter->mo_ctrl_filter & 1137 FILTER_CTRL_CTRLWRAP) ? 1 : 0); 1138 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1139 CTRL, 1000, 1140 (htt_tlv_filter->mo_ctrl_filter & 1141 FILTER_CTRL_BA_REQ) ? 1 : 0); 1142 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1143 CTRL, 1001, 1144 (htt_tlv_filter->mo_ctrl_filter & 1145 FILTER_CTRL_BA) ? 1 : 0); 1146 } 1147 1148 /* word 5 */ 1149 msg_word++; 1150 *msg_word = 0; 1151 if (htt_tlv_filter->enable_fp) { 1152 /* TYPE: CTRL */ 1153 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1154 CTRL, 1010, 1155 (htt_tlv_filter->fp_ctrl_filter & 1156 FILTER_CTRL_PSPOLL) ? 1 : 0); 1157 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1158 CTRL, 1011, 1159 (htt_tlv_filter->fp_ctrl_filter & 1160 FILTER_CTRL_RTS) ? 1 : 0); 1161 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1162 CTRL, 1100, 1163 (htt_tlv_filter->fp_ctrl_filter & 1164 FILTER_CTRL_CTS) ? 1 : 0); 1165 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1166 CTRL, 1101, 1167 (htt_tlv_filter->fp_ctrl_filter & 1168 FILTER_CTRL_ACK) ? 1 : 0); 1169 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1170 CTRL, 1110, 1171 (htt_tlv_filter->fp_ctrl_filter & 1172 FILTER_CTRL_CFEND) ? 1 : 0); 1173 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1174 CTRL, 1111, 1175 (htt_tlv_filter->fp_ctrl_filter & 1176 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1177 /* TYPE: DATA */ 1178 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1179 DATA, MCAST, 1180 (htt_tlv_filter->fp_data_filter & 1181 FILTER_DATA_MCAST) ? 1 : 0); 1182 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1183 DATA, UCAST, 1184 (htt_tlv_filter->fp_data_filter & 1185 FILTER_DATA_UCAST) ? 
1 : 0); 1186 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1187 DATA, NULL, 1188 (htt_tlv_filter->fp_data_filter & 1189 FILTER_DATA_NULL) ? 1 : 0); 1190 } 1191 1192 if (htt_tlv_filter->enable_md) { 1193 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1194 CTRL, 1010, 1); 1195 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1196 CTRL, 1011, 1); 1197 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1198 CTRL, 1100, 1); 1199 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1200 CTRL, 1101, 1); 1201 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1202 CTRL, 1110, 1); 1203 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1204 CTRL, 1111, 1); 1205 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1206 DATA, MCAST, 1); 1207 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1208 DATA, UCAST, 1); 1209 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1210 DATA, NULL, 1); 1211 } 1212 1213 if (htt_tlv_filter->enable_mo) { 1214 /* TYPE: CTRL */ 1215 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1216 CTRL, 1010, 1217 (htt_tlv_filter->mo_ctrl_filter & 1218 FILTER_CTRL_PSPOLL) ? 1 : 0); 1219 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1220 CTRL, 1011, 1221 (htt_tlv_filter->mo_ctrl_filter & 1222 FILTER_CTRL_RTS) ? 1 : 0); 1223 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1224 CTRL, 1100, 1225 (htt_tlv_filter->mo_ctrl_filter & 1226 FILTER_CTRL_CTS) ? 1 : 0); 1227 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1228 CTRL, 1101, 1229 (htt_tlv_filter->mo_ctrl_filter & 1230 FILTER_CTRL_ACK) ? 1 : 0); 1231 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1232 CTRL, 1110, 1233 (htt_tlv_filter->mo_ctrl_filter & 1234 FILTER_CTRL_CFEND) ? 1 : 0); 1235 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1236 CTRL, 1111, 1237 (htt_tlv_filter->mo_ctrl_filter & 1238 FILTER_CTRL_CFEND_CFACK) ? 
1 : 0); 1239 /* TYPE: DATA */ 1240 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1241 DATA, MCAST, 1242 (htt_tlv_filter->mo_data_filter & 1243 FILTER_DATA_MCAST) ? 1 : 0); 1244 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1245 DATA, UCAST, 1246 (htt_tlv_filter->mo_data_filter & 1247 FILTER_DATA_UCAST) ? 1 : 0); 1248 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1249 DATA, NULL, 1250 (htt_tlv_filter->mo_data_filter & 1251 FILTER_DATA_NULL) ? 1 : 0); 1252 } 1253 1254 /* word 6 */ 1255 msg_word++; 1256 *msg_word = 0; 1257 tlv_filter = 0; 1258 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START, 1259 htt_tlv_filter->mpdu_start); 1260 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START, 1261 htt_tlv_filter->msdu_start); 1262 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET, 1263 htt_tlv_filter->packet); 1264 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END, 1265 htt_tlv_filter->msdu_end); 1266 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END, 1267 htt_tlv_filter->mpdu_end); 1268 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER, 1269 htt_tlv_filter->packet_header); 1270 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION, 1271 htt_tlv_filter->attention); 1272 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START, 1273 htt_tlv_filter->ppdu_start); 1274 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END, 1275 htt_tlv_filter->ppdu_end); 1276 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS, 1277 htt_tlv_filter->ppdu_end_user_stats); 1278 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, 1279 PPDU_END_USER_STATS_EXT, 1280 htt_tlv_filter->ppdu_end_user_stats_ext); 1281 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE, 1282 htt_tlv_filter->ppdu_end_status_done); 1283 /* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/ 1284 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED, 1285 htt_tlv_filter->header_per_msdu); 
1286 1287 HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter); 1288 1289 /* "response_required" field should be set if a HTT response message is 1290 * required after setting up the ring. 1291 */ 1292 pkt = htt_htc_pkt_alloc(soc); 1293 if (!pkt) 1294 goto fail1; 1295 1296 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 1297 1298 SET_HTC_PACKET_INFO_TX( 1299 &pkt->htc_pkt, 1300 dp_htt_h2t_send_complete_free_netbuf, 1301 qdf_nbuf_data(htt_msg), 1302 qdf_nbuf_len(htt_msg), 1303 soc->htc_endpoint, 1304 1); /* tag - not relevant here */ 1305 1306 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); 1307 DP_HTT_SEND_HTC_PKT(soc, pkt); 1308 return QDF_STATUS_SUCCESS; 1309 1310 fail1: 1311 qdf_nbuf_free(htt_msg); 1312 fail0: 1313 return QDF_STATUS_E_FAILURE; 1314 } 1315 1316 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE 1317 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats, 1318 struct dp_soc *soc, qdf_nbuf_t htt_msg) 1319 1320 { 1321 uint32_t pdev_id; 1322 uint32_t *msg_word = NULL; 1323 uint32_t msg_remain_len = 0; 1324 1325 msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); 1326 1327 /*COOKIE MSB*/ 1328 pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK; 1329 1330 /* stats message length + 16 size of HTT header*/ 1331 msg_remain_len = qdf_min(htt_stats->msg_len + 16, 1332 (uint32_t)DP_EXT_MSG_LENGTH); 1333 1334 dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc, 1335 msg_word, msg_remain_len, 1336 WDI_NO_VAL, pdev_id); 1337 1338 if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) { 1339 htt_stats->msg_len -= DP_EXT_MSG_LENGTH; 1340 } 1341 /* Need to be freed here as WDI handler will 1342 * make a copy of pkt to send data to application 1343 */ 1344 qdf_nbuf_free(htt_msg); 1345 return QDF_STATUS_SUCCESS; 1346 } 1347 #else 1348 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats, 1349 struct dp_soc *soc, qdf_nbuf_t htt_msg) 1350 { 1351 return QDF_STATUS_E_NOSUPPORT; 1352 } 1353 #endif 1354 
1355 /** 1356 * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats 1357 * @htt_stats: htt stats info 1358 * 1359 * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message 1360 * contains sub messages which are identified by a TLV header. 1361 * In this function we will process the stream of T2H messages and read all the 1362 * TLV contained in the message. 1363 * 1364 * THe following cases have been taken care of 1365 * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer 1366 * In this case the buffer will contain multiple tlvs. 1367 * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer. 1368 * Only one tlv will be contained in the HTT message and this tag 1369 * will extend onto the next buffer. 1370 * Case 3: When the buffer is the continuation of the previous message 1371 * Case 4: tlv length is 0. which will indicate the end of message 1372 * 1373 * return: void 1374 */ 1375 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats, 1376 struct dp_soc *soc) 1377 { 1378 htt_tlv_tag_t tlv_type = 0xff; 1379 qdf_nbuf_t htt_msg = NULL; 1380 uint32_t *msg_word; 1381 uint8_t *tlv_buf_head = NULL; 1382 uint8_t *tlv_buf_tail = NULL; 1383 uint32_t msg_remain_len = 0; 1384 uint32_t tlv_remain_len = 0; 1385 uint32_t *tlv_start; 1386 int cookie_val; 1387 int cookie_msb; 1388 int pdev_id; 1389 bool copy_stats = false; 1390 struct dp_pdev *pdev; 1391 1392 /* Process node in the HTT message queue */ 1393 while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg)) 1394 != NULL) { 1395 msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); 1396 cookie_val = *(msg_word + 1); 1397 htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET( 1398 *(msg_word + 1399 HTT_T2H_EXT_STATS_TLV_START_OFFSET)); 1400 1401 if (cookie_val) { 1402 if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg) 1403 == QDF_STATUS_SUCCESS) { 1404 continue; 1405 } 1406 } 1407 1408 cookie_msb = *(msg_word + 2); 1409 
pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK; 1410 pdev = soc->pdev_list[pdev_id]; 1411 1412 if (cookie_msb >> 2) { 1413 copy_stats = true; 1414 } 1415 1416 /* read 5th word */ 1417 msg_word = msg_word + 4; 1418 msg_remain_len = qdf_min(htt_stats->msg_len, 1419 (uint32_t) DP_EXT_MSG_LENGTH); 1420 /* Keep processing the node till node length is 0 */ 1421 while (msg_remain_len) { 1422 /* 1423 * if message is not a continuation of previous message 1424 * read the tlv type and tlv length 1425 */ 1426 if (!tlv_buf_head) { 1427 tlv_type = HTT_STATS_TLV_TAG_GET( 1428 *msg_word); 1429 tlv_remain_len = HTT_STATS_TLV_LENGTH_GET( 1430 *msg_word); 1431 } 1432 1433 if (tlv_remain_len == 0) { 1434 msg_remain_len = 0; 1435 1436 if (tlv_buf_head) { 1437 qdf_mem_free(tlv_buf_head); 1438 tlv_buf_head = NULL; 1439 tlv_buf_tail = NULL; 1440 } 1441 1442 goto error; 1443 } 1444 1445 if (!tlv_buf_head) 1446 tlv_remain_len += HTT_TLV_HDR_LEN; 1447 1448 if ((tlv_remain_len <= msg_remain_len)) { 1449 /* Case 3 */ 1450 if (tlv_buf_head) { 1451 qdf_mem_copy(tlv_buf_tail, 1452 (uint8_t *)msg_word, 1453 tlv_remain_len); 1454 tlv_start = (uint32_t *)tlv_buf_head; 1455 } else { 1456 /* Case 1 */ 1457 tlv_start = msg_word; 1458 } 1459 1460 if (copy_stats) 1461 dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start); 1462 else 1463 dp_htt_stats_print_tag(tlv_type, tlv_start); 1464 1465 msg_remain_len -= tlv_remain_len; 1466 1467 msg_word = (uint32_t *) 1468 (((uint8_t *)msg_word) + 1469 tlv_remain_len); 1470 1471 tlv_remain_len = 0; 1472 1473 if (tlv_buf_head) { 1474 qdf_mem_free(tlv_buf_head); 1475 tlv_buf_head = NULL; 1476 tlv_buf_tail = NULL; 1477 } 1478 1479 } else { /* tlv_remain_len > msg_remain_len */ 1480 /* Case 2 & 3 */ 1481 if (!tlv_buf_head) { 1482 tlv_buf_head = qdf_mem_malloc( 1483 tlv_remain_len); 1484 1485 if (!tlv_buf_head) { 1486 QDF_TRACE(QDF_MODULE_ID_TXRX, 1487 QDF_TRACE_LEVEL_ERROR, 1488 "Alloc failed"); 1489 goto error; 1490 } 1491 1492 tlv_buf_tail = tlv_buf_head; 1493 } 1494 1495 
qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word, 1496 msg_remain_len); 1497 tlv_remain_len -= msg_remain_len; 1498 tlv_buf_tail += msg_remain_len; 1499 } 1500 } 1501 1502 if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) { 1503 htt_stats->msg_len -= DP_EXT_MSG_LENGTH; 1504 } 1505 1506 qdf_nbuf_free(htt_msg); 1507 } 1508 return; 1509 1510 error: 1511 qdf_nbuf_free(htt_msg); 1512 while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg)) 1513 != NULL) 1514 qdf_nbuf_free(htt_msg); 1515 } 1516 1517 void htt_t2h_stats_handler(void *context) 1518 { 1519 struct dp_soc *soc = (struct dp_soc *)context; 1520 struct htt_stats_context htt_stats; 1521 uint32_t *msg_word; 1522 qdf_nbuf_t htt_msg = NULL; 1523 uint8_t done; 1524 uint8_t rem_stats; 1525 1526 if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) { 1527 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1528 "soc: 0x%pK, init_done: %d", soc, 1529 qdf_atomic_read(&soc->cmn_init_done)); 1530 return; 1531 } 1532 1533 qdf_mem_zero(&htt_stats, sizeof(htt_stats)); 1534 qdf_nbuf_queue_init(&htt_stats.msg); 1535 1536 /* pull one completed stats from soc->htt_stats_msg and process */ 1537 qdf_spin_lock_bh(&soc->htt_stats.lock); 1538 if (!soc->htt_stats.num_stats) { 1539 qdf_spin_unlock_bh(&soc->htt_stats.lock); 1540 return; 1541 } 1542 while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) { 1543 msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); 1544 msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET; 1545 done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); 1546 qdf_nbuf_queue_add(&htt_stats.msg, htt_msg); 1547 /* 1548 * Done bit signifies that this is the last T2H buffer in the 1549 * stream of HTT EXT STATS message 1550 */ 1551 if (done) 1552 break; 1553 } 1554 rem_stats = --soc->htt_stats.num_stats; 1555 qdf_spin_unlock_bh(&soc->htt_stats.lock); 1556 1557 dp_process_htt_stat_msg(&htt_stats, soc); 1558 /* If there are more stats to process, schedule stats work again */ 1559 if (rem_stats) 1560 
qdf_sched_work(0, &soc->htt_stats.work); 1561 } 1562 1563 /* 1564 * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU, 1565 * if a new peer id arrives in a PPDU 1566 * pdev: DP pdev handle 1567 * @peer_id : peer unique identifier 1568 * @ppdu_info: per ppdu tlv structure 1569 * 1570 * return:user index to be populated 1571 */ 1572 #ifdef FEATURE_PERPKT_INFO 1573 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev, 1574 uint16_t peer_id, 1575 struct ppdu_info *ppdu_info) 1576 { 1577 uint8_t user_index = 0; 1578 struct cdp_tx_completion_ppdu *ppdu_desc; 1579 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 1580 1581 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 1582 1583 while ((user_index + 1) <= ppdu_info->last_user) { 1584 ppdu_user_desc = &ppdu_desc->user[user_index]; 1585 if (ppdu_user_desc->peer_id != peer_id) { 1586 user_index++; 1587 continue; 1588 } else { 1589 /* Max users possible is 8 so user array index should 1590 * not exceed 7 1591 */ 1592 qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX); 1593 return user_index; 1594 } 1595 } 1596 1597 ppdu_info->last_user++; 1598 /* Max users possible is 8 so last user should not exceed 8 */ 1599 qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS); 1600 return ppdu_info->last_user - 1; 1601 } 1602 1603 /* 1604 * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv 1605 * pdev: DP pdev handle 1606 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv 1607 * @ppdu_info: per ppdu tlv structure 1608 * 1609 * return:void 1610 */ 1611 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev, 1612 uint32_t *tag_buf, struct ppdu_info *ppdu_info) 1613 { 1614 uint16_t frame_type; 1615 uint16_t freq; 1616 struct dp_soc *soc = NULL; 1617 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 1618 1619 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 1620 1621 tag_buf += 2; 1622 
ppdu_desc->num_users = 1623 HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf); 1624 tag_buf++; 1625 frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf); 1626 1627 if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) || 1628 (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU)) 1629 ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA; 1630 else 1631 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; 1632 1633 tag_buf += 2; 1634 ppdu_desc->tx_duration = *tag_buf; 1635 tag_buf += 3; 1636 ppdu_desc->ppdu_start_timestamp = *tag_buf; 1637 1638 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + 1639 ppdu_desc->tx_duration; 1640 /* Ack time stamp is same as end time stamp*/ 1641 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; 1642 1643 tag_buf++; 1644 1645 freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf); 1646 if (freq != ppdu_desc->channel) { 1647 soc = pdev->soc; 1648 ppdu_desc->channel = freq; 1649 if (soc && soc->cdp_soc.ol_ops->freq_to_channel) 1650 pdev->operating_channel = 1651 soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq); 1652 } 1653 1654 ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf); 1655 } 1656 1657 /* 1658 * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common 1659 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv 1660 * @ppdu_info: per ppdu tlv structure 1661 * 1662 * return:void 1663 */ 1664 static void dp_process_ppdu_stats_user_common_tlv( 1665 struct dp_pdev *pdev, uint32_t *tag_buf, 1666 struct ppdu_info *ppdu_info) 1667 { 1668 uint16_t peer_id; 1669 struct dp_peer *peer; 1670 struct cdp_tx_completion_ppdu *ppdu_desc; 1671 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 1672 uint8_t curr_user_index = 0; 1673 1674 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 1675 1676 tag_buf++; 1677 peer_id = HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); 1678 peer = dp_peer_find_by_id(pdev->soc, peer_id); 1679 1680 if (!peer) 1681 
return; 1682 1683 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 1684 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 1685 1686 ppdu_user_desc->peer_id = peer_id; 1687 1688 tag_buf++; 1689 1690 if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) { 1691 ppdu_user_desc->is_mcast = true; 1692 ppdu_user_desc->mpdu_tried_mcast = 1693 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); 1694 ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast; 1695 } else { 1696 ppdu_user_desc->mpdu_tried_ucast = 1697 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); 1698 } 1699 1700 tag_buf++; 1701 1702 ppdu_user_desc->qos_ctrl = 1703 HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf); 1704 ppdu_user_desc->frame_ctrl = 1705 HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf); 1706 ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl; 1707 } 1708 1709 1710 /** 1711 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv 1712 * @pdev: DP pdev handle 1713 * @tag_buf: T2H message buffer carrying the user rate TLV 1714 * @ppdu_info: per ppdu tlv structure 1715 * 1716 * return:void 1717 */ 1718 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev, 1719 uint32_t *tag_buf, 1720 struct ppdu_info *ppdu_info) 1721 { 1722 uint16_t peer_id; 1723 struct dp_peer *peer; 1724 struct cdp_tx_completion_ppdu *ppdu_desc; 1725 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 1726 uint8_t curr_user_index = 0; 1727 1728 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 1729 1730 tag_buf++; 1731 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); 1732 peer = dp_peer_find_by_id(pdev->soc, peer_id); 1733 1734 if (!peer) 1735 return; 1736 1737 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 1738 1739 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 1740 ppdu_user_desc->peer_id = peer_id; 1741 1742 ppdu_user_desc->tid = 1743 
HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf); 1744 1745 qdf_mem_copy(ppdu_user_desc->mac_addr, peer->mac_addr.raw, 1746 DP_MAC_ADDR_LEN); 1747 1748 tag_buf += 2; 1749 1750 ppdu_user_desc->ru_tones = (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) - 1751 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1; 1752 1753 tag_buf += 2; 1754 1755 ppdu_user_desc->ppdu_type = 1756 HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf); 1757 1758 tag_buf++; 1759 ppdu_user_desc->tx_rate = *tag_buf; 1760 1761 ppdu_user_desc->ltf_size = 1762 HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf); 1763 ppdu_user_desc->stbc = 1764 HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf); 1765 ppdu_user_desc->he_re = 1766 HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf); 1767 ppdu_user_desc->txbf = 1768 HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf); 1769 ppdu_user_desc->bw = 1770 HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2; 1771 ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf); 1772 ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf); 1773 ppdu_user_desc->preamble = 1774 HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf); 1775 ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf); 1776 ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf); 1777 ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf); 1778 } 1779 1780 /* 1781 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process 1782 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv 1783 * pdev: DP PDEV handle 1784 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv 1785 * @ppdu_info: per ppdu tlv structure 1786 * 1787 * return:void 1788 */ 1789 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( 1790 struct dp_pdev *pdev, uint32_t *tag_buf, 1791 struct ppdu_info *ppdu_info) 1792 { 1793 htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf = 1794 (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf; 1795 1796 struct 
cdp_tx_completion_ppdu *ppdu_desc; 1797 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 1798 uint8_t curr_user_index = 0; 1799 uint16_t peer_id; 1800 struct dp_peer *peer; 1801 1802 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 1803 1804 tag_buf++; 1805 1806 peer_id = 1807 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 1808 1809 peer = dp_peer_find_by_id(pdev->soc, peer_id); 1810 1811 if (!peer) 1812 return; 1813 1814 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 1815 1816 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 1817 ppdu_user_desc->peer_id = peer_id; 1818 1819 ppdu_user_desc->start_seq = dp_stats_buf->start_seq; 1820 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, 1821 CDP_BA_64_BIT_MAP_SIZE_DWORDS); 1822 } 1823 1824 /* 1825 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process 1826 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv 1827 * soc: DP SOC handle 1828 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv 1829 * @ppdu_info: per ppdu tlv structure 1830 * 1831 * return:void 1832 */ 1833 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( 1834 struct dp_pdev *pdev, uint32_t *tag_buf, 1835 struct ppdu_info *ppdu_info) 1836 { 1837 htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf = 1838 (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf; 1839 1840 struct cdp_tx_completion_ppdu *ppdu_desc; 1841 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 1842 uint8_t curr_user_index = 0; 1843 uint16_t peer_id; 1844 struct dp_peer *peer; 1845 1846 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 1847 1848 tag_buf++; 1849 1850 peer_id = 1851 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 1852 1853 peer = dp_peer_find_by_id(pdev->soc, peer_id); 1854 1855 if (!peer) 1856 return; 1857 1858 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 1859 1860 ppdu_user_desc = 
&ppdu_desc->user[curr_user_index]; 1861 ppdu_user_desc->peer_id = peer_id; 1862 1863 ppdu_user_desc->start_seq = dp_stats_buf->start_seq; 1864 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, 1865 CDP_BA_256_BIT_MAP_SIZE_DWORDS); 1866 } 1867 1868 /* 1869 * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process 1870 * htt_ppdu_stats_user_cmpltn_common_tlv 1871 * soc: DP SOC handle 1872 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv 1873 * @ppdu_info: per ppdu tlv structure 1874 * 1875 * return:void 1876 */ 1877 static void dp_process_ppdu_stats_user_cmpltn_common_tlv( 1878 struct dp_pdev *pdev, uint32_t *tag_buf, 1879 struct ppdu_info *ppdu_info) 1880 { 1881 uint16_t peer_id; 1882 struct dp_peer *peer; 1883 struct cdp_tx_completion_ppdu *ppdu_desc; 1884 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 1885 uint8_t curr_user_index = 0; 1886 htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf = 1887 (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf; 1888 1889 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 1890 1891 tag_buf++; 1892 peer_id = 1893 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); 1894 peer = dp_peer_find_by_id(pdev->soc, peer_id); 1895 1896 if (!peer) 1897 return; 1898 1899 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 1900 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 1901 ppdu_user_desc->peer_id = peer_id; 1902 1903 ppdu_user_desc->completion_status = 1904 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET( 1905 *tag_buf); 1906 1907 ppdu_user_desc->tid = 1908 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf); 1909 1910 1911 tag_buf++; 1912 ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi; 1913 1914 tag_buf++; 1915 1916 ppdu_user_desc->mpdu_success = 1917 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf); 1918 1919 tag_buf++; 1920 1921 ppdu_user_desc->long_retries = 1922 
HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf); 1923 1924 ppdu_user_desc->short_retries = 1925 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf); 1926 ppdu_user_desc->retry_msdus = 1927 ppdu_user_desc->long_retries + ppdu_user_desc->short_retries; 1928 1929 ppdu_user_desc->is_ampdu = 1930 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf); 1931 ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu; 1932 1933 } 1934 1935 /* 1936 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process 1937 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv 1938 * pdev: DP PDEV handle 1939 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv 1940 * @ppdu_info: per ppdu tlv structure 1941 * 1942 * return:void 1943 */ 1944 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( 1945 struct dp_pdev *pdev, uint32_t *tag_buf, 1946 struct ppdu_info *ppdu_info) 1947 { 1948 htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf = 1949 (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf; 1950 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 1951 struct cdp_tx_completion_ppdu *ppdu_desc; 1952 uint8_t curr_user_index = 0; 1953 uint16_t peer_id; 1954 struct dp_peer *peer; 1955 1956 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 1957 1958 tag_buf++; 1959 1960 peer_id = 1961 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 1962 1963 peer = dp_peer_find_by_id(pdev->soc, peer_id); 1964 1965 if (!peer) 1966 return; 1967 1968 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 1969 1970 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 1971 ppdu_user_desc->peer_id = peer_id; 1972 1973 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; 1974 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, 1975 CDP_BA_64_BIT_MAP_SIZE_DWORDS); 1976 } 1977 1978 /* 1979 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process 1980 * 
htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv 1981 * pdev: DP PDEV handle 1982 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv 1983 * @ppdu_info: per ppdu tlv structure 1984 * 1985 * return:void 1986 */ 1987 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( 1988 struct dp_pdev *pdev, uint32_t *tag_buf, 1989 struct ppdu_info *ppdu_info) 1990 { 1991 htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf = 1992 (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf; 1993 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 1994 struct cdp_tx_completion_ppdu *ppdu_desc; 1995 uint8_t curr_user_index = 0; 1996 uint16_t peer_id; 1997 struct dp_peer *peer; 1998 1999 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2000 2001 tag_buf++; 2002 2003 peer_id = 2004 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 2005 2006 peer = dp_peer_find_by_id(pdev->soc, peer_id); 2007 2008 if (!peer) 2009 return; 2010 2011 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 2012 2013 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 2014 ppdu_user_desc->peer_id = peer_id; 2015 2016 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; 2017 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, 2018 CDP_BA_256_BIT_MAP_SIZE_DWORDS); 2019 } 2020 2021 /* 2022 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process 2023 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv 2024 * pdev: DP PDE handle 2025 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv 2026 * @ppdu_info: per ppdu tlv structure 2027 * 2028 * return:void 2029 */ 2030 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( 2031 struct dp_pdev *pdev, uint32_t *tag_buf, 2032 struct ppdu_info *ppdu_info) 2033 { 2034 uint16_t peer_id; 2035 struct dp_peer *peer; 2036 struct cdp_tx_completion_ppdu *ppdu_desc; 2037 struct cdp_tx_completion_ppdu_user 
*ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* sw_peer_id is two words past the TLV header */
	tag_buf += 2;
	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);


	/* Drop the TLV silently if the peer is unknown (e.g. just deleted) */
	peer = dp_peer_find_by_id(pdev->soc, peer_id);

	if (!peer)
		return;

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;

	/* next word packs tid, num_mpdu and num_msdu bit-fields together */
	tag_buf++;
	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
	ppdu_user_desc->num_mpdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

	ppdu_user_desc->num_msdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

	/* every MSDU covered by the ACK/BA status is treated as successful */
	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

	/* success_bytes occupies a full 32-bit word two words further on */
	tag_buf += 2;
	ppdu_user_desc->success_bytes = *tag_buf;

}

/*
 * dp_process_ppdu_stats_user_common_array_tlv: Process
 * htt_ppdu_stats_user_common_array_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_common_array_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_common_array_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint32_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct htt_tx_ppdu_stats_info *dp_stats_buf;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* array item starts right after the TLV header word */
	tag_buf++;
	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
	/* peer id field sits three words into the array item */
	tag_buf += 3;
	peer_id =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);

	peer =
dp_peer_find_by_id(pdev->soc, peer_id); 2102 2103 if (!peer) { 2104 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2105 "Invalid peer"); 2106 return; 2107 } 2108 2109 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 2110 2111 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 2112 2113 ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes; 2114 ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes; 2115 2116 tag_buf++; 2117 2118 ppdu_user_desc->success_msdus = 2119 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf); 2120 ppdu_user_desc->retry_bytes = 2121 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf); 2122 tag_buf++; 2123 ppdu_user_desc->failed_msdus = 2124 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf); 2125 } 2126 2127 /* 2128 * dp_process_ppdu_stats_flush_tlv: Process 2129 * htt_ppdu_stats_flush_tlv 2130 * @pdev: DP PDEV handle 2131 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv 2132 * 2133 * return:void 2134 */ 2135 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev, 2136 uint32_t *tag_buf) 2137 { 2138 uint32_t peer_id; 2139 uint32_t drop_reason; 2140 uint8_t tid; 2141 uint32_t num_msdu; 2142 struct dp_peer *peer; 2143 2144 tag_buf++; 2145 drop_reason = *tag_buf; 2146 2147 tag_buf++; 2148 num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf); 2149 2150 tag_buf++; 2151 peer_id = 2152 HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf); 2153 2154 peer = dp_peer_find_by_id(pdev->soc, peer_id); 2155 if (!peer) 2156 return; 2157 2158 tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf); 2159 2160 if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) { 2161 DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)], 2162 num_msdu); 2163 } 2164 } 2165 2166 /* 2167 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process 2168 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv 2169 * @pdev: DP PDEV handle 2170 * @tag_buf: buffer containing the 
htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @length: tlv_length
 *
 * return:QDF_STATUS_SUCCESS if nbuf has to be freed in caller
 */
static QDF_STATUS
dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
					qdf_nbuf_t tag_buf,
					uint32_t ppdu_id)
{
	uint32_t *nbuf_ptr;
	uint8_t trim_size;

	/* Nothing consumes this payload unless one of these modes is on */
	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
			(!pdev->bpr_enable))
		return QDF_STATUS_SUCCESS;

	/*
	 * Strip everything ahead of the mgmt frame: mgmt_buf (recorded while
	 * walking the TLVs) plus the reserved TLV header area, measured
	 * relative to the start of the nbuf data.
	 */
	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
			HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
			qdf_nbuf_data(tag_buf));

	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
		return QDF_STATUS_SUCCESS;

	/* Drop trailing bytes beyond the recorded mgmt frame length */
	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
			pdev->mgmtctrl_frm_info.mgmt_buf_len);

	/* Prepend the ppdu_id so upper layers can correlate the frame.
	 * NOTE(review): push_head return is not NULL-checked before the
	 * write below — confirm headroom is always guaranteed here. */
	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
			tag_buf, sizeof(ppdu_id));
	*nbuf_ptr = ppdu_id;

	if (pdev->bpr_enable) {
		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
				tag_buf, HTT_INVALID_PEER,
				WDI_NO_VAL, pdev->pdev_id);
	}
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
				tag_buf, HTT_INVALID_PEER,
				WDI_NO_VAL, pdev->pdev_id);
	}

	/* E_ALREADY tells the caller the nbuf was handed off, do not free */
	return QDF_STATUS_E_ALREADY;
}

/**
 * dp_process_ppdu_tag(): Function to process the PPDU TLVs
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_len: length of tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Dispatches one TLV to its type-specific parser after asserting that the
 * firmware-reported length matches the host structure size.
 *
 * return: void
 */
static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
		uint32_t tlv_len, struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_common_tlv));
		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
		break;
	case
HTT_PPDU_STATS_USR_COMMON_TLV:
		/* every case: length-check against the C struct, then parse */
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_common_tlv));
		dp_process_ppdu_stats_user_common_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_rate_tlv));
		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_flush_tlv));
		dp_process_ppdu_stats_user_compltn_flush_tlv(
				pdev, tag_buf);
		break;
	default:
		/* unknown/unhandled TLV types are ignored */
		break;
	}
}

/**
 * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
 * to upper layer
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Finalizes aggregate counters, updates per-peer TX stats for data TIDs,
 * unlinks the node from the pdev list and hands the descriptor nbuf to the
 * WDI layer (or frees it).
 *
 * return: void
 */
static
void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
			struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	qdf_nbuf_t nbuf;
	uint16_t i;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	ppdu_desc->num_users = ppdu_info->last_user;
	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	for (i = 0; i < ppdu_desc->num_users; i++) {


		/* roll per-user counts into the PPDU-level totals */
		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		/* peer TX stats are only meaningful for data TIDs */
		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
			peer = dp_peer_find_by_id(pdev->soc,
					ppdu_desc->user[i].peer_id);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			dp_tx_stats_update(pdev->soc, peer,
					&ppdu_desc->user[i],
					ppdu_desc->ack_rssi);
		}
	}

	/*
	 * Remove from the list; the descriptor nbuf outlives the ppdu_info
	 * wrapper, which is freed here.
	 */
	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	nbuf = ppdu_info->nbuf;
	pdev->list_depth--;
	qdf_mem_free(ppdu_info);

	qdf_assert_always(nbuf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(nbuf);

	/**
	 * Deliver PPDU stats only for valid (acked) data frames if
	 * sniffer mode is not enabled.
	 * If sniffer mode is enabled, PPDU stats for all frames
	 * including mgmt/control frames should be delivered to upper layer
	 */
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
				nbuf, HTT_INVALID_PEER,
				WDI_NO_VAL, pdev->pdev_id);
	} else {
		/* deliver only non-empty PPDUs carrying data frames */
		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {

			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
					pdev->soc, nbuf, HTT_INVALID_PEER,
					WDI_NO_VAL, pdev->pdev_id);
		} else
			qdf_nbuf_free(nbuf);
	}
	return;
}

/**
 * dp_get_ppdu_desc(): Function to allocate new PPDU status
 * desc for new ppdu id
 * @pdev: DP pdev handle
 * @ppdu_id: PPDU unique identifier
 * @tlv_type: TLV type received
 *
 * Looks up (or allocates) the ppdu_info node tracking @ppdu_id, flushing
 * stale descriptors when the id is reused or the list is too deep.
 *
 * return: ppdu_info per ppdu tlv structure
 */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
		uint8_t tlv_type)
{
	struct ppdu_info *ppdu_info = NULL;

	/*
	 * Find ppdu_id node exists or not
	 * (TAILQ_FOREACH leaves ppdu_info NULL when no match is found)
	 */
	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {

		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			break;
		}
	}

	if (ppdu_info) {
		/**
		 * if we get tlv_type that is already been processed for ppdu,
		 * that means we got a new ppdu with same ppdu id.
		 * Hence Flush the older ppdu
		 */
		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
			dp_ppdu_desc_deliver(pdev, ppdu_info);
		else
			return ppdu_info;
	}

	/**
	 * Flush the head ppdu descriptor if ppdu desc list reaches max
	 * threshold
	 */
	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
		dp_ppdu_desc_deliver(pdev, ppdu_info);
	}

	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	/* the nbuf backs the cdp_tx_completion_ppdu descriptor itself */
	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
			TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
			sizeof(struct cdp_tx_completion_ppdu));

	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/**
	 * No lock is needed because all PPDU TLVs are processed in
	 * same context and this list is updated in same context
	 */
	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
			ppdu_info_list_elem);
	pdev->list_depth++;
	return ppdu_info;
}

/**
 * dp_htt_process_tlv(): Function to process each PPDU TLVs
 * @pdev: DP pdev handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Walks every TLV in a PPDU_STATS_IND payload, dispatching each to
 * dp_process_ppdu_tag() and recording the MGMT payload TLV separately.
 *
 * return: ppdu_info when all expected TLVs for a PPDU arrived, else NULL
 */

static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
		qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t
*tlv_buf;
	struct ppdu_info *ppdu_info = NULL;

	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);

	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);


	/* skip the remaining indication header words to reach the TLVs */
	msg_word = msg_word + 3;
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		/* zero-length TLV terminates the walk */
		if (tlv_length == 0)
			break;

		/* NOTE(review): length is unsigned; a tlv_length larger than
		 * the remaining length would wrap — assumes FW-reported
		 * lengths are self-consistent. */
		tlv_length += HTT_TLV_HDR_LEN;

		/**
		 * Not allocating separate ppdu descriptor for MGMT Payload
		 * TLV as this is sent as separate WDI indication and it
		 * doesn't contain any ppdu information
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
		if (!ppdu_info)
			return NULL;
		ppdu_info->ppdu_id = ppdu_id;
		/* remember which TLV types were seen for this PPDU */
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/**
		 * Increment pdev level tlv count to monitor
		 * missing TLVs
		 */
		pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	if (!ppdu_info)
		return NULL;

	pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	/* sniffer/mcopy AMPDUs carry additional bitmap TLVs */
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
	}

	/**
	 * Once all the TLVs for a given PPDU has been processed,
	 * return PPDU status to be delivered to higher layer
	 */
	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
		return ppdu_info;

	return NULL;
}
#endif /* FEATURE_PERPKT_INFO */

/**
 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
 * @soc: DP SOC handle
 * @pdev_id: pdev id
 * @htt_t2h_msg: HTT message nbuf
 *
 * return: true if the caller should free @htt_t2h_msg, false if it was
 * handed off (e.g. as a WDI mgmt-ctrl indication)
 */
#if defined(WDI_EVENT_ENABLE)
#ifdef FEATURE_PERPKT_INFO
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
		uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
	struct ppdu_info *ppdu_info = NULL;
	bool free_buf = true;

	if (!pdev)
		return true;

	/* nothing to do unless some stats consumer is enabled */
	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
	    !pdev->mcopy_mode)
		return free_buf;

	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);

	/* a MGMT payload TLV was recorded during the walk: deliver it and
	 * reset the bookkeeping either way */
	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
		    QDF_STATUS_SUCCESS)
			free_buf = false;

		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
		pdev->mgmtctrl_frm_info.ppdu_id = 0;
	}

	/* non-NULL means all expected TLVs for this PPDU have arrived */
	if (ppdu_info)
		dp_ppdu_desc_deliver(pdev, ppdu_info);

	return free_buf;
}
#else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
		uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
#endif
#endif

/**
 * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
 * @soc: DP SOC handle
 * @htt_t2h_msg: HTT message nbuf
 *
 * return:void
 */
static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
		qdf_nbuf_t htt_t2h_msg)
{
2621 uint8_t done; 2622 qdf_nbuf_t msg_copy; 2623 uint32_t *msg_word; 2624 2625 msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); 2626 msg_word = msg_word + 3; 2627 done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); 2628 2629 /* 2630 * HTT EXT stats response comes as stream of TLVs which span over 2631 * multiple T2H messages. 2632 * The first message will carry length of the response. 2633 * For rest of the messages length will be zero. 2634 * 2635 * Clone the T2H message buffer and store it in a list to process 2636 * it later. 2637 * 2638 * The original T2H message buffers gets freed in the T2H HTT event 2639 * handler 2640 */ 2641 msg_copy = qdf_nbuf_clone(htt_t2h_msg); 2642 2643 if (!msg_copy) { 2644 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 2645 "T2H messge clone failed for HTT EXT STATS"); 2646 goto error; 2647 } 2648 2649 qdf_spin_lock_bh(&soc->htt_stats.lock); 2650 qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy); 2651 /* 2652 * Done bit signifies that this is the last T2H buffer in the stream of 2653 * HTT EXT STATS message 2654 */ 2655 if (done) { 2656 soc->htt_stats.num_stats++; 2657 qdf_sched_work(0, &soc->htt_stats.work); 2658 } 2659 qdf_spin_unlock_bh(&soc->htt_stats.lock); 2660 2661 return; 2662 2663 error: 2664 qdf_spin_lock_bh(&soc->htt_stats.lock); 2665 while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) 2666 != NULL) { 2667 qdf_nbuf_free(msg_copy); 2668 } 2669 soc->htt_stats.num_stats = 0; 2670 qdf_spin_unlock_bh(&soc->htt_stats.lock); 2671 return; 2672 2673 } 2674 2675 /* 2676 * htt_soc_attach_target() - SOC level HTT setup 2677 * @htt_soc: HTT SOC handle 2678 * 2679 * Return: 0 on success; error code on failure 2680 */ 2681 int htt_soc_attach_target(void *htt_soc) 2682 { 2683 struct htt_soc *soc = (struct htt_soc *)htt_soc; 2684 2685 return htt_h2t_ver_req_msg(soc); 2686 } 2687 2688 2689 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) 2690 /* 2691 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler 2692 
* @htt_soc: HTT SOC handle 2693 * @msg_word: Pointer to payload 2694 * @htt_t2h_msg: HTT msg nbuf 2695 * 2696 * Return: True if buffer should be freed by caller. 2697 */ 2698 static bool 2699 dp_ppdu_stats_ind_handler(struct htt_soc *soc, 2700 uint32_t *msg_word, 2701 qdf_nbuf_t htt_t2h_msg) 2702 { 2703 u_int8_t pdev_id; 2704 bool free_buf; 2705 qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE); 2706 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 2707 "received HTT_T2H_MSG_TYPE_PPDU_STATS_IND\n"); 2708 pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); 2709 pdev_id = DP_HW2SW_MACID(pdev_id); 2710 free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id, 2711 htt_t2h_msg); 2712 dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc, 2713 htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL, 2714 pdev_id); 2715 return free_buf; 2716 } 2717 #else 2718 static bool 2719 dp_ppdu_stats_ind_handler(struct htt_soc *soc, 2720 uint32_t *msg_word, 2721 qdf_nbuf_t htt_t2h_msg) 2722 { 2723 return true; 2724 } 2725 #endif 2726 2727 #if defined(WDI_EVENT_ENABLE) && \ 2728 !defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN) 2729 /* 2730 * dp_pktlog_msg_handler() - Pktlog msg handler 2731 * @htt_soc: HTT SOC handle 2732 * @msg_word: Pointer to payload 2733 * 2734 * Return: None 2735 */ 2736 static void 2737 dp_pktlog_msg_handler(struct htt_soc *soc, 2738 uint32_t *msg_word) 2739 { 2740 uint8_t pdev_id; 2741 uint32_t *pl_hdr; 2742 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 2743 "received HTT_T2H_MSG_TYPE_PKTLOG\n"); 2744 pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word); 2745 pdev_id = DP_HW2SW_MACID(pdev_id); 2746 pl_hdr = (msg_word + 1); 2747 dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc, 2748 pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL, 2749 pdev_id); 2750 } 2751 #else 2752 static void 2753 dp_pktlog_msg_handler(struct htt_soc *soc, 2754 uint32_t *msg_word) 2755 { 2756 } 2757 #endif 2758 2759 /* 2760 * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler 2761 
* @context: Opaque context (HTT SOC handle) 2762 * @pkt: HTC packet 2763 */ 2764 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt) 2765 { 2766 struct htt_soc *soc = (struct htt_soc *) context; 2767 qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext; 2768 u_int32_t *msg_word; 2769 enum htt_t2h_msg_type msg_type; 2770 bool free_buf = true; 2771 2772 /* check for successful message reception */ 2773 if (pkt->Status != QDF_STATUS_SUCCESS) { 2774 if (pkt->Status != QDF_STATUS_E_CANCELED) 2775 soc->stats.htc_err_cnt++; 2776 2777 qdf_nbuf_free(htt_t2h_msg); 2778 return; 2779 } 2780 2781 /* TODO: Check if we should pop the HTC/HTT header alignment padding */ 2782 2783 msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg); 2784 msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word); 2785 switch (msg_type) { 2786 case HTT_T2H_MSG_TYPE_PEER_MAP: 2787 { 2788 u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN]; 2789 u_int8_t *peer_mac_addr; 2790 u_int16_t peer_id; 2791 u_int16_t hw_peer_id; 2792 u_int8_t vdev_id; 2793 2794 peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word); 2795 hw_peer_id = 2796 HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2)); 2797 vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word); 2798 peer_mac_addr = htt_t2h_mac_addr_deswizzle( 2799 (u_int8_t *) (msg_word+1), 2800 &mac_addr_deswizzle_buf[0]); 2801 QDF_TRACE(QDF_MODULE_ID_TXRX, 2802 QDF_TRACE_LEVEL_INFO, 2803 "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n", 2804 peer_id, vdev_id); 2805 2806 dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id, 2807 vdev_id, peer_mac_addr); 2808 break; 2809 } 2810 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 2811 { 2812 u_int16_t peer_id; 2813 peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word); 2814 2815 dp_rx_peer_unmap_handler(soc->dp_soc, peer_id); 2816 break; 2817 } 2818 case HTT_T2H_MSG_TYPE_SEC_IND: 2819 { 2820 u_int16_t peer_id; 2821 enum htt_sec_type sec_type; 2822 int is_unicast; 2823 2824 peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word); 2825 sec_type = 
HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
			/* point to the first part of the Michael key */
			msg_word++;
			dp_rx_sec_ind_handler(
				soc->dp_soc, peer_id, sec_type, is_unicast,
				msg_word, msg_word + 2);
			break;
		}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			/* handler reports whether the nbuf was consumed */
			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
							     htt_t2h_msg);
			break;
		}

	case HTT_T2H_MSG_TYPE_PKTLOG:
		{
			dp_pktlog_msg_handler(soc, msg_word);
			break;
		}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		{
			htc_pm_runtime_put(soc->htc_soc);
			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
				"target uses HTT version %d.%d; host uses %d.%d\n",
				soc->tgt_ver.major, soc->tgt_ver.minor,
				HTT_CURRENT_VERSION_MAJOR,
				HTT_CURRENT_VERSION_MINOR);
			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"*** Incompatible host/target HTT versions!\n");
			}
			/* abort if the target is incompatible with the host */
			qdf_assert(soc->tgt_ver.major ==
				HTT_CURRENT_VERSION_MAJOR);
			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_WARN,
					"*** Warning: host/target HTT versions"
					" are different, though compatible!\n");
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			uint16_t status;
			struct dp_peer *peer;

			/*
			 * Update REO Queue Desc with new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);

			/*
			 * Window size
			 * needs to be incremented by 1
			 * since fw needs to represent a value of 256
			 * using just 8 bits
			 */
			if (peer) {
				status = dp_addba_requestprocess_wifi3(peer,
						0, tid, 0, win_sz + 1, 0xffff);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO,
					FL("PeerID %d BAW %d TID %d stat %d\n"),
					peer_id, win_sz, tid, status);

			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("Peer not found peer id %d\n"),
					peer_id);
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		{
			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
			break;
		}
	default:
		break;
	};

	/* Free the indication buffer unless a handler took ownership */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}

/*
 * dp_htt_h2t_full() - Send full handler (called from HTC)
 * @context: Opaque context (HTT SOC handle)
 * @pkt: HTC packet
 *
 * HTT messages are never dropped on queue-full; HTC keeps queuing.
 *
 * Return: enum htc_send_full_action
 */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}

/*
 * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
 * @context: Opaque context (HTT SOC handle)
 * @nbuf: nbuf containing T2H message
 * @pipe_id: HIF pipe ID
 *
 * Return: QDF_STATUS
 *
 * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
 * will be used for packet log and other high-priority HTT messages.
Proper
 * HTC connection to be added later once required FW changes are available
 */
static QDF_STATUS
dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
{
	A_STATUS rc = QDF_STATUS_SUCCESS;
	HTC_PACKET htc_pkt;

	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
	/* wrap the raw nbuf in a synthetic HTC packet so the normal T2H
	 * handler can be reused for this HTC-bypass pipe */
	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
	htc_pkt.Status = QDF_STATUS_SUCCESS;
	htc_pkt.pPktContext = (void *)nbuf;
	dp_htt_t2h_msg_handler(context, &htc_pkt);

	return rc;
}

/*
 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
 * @htt_soc: HTT SOC handle
 *
 * Connects the HTT data-message service endpoint and registers the
 * high-priority T2H pipe callback.
 *
 * Return: 0 on success; error code on failure
 */
static int
htt_htc_soc_attach(struct htt_soc *soc)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	A_STATUS status;
	struct dp_soc *dpsoc = soc->dp_soc;

	qdf_mem_set(&connect, sizeof(connect), 0);
	qdf_mem_set(&response, sizeof(response), 0);

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;

	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = HTT_DATA_MSG_SVC;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	if (status != A_OK)
		return QDF_STATUS_E_FAILURE;

	soc->htc_endpoint = response.Endpoint;

	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
	/* route high-priority T2H messages directly via the HIF pipe */
	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);

	return 0; /* success */
}

/*
 * htt_soc_attach() - SOC level HTT initialization
 * @dp_soc: Opaque Data path SOC handle
 * @ctrl_psoc: Opaque ctrl SOC handle
 * @htc_soc: SOC level HTC handle
 * @hal_soc: Opaque HAL SOC handle
 * @osdev: QDF device
 *
 * Allocates and initializes the HTT SOC object, connects it to HTC and
 * pre-populates the HTC packet pool.
 *
 * Return: HTT handle on success; NULL on failure
 */
void *
htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
		void *hal_soc, qdf_device_t osdev)
{
	struct htt_soc *soc;
	int i;

	soc = qdf_mem_malloc(sizeof(*soc));

	if (!soc)
		goto fail1;

	soc->osdev = osdev;
	soc->ctrl_psoc = ctrl_psoc;
	soc->dp_soc = dp_soc;
	soc->htc_soc = htc_soc;
	soc->hal_soc = hal_soc;

	/* TODO: See if any NSS related context is required in htt_soc */

	soc->htt_htc_pkt_freelist = NULL;

	if (htt_htc_soc_attach(soc))
		goto fail2;

	/* TODO: See if any Rx data specific intialization is required.
For 3058 * MCL use cases, the data will be received as single packet and 3059 * should not required any descriptor or reorder handling 3060 */ 3061 3062 HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex); 3063 3064 /* pre-allocate some HTC_PACKET objects */ 3065 for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) { 3066 struct dp_htt_htc_pkt_union *pkt; 3067 pkt = qdf_mem_malloc(sizeof(*pkt)); 3068 if (!pkt) 3069 break; 3070 3071 htt_htc_pkt_free(soc, &pkt->u.pkt); 3072 } 3073 3074 return soc; 3075 3076 fail2: 3077 qdf_mem_free(soc); 3078 3079 fail1: 3080 return NULL; 3081 } 3082 3083 3084 /* 3085 * htt_soc_detach() - Detach SOC level HTT 3086 * @htt_soc: HTT SOC handle 3087 */ 3088 void 3089 htt_soc_detach(void *htt_soc) 3090 { 3091 struct htt_soc *soc = (struct htt_soc *)htt_soc; 3092 3093 htt_htc_misc_pkt_pool_free(soc); 3094 htt_htc_pkt_pool_free(soc); 3095 HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex); 3096 qdf_mem_free(soc); 3097 } 3098 3099 /** 3100 * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW 3101 * @pdev: DP PDEV handle 3102 * @stats_type_upload_mask: stats type requested by user 3103 * @config_param_0: extra configuration parameters 3104 * @config_param_1: extra configuration parameters 3105 * @config_param_2: extra configuration parameters 3106 * @config_param_3: extra configuration parameters 3107 * @mac_id: mac number 3108 * 3109 * return: QDF STATUS 3110 */ 3111 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, 3112 uint32_t stats_type_upload_mask, uint32_t config_param_0, 3113 uint32_t config_param_1, uint32_t config_param_2, 3114 uint32_t config_param_3, int cookie_val, int cookie_msb, 3115 uint8_t mac_id) 3116 { 3117 struct htt_soc *soc = pdev->soc->htt_handle; 3118 struct dp_htt_htc_pkt *pkt; 3119 qdf_nbuf_t msg; 3120 uint32_t *msg_word; 3121 uint8_t pdev_mask = 0; 3122 3123 msg = qdf_nbuf_alloc( 3124 soc->osdev, 3125 HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ), 3126 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 
3127 3128 if (!msg) 3129 return QDF_STATUS_E_NOMEM; 3130 3131 /*TODO:Add support for SOC stats 3132 * Bit 0: SOC Stats 3133 * Bit 1: Pdev stats for pdev id 0 3134 * Bit 2: Pdev stats for pdev id 1 3135 * Bit 3: Pdev stats for pdev id 2 3136 */ 3137 mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id); 3138 3139 pdev_mask = 1 << DP_SW2HW_MACID(mac_id); 3140 /* 3141 * Set the length of the message. 3142 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 3143 * separately during the below call to qdf_nbuf_push_head. 3144 * The contribution from the HTC header is added separately inside HTC. 3145 */ 3146 if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) { 3147 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3148 "Failed to expand head for HTT_EXT_STATS"); 3149 qdf_nbuf_free(msg); 3150 return QDF_STATUS_E_FAILURE; 3151 } 3152 3153 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3154 "-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n" 3155 "config_param_1 %u\n config_param_2 %u\n" 3156 "config_param_4 %u\n -------------\n", 3157 __func__, __LINE__, cookie_val, config_param_0, 3158 config_param_1, config_param_2, config_param_3); 3159 3160 msg_word = (uint32_t *) qdf_nbuf_data(msg); 3161 3162 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 3163 *msg_word = 0; 3164 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ); 3165 HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask); 3166 HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask); 3167 3168 /* word 1 */ 3169 msg_word++; 3170 *msg_word = 0; 3171 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0); 3172 3173 /* word 2 */ 3174 msg_word++; 3175 *msg_word = 0; 3176 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1); 3177 3178 /* word 3 */ 3179 msg_word++; 3180 *msg_word = 0; 3181 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2); 3182 3183 /* word 4 */ 3184 msg_word++; 3185 *msg_word = 0; 3186 
HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3); 3187 3188 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0); 3189 3190 /* word 5 */ 3191 msg_word++; 3192 3193 /* word 6 */ 3194 msg_word++; 3195 *msg_word = 0; 3196 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val); 3197 3198 /* word 7 */ 3199 msg_word++; 3200 *msg_word = 0; 3201 /*Using last 2 bits for pdev_id */ 3202 cookie_msb = ((cookie_msb << 2) | pdev->pdev_id); 3203 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb); 3204 3205 pkt = htt_htc_pkt_alloc(soc); 3206 if (!pkt) { 3207 qdf_nbuf_free(msg); 3208 return QDF_STATUS_E_NOMEM; 3209 } 3210 3211 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 3212 3213 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 3214 dp_htt_h2t_send_complete_free_netbuf, 3215 qdf_nbuf_data(msg), qdf_nbuf_len(msg), 3216 soc->htc_endpoint, 3217 1); /* tag - not relevant here */ 3218 3219 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 3220 DP_HTT_SEND_HTC_PKT(soc, pkt); 3221 return 0; 3222 } 3223 3224 /* This macro will revert once proper HTT header will define for 3225 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file 3226 * */ 3227 #if defined(WDI_EVENT_ENABLE) 3228 /** 3229 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW 3230 * @pdev: DP PDEV handle 3231 * @stats_type_upload_mask: stats type requested by user 3232 * @mac_id: Mac id number 3233 * 3234 * return: QDF STATUS 3235 */ 3236 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3237 uint32_t stats_type_upload_mask, uint8_t mac_id) 3238 { 3239 struct htt_soc *soc = pdev->soc->htt_handle; 3240 struct dp_htt_htc_pkt *pkt; 3241 qdf_nbuf_t msg; 3242 uint32_t *msg_word; 3243 uint8_t pdev_mask; 3244 3245 msg = qdf_nbuf_alloc( 3246 soc->osdev, 3247 HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ), 3248 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true); 3249 3250 if (!msg) { 3251 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3252 "Fail to 
allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer\n"); 3253 qdf_assert(0); 3254 return QDF_STATUS_E_NOMEM; 3255 } 3256 3257 /*TODO:Add support for SOC stats 3258 * Bit 0: SOC Stats 3259 * Bit 1: Pdev stats for pdev id 0 3260 * Bit 2: Pdev stats for pdev id 1 3261 * Bit 3: Pdev stats for pdev id 2 3262 */ 3263 pdev_mask = 1 << DP_SW2HW_MACID(mac_id); 3264 3265 /* 3266 * Set the length of the message. 3267 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 3268 * separately during the below call to qdf_nbuf_push_head. 3269 * The contribution from the HTC header is added separately inside HTC. 3270 */ 3271 if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) { 3272 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3273 "Failed to expand head for HTT_CFG_STATS\n"); 3274 qdf_nbuf_free(msg); 3275 return QDF_STATUS_E_FAILURE; 3276 } 3277 3278 msg_word = (uint32_t *) qdf_nbuf_data(msg); 3279 3280 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 3281 *msg_word = 0; 3282 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); 3283 HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask); 3284 HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word, 3285 stats_type_upload_mask); 3286 3287 pkt = htt_htc_pkt_alloc(soc); 3288 if (!pkt) { 3289 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3290 "Fail to allocate dp_htt_htc_pkt buffer\n"); 3291 qdf_assert(0); 3292 qdf_nbuf_free(msg); 3293 return QDF_STATUS_E_NOMEM; 3294 } 3295 3296 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 3297 3298 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 3299 dp_htt_h2t_send_complete_free_netbuf, 3300 qdf_nbuf_data(msg), qdf_nbuf_len(msg), 3301 soc->htc_endpoint, 3302 1); /* tag - not relevant here */ 3303 3304 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 3305 DP_HTT_SEND_HTC_PKT(soc, pkt); 3306 return 0; 3307 } 3308 #endif 3309