1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <htt.h> 21 #include <hal_hw_headers.h> 22 #include <hal_api.h> 23 #include "dp_peer.h" 24 #include "dp_types.h" 25 #include "dp_internal.h" 26 #include "dp_ipa.h" 27 #include "dp_rx.h" 28 #include "htt_stats.h" 29 #include "htt_ppdu_stats.h" 30 #include "dp_htt.h" 31 #ifdef WIFI_MONITOR_SUPPORT 32 #include <dp_mon.h> 33 #endif 34 #include "qdf_mem.h" /* qdf_mem_malloc,free */ 35 #include "cdp_txrx_cmn_struct.h" 36 #ifdef IPA_OPT_WIFI_DP 37 #include "cdp_txrx_ipa.h" 38 #endif 39 #ifdef FEATURE_PERPKT_INFO 40 #include "dp_ratetable.h" 41 #endif 42 #include <qdf_module.h> 43 #ifdef CONFIG_SAWF_DEF_QUEUES 44 #include <dp_sawf_htt.h> 45 #endif 46 47 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE 48 49 #define HTT_HTC_PKT_POOL_INIT_SIZE 64 50 51 #define HTT_MSG_BUF_SIZE(msg_bytes) \ 52 ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING) 53 54 #define HTT_PID_BIT_MASK 0x3 55 56 #define DP_EXT_MSG_LENGTH 2048 57 #define HTT_HEADER_LEN 16 58 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16 59 60 #define HTT_SHIFT_UPPER_TIMESTAMP 32 61 
#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000 62 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16 63 64 struct dp_htt_htc_pkt * 65 htt_htc_pkt_alloc(struct htt_soc *soc) 66 { 67 struct dp_htt_htc_pkt_union *pkt = NULL; 68 69 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 70 if (soc->htt_htc_pkt_freelist) { 71 pkt = soc->htt_htc_pkt_freelist; 72 soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next; 73 } 74 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 75 76 if (!pkt) 77 pkt = qdf_mem_malloc(sizeof(*pkt)); 78 79 if (!pkt) 80 return NULL; 81 82 htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0); 83 84 return &pkt->u.pkt; /* not actually a dereference */ 85 } 86 87 qdf_export_symbol(htt_htc_pkt_alloc); 88 89 void 90 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) 91 { 92 struct dp_htt_htc_pkt_union *u_pkt = 93 (struct dp_htt_htc_pkt_union *)pkt; 94 95 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 96 htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0); 97 u_pkt->u.next = soc->htt_htc_pkt_freelist; 98 soc->htt_htc_pkt_freelist = u_pkt; 99 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 100 } 101 102 qdf_export_symbol(htt_htc_pkt_free); 103 104 void 105 htt_htc_pkt_pool_free(struct htt_soc *soc) 106 { 107 struct dp_htt_htc_pkt_union *pkt, *next; 108 pkt = soc->htt_htc_pkt_freelist; 109 while (pkt) { 110 next = pkt->u.next; 111 qdf_mem_free(pkt); 112 pkt = next; 113 } 114 soc->htt_htc_pkt_freelist = NULL; 115 } 116 117 118 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST 119 120 /** 121 * htt_htc_misc_pkt_list_trim() - trim misc list 122 * @soc: HTT SOC handle 123 * @level: max no. 
of pkts in list 124 */ 125 static void 126 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level) 127 { 128 struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL; 129 int i = 0; 130 qdf_nbuf_t netbuf; 131 132 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 133 pkt = soc->htt_htc_pkt_misclist; 134 while (pkt) { 135 next = pkt->u.next; 136 /* trim the out grown list*/ 137 if (++i > level) { 138 netbuf = 139 (qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext); 140 qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE); 141 qdf_nbuf_free(netbuf); 142 qdf_mem_free(pkt); 143 pkt = NULL; 144 if (prev) 145 prev->u.next = NULL; 146 } 147 prev = pkt; 148 pkt = next; 149 } 150 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 151 } 152 153 void 154 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) 155 { 156 struct dp_htt_htc_pkt_union *u_pkt = 157 (struct dp_htt_htc_pkt_union *)pkt; 158 int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc, 159 pkt->htc_pkt.Endpoint) 160 + DP_HTT_HTC_PKT_MISCLIST_SIZE; 161 162 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 163 if (soc->htt_htc_pkt_misclist) { 164 u_pkt->u.next = soc->htt_htc_pkt_misclist; 165 soc->htt_htc_pkt_misclist = u_pkt; 166 } else { 167 soc->htt_htc_pkt_misclist = u_pkt; 168 } 169 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 170 171 /* only ce pipe size + tx_queue_depth could possibly be in use 172 * free older packets in the misclist 173 */ 174 htt_htc_misc_pkt_list_trim(soc, misclist_trim_level); 175 } 176 177 qdf_export_symbol(htt_htc_misc_pkt_list_add); 178 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */ 179 180 /** 181 * htt_htc_misc_pkt_pool_free() - free pkts in misc list 182 * @soc: HTT SOC handle 183 */ 184 static void 185 htt_htc_misc_pkt_pool_free(struct htt_soc *soc) 186 { 187 struct dp_htt_htc_pkt_union *pkt, *next; 188 qdf_nbuf_t netbuf; 189 190 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 191 pkt = soc->htt_htc_pkt_misclist; 192 193 while (pkt) { 194 next = pkt->u.next; 195 if 
(htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) != 196 HTC_PACKET_MAGIC_COOKIE) { 197 pkt = next; 198 soc->stats.skip_count++; 199 continue; 200 } 201 netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext); 202 qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE); 203 204 soc->stats.htc_pkt_free++; 205 dp_htt_info("%pK: Pkt free count %d", 206 soc->dp_soc, soc->stats.htc_pkt_free); 207 208 qdf_nbuf_free(netbuf); 209 qdf_mem_free(pkt); 210 pkt = next; 211 } 212 soc->htt_htc_pkt_misclist = NULL; 213 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 214 dp_info("HTC Packets, fail count = %d, skip count = %d", 215 soc->stats.fail_count, soc->stats.skip_count); 216 } 217 218 /** 219 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ 220 * @tgt_mac_addr: Target MAC 221 * @buffer: Output buffer 222 */ 223 static u_int8_t * 224 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer) 225 { 226 #ifdef BIG_ENDIAN_HOST 227 /* 228 * The host endianness is opposite of the target endianness. 229 * To make u_int32_t elements come out correctly, the target->host 230 * upload has swizzled the bytes in each u_int32_t element of the 231 * message. 232 * For byte-array message fields like the MAC address, this 233 * upload swizzling puts the bytes in the wrong order, and needs 234 * to be undone. 235 */ 236 buffer[0] = tgt_mac_addr[3]; 237 buffer[1] = tgt_mac_addr[2]; 238 buffer[2] = tgt_mac_addr[1]; 239 buffer[3] = tgt_mac_addr[0]; 240 buffer[4] = tgt_mac_addr[7]; 241 buffer[5] = tgt_mac_addr[6]; 242 return buffer; 243 #else 244 /* 245 * The host endianness matches the target endianness - 246 * we can use the mac addr directly from the message buffer. 
247 */ 248 return tgt_mac_addr; 249 #endif 250 } 251 252 /** 253 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer 254 * @soc: SOC handle 255 * @status: Completion status 256 * @netbuf: HTT buffer 257 */ 258 static void 259 dp_htt_h2t_send_complete_free_netbuf( 260 void *soc, A_STATUS status, qdf_nbuf_t netbuf) 261 { 262 qdf_nbuf_free(netbuf); 263 } 264 265 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST 266 void 267 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) 268 { 269 struct htt_soc *soc = (struct htt_soc *) context; 270 struct dp_htt_htc_pkt *htt_pkt; 271 qdf_nbuf_t netbuf; 272 273 htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt); 274 275 /* process (free or keep) the netbuf that held the message */ 276 netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext; 277 /* 278 * adf sendcomplete is required for windows only 279 */ 280 /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */ 281 /* free the htt_htc_pkt / HTC_PACKET object */ 282 qdf_nbuf_free(netbuf); 283 htt_htc_pkt_free(soc, htt_pkt); 284 } 285 286 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */ 287 288 void 289 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) 290 { 291 void (*send_complete_part2)( 292 void *soc, QDF_STATUS status, qdf_nbuf_t msdu); 293 struct htt_soc *soc = (struct htt_soc *) context; 294 struct dp_htt_htc_pkt *htt_pkt; 295 qdf_nbuf_t netbuf; 296 297 send_complete_part2 = htc_pkt->pPktContext; 298 299 htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt); 300 301 /* process (free or keep) the netbuf that held the message */ 302 netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext; 303 /* 304 * adf sendcomplete is required for windows only 305 */ 306 /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */ 307 if (send_complete_part2){ 308 send_complete_part2( 309 htt_pkt->soc_ctxt, htc_pkt->Status, netbuf); 310 } 311 /* free the htt_htc_pkt / HTC_PACKET object */ 312 htt_htc_pkt_free(soc, htt_pkt); 313 } 314 315 #endif /* 
ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */ 316 317 /** 318 * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata version V1 319 * @soc: HTT SOC handle 320 * @msg: Pointer to nbuf 321 * 322 * Return: 0 on success; error code on failure 323 */ 324 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc, 325 qdf_nbuf_t *msg) 326 { 327 uint32_t *msg_word; 328 329 *msg = qdf_nbuf_alloc( 330 soc->osdev, 331 HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES), 332 /* reserve room for the HTC header */ 333 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 334 if (!*msg) 335 return QDF_STATUS_E_NOMEM; 336 337 /* 338 * Set the length of the message. 339 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 340 * separately during the below call to qdf_nbuf_push_head. 341 * The contribution from the HTC header is added separately inside HTC. 342 */ 343 if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) { 344 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 345 "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg", 346 __func__); 347 return QDF_STATUS_E_FAILURE; 348 } 349 350 /* fill in the message contents */ 351 msg_word = (u_int32_t *)qdf_nbuf_data(*msg); 352 353 /* rewind beyond alignment pad to get to the HTC header reserved area */ 354 qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING); 355 356 *msg_word = 0; 357 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ); 358 359 return QDF_STATUS_SUCCESS; 360 } 361 362 #ifdef QCA_DP_TX_FW_METADATA_V2 363 /** 364 * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata version V2 365 * @soc: HTT SOC handle 366 * @msg: Pointer to nbuf 367 * 368 * Return: 0 on success; error code on failure 369 */ 370 static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc, 371 qdf_nbuf_t *msg) 372 { 373 uint32_t *msg_word; 374 375 *msg = qdf_nbuf_alloc( 376 soc->osdev, 377 HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ), 378 /* reserve room for the HTC header */ 379 HTC_HEADER_LEN + 
HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 380 if (!*msg) 381 return QDF_STATUS_E_NOMEM; 382 383 /* 384 * Set the length of the message. 385 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 386 * separately during the below call to qdf_nbuf_push_head. 387 * The contribution from the HTC header is added separately inside HTC. 388 */ 389 if (!qdf_nbuf_put_tail(*msg, 390 HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) { 391 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 392 "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg", 393 __func__); 394 return QDF_STATUS_E_FAILURE; 395 } 396 397 /* fill in the message contents */ 398 msg_word = (u_int32_t *)qdf_nbuf_data(*msg); 399 400 /* rewind beyond alignment pad to get to the HTC header reserved area */ 401 qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING); 402 403 *msg_word = 0; 404 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ); 405 406 /* word 1 */ 407 msg_word++; 408 *msg_word = 0; 409 HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER); 410 HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ); 411 HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word, 412 HTT_OPTION_TLV_TCL_METADATA_V21); 413 414 return QDF_STATUS_SUCCESS; 415 } 416 417 /** 418 * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version 419 * @soc: HTT SOC handle 420 * @msg: Pointer to nbuf 421 * 422 * Return: 0 on success; error code on failure 423 */ 424 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg) 425 { 426 /* Use tcl_metadata_v1 when NSS offload is enabled */ 427 if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) || 428 soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE) 429 return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg); 430 else 431 return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg); 432 } 433 #else 434 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg) 435 { 436 return 
dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg); 437 } 438 #endif 439 440 /** 441 * htt_h2t_ver_req_msg() - Send HTT version request message to target 442 * @soc: HTT SOC handle 443 * 444 * Return: 0 on success; error code on failure 445 */ 446 static int htt_h2t_ver_req_msg(struct htt_soc *soc) 447 { 448 struct dp_htt_htc_pkt *pkt; 449 qdf_nbuf_t msg = NULL; 450 QDF_STATUS status; 451 452 status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg); 453 if (status != QDF_STATUS_SUCCESS) 454 return status; 455 456 pkt = htt_htc_pkt_alloc(soc); 457 if (!pkt) { 458 qdf_nbuf_free(msg); 459 return QDF_STATUS_E_FAILURE; 460 } 461 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 462 463 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 464 dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg), 465 qdf_nbuf_len(msg), soc->htc_endpoint, 466 HTC_TX_PACKET_TAG_RTPM_PUT_RC); 467 468 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 469 status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ, 470 NULL); 471 472 if (status != QDF_STATUS_SUCCESS) { 473 qdf_nbuf_free(msg); 474 htt_htc_pkt_free(soc, pkt); 475 } 476 477 return status; 478 } 479 480 #ifdef IPA_OPT_WIFI_DP 481 QDF_STATUS htt_h2t_rx_cce_super_rule_setup(struct htt_soc *soc, void *param) 482 { 483 struct wifi_dp_flt_setup *flt_params = 484 (struct wifi_dp_flt_setup *)param; 485 struct dp_htt_htc_pkt *pkt; 486 qdf_nbuf_t msg; 487 uint32_t *msg_word; 488 uint8_t *htt_logger_bufp; 489 uint16_t ver = 0; 490 uint8_t i, valid = 0; 491 uint8_t num_filters = flt_params->num_filters; 492 uint8_t pdev_id = flt_params->pdev_id; 493 uint8_t op = flt_params->op; 494 uint16_t ipv4 = qdf_ntohs(QDF_NBUF_TRAC_IPV4_ETH_TYPE); 495 uint16_t ipv6 = qdf_ntohs(QDF_NBUF_TRAC_IPV6_ETH_TYPE); 496 QDF_STATUS status; 497 498 if (num_filters > RX_CCE_SUPER_RULE_SETUP_NUM) { 499 dp_htt_err("Wrong filter count %d", num_filters); 500 return QDF_STATUS_FILT_REQ_ERROR; 501 } 502 503 msg = qdf_nbuf_alloc(soc->osdev, 504 
			     HTT_MSG_BUF_SIZE(HTT_RX_CCE_SUPER_RULE_SETUP_SZ),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     true);
	if (!msg) {
		dp_htt_err("Fail to allocate SUPER_RULE_SETUP msg ");
		return QDF_STATUS_E_FAILURE;
	}

	/* reserve the message body and zero it before filling the words */
	qdf_nbuf_put_tail(msg, HTT_RX_CCE_SUPER_RULE_SETUP_SZ);
	msg_word = (uint32_t *)qdf_nbuf_data(msg);
	memset(msg_word, 0, HTT_RX_CCE_SUPER_RULE_SETUP_SZ);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	/* word 0: message type, pdev id, operation */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word,
			     HTT_H2T_MSG_TYPE_RX_CCE_SUPER_RULE_SETUP);
	HTT_RX_CCE_SUPER_RULE_SETUP_PDEV_ID_SET(*msg_word, pdev_id);
	HTT_RX_CCE_SUPER_RULE_SETUP_OPERATION_SET(*msg_word, op);

	/* Set cce_super_rule_params */
	/* Per filter entry layout (in 32-bit words): 4 words source address,
	 * 4 words destination address (IPv4 occupies only the first word of
	 * each), one word of l3/l4/valid flags, one word of L4 ports.
	 */
	for (i = 0; i < RX_CCE_SUPER_RULE_SETUP_NUM; i++) {
		valid = flt_params->flt_addr_params[i].valid;
		ver = flt_params->flt_addr_params[i].l3_type;
		msg_word++;

		if (ver == ipv4) {
			HTT_RX_CCE_SUPER_RULE_SETUP_IPV4_ADDR_ARRAY_SET(
				msg_word,
				flt_params->flt_addr_params[i].src_ipv4_addr);
		} else if (ver == ipv6) {
			HTT_RX_CCE_SUPER_RULE_SETUP_IPV6_ADDR_ARRAY_SET(
				msg_word,
				flt_params->flt_addr_params[i].src_ipv6_addr);
		} else {
			dp_htt_debug("Filter %d not in use.", i);
		}

		/* move uint32_t *msg_word by IPV6 addr size */
		msg_word += (QDF_IPV6_ADDR_SIZE / 4);

		if (ver == ipv4) {
			HTT_RX_CCE_SUPER_RULE_SETUP_IPV4_ADDR_ARRAY_SET(
				msg_word,
				flt_params->flt_addr_params[i].dst_ipv4_addr);
		} else if (ver == ipv6) {
			HTT_RX_CCE_SUPER_RULE_SETUP_IPV6_ADDR_ARRAY_SET(
				msg_word,
				flt_params->flt_addr_params[i].dst_ipv6_addr);
		} else {
			dp_htt_debug("Filter %d not in use.", i);
		}

		/* move uint32_t *msg_word by IPV6 addr size */
		msg_word += (QDF_IPV6_ADDR_SIZE / 4);
		HTT_RX_CCE_SUPER_RULE_SETUP_L3_TYPE_SET(*msg_word, ver);
		HTT_RX_CCE_SUPER_RULE_SETUP_L4_TYPE_SET(
			*msg_word,
			flt_params->flt_addr_params[i].l4_type);
		HTT_RX_CCE_SUPER_RULE_SETUP_IS_VALID_SET(*msg_word, valid);
		msg_word++;
		HTT_RX_CCE_SUPER_RULE_SETUP_L4_SRC_PORT_SET(
			*msg_word,
			flt_params->flt_addr_params[i].src_port);
		HTT_RX_CCE_SUPER_RULE_SETUP_L4_DST_PORT_SET(
			*msg_word,
			flt_params->flt_addr_params[i].dst_port);

		dp_info("opt_dp:: pdev: %u ver %u, flt_num %u, op %u",
			pdev_id, ver, i, op);
		dp_info("valid %u", valid);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */
	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_CCE_SUPER_RULE_SETUP,
				     htt_logger_bufp);

	/* on send failure both the message nbuf and the wrapper are ours */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}
	return status;
}
#endif /* IPA_OPT_WIFI_DP */

int htt_srng_setup(struct htt_soc *soc, int mac_id,
		   hal_ring_handle_t hal_ring_hdl,
		   int hal_ring_type)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr, tp_addr;
	uint32_t ring_entry_size =
		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
	int htt_ring_type, htt_ring_id;
	uint8_t *htt_logger_bufp;
	int target_pdev_id;
	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
	QDF_STATUS status;

	/* Sizes should be set in 4-byte words */
	ring_entry_size = ring_entry_size >> 2;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
		/* reserve
room for the HTC header */ 629 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 630 if (!htt_msg) { 631 dp_err("htt_msg alloc failed ring type %d", hal_ring_type); 632 goto fail0; 633 } 634 635 hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params); 636 hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl); 637 tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl); 638 639 switch (hal_ring_type) { 640 case RXDMA_BUF: 641 #ifdef QCA_HOST2FW_RXBUF_RING 642 if (srng_params.ring_id == 643 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + 644 (lmac_id * HAL_MAX_RINGS_PER_LMAC))) { 645 htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; 646 htt_ring_type = HTT_SW_TO_SW_RING; 647 #ifdef IPA_OFFLOAD 648 } else if (srng_params.ring_id == 649 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 + 650 (lmac_id * HAL_MAX_RINGS_PER_LMAC))) { 651 htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING; 652 htt_ring_type = HTT_SW_TO_SW_RING; 653 #ifdef IPA_WDI3_VLAN_SUPPORT 654 } else if (srng_params.ring_id == 655 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 + 656 (lmac_id * HAL_MAX_RINGS_PER_LMAC))) { 657 htt_ring_id = HTT_HOST3_TO_FW_RXBUF_RING; 658 htt_ring_type = HTT_SW_TO_SW_RING; 659 #endif 660 #endif 661 #else 662 if (srng_params.ring_id == 663 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + 664 (lmac_id * HAL_MAX_RINGS_PER_LMAC))) { 665 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 666 htt_ring_type = HTT_SW_TO_HW_RING; 667 #endif 668 } else if (srng_params.ring_id == 669 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF + 670 (lmac_id * HAL_MAX_RINGS_PER_LMAC))) { 671 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 672 htt_ring_type = HTT_SW_TO_HW_RING; 673 #ifdef FEATURE_DIRECT_LINK 674 } else if (srng_params.ring_id == 675 (HAL_SRNG_WMAC1_RX_DIRECT_LINK_SW_REFILL_RING + 676 (lmac_id * HAL_MAX_RINGS_PER_LMAC))) { 677 htt_ring_id = HTT_LPASS_TO_FW_RXBUF_RING; 678 htt_ring_type = HTT_SW_TO_SW_RING; 679 #endif 680 } else { 681 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 682 "%s: Ring %d currently not supported", 683 __func__, srng_params.ring_id); 684 goto fail1; 685 
} 686 687 break; 688 case RXDMA_MONITOR_BUF: 689 htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc, 690 RXDMA_MONITOR_BUF); 691 htt_ring_type = HTT_SW_TO_HW_RING; 692 break; 693 case RXDMA_MONITOR_STATUS: 694 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; 695 htt_ring_type = HTT_SW_TO_HW_RING; 696 break; 697 case RXDMA_MONITOR_DST: 698 htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc, 699 RXDMA_MONITOR_DST); 700 htt_ring_type = HTT_HW_TO_SW_RING; 701 break; 702 case RXDMA_MONITOR_DESC: 703 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 704 htt_ring_type = HTT_SW_TO_HW_RING; 705 break; 706 case RXDMA_DST: 707 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; 708 htt_ring_type = HTT_HW_TO_SW_RING; 709 break; 710 case TX_MONITOR_BUF: 711 htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING; 712 htt_ring_type = HTT_SW_TO_HW_RING; 713 break; 714 case TX_MONITOR_DST: 715 htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING; 716 htt_ring_type = HTT_HW_TO_SW_RING; 717 break; 718 case SW2RXDMA_LINK_RELEASE: 719 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 720 htt_ring_type = HTT_SW_TO_HW_RING; 721 break; 722 723 default: 724 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 725 "%s: Ring currently not supported", __func__); 726 goto fail1; 727 } 728 729 dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx", 730 hal_ring_type, srng_params.ring_id, htt_ring_id, 731 (uint64_t)hp_addr, 732 (uint64_t)tp_addr); 733 /* 734 * Set the length of the message. 735 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 736 * separately during the below call to qdf_nbuf_push_head. 737 * The contribution from the HTC header is added separately inside HTC. 
738 */ 739 if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) { 740 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 741 "%s: Failed to expand head for SRING_SETUP msg", 742 __func__); 743 return QDF_STATUS_E_FAILURE; 744 } 745 746 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 747 748 /* rewind beyond alignment pad to get to the HTC header reserved area */ 749 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 750 751 /* word 0 */ 752 *msg_word = 0; 753 htt_logger_bufp = (uint8_t *)msg_word; 754 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP); 755 target_pdev_id = 756 dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id); 757 758 if ((htt_ring_type == HTT_SW_TO_HW_RING) || 759 (htt_ring_type == HTT_HW_TO_SW_RING)) 760 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id); 761 else 762 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id); 763 764 dp_info("mac_id %d", mac_id); 765 HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type); 766 /* TODO: Discuss with FW on changing this to unique ID and using 767 * htt_ring_type to send the type of ring 768 */ 769 HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id); 770 771 /* word 1 */ 772 msg_word++; 773 *msg_word = 0; 774 HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word, 775 srng_params.ring_base_paddr & 0xffffffff); 776 777 /* word 2 */ 778 msg_word++; 779 *msg_word = 0; 780 HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word, 781 (uint64_t)srng_params.ring_base_paddr >> 32); 782 783 /* word 3 */ 784 msg_word++; 785 *msg_word = 0; 786 HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size); 787 HTT_SRING_SETUP_RING_SIZE_SET(*msg_word, 788 (ring_entry_size * srng_params.num_entries)); 789 dp_info("entry_size %d", ring_entry_size); 790 dp_info("num_entries %d", srng_params.num_entries); 791 dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries)); 792 if (htt_ring_type == HTT_SW_TO_HW_RING) 793 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET( 794 *msg_word, 1); 795 
HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word, 796 !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); 797 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word, 798 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); 799 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word, 800 !!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP)); 801 802 /* word 4 */ 803 msg_word++; 804 *msg_word = 0; 805 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, 806 hp_addr & 0xffffffff); 807 808 /* word 5 */ 809 msg_word++; 810 *msg_word = 0; 811 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, 812 (uint64_t)hp_addr >> 32); 813 814 /* word 6 */ 815 msg_word++; 816 *msg_word = 0; 817 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, 818 tp_addr & 0xffffffff); 819 820 /* word 7 */ 821 msg_word++; 822 *msg_word = 0; 823 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, 824 (uint64_t)tp_addr >> 32); 825 826 /* word 8 */ 827 msg_word++; 828 *msg_word = 0; 829 HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word, 830 srng_params.msi_addr & 0xffffffff); 831 832 /* word 9 */ 833 msg_word++; 834 *msg_word = 0; 835 HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word, 836 (uint64_t)(srng_params.msi_addr) >> 32); 837 838 /* word 10 */ 839 msg_word++; 840 *msg_word = 0; 841 HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word, 842 qdf_cpu_to_le32(srng_params.msi_data)); 843 844 /* word 11 */ 845 msg_word++; 846 *msg_word = 0; 847 HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word, 848 srng_params.intr_batch_cntr_thres_entries * 849 ring_entry_size); 850 HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word, 851 srng_params.intr_timer_thres_us >> 3); 852 853 /* word 12 */ 854 msg_word++; 855 *msg_word = 0; 856 if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) { 857 /* TODO: Setting low threshold to 1/8th of ring size - see 858 * if this needs to be configurable 859 */ 860 HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word, 861 srng_params.low_threshold); 862 } 
863 /* "response_required" field should be set if a HTT response message is 864 * required after setting up the ring. 865 */ 866 pkt = htt_htc_pkt_alloc(soc); 867 if (!pkt) { 868 dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d", 869 hal_ring_type, srng_params.ring_id, htt_ring_id); 870 goto fail1; 871 } 872 873 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 874 875 SET_HTC_PACKET_INFO_TX( 876 &pkt->htc_pkt, 877 dp_htt_h2t_send_complete_free_netbuf, 878 qdf_nbuf_data(htt_msg), 879 qdf_nbuf_len(htt_msg), 880 soc->htc_endpoint, 881 HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */ 882 883 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); 884 status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP, 885 htt_logger_bufp); 886 887 if (status != QDF_STATUS_SUCCESS) { 888 qdf_nbuf_free(htt_msg); 889 htt_htc_pkt_free(soc, pkt); 890 } 891 892 return status; 893 894 fail1: 895 qdf_nbuf_free(htt_msg); 896 fail0: 897 return QDF_STATUS_E_FAILURE; 898 } 899 900 qdf_export_symbol(htt_srng_setup); 901 902 #ifdef QCA_SUPPORT_FULL_MON 903 /** 904 * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW 905 * 906 * @htt_soc: HTT Soc handle 907 * @pdev_id: Radio id 908 * @config: enabled/disable configuration 909 * 910 * Return: Success when HTT message is sent, error on failure 911 */ 912 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc, 913 uint8_t pdev_id, 914 enum dp_full_mon_config config) 915 { 916 struct htt_soc *soc = (struct htt_soc *)htt_soc; 917 struct dp_htt_htc_pkt *pkt; 918 qdf_nbuf_t htt_msg; 919 uint32_t *msg_word; 920 uint8_t *htt_logger_bufp; 921 922 htt_msg = qdf_nbuf_alloc(soc->osdev, 923 HTT_MSG_BUF_SIZE( 924 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ), 925 /* reserve room for the HTC header */ 926 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 927 4, 928 TRUE); 929 if (!htt_msg) 930 return QDF_STATUS_E_FAILURE; 931 932 /* 933 * Set the length of the message. 
934 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 935 * separately during the below call to qdf_nbuf_push_head. 936 * The contribution from the HTC header is added separately inside HTC. 937 */ 938 if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) { 939 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 940 "%s: Failed to expand head for RX Ring Cfg msg", 941 __func__); 942 goto fail1; 943 } 944 945 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 946 947 /* rewind beyond alignment pad to get to the HTC header reserved area */ 948 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 949 950 /* word 0 */ 951 *msg_word = 0; 952 htt_logger_bufp = (uint8_t *)msg_word; 953 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE); 954 HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET( 955 *msg_word, DP_SW2HW_MACID(pdev_id)); 956 957 msg_word++; 958 *msg_word = 0; 959 /* word 1 */ 960 if (config == DP_FULL_MON_ENABLE) { 961 HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true); 962 HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true); 963 HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true); 964 HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2); 965 } else if (config == DP_FULL_MON_DISABLE) { 966 /* As per MAC team's suggestion, While disabling full monitor 967 * mode, Set 'en' bit to true in full monitor mode register. 
968 */ 969 HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true); 970 HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false); 971 HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false); 972 HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2); 973 } 974 975 pkt = htt_htc_pkt_alloc(soc); 976 if (!pkt) { 977 qdf_err("HTC packet allocation failed"); 978 goto fail1; 979 } 980 981 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 982 983 SET_HTC_PACKET_INFO_TX( 984 &pkt->htc_pkt, 985 dp_htt_h2t_send_complete_free_netbuf, 986 qdf_nbuf_data(htt_msg), 987 qdf_nbuf_len(htt_msg), 988 soc->htc_endpoint, 989 HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */ 990 991 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); 992 qdf_debug("config: %d", config); 993 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE, 994 htt_logger_bufp); 995 return QDF_STATUS_SUCCESS; 996 fail1: 997 qdf_nbuf_free(htt_msg); 998 return QDF_STATUS_E_FAILURE; 999 } 1000 1001 qdf_export_symbol(htt_h2t_full_mon_cfg); 1002 #else 1003 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc, 1004 uint8_t pdev_id, 1005 enum dp_full_mon_config config) 1006 { 1007 return 0; 1008 } 1009 1010 qdf_export_symbol(htt_h2t_full_mon_cfg); 1011 #endif 1012 1013 #ifdef QCA_UNDECODED_METADATA_SUPPORT 1014 static inline void 1015 dp_mon_rx_enable_phy_errors(uint32_t *msg_word, 1016 struct htt_rx_ring_tlv_filter *htt_tlv_filter) 1017 { 1018 if (htt_tlv_filter->phy_err_filter_valid) { 1019 HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET 1020 (*msg_word, htt_tlv_filter->fp_phy_err); 1021 HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET 1022 (*msg_word, htt_tlv_filter->fp_phy_err_buf_src); 1023 HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET 1024 (*msg_word, htt_tlv_filter->fp_phy_err_buf_dest); 1025 1026 /* word 12*/ 1027 msg_word++; 1028 *msg_word = 0; 1029 HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET 1030 (*msg_word, htt_tlv_filter->phy_err_mask); 1031 1032 /* word 13*/ 1033 
msg_word++; 1034 *msg_word = 0; 1035 HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET 1036 (*msg_word, htt_tlv_filter->phy_err_mask_cont); 1037 } 1038 } 1039 #else 1040 static inline void 1041 dp_mon_rx_enable_phy_errors(uint32_t *msg_word, 1042 struct htt_rx_ring_tlv_filter *htt_tlv_filter) 1043 { 1044 } 1045 #endif 1046 1047 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id, 1048 hal_ring_handle_t hal_ring_hdl, 1049 int hal_ring_type, int ring_buf_size, 1050 struct htt_rx_ring_tlv_filter *htt_tlv_filter) 1051 { 1052 struct htt_soc *soc = (struct htt_soc *)htt_soc; 1053 struct dp_htt_htc_pkt *pkt; 1054 qdf_nbuf_t htt_msg; 1055 uint32_t *msg_word; 1056 uint32_t *msg_word_data; 1057 struct hal_srng_params srng_params; 1058 uint32_t htt_ring_type, htt_ring_id; 1059 uint32_t tlv_filter; 1060 uint8_t *htt_logger_bufp; 1061 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx; 1062 uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx); 1063 int target_pdev_id; 1064 QDF_STATUS status; 1065 1066 htt_msg = qdf_nbuf_alloc(soc->osdev, 1067 HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ), 1068 /* reserve room for the HTC header */ 1069 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 1070 if (!htt_msg) { 1071 dp_err("htt_msg alloc failed ring type %d", hal_ring_type); 1072 goto fail0; 1073 } 1074 1075 hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params); 1076 1077 switch (hal_ring_type) { 1078 case RXDMA_BUF: 1079 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 1080 htt_ring_type = HTT_SW_TO_HW_RING; 1081 break; 1082 case RXDMA_MONITOR_BUF: 1083 htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc, 1084 RXDMA_MONITOR_BUF); 1085 htt_ring_type = HTT_SW_TO_HW_RING; 1086 break; 1087 case RXDMA_MONITOR_STATUS: 1088 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; 1089 htt_ring_type = HTT_SW_TO_HW_RING; 1090 break; 1091 case RXDMA_MONITOR_DST: 1092 htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc, 1093 RXDMA_MONITOR_DST); 1094 
htt_ring_type = HTT_HW_TO_SW_RING; 1095 break; 1096 case RXDMA_MONITOR_DESC: 1097 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 1098 htt_ring_type = HTT_SW_TO_HW_RING; 1099 break; 1100 case RXDMA_DST: 1101 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; 1102 htt_ring_type = HTT_HW_TO_SW_RING; 1103 break; 1104 1105 default: 1106 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1107 "%s: Ring currently not supported", __func__); 1108 goto fail1; 1109 } 1110 1111 dp_info("ring_type %d ring_id %d htt_ring_id %d", 1112 hal_ring_type, srng_params.ring_id, htt_ring_id); 1113 1114 /* 1115 * Set the length of the message. 1116 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 1117 * separately during the below call to qdf_nbuf_push_head. 1118 * The contribution from the HTC header is added separately inside HTC. 1119 */ 1120 if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) { 1121 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1122 "%s: Failed to expand head for RX Ring Cfg msg", 1123 __func__); 1124 goto fail1; /* failure */ 1125 } 1126 1127 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 1128 1129 /* rewind beyond alignment pad to get to the HTC header reserved area */ 1130 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 1131 1132 /* word 0 */ 1133 htt_logger_bufp = (uint8_t *)msg_word; 1134 *msg_word = 0; 1135 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG); 1136 1137 /* applicable only for post Li */ 1138 dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter); 1139 1140 /* 1141 * pdev_id is indexed from 0 whereas mac_id is indexed from 1 1142 * SW_TO_SW and SW_TO_HW rings are unaffected by this 1143 */ 1144 target_pdev_id = 1145 dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id); 1146 1147 if (htt_ring_type == HTT_SW_TO_SW_RING || 1148 htt_ring_type == HTT_SW_TO_HW_RING || 1149 htt_ring_type == HTT_HW_TO_SW_RING) 1150 HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word, 1151 target_pdev_id); 
1152 1153 /* TODO: Discuss with FW on changing this to unique ID and using 1154 * htt_ring_type to send the type of ring 1155 */ 1156 HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id); 1157 1158 HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word, 1159 !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); 1160 1161 HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word, 1162 htt_tlv_filter->offset_valid); 1163 1164 if (mon_drop_th > 0) 1165 HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word, 1166 1); 1167 else 1168 HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word, 1169 0); 1170 1171 /* word 1 */ 1172 msg_word++; 1173 *msg_word = 0; 1174 HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word, 1175 ring_buf_size); 1176 1177 dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter); 1178 dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter); 1179 dp_mon_rx_mac_filter_set(soc->dp_soc, msg_word, htt_tlv_filter); 1180 1181 /* word 2 */ 1182 msg_word++; 1183 *msg_word = 0; 1184 1185 if (htt_tlv_filter->enable_fp) { 1186 /* TYPE: MGMT */ 1187 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1188 FP, MGMT, 0000, 1189 (htt_tlv_filter->fp_mgmt_filter & 1190 FILTER_MGMT_ASSOC_REQ) ? 1 : 0); 1191 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1192 FP, MGMT, 0001, 1193 (htt_tlv_filter->fp_mgmt_filter & 1194 FILTER_MGMT_ASSOC_RES) ? 1 : 0); 1195 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1196 FP, MGMT, 0010, 1197 (htt_tlv_filter->fp_mgmt_filter & 1198 FILTER_MGMT_REASSOC_REQ) ? 1 : 0); 1199 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1200 FP, MGMT, 0011, 1201 (htt_tlv_filter->fp_mgmt_filter & 1202 FILTER_MGMT_REASSOC_RES) ? 1 : 0); 1203 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1204 FP, MGMT, 0100, 1205 (htt_tlv_filter->fp_mgmt_filter & 1206 FILTER_MGMT_PROBE_REQ) ? 
1 : 0); 1207 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1208 FP, MGMT, 0101, 1209 (htt_tlv_filter->fp_mgmt_filter & 1210 FILTER_MGMT_PROBE_RES) ? 1 : 0); 1211 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1212 FP, MGMT, 0110, 1213 (htt_tlv_filter->fp_mgmt_filter & 1214 FILTER_MGMT_TIM_ADVT) ? 1 : 0); 1215 /* reserved */ 1216 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 1217 MGMT, 0111, 1218 (htt_tlv_filter->fp_mgmt_filter & 1219 FILTER_MGMT_RESERVED_7) ? 1 : 0); 1220 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1221 FP, MGMT, 1000, 1222 (htt_tlv_filter->fp_mgmt_filter & 1223 FILTER_MGMT_BEACON) ? 1 : 0); 1224 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1225 FP, MGMT, 1001, 1226 (htt_tlv_filter->fp_mgmt_filter & 1227 FILTER_MGMT_ATIM) ? 1 : 0); 1228 } 1229 1230 if (htt_tlv_filter->enable_md) { 1231 /* TYPE: MGMT */ 1232 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1233 MD, MGMT, 0000, 1234 (htt_tlv_filter->md_mgmt_filter & 1235 FILTER_MGMT_ASSOC_REQ) ? 1 : 0); 1236 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1237 MD, MGMT, 0001, 1238 (htt_tlv_filter->md_mgmt_filter & 1239 FILTER_MGMT_ASSOC_RES) ? 1 : 0); 1240 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1241 MD, MGMT, 0010, 1242 (htt_tlv_filter->md_mgmt_filter & 1243 FILTER_MGMT_REASSOC_REQ) ? 1 : 0); 1244 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1245 MD, MGMT, 0011, 1246 (htt_tlv_filter->md_mgmt_filter & 1247 FILTER_MGMT_REASSOC_RES) ? 1 : 0); 1248 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1249 MD, MGMT, 0100, 1250 (htt_tlv_filter->md_mgmt_filter & 1251 FILTER_MGMT_PROBE_REQ) ? 1 : 0); 1252 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1253 MD, MGMT, 0101, 1254 (htt_tlv_filter->md_mgmt_filter & 1255 FILTER_MGMT_PROBE_RES) ? 1 : 0); 1256 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1257 MD, MGMT, 0110, 1258 (htt_tlv_filter->md_mgmt_filter & 1259 FILTER_MGMT_TIM_ADVT) ? 
1 : 0); 1260 /* reserved */ 1261 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 1262 MGMT, 0111, 1263 (htt_tlv_filter->md_mgmt_filter & 1264 FILTER_MGMT_RESERVED_7) ? 1 : 0); 1265 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1266 MD, MGMT, 1000, 1267 (htt_tlv_filter->md_mgmt_filter & 1268 FILTER_MGMT_BEACON) ? 1 : 0); 1269 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1270 MD, MGMT, 1001, 1271 (htt_tlv_filter->md_mgmt_filter & 1272 FILTER_MGMT_ATIM) ? 1 : 0); 1273 } 1274 1275 if (htt_tlv_filter->enable_mo) { 1276 /* TYPE: MGMT */ 1277 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1278 MO, MGMT, 0000, 1279 (htt_tlv_filter->mo_mgmt_filter & 1280 FILTER_MGMT_ASSOC_REQ) ? 1 : 0); 1281 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1282 MO, MGMT, 0001, 1283 (htt_tlv_filter->mo_mgmt_filter & 1284 FILTER_MGMT_ASSOC_RES) ? 1 : 0); 1285 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1286 MO, MGMT, 0010, 1287 (htt_tlv_filter->mo_mgmt_filter & 1288 FILTER_MGMT_REASSOC_REQ) ? 1 : 0); 1289 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1290 MO, MGMT, 0011, 1291 (htt_tlv_filter->mo_mgmt_filter & 1292 FILTER_MGMT_REASSOC_RES) ? 1 : 0); 1293 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1294 MO, MGMT, 0100, 1295 (htt_tlv_filter->mo_mgmt_filter & 1296 FILTER_MGMT_PROBE_REQ) ? 1 : 0); 1297 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1298 MO, MGMT, 0101, 1299 (htt_tlv_filter->mo_mgmt_filter & 1300 FILTER_MGMT_PROBE_RES) ? 1 : 0); 1301 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1302 MO, MGMT, 0110, 1303 (htt_tlv_filter->mo_mgmt_filter & 1304 FILTER_MGMT_TIM_ADVT) ? 1 : 0); 1305 /* reserved */ 1306 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 1307 MGMT, 0111, 1308 (htt_tlv_filter->mo_mgmt_filter & 1309 FILTER_MGMT_RESERVED_7) ? 1 : 0); 1310 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1311 MO, MGMT, 1000, 1312 (htt_tlv_filter->mo_mgmt_filter & 1313 FILTER_MGMT_BEACON) ? 
1 : 0); 1314 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1315 MO, MGMT, 1001, 1316 (htt_tlv_filter->mo_mgmt_filter & 1317 FILTER_MGMT_ATIM) ? 1 : 0); 1318 } 1319 1320 /* word 3 */ 1321 msg_word++; 1322 *msg_word = 0; 1323 1324 if (htt_tlv_filter->enable_fp) { 1325 /* TYPE: MGMT */ 1326 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1327 FP, MGMT, 1010, 1328 (htt_tlv_filter->fp_mgmt_filter & 1329 FILTER_MGMT_DISASSOC) ? 1 : 0); 1330 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1331 FP, MGMT, 1011, 1332 (htt_tlv_filter->fp_mgmt_filter & 1333 FILTER_MGMT_AUTH) ? 1 : 0); 1334 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1335 FP, MGMT, 1100, 1336 (htt_tlv_filter->fp_mgmt_filter & 1337 FILTER_MGMT_DEAUTH) ? 1 : 0); 1338 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1339 FP, MGMT, 1101, 1340 (htt_tlv_filter->fp_mgmt_filter & 1341 FILTER_MGMT_ACTION) ? 1 : 0); 1342 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1343 FP, MGMT, 1110, 1344 (htt_tlv_filter->fp_mgmt_filter & 1345 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); 1346 /* reserved*/ 1347 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, 1348 MGMT, 1111, 1349 (htt_tlv_filter->fp_mgmt_filter & 1350 FILTER_MGMT_RESERVED_15) ? 1 : 0); 1351 } 1352 1353 if (htt_tlv_filter->enable_md) { 1354 /* TYPE: MGMT */ 1355 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1356 MD, MGMT, 1010, 1357 (htt_tlv_filter->md_mgmt_filter & 1358 FILTER_MGMT_DISASSOC) ? 1 : 0); 1359 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1360 MD, MGMT, 1011, 1361 (htt_tlv_filter->md_mgmt_filter & 1362 FILTER_MGMT_AUTH) ? 1 : 0); 1363 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1364 MD, MGMT, 1100, 1365 (htt_tlv_filter->md_mgmt_filter & 1366 FILTER_MGMT_DEAUTH) ? 1 : 0); 1367 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1368 MD, MGMT, 1101, 1369 (htt_tlv_filter->md_mgmt_filter & 1370 FILTER_MGMT_ACTION) ? 
1 : 0); 1371 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1372 MD, MGMT, 1110, 1373 (htt_tlv_filter->md_mgmt_filter & 1374 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); 1375 } 1376 1377 if (htt_tlv_filter->enable_mo) { 1378 /* TYPE: MGMT */ 1379 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1380 MO, MGMT, 1010, 1381 (htt_tlv_filter->mo_mgmt_filter & 1382 FILTER_MGMT_DISASSOC) ? 1 : 0); 1383 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1384 MO, MGMT, 1011, 1385 (htt_tlv_filter->mo_mgmt_filter & 1386 FILTER_MGMT_AUTH) ? 1 : 0); 1387 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1388 MO, MGMT, 1100, 1389 (htt_tlv_filter->mo_mgmt_filter & 1390 FILTER_MGMT_DEAUTH) ? 1 : 0); 1391 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1392 MO, MGMT, 1101, 1393 (htt_tlv_filter->mo_mgmt_filter & 1394 FILTER_MGMT_ACTION) ? 1 : 0); 1395 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1396 MO, MGMT, 1110, 1397 (htt_tlv_filter->mo_mgmt_filter & 1398 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); 1399 /* reserved*/ 1400 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, 1401 MGMT, 1111, 1402 (htt_tlv_filter->mo_mgmt_filter & 1403 FILTER_MGMT_RESERVED_15) ? 1 : 0); 1404 } 1405 1406 /* word 4 */ 1407 msg_word++; 1408 *msg_word = 0; 1409 1410 if (htt_tlv_filter->enable_fp) { 1411 /* TYPE: CTRL */ 1412 /* reserved */ 1413 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1414 CTRL, 0000, 1415 (htt_tlv_filter->fp_ctrl_filter & 1416 FILTER_CTRL_RESERVED_1) ? 1 : 0); 1417 /* reserved */ 1418 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1419 CTRL, 0001, 1420 (htt_tlv_filter->fp_ctrl_filter & 1421 FILTER_CTRL_RESERVED_2) ? 1 : 0); 1422 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1423 CTRL, 0010, 1424 (htt_tlv_filter->fp_ctrl_filter & 1425 FILTER_CTRL_TRIGGER) ? 1 : 0); 1426 /* reserved */ 1427 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1428 CTRL, 0011, 1429 (htt_tlv_filter->fp_ctrl_filter & 1430 FILTER_CTRL_RESERVED_4) ? 
1 : 0); 1431 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1432 CTRL, 0100, 1433 (htt_tlv_filter->fp_ctrl_filter & 1434 FILTER_CTRL_BF_REP_POLL) ? 1 : 0); 1435 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1436 CTRL, 0101, 1437 (htt_tlv_filter->fp_ctrl_filter & 1438 FILTER_CTRL_VHT_NDP) ? 1 : 0); 1439 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1440 CTRL, 0110, 1441 (htt_tlv_filter->fp_ctrl_filter & 1442 FILTER_CTRL_FRAME_EXT) ? 1 : 0); 1443 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1444 CTRL, 0111, 1445 (htt_tlv_filter->fp_ctrl_filter & 1446 FILTER_CTRL_CTRLWRAP) ? 1 : 0); 1447 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1448 CTRL, 1000, 1449 (htt_tlv_filter->fp_ctrl_filter & 1450 FILTER_CTRL_BA_REQ) ? 1 : 0); 1451 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1452 CTRL, 1001, 1453 (htt_tlv_filter->fp_ctrl_filter & 1454 FILTER_CTRL_BA) ? 1 : 0); 1455 } 1456 1457 if (htt_tlv_filter->enable_md) { 1458 /* TYPE: CTRL */ 1459 /* reserved */ 1460 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1461 CTRL, 0000, 1462 (htt_tlv_filter->md_ctrl_filter & 1463 FILTER_CTRL_RESERVED_1) ? 1 : 0); 1464 /* reserved */ 1465 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1466 CTRL, 0001, 1467 (htt_tlv_filter->md_ctrl_filter & 1468 FILTER_CTRL_RESERVED_2) ? 1 : 0); 1469 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1470 CTRL, 0010, 1471 (htt_tlv_filter->md_ctrl_filter & 1472 FILTER_CTRL_TRIGGER) ? 1 : 0); 1473 /* reserved */ 1474 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1475 CTRL, 0011, 1476 (htt_tlv_filter->md_ctrl_filter & 1477 FILTER_CTRL_RESERVED_4) ? 1 : 0); 1478 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1479 CTRL, 0100, 1480 (htt_tlv_filter->md_ctrl_filter & 1481 FILTER_CTRL_BF_REP_POLL) ? 1 : 0); 1482 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1483 CTRL, 0101, 1484 (htt_tlv_filter->md_ctrl_filter & 1485 FILTER_CTRL_VHT_NDP) ? 
1 : 0); 1486 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1487 CTRL, 0110, 1488 (htt_tlv_filter->md_ctrl_filter & 1489 FILTER_CTRL_FRAME_EXT) ? 1 : 0); 1490 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1491 CTRL, 0111, 1492 (htt_tlv_filter->md_ctrl_filter & 1493 FILTER_CTRL_CTRLWRAP) ? 1 : 0); 1494 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1495 CTRL, 1000, 1496 (htt_tlv_filter->md_ctrl_filter & 1497 FILTER_CTRL_BA_REQ) ? 1 : 0); 1498 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1499 CTRL, 1001, 1500 (htt_tlv_filter->md_ctrl_filter & 1501 FILTER_CTRL_BA) ? 1 : 0); 1502 } 1503 1504 if (htt_tlv_filter->enable_mo) { 1505 /* TYPE: CTRL */ 1506 /* reserved */ 1507 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1508 CTRL, 0000, 1509 (htt_tlv_filter->mo_ctrl_filter & 1510 FILTER_CTRL_RESERVED_1) ? 1 : 0); 1511 /* reserved */ 1512 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1513 CTRL, 0001, 1514 (htt_tlv_filter->mo_ctrl_filter & 1515 FILTER_CTRL_RESERVED_2) ? 1 : 0); 1516 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1517 CTRL, 0010, 1518 (htt_tlv_filter->mo_ctrl_filter & 1519 FILTER_CTRL_TRIGGER) ? 1 : 0); 1520 /* reserved */ 1521 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1522 CTRL, 0011, 1523 (htt_tlv_filter->mo_ctrl_filter & 1524 FILTER_CTRL_RESERVED_4) ? 1 : 0); 1525 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1526 CTRL, 0100, 1527 (htt_tlv_filter->mo_ctrl_filter & 1528 FILTER_CTRL_BF_REP_POLL) ? 1 : 0); 1529 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1530 CTRL, 0101, 1531 (htt_tlv_filter->mo_ctrl_filter & 1532 FILTER_CTRL_VHT_NDP) ? 1 : 0); 1533 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1534 CTRL, 0110, 1535 (htt_tlv_filter->mo_ctrl_filter & 1536 FILTER_CTRL_FRAME_EXT) ? 1 : 0); 1537 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1538 CTRL, 0111, 1539 (htt_tlv_filter->mo_ctrl_filter & 1540 FILTER_CTRL_CTRLWRAP) ? 
1 : 0); 1541 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1542 CTRL, 1000, 1543 (htt_tlv_filter->mo_ctrl_filter & 1544 FILTER_CTRL_BA_REQ) ? 1 : 0); 1545 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1546 CTRL, 1001, 1547 (htt_tlv_filter->mo_ctrl_filter & 1548 FILTER_CTRL_BA) ? 1 : 0); 1549 } 1550 1551 /* word 5 */ 1552 msg_word++; 1553 *msg_word = 0; 1554 if (htt_tlv_filter->enable_fp) { 1555 /* TYPE: CTRL */ 1556 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1557 CTRL, 1010, 1558 (htt_tlv_filter->fp_ctrl_filter & 1559 FILTER_CTRL_PSPOLL) ? 1 : 0); 1560 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1561 CTRL, 1011, 1562 (htt_tlv_filter->fp_ctrl_filter & 1563 FILTER_CTRL_RTS) ? 1 : 0); 1564 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1565 CTRL, 1100, 1566 (htt_tlv_filter->fp_ctrl_filter & 1567 FILTER_CTRL_CTS) ? 1 : 0); 1568 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1569 CTRL, 1101, 1570 (htt_tlv_filter->fp_ctrl_filter & 1571 FILTER_CTRL_ACK) ? 1 : 0); 1572 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1573 CTRL, 1110, 1574 (htt_tlv_filter->fp_ctrl_filter & 1575 FILTER_CTRL_CFEND) ? 1 : 0); 1576 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1577 CTRL, 1111, 1578 (htt_tlv_filter->fp_ctrl_filter & 1579 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1580 /* TYPE: DATA */ 1581 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1582 DATA, MCAST, 1583 (htt_tlv_filter->fp_data_filter & 1584 FILTER_DATA_MCAST) ? 1 : 0); 1585 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1586 DATA, UCAST, 1587 (htt_tlv_filter->fp_data_filter & 1588 FILTER_DATA_UCAST) ? 1 : 0); 1589 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1590 DATA, NULL, 1591 (htt_tlv_filter->fp_data_filter & 1592 FILTER_DATA_NULL) ? 
1 : 0); 1593 } 1594 1595 if (htt_tlv_filter->enable_md) { 1596 /* TYPE: CTRL */ 1597 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1598 CTRL, 1010, 1599 (htt_tlv_filter->md_ctrl_filter & 1600 FILTER_CTRL_PSPOLL) ? 1 : 0); 1601 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1602 CTRL, 1011, 1603 (htt_tlv_filter->md_ctrl_filter & 1604 FILTER_CTRL_RTS) ? 1 : 0); 1605 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1606 CTRL, 1100, 1607 (htt_tlv_filter->md_ctrl_filter & 1608 FILTER_CTRL_CTS) ? 1 : 0); 1609 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1610 CTRL, 1101, 1611 (htt_tlv_filter->md_ctrl_filter & 1612 FILTER_CTRL_ACK) ? 1 : 0); 1613 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1614 CTRL, 1110, 1615 (htt_tlv_filter->md_ctrl_filter & 1616 FILTER_CTRL_CFEND) ? 1 : 0); 1617 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1618 CTRL, 1111, 1619 (htt_tlv_filter->md_ctrl_filter & 1620 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1621 /* TYPE: DATA */ 1622 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1623 DATA, MCAST, 1624 (htt_tlv_filter->md_data_filter & 1625 FILTER_DATA_MCAST) ? 1 : 0); 1626 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1627 DATA, UCAST, 1628 (htt_tlv_filter->md_data_filter & 1629 FILTER_DATA_UCAST) ? 1 : 0); 1630 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1631 DATA, NULL, 1632 (htt_tlv_filter->md_data_filter & 1633 FILTER_DATA_NULL) ? 1 : 0); 1634 } 1635 1636 if (htt_tlv_filter->enable_mo) { 1637 /* TYPE: CTRL */ 1638 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1639 CTRL, 1010, 1640 (htt_tlv_filter->mo_ctrl_filter & 1641 FILTER_CTRL_PSPOLL) ? 1 : 0); 1642 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1643 CTRL, 1011, 1644 (htt_tlv_filter->mo_ctrl_filter & 1645 FILTER_CTRL_RTS) ? 1 : 0); 1646 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1647 CTRL, 1100, 1648 (htt_tlv_filter->mo_ctrl_filter & 1649 FILTER_CTRL_CTS) ? 
1 : 0); 1650 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1651 CTRL, 1101, 1652 (htt_tlv_filter->mo_ctrl_filter & 1653 FILTER_CTRL_ACK) ? 1 : 0); 1654 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1655 CTRL, 1110, 1656 (htt_tlv_filter->mo_ctrl_filter & 1657 FILTER_CTRL_CFEND) ? 1 : 0); 1658 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1659 CTRL, 1111, 1660 (htt_tlv_filter->mo_ctrl_filter & 1661 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1662 /* TYPE: DATA */ 1663 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1664 DATA, MCAST, 1665 (htt_tlv_filter->mo_data_filter & 1666 FILTER_DATA_MCAST) ? 1 : 0); 1667 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1668 DATA, UCAST, 1669 (htt_tlv_filter->mo_data_filter & 1670 FILTER_DATA_UCAST) ? 1 : 0); 1671 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1672 DATA, NULL, 1673 (htt_tlv_filter->mo_data_filter & 1674 FILTER_DATA_NULL) ? 1 : 0); 1675 } 1676 1677 /* word 6 */ 1678 msg_word++; 1679 *msg_word = 0; 1680 tlv_filter = 0; 1681 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START, 1682 htt_tlv_filter->mpdu_start); 1683 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START, 1684 htt_tlv_filter->msdu_start); 1685 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET, 1686 htt_tlv_filter->packet); 1687 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END, 1688 htt_tlv_filter->msdu_end); 1689 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END, 1690 htt_tlv_filter->mpdu_end); 1691 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER, 1692 htt_tlv_filter->packet_header); 1693 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION, 1694 htt_tlv_filter->attention); 1695 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START, 1696 htt_tlv_filter->ppdu_start); 1697 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END, 1698 htt_tlv_filter->ppdu_end); 1699 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS, 1700 
htt_tlv_filter->ppdu_end_user_stats); 1701 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, 1702 PPDU_END_USER_STATS_EXT, 1703 htt_tlv_filter->ppdu_end_user_stats_ext); 1704 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE, 1705 htt_tlv_filter->ppdu_end_status_done); 1706 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO, 1707 htt_tlv_filter->ppdu_start_user_info); 1708 /* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/ 1709 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED, 1710 htt_tlv_filter->header_per_msdu); 1711 1712 HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter); 1713 1714 msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg); 1715 dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]", 1716 msg_word_data[0], msg_word_data[1], msg_word_data[2], 1717 msg_word_data[3], msg_word_data[4], msg_word_data[5], 1718 msg_word_data[6]); 1719 1720 /* word 7 */ 1721 msg_word++; 1722 *msg_word = 0; 1723 if (htt_tlv_filter->offset_valid) { 1724 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word, 1725 htt_tlv_filter->rx_packet_offset); 1726 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word, 1727 htt_tlv_filter->rx_header_offset); 1728 1729 /* word 8 */ 1730 msg_word++; 1731 *msg_word = 0; 1732 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word, 1733 htt_tlv_filter->rx_mpdu_end_offset); 1734 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word, 1735 htt_tlv_filter->rx_mpdu_start_offset); 1736 1737 /* word 9 */ 1738 msg_word++; 1739 *msg_word = 0; 1740 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word, 1741 htt_tlv_filter->rx_msdu_end_offset); 1742 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word, 1743 htt_tlv_filter->rx_msdu_start_offset); 1744 1745 /* word 10 */ 1746 msg_word++; 1747 *msg_word = 0; 1748 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word, 1749 htt_tlv_filter->rx_attn_offset); 1750 1751 /* word 11 */ 1752 
msg_word++; 1753 *msg_word = 0; 1754 } else { 1755 /* word 11 */ 1756 msg_word += 4; 1757 *msg_word = 0; 1758 } 1759 1760 soc->dp_soc->arch_ops.dp_rx_word_mask_subscribe( 1761 soc->dp_soc, 1762 msg_word, 1763 (void *)htt_tlv_filter); 1764 1765 dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter); 1766 1767 if (mon_drop_th > 0) 1768 HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word, 1769 mon_drop_th); 1770 1771 dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter); 1772 1773 dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter); 1774 1775 /* word 14*/ 1776 msg_word += 3; 1777 1778 /* word 15*/ 1779 msg_word++; 1780 1781 /* word 16*/ 1782 msg_word++; 1783 *msg_word = 0; 1784 1785 dp_mon_rx_enable_pkt_tlv_offset(soc->dp_soc, msg_word, htt_tlv_filter); 1786 1787 /* word 20 and 21*/ 1788 msg_word += 4; 1789 *msg_word = 0; 1790 1791 dp_mon_rx_enable_fpmo(soc->dp_soc, msg_word, htt_tlv_filter); 1792 1793 /* "response_required" field should be set if a HTT response message is 1794 * required after setting up the ring. 
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
		       hal_ring_type, srng_params.ring_id, htt_ring_id);
		goto fail1;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				     htt_logger_bufp);

	/* on send failure ownership of both the nbuf and the HTC packet
	 * wrapper reverts to this function, so release them here
	 */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}

qdf_export_symbol(htt_h2t_rx_ring_cfg);

#if defined(HTT_STATS_ENABLE)
/**
 * dp_send_htt_stat_resp() - forward one HTT EXT stats segment to WDI
 *	subscribers
 * @htt_stats: stats context tracking the remaining message length across
 *	segments
 * @soc: dp soc handle
 * @htt_msg: nbuf holding this stats segment; always freed here because the
 *	WDI handler makes its own copy of the data
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
					struct dp_soc *soc, qdf_nbuf_t htt_msg)

{
	uint32_t pdev_id;
	uint32_t *msg_word = NULL;
	uint32_t msg_remain_len = 0;

	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);

	/*COOKIE MSB*/
	/* pdev id is carried in the low bits of word 2 (cookie MSB) */
	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;

	/* stats message length + 16 size of HTT header*/
	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
			(uint32_t)DP_EXT_MSG_LENGTH);

	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
			msg_word, msg_remain_len,
			WDI_NO_VAL, pdev_id);

	/* account for the segment just delivered */
	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
	}
	/* Need to be freed here as WDI handler will
	 * make a copy of pkt to send data to application
	 */
	qdf_nbuf_free(htt_msg);
	return QDF_STATUS_SUCCESS;
}
#else
/* Stub when HTT stats support is compiled out */
static inline
QDF_STATUS
dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif

#ifdef HTT_STATS_DEBUGFS_SUPPORT
/* dp_send_htt_stats_dbgfs_msg() - Function to send htt data to upper layer.
 * @pdev: dp pdev handle
 * @msg_word: HTT msg
 * @msg_len: Length of HTT msg sent
 *
 * Return: none
 */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
	struct htt_dbgfs_cfg dbgfs_cfg;
	int done = 0;

	/* send 5th word of HTT msg to upper layer */
	dbgfs_cfg.msg_word = (msg_word + 4);
	dbgfs_cfg.m = pdev->dbgfs_cfg->m;

	/* stats message length + 16 size of HTT header*/
	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);

	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
							     (msg_len - HTT_HEADER_LEN));

	/* Get TLV Done bit from 4th msg word */
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
	if (done) {
		/* last TLV: wake the waiter blocked on the debugfs event */
		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
				   , pdev->soc);
	}
}
#else
/* Stub when debugfs HTT stats support is compiled out */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
#endif /* HTT_STATS_DEBUGFS_SUPPORT */

#ifdef WLAN_SYSFS_DP_STATS
/* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
 * @pdev: dp pdev handle
 *
 * This function sets the process id and printing mode within the sysfs config
 * struct. which enables DP_PRINT statements within this process to write to the
 * console buffer provided by the user space.
 *
 * Return: None
 */
static inline void
dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc) {
		dp_htt_err("soc is null");
		return;
	}

	if (!soc->sysfs_config) {
		dp_htt_err("soc->sysfs_config is NULL");
		return;
	}

	/* set sysfs config parameters */
	soc->sysfs_config->process_id = qdf_get_current_pid();
	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
}

/**
 * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
 * @soc: soc handle.
 * @msg_word: Pointer to htt msg word.
 *
 * Return: void
 */
static inline void
dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
{
	int done = 0;

	/* TLV "done" bit is carried in the 4th message word */
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
	if (done) {
		/* unblock the thread waiting for the FW stats response */
		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
			dp_htt_err("%pK:event compl Fail to set event ",
				   soc);
	}
}
#else /* WLAN_SYSFS_DP_STATS */
/* Stubs when sysfs DP stats support is compiled out */
static inline void
dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
{
}

static inline void
dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
{
}
#endif /* WLAN_SYSFS_DP_STATS */

/* dp_htt_set_pdev_obss_stats() - Function to set pdev obss stats.
1977 * @pdev: dp pdev handle 1978 * @tag_type: HTT TLV tag type 1979 * @tag_buf: TLV buffer pointer 1980 * 1981 * Return: None 1982 */ 1983 static inline void 1984 dp_htt_set_pdev_obss_stats(struct dp_pdev *pdev, uint32_t tag_type, 1985 uint32_t *tag_buf) 1986 { 1987 if (tag_type != HTT_STATS_PDEV_OBSS_PD_TAG) { 1988 dp_err("Tag mismatch"); 1989 return; 1990 } 1991 qdf_mem_copy(&pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv, 1992 tag_buf, sizeof(struct cdp_pdev_obss_pd_stats_tlv)); 1993 qdf_event_set(&pdev->fw_obss_stats_event); 1994 } 1995 1996 /** 1997 * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats 1998 * @htt_stats: htt stats info 1999 * @soc: dp_soc 2000 * 2001 * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message 2002 * contains sub messages which are identified by a TLV header. 2003 * In this function we will process the stream of T2H messages and read all the 2004 * TLV contained in the message. 2005 * 2006 * The following cases have been taken care of 2007 * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer 2008 * In this case the buffer will contain multiple tlvs. 2009 * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer. 2010 * Only one tlv will be contained in the HTT message and this tag 2011 * will extend onto the next buffer. 2012 * Case 3: When the buffer is the continuation of the previous message 2013 * Case 4: tlv length is 0. 
 *	   which will indicate the end of message
 *
 * Return: void
 */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					   struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* tlv_buf_head/tail hold a scratch buffer used to reassemble a TLV
	 * that spans more than one T2H message buffer (Case 2/3)
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val = 0;
	int cookie_msb = 0;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/* non-zero cookie: the response is forwarded to the original
		 * requester; on success the nbuf ownership moved, so skip it
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
			    == QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* debugfs-originated request: hand the raw message over */
		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
						    htt_stats->msg_len);
			qdf_nbuf_free(htt_msg);
			continue;
		}

		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_update_config(pdev);

		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
			copy_stats = true;

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
					 (uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero TLV length terminates the stream */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* account for the TLV header itself on first read */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
						     (uint8_t *)msg_word,
						     tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				if (cookie_msb & DBG_STATS_COOKIE_HTT_OBSS)
					dp_htt_set_pdev_obss_stats(pdev,
								   tlv_type,
								   tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
							  QDF_TRACE_LEVEL_ERROR,
							  "Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* buffer exhausted: stash the partial TLV and
				 * continue with the next queued nbuf
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
					     msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		/* indicate event completion in case the event is done */
		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_set_event(soc, msg_word);

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* drop the current nbuf and flush the rest of the queue */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
	       != NULL)
		qdf_nbuf_free(htt_msg);
}

void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}

/**
 * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
 * @soc: DP SOC handle
 * @htt_t2h_msg: HTT message nbuf
 *
 * return:void
 */
static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
					    qdf_nbuf_t htt_t2h_msg)
{
	uint8_t done;
	qdf_nbuf_t msg_copy;
	uint32_t *msg_word;

	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
	msg_word = msg_word + 3;
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);

	/*
	 * HTT EXT stats response comes as stream of TLVs which span over
	 * multiple T2H messages.
	 * The first message will carry length of the response.
	 * For rest of the messages length will be zero.
	 *
	 * Clone the T2H message buffer and store it in a list to process
	 * it later.
2269 * 2270 * The original T2H message buffers gets freed in the T2H HTT event 2271 * handler 2272 */ 2273 msg_copy = qdf_nbuf_clone(htt_t2h_msg); 2274 2275 if (!msg_copy) { 2276 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 2277 "T2H message clone failed for HTT EXT STATS"); 2278 goto error; 2279 } 2280 2281 qdf_spin_lock_bh(&soc->htt_stats.lock); 2282 qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy); 2283 /* 2284 * Done bit signifies that this is the last T2H buffer in the stream of 2285 * HTT EXT STATS message 2286 */ 2287 if (done) { 2288 soc->htt_stats.num_stats++; 2289 qdf_sched_work(0, &soc->htt_stats.work); 2290 } 2291 qdf_spin_unlock_bh(&soc->htt_stats.lock); 2292 2293 return; 2294 2295 error: 2296 qdf_spin_lock_bh(&soc->htt_stats.lock); 2297 while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) 2298 != NULL) { 2299 qdf_nbuf_free(msg_copy); 2300 } 2301 soc->htt_stats.num_stats = 0; 2302 qdf_spin_unlock_bh(&soc->htt_stats.lock); 2303 return; 2304 } 2305 2306 int htt_soc_attach_target(struct htt_soc *htt_soc) 2307 { 2308 struct htt_soc *soc = (struct htt_soc *)htt_soc; 2309 2310 return htt_h2t_ver_req_msg(soc); 2311 } 2312 2313 void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc) 2314 { 2315 htt_soc->htc_soc = htc_soc; 2316 } 2317 2318 HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc) 2319 { 2320 return htt_soc->htc_soc; 2321 } 2322 2323 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle) 2324 { 2325 int i; 2326 int j; 2327 int umac_alloc_size = HTT_SW_UMAC_RING_IDX_MAX * 2328 sizeof(struct bp_handler); 2329 int lmac_alloc_size = HTT_SW_LMAC_RING_IDX_MAX * 2330 sizeof(struct bp_handler); 2331 struct htt_soc *htt_soc = NULL; 2332 2333 htt_soc = qdf_mem_malloc(sizeof(*htt_soc)); 2334 if (!htt_soc) { 2335 dp_err("HTT attach failed"); 2336 return NULL; 2337 } 2338 2339 for (i = 0; i < MAX_PDEV_CNT; i++) { 2340 htt_soc->pdevid_tt[i].umac_path = 2341 qdf_mem_malloc(umac_alloc_size); 2342 if 
(!htt_soc->pdevid_tt[i].umac_path) 2343 break; 2344 for (j = 0; j < HTT_SW_UMAC_RING_IDX_MAX; j++) 2345 htt_soc->pdevid_tt[i].umac_path[j].bp_start_tt = -1; 2346 htt_soc->pdevid_tt[i].lmac_path = 2347 qdf_mem_malloc(lmac_alloc_size); 2348 if (!htt_soc->pdevid_tt[i].lmac_path) { 2349 qdf_mem_free(htt_soc->pdevid_tt[i].umac_path); 2350 break; 2351 } 2352 for (j = 0; j < HTT_SW_LMAC_RING_IDX_MAX ; j++) 2353 htt_soc->pdevid_tt[i].lmac_path[j].bp_start_tt = -1; 2354 } 2355 2356 if (i != MAX_PDEV_CNT) { 2357 for (j = 0; j < i; j++) { 2358 qdf_mem_free(htt_soc->pdevid_tt[j].umac_path); 2359 qdf_mem_free(htt_soc->pdevid_tt[j].lmac_path); 2360 } 2361 qdf_mem_free(htt_soc); 2362 return NULL; 2363 } 2364 2365 htt_soc->dp_soc = soc; 2366 htt_soc->htc_soc = htc_handle; 2367 HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex); 2368 2369 return htt_soc; 2370 } 2371 2372 #if defined(WDI_EVENT_ENABLE) && \ 2373 !defined(REMOVE_PKT_LOG) 2374 /** 2375 * dp_pktlog_msg_handler() - Pktlog msg handler 2376 * @soc: HTT SOC handle 2377 * @msg_word: Pointer to payload 2378 * 2379 * Return: None 2380 */ 2381 static void 2382 dp_pktlog_msg_handler(struct htt_soc *soc, 2383 uint32_t *msg_word) 2384 { 2385 uint8_t pdev_id; 2386 uint8_t target_pdev_id; 2387 uint32_t *pl_hdr; 2388 2389 target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word); 2390 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, 2391 target_pdev_id); 2392 pl_hdr = (msg_word + 1); 2393 dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc, 2394 pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL, 2395 pdev_id); 2396 } 2397 #else 2398 static void 2399 dp_pktlog_msg_handler(struct htt_soc *soc, 2400 uint32_t *msg_word) 2401 { 2402 } 2403 #endif 2404 2405 #ifdef QCA_SUPPORT_PRIMARY_LINK_MIGRATE 2406 QDF_STATUS 2407 dp_h2t_ptqm_migration_msg_send(struct dp_soc *dp_soc, uint16_t vdev_id, 2408 uint8_t pdev_id, 2409 uint8_t chip_id, uint16_t peer_id, 2410 uint16_t ml_peer_id, uint16_t src_info, 2411 QDF_STATUS status) 2412 { 2413 struct htt_soc *soc 
= dp_soc->htt_handle; 2414 struct dp_htt_htc_pkt *pkt; 2415 uint8_t *htt_logger_bufp; 2416 qdf_nbuf_t msg; 2417 uint32_t *msg_word; 2418 QDF_STATUS ret = QDF_STATUS_SUCCESS; 2419 bool src_info_valid = false; 2420 2421 msg = qdf_nbuf_alloc( 2422 soc->osdev, 2423 HTT_MSG_BUF_SIZE(sizeof(htt_h2t_primary_link_peer_migrate_resp_t)), 2424 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 2425 2426 if (!msg) 2427 return QDF_STATUS_E_NOMEM; 2428 2429 /* 2430 * Set the length of the message. 2431 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 2432 * separately during the below call to qdf_nbuf_push_head. 2433 * The contribution from the HTC header is added separately inside HTC. 2434 */ 2435 if (qdf_nbuf_put_tail(msg, sizeof(htt_h2t_primary_link_peer_migrate_resp_t)) 2436 == NULL) { 2437 dp_htt_err("Failed to expand head for" 2438 "HTT_H2T_MSG_TYPE_PRIMARY_LINK_PEER_MIGRATE_RESP"); 2439 qdf_nbuf_free(msg); 2440 return QDF_STATUS_E_FAILURE; 2441 } 2442 2443 msg_word = (uint32_t *)qdf_nbuf_data(msg); 2444 memset(msg_word, 0, sizeof(htt_h2t_primary_link_peer_migrate_resp_t)); 2445 2446 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 2447 htt_logger_bufp = (uint8_t *)msg_word; 2448 *msg_word = 0; 2449 HTT_H2T_MSG_TYPE_SET(*msg_word, 2450 HTT_H2T_MSG_TYPE_PRIMARY_LINK_PEER_MIGRATE_RESP); 2451 HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_PDEV_ID_SET(*msg_word, pdev_id); 2452 HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_CHIP_ID_SET(*msg_word, chip_id); 2453 HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_VDEV_ID_SET(*msg_word, vdev_id); 2454 2455 /* word 1 */ 2456 msg_word++; 2457 *msg_word = 0; 2458 HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_SW_LINK_PEER_ID_SET(*msg_word, 2459 peer_id); 2460 HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_ML_PEER_ID_SET(*msg_word, 2461 ml_peer_id); 2462 2463 /* word 1 */ 2464 msg_word++; 2465 *msg_word = 0; 2466 2467 if (src_info != 0) 2468 src_info_valid = true; 2469 2470 HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_SRC_INFO_VALID_SET(*msg_word, 2471 src_info_valid); 2472 
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_SRC_INFO_SET(*msg_word,
						       src_info);
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_STATUS_SET(*msg_word,
						     status);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL;

	/* macro to set packet parameters for TX */
	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	ret = DP_HTT_SEND_HTC_PKT(
		soc, pkt,
		HTT_H2T_MSG_TYPE_PRIMARY_LINK_PEER_MIGRATE_RESP,
		htt_logger_bufp);

	/* on send failure both the nbuf and the HTC pkt must be released */
	if (ret != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return ret;
}
#endif

#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
 * @soc: htt soc handle
 * @msg_word: buffer containing stats
 *
 * Return: void
 */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
	uint8_t pdev_id;
	uint8_t vdev_id;
	uint8_t target_pdev_id;
	uint16_t payload_size;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	uint8_t *tlv_buf;
	uint32_t *tlv_buf_temp;
	uint32_t *tag_buf;
	htt_tlv_tag_t tlv_type;
	uint16_t tlv_length;
	uint64_t pkt_count = 0;
	uint64_t byte_count = 0;
	uint64_t soc_drop_cnt = 0;
	struct cdp_pkt_info tx_comp = { 0 };
	struct cdp_pkt_info tx_failed = { 0 };

	target_pdev_id =
		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT)
		return;

	pdev = dpsoc->pdev_list[pdev_id];
	if (!pdev) {
		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
		return;
	}

	payload_size =
		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);

	qdf_trace_hex_dump(QDF_MODULE_ID_DP_HTT, QDF_TRACE_LEVEL_INFO,
			   (void *)msg_word, payload_size + 16);

	/* Adjust msg_word to point to the first TLV in buffer */
	msg_word = msg_word + 4;

	/* Parse the received buffer till payload size reaches 0 */
	while (payload_size > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_buf_temp = msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);

		/* Add header size to tlv length*/
		tlv_length += 4;

		switch (tlv_type) {
		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
			break;
		}
		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
			vdev_id = (uint8_t)(*tag_buf);
			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
						     DP_MOD_ID_HTT);

			if (!vdev)
				goto invalid_vdev;

			/* Extract received packet count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);

			/* Extract received packet byte count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);

			/* Extract tx success packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num = pkt_count;

			/* Extract tx success packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes = byte_count;

			/* Extract tx retry packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num = pkt_count;

			/* Extract tx retry packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes = byte_count;

			/* Extract tx drop packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx drop packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tx age-out packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx age-out packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tqm bypass packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;

			/* Extract tx bypass packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;

			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);

			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);

			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
			break;
		}
		default:
			qdf_assert(0);
		}
invalid_vdev:
		/* advance to the next TLV in the payload */
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		payload_size -= tlv_length;
	}
}
#else
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{}
#endif

#ifdef CONFIG_SAWF_DEF_QUEUES
/* Thin wrapper around the SAWF default-queues map report conf handler */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
#else
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
#endif

#ifdef CONFIG_SAWF
/**
 * dp_sawf_msduq_map() - Msdu queue creation information received
 * from target
 * @soc: soc handle.
 * @msg_word: Pointer to htt msg word.
 * @htt_t2h_msg: HTT message nbuf
 *
 * Return: void
 */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
}

/**
 * dp_sawf_dynamic_ast_update() - Dynamic AST index update for SAWF peer
 * from target
 * @soc: soc handle.
 * @msg_word: Pointer to htt msg word.
2728 * @htt_t2h_msg: HTT message nbuf 2729 * 2730 * Return: void 2731 */ 2732 static void dp_sawf_dynamic_ast_update(struct htt_soc *soc, uint32_t *msg_word, 2733 qdf_nbuf_t htt_t2h_msg) 2734 { 2735 dp_htt_sawf_dynamic_ast_update(soc, msg_word, htt_t2h_msg); 2736 } 2737 2738 /** 2739 * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats 2740 * @soc: soc handle. 2741 * @htt_t2h_msg: HTT message nbuf 2742 * 2743 * Return: void 2744 */ 2745 static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc, 2746 qdf_nbuf_t htt_t2h_msg) 2747 { 2748 dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg); 2749 } 2750 #else 2751 static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word, 2752 qdf_nbuf_t htt_t2h_msg) 2753 {} 2754 2755 static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc, 2756 qdf_nbuf_t htt_t2h_msg) 2757 {} 2758 static void dp_sawf_dynamic_ast_update(struct htt_soc *soc, uint32_t *msg_word, 2759 qdf_nbuf_t htt_t2h_msg) 2760 {} 2761 #endif 2762 2763 /** 2764 * time_allow_print() - time allow print 2765 * @htt_bp_handler: backpressure handler 2766 * @ring_id: ring_id (index) 2767 * @th_time: threshold time 2768 * 2769 * Return: 1 for successfully saving timestamp in array 2770 * and 0 for timestamp falling within 2 seconds after last one 2771 */ 2772 static bool time_allow_print(struct bp_handler *htt_bp_handler, 2773 u_int8_t ring_id, u_int32_t th_time) 2774 { 2775 unsigned long tstamp; 2776 struct bp_handler *path = &htt_bp_handler[ring_id]; 2777 2778 tstamp = qdf_get_system_timestamp(); 2779 2780 if (!path) 2781 return 0; //unable to print backpressure messages 2782 2783 if (path->bp_start_tt == -1) { 2784 path->bp_start_tt = tstamp; 2785 path->bp_duration = 0; 2786 path->bp_last_tt = tstamp; 2787 path->bp_counter = 1; 2788 return 1; 2789 } 2790 2791 path->bp_duration = tstamp - path->bp_start_tt; 2792 path->bp_last_tt = tstamp; 2793 path->bp_counter++; 2794 2795 if (path->bp_duration >= th_time) { 2796 path->bp_start_tt = -1; 2797 return 
1; 2798 } 2799 2800 return 0; 2801 } 2802 2803 static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type, 2804 struct dp_pdev *pdev, u_int8_t ring_id, 2805 u_int16_t hp_idx, u_int16_t tp_idx, 2806 u_int32_t bkp_time, 2807 struct bp_handler *htt_bp_handler, 2808 char *ring_stype) 2809 { 2810 dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ", 2811 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype); 2812 dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ", 2813 ring_id, hp_idx, tp_idx, bkp_time); 2814 dp_alert("last_bp_event: %ld, total_bp_duration: %ld, bp_counter: %ld", 2815 htt_bp_handler[ring_id].bp_last_tt, 2816 htt_bp_handler[ring_id].bp_duration, 2817 htt_bp_handler[ring_id].bp_counter); 2818 } 2819 2820 /** 2821 * dp_get_srng_ring_state_from_hal(): Get hal level ring stats 2822 * @soc: DP_SOC handle 2823 * @pdev: DP pdev handle 2824 * @srng: DP_SRNG handle 2825 * @ring_type: srng src/dst ring 2826 * @state: ring state 2827 * @pdev: pdev 2828 * @srng: DP_SRNG handle 2829 * @ring_type: srng src/dst ring 2830 * @state: ring_state 2831 * 2832 * Return: void 2833 */ 2834 static QDF_STATUS 2835 dp_get_srng_ring_state_from_hal(struct dp_soc *soc, 2836 struct dp_pdev *pdev, 2837 struct dp_srng *srng, 2838 enum hal_ring_type ring_type, 2839 struct dp_srng_ring_state *state) 2840 { 2841 struct hal_soc *hal_soc; 2842 2843 if (!soc || !srng || !srng->hal_srng || !state) 2844 return QDF_STATUS_E_INVAL; 2845 2846 hal_soc = (struct hal_soc *)soc->hal_soc; 2847 2848 hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail, 2849 &state->sw_head); 2850 2851 hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head, 2852 &state->hw_tail, ring_type); 2853 2854 state->ring_type = ring_type; 2855 2856 return QDF_STATUS_SUCCESS; 2857 } 2858 2859 #ifdef QCA_MONITOR_PKT_SUPPORT 2860 static void 2861 dp_queue_mon_ring_stats(struct dp_pdev *pdev, 2862 int lmac_id, uint32_t *num_srng, 2863 struct dp_soc_srngs_state *soc_srngs_state) 
2864 { 2865 QDF_STATUS status; 2866 2867 if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) { 2868 status = dp_get_srng_ring_state_from_hal 2869 (pdev->soc, pdev, 2870 &pdev->soc->rxdma_mon_buf_ring[lmac_id], 2871 RXDMA_MONITOR_BUF, 2872 &soc_srngs_state->ring_state[*num_srng]); 2873 2874 if (status == QDF_STATUS_SUCCESS) 2875 qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS); 2876 2877 status = dp_get_srng_ring_state_from_hal 2878 (pdev->soc, pdev, 2879 &pdev->soc->rxdma_mon_dst_ring[lmac_id], 2880 RXDMA_MONITOR_DST, 2881 &soc_srngs_state->ring_state[*num_srng]); 2882 2883 if (status == QDF_STATUS_SUCCESS) 2884 qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS); 2885 2886 status = dp_get_srng_ring_state_from_hal 2887 (pdev->soc, pdev, 2888 &pdev->soc->rxdma_mon_desc_ring[lmac_id], 2889 RXDMA_MONITOR_DESC, 2890 &soc_srngs_state->ring_state[*num_srng]); 2891 2892 if (status == QDF_STATUS_SUCCESS) 2893 qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS); 2894 } 2895 } 2896 #else 2897 static void 2898 dp_queue_mon_ring_stats(struct dp_pdev *pdev, 2899 int lmac_id, uint32_t *num_srng, 2900 struct dp_soc_srngs_state *soc_srngs_state) 2901 { 2902 } 2903 #endif 2904 2905 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG 2906 static inline QDF_STATUS 2907 dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev, 2908 struct dp_srng_ring_state *ring_state) 2909 { 2910 return dp_get_srng_ring_state_from_hal(pdev->soc, pdev, 2911 &pdev->soc->tcl_cmd_credit_ring, 2912 TCL_CMD_CREDIT, ring_state); 2913 } 2914 #else 2915 static inline QDF_STATUS 2916 dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev, 2917 struct dp_srng_ring_state *ring_state) 2918 { 2919 return QDF_STATUS_SUCCESS; 2920 } 2921 #endif 2922 2923 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG 2924 static inline QDF_STATUS 2925 dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev, 2926 struct dp_srng_ring_state *ring_state) 2927 { 2928 return dp_get_srng_ring_state_from_hal(pdev->soc, pdev, 2929 &pdev->soc->tcl_status_ring, 
2930 TCL_STATUS, ring_state); 2931 } 2932 #else 2933 static inline QDF_STATUS 2934 dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev, 2935 struct dp_srng_ring_state *ring_state) 2936 { 2937 return QDF_STATUS_SUCCESS; 2938 } 2939 #endif 2940 2941 /** 2942 * dp_queue_ring_stats() - Print pdev hal level ring stats 2943 * dp_queue_ring_stats(): Print pdev hal level ring stats 2944 * @pdev: DP_pdev handle 2945 * 2946 * Return: void 2947 */ 2948 static void dp_queue_ring_stats(struct dp_pdev *pdev) 2949 { 2950 uint32_t i; 2951 int mac_id; 2952 int lmac_id; 2953 uint32_t j = 0; 2954 struct dp_soc *soc = pdev->soc; 2955 struct dp_soc_srngs_state * soc_srngs_state = NULL; 2956 struct dp_soc_srngs_state *drop_srngs_state = NULL; 2957 QDF_STATUS status; 2958 2959 soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state)); 2960 if (!soc_srngs_state) { 2961 dp_htt_alert("Memory alloc failed for back pressure event"); 2962 return; 2963 } 2964 2965 status = dp_get_srng_ring_state_from_hal 2966 (pdev->soc, pdev, 2967 &pdev->soc->reo_exception_ring, 2968 REO_EXCEPTION, 2969 &soc_srngs_state->ring_state[j]); 2970 2971 if (status == QDF_STATUS_SUCCESS) 2972 qdf_assert_always(++j < DP_MAX_SRNGS); 2973 2974 status = dp_get_srng_ring_state_from_hal 2975 (pdev->soc, pdev, 2976 &pdev->soc->reo_reinject_ring, 2977 REO_REINJECT, 2978 &soc_srngs_state->ring_state[j]); 2979 2980 if (status == QDF_STATUS_SUCCESS) 2981 qdf_assert_always(++j < DP_MAX_SRNGS); 2982 2983 status = dp_get_srng_ring_state_from_hal 2984 (pdev->soc, pdev, 2985 &pdev->soc->reo_cmd_ring, 2986 REO_CMD, 2987 &soc_srngs_state->ring_state[j]); 2988 2989 if (status == QDF_STATUS_SUCCESS) 2990 qdf_assert_always(++j < DP_MAX_SRNGS); 2991 2992 status = dp_get_srng_ring_state_from_hal 2993 (pdev->soc, pdev, 2994 &pdev->soc->reo_status_ring, 2995 REO_STATUS, 2996 &soc_srngs_state->ring_state[j]); 2997 2998 if (status == QDF_STATUS_SUCCESS) 2999 qdf_assert_always(++j < DP_MAX_SRNGS); 3000 3001 status = 
dp_get_srng_ring_state_from_hal 3002 (pdev->soc, pdev, 3003 &pdev->soc->rx_rel_ring, 3004 WBM2SW_RELEASE, 3005 &soc_srngs_state->ring_state[j]); 3006 3007 if (status == QDF_STATUS_SUCCESS) 3008 qdf_assert_always(++j < DP_MAX_SRNGS); 3009 3010 status = dp_get_tcl_cmd_cred_ring_state_from_hal 3011 (pdev, &soc_srngs_state->ring_state[j]); 3012 if (status == QDF_STATUS_SUCCESS) 3013 qdf_assert_always(++j < DP_MAX_SRNGS); 3014 3015 status = dp_get_tcl_status_ring_state_from_hal 3016 (pdev, &soc_srngs_state->ring_state[j]); 3017 if (status == QDF_STATUS_SUCCESS) 3018 qdf_assert_always(++j < DP_MAX_SRNGS); 3019 3020 status = dp_get_srng_ring_state_from_hal 3021 (pdev->soc, pdev, 3022 &pdev->soc->wbm_desc_rel_ring, 3023 SW2WBM_RELEASE, 3024 &soc_srngs_state->ring_state[j]); 3025 3026 if (status == QDF_STATUS_SUCCESS) 3027 qdf_assert_always(++j < DP_MAX_SRNGS); 3028 3029 for (i = 0; i < MAX_REO_DEST_RINGS; i++) { 3030 status = dp_get_srng_ring_state_from_hal 3031 (pdev->soc, pdev, 3032 &pdev->soc->reo_dest_ring[i], 3033 REO_DST, 3034 &soc_srngs_state->ring_state[j]); 3035 3036 if (status == QDF_STATUS_SUCCESS) 3037 qdf_assert_always(++j < DP_MAX_SRNGS); 3038 } 3039 3040 for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) { 3041 status = dp_get_srng_ring_state_from_hal 3042 (pdev->soc, pdev, 3043 &pdev->soc->tcl_data_ring[i], 3044 TCL_DATA, 3045 &soc_srngs_state->ring_state[j]); 3046 3047 if (status == QDF_STATUS_SUCCESS) 3048 qdf_assert_always(++j < DP_MAX_SRNGS); 3049 } 3050 3051 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) { 3052 status = dp_get_srng_ring_state_from_hal 3053 (pdev->soc, pdev, 3054 &pdev->soc->tx_comp_ring[i], 3055 WBM2SW_RELEASE, 3056 &soc_srngs_state->ring_state[j]); 3057 3058 if (status == QDF_STATUS_SUCCESS) 3059 qdf_assert_always(++j < DP_MAX_SRNGS); 3060 } 3061 3062 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id); 3063 status = dp_get_srng_ring_state_from_hal 3064 (pdev->soc, pdev, 3065 &pdev->soc->rx_refill_buf_ring 3066 [lmac_id], 
3067 RXDMA_BUF, 3068 &soc_srngs_state->ring_state[j]); 3069 3070 if (status == QDF_STATUS_SUCCESS) 3071 qdf_assert_always(++j < DP_MAX_SRNGS); 3072 3073 status = dp_get_srng_ring_state_from_hal 3074 (pdev->soc, pdev, 3075 &pdev->rx_refill_buf_ring2, 3076 RXDMA_BUF, 3077 &soc_srngs_state->ring_state[j]); 3078 3079 if (status == QDF_STATUS_SUCCESS) 3080 qdf_assert_always(++j < DP_MAX_SRNGS); 3081 3082 3083 for (i = 0; i < MAX_RX_MAC_RINGS; i++) { 3084 dp_get_srng_ring_state_from_hal 3085 (pdev->soc, pdev, 3086 &pdev->rx_mac_buf_ring[i], 3087 RXDMA_BUF, 3088 &soc_srngs_state->ring_state[j]); 3089 3090 if (status == QDF_STATUS_SUCCESS) 3091 qdf_assert_always(++j < DP_MAX_SRNGS); 3092 } 3093 3094 for (mac_id = 0; 3095 mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev; 3096 mac_id++) { 3097 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 3098 mac_id, pdev->pdev_id); 3099 3100 dp_queue_mon_ring_stats(pdev, lmac_id, &j, 3101 soc_srngs_state); 3102 3103 status = dp_get_srng_ring_state_from_hal 3104 (pdev->soc, pdev, 3105 &pdev->soc->rxdma_mon_status_ring[lmac_id], 3106 RXDMA_MONITOR_STATUS, 3107 &soc_srngs_state->ring_state[j]); 3108 3109 if (status == QDF_STATUS_SUCCESS) 3110 qdf_assert_always(++j < DP_MAX_SRNGS); 3111 } 3112 3113 for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) { 3114 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 3115 i, pdev->pdev_id); 3116 3117 status = dp_get_srng_ring_state_from_hal 3118 (pdev->soc, pdev, 3119 &pdev->soc->rxdma_err_dst_ring 3120 [lmac_id], 3121 RXDMA_DST, 3122 &soc_srngs_state->ring_state[j]); 3123 3124 if (status == QDF_STATUS_SUCCESS) 3125 qdf_assert_always(++j < DP_MAX_SRNGS); 3126 } 3127 soc_srngs_state->max_ring_id = j; 3128 3129 qdf_spin_lock_bh(&pdev->bkp_stats.list_lock); 3130 3131 soc_srngs_state->seq_num = pdev->bkp_stats.seq_num; 3132 3133 if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) { 3134 drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list); 3135 
		/* queue is full: the oldest entry must exist to be dropped */
		qdf_assert_always(drop_srngs_state);
		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
			     list_elem);
		qdf_mem_free(drop_srngs_state);
		pdev->bkp_stats.queue_depth--;
	}

	pdev->bkp_stats.queue_depth++;
	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
			  list_elem);
	pdev->bkp_stats.seq_num++;
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);

	/* printing of the captured ring state is deferred to the work queue */
	qdf_queue_work(0, pdev->bkp_stats.work_queue,
		       &pdev->bkp_stats.work);
}

/**
 * dp_htt_bkp_event_alert() - htt backpressure event alert
 * @msg_word: htt packet context (first word of the T2H payload)
 * @soc: HTT SOC handle
 *
 * Decodes the backpressure indication (ring type/id, head/tail index,
 * backpressure duration), rate-limits the alert print per ring, and
 * queues a snapshot of all SRNG ring states for deferred printing.
 *
 * Return: after attempting to print stats
 */
static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
{
	u_int8_t ring_type;
	u_int8_t pdev_id;
	uint8_t target_pdev_id;
	u_int8_t ring_id;
	u_int16_t hp_idx;
	u_int16_t tp_idx;
	u_int32_t bkp_time;
	u_int32_t th_time;
	enum htt_t2h_msg_type msg_type;
	struct dp_soc *dpsoc;
	struct dp_pdev *pdev;
	struct dp_htt_timestamp *radio_tt;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;


	if (!soc)
		return;

	dpsoc = (struct dp_soc *)soc->dp_soc;
	soc_cfg_ctx = dpsoc->wlan_cfg_ctx;
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
	/* FW reports a target pdev id; convert to the host numbering */
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	/* th_time: minimum interval between alert prints for a ring */
	th_time = wlan_cfg_time_control_bp(soc_cfg_ctx);
	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
	bkp_time =
		HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
	radio_tt = &soc->pdevid_tt[pdev_id];

	switch (ring_type) {
	case HTT_SW_RING_TYPE_UMAC:
		/* suppress the alert if printed too recently for this ring */
		if (!time_allow_print(radio_tt->umac_path, ring_id, th_time))
			return;
		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
				   bkp_time, radio_tt->umac_path,
				   "HTT_SW_RING_TYPE_UMAC");
		break;
	case HTT_SW_RING_TYPE_LMAC:
		if (!time_allow_print(radio_tt->lmac_path, ring_id, th_time))
			return;
		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
				   bkp_time, radio_tt->lmac_path,
				   "HTT_SW_RING_TYPE_LMAC");
		break;
	default:
		dp_alert("Invalid ring type: %d", ring_type);
		break;
	}

	/* NOTE(review): ring stats are queued even for an invalid ring
	 * type; pdev from pdev_list[] is assumed non-NULL here — confirm.
	 */
	dp_queue_ring_stats(pdev);
}

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_offload_ind_handler() - offload msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Forwards the TX data offload indication to WDI subscribers
 * (packet capture) with the host pdev id.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
			     pdev_id);
}
#else
/* Packet capture v2 disabled: indication is silently dropped */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
/*
 * dp_update_mlo_ts_offset() - push the 64-bit MLO timestamp offset
 * (hi:lo word pair from FW) up through the cdp MLO ops.
 */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{
	uint64_t mlo_offset;

	mlo_offset = ((uint64_t)(ts_hi) << 32 | ts_lo);
	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
			((struct cdp_soc_t *)soc, mlo_offset);
}

/*
 * dp_update_mlo_delta_tsf2() - read the per-lmac TSF2 offset from HAL
 * and propagate it through the cdp MLO ops for this pdev.
 */
static inline
void dp_update_mlo_delta_tsf2(struct dp_soc
			      *soc, struct dp_pdev *pdev)
{
	uint64_t delta_tsf2 = 0;

	hal_get_tsf2_offset(soc->hal_soc, pdev->lmac_id, &delta_tsf2);
	soc->cdp_soc.ops->mlo_ops->mlo_update_delta_tsf2
			((struct cdp_soc_t *)soc, pdev->pdev_id, delta_tsf2);
}
#else
/* Single-chip build: MLO timestamp/TSF2 updates are no-ops */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{}
static inline
void dp_update_mlo_delta_tsf2(struct dp_soc *soc, struct dp_pdev *pdev)
{}
#endif
/**
 * dp_htt_mlo_peer_map_handler() - Handle HTT MLO RX peer map indication
 * @soc: HTT SOC handle
 * @msg_word: Pointer to the T2H message payload
 *
 * Parses the MLO peer id, deswizzled MAC address, per-chip AST flow
 * info and the trailing per-link TLVs, then hands the result to
 * dp_rx_mlo_peer_map_handler().
 *
 * Return: None
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
	uint8_t *mlo_peer_mac_addr;
	uint16_t mlo_peer_id;
	uint8_t num_links;
	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
	uint16_t tlv_len = 0;
	int i = 0;

	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
	num_links =
		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
	mlo_peer_mac_addr =
		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
					   &mac_addr_deswizzle_buf[0]);

	/* NOTE(review): flow info for entries [0], [1] and [2] is all read
	 * from the same payload word (msg_word + 3) — confirm against the
	 * HTT_RX_MLO_PEER_MAP message layout in the HTT interface header.
	 */
	mlo_flow_info[0].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[0].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[1].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[1].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[2].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[2].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* fixed part is 8 words; the per-link TLVs follow */
	msg_word = msg_word + 8;
	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
		/* default to "invalid" until a SOC_VDEV_PEER_IDS TLV fills it */
		mlo_link_info[i].peer_chip_id = 0xFF;
		mlo_link_info[i].vdev_id = 0xFF;

		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);

		if (tlv_len == 0) {
			dp_err("TLV Length is 0");
			break;
		}

		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
			mlo_link_info[i].peer_chip_id =
				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
							*(msg_word + 1));
			mlo_link_info[i].vdev_id =
				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
							*(msg_word + 1));
		}
		/* Add header size to tlv length */
		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
		i++;
	}

	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
				   mlo_peer_mac_addr,
				   mlo_flow_info, mlo_link_info);
}

#ifdef QCA_SUPPORT_PRIMARY_LINK_MIGRATE
/**
 * dp_htt_t2h_primary_link_migration() - Handle primary link peer
 * migrate indication
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Extracts the ML peer id, link peer id, vdev/pdev/chip ids and
 * triggers REO queue migration for the new primary link.
 *
 * Return: None
 */
static void dp_htt_t2h_primary_link_migration(struct htt_soc *soc,
					      uint32_t *msg_word)
{
	u_int16_t peer_id;
	u_int16_t ml_peer_id;
	u_int16_t vdev_id;
	u_int8_t pdev_id;
	u_int8_t chip_id;

	vdev_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_VDEV_ID_GET(
			*msg_word);
	pdev_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_PDEV_ID_GET(
			*msg_word);
	chip_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_CHIP_ID_GET(
			*msg_word);
	ml_peer_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_ML_PEER_ID_GET(
			*(msg_word + 1));
	peer_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_SW_LINK_PEER_ID_GET(
			*(msg_word + 1));

	/* NOTE(review): the two string pieces concatenate without a space
	 * ("msgfor") — cosmetic log defect, left untouched here.
	 */
	dp_htt_info("HTT_T2H_MSG_TYPE_PRIMARY_PEER_MIGRATE_IND msg"
		    "for peer id %d vdev id %d", peer_id, vdev_id);

	dp_htt_reo_migration(soc->dp_soc, peer_id, ml_peer_id,
			     vdev_id, pdev_id, chip_id);
}
#else
/* Primary link migration not supported: indication ignored */
static void dp_htt_t2h_primary_link_migration(struct htt_soc *soc,
					      uint32_t *msg_word)
{
}
#endif

/**
 * dp_htt_mlo_peer_unmap_handler() - Handle HTT MLO RX peer unmap
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload containing the MLO peer id
 *
 * Return: None
 */
static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	uint16_t mlo_peer_id;

	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
}

/**
 * dp_rx_mlo_timestamp_ind_handler() - Handle MLO timestamp offset
 * indication
 * @soc: DP SOC handle
 * @msg_word: Pointer to payload
 *
 * Records the FW-provided sync timestamps and MLO offsets into the
 * pdev timestamp block (under htt_stats.lock), notifies WDI
 * subscribers, and propagates the offset/TSF2 delta via MLO ops.
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* timestamp block is read by stats paths; update under the lock */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);

	dp_update_mlo_delta_tsf2(soc, pdev);
}
#else
/* Non-MLO build: these indications are unexpected — assert if seen */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void dp_htt_t2h_primary_link_migration(struct htt_soc *soc,
					      uint32_t *msg_word)
{
}
#endif

/**
 * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
 * @soc: DP Soc handler
 * @peer_id: ID of peer
 * @tid:
TID number
 * @win_sz: BA window size
 *
 * Return: None
 */
static void
dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
			uint8_t tid, uint16_t win_sz)
{
	uint16_t status;
	struct dp_peer *peer;

	/* hold a ref so the peer cannot be deleted while we process */
	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);

	if (!peer) {
		dp_err("Peer not found peer id %d", peer_id);
		return;
	}

	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
					       peer->mac_addr.raw,
					       peer->vdev->vdev_id, 0,
					       tid, 0, win_sz, 0xffff);

	/* complete the ADDBA response TX with the processing status */
	dp_addba_resp_tx_completion_wifi3(
		(struct cdp_soc_t *)soc,
		peer->mac_addr.raw, peer->vdev->vdev_id,
		tid,
		status);

	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

	dp_info("PeerID %d BAW %d TID %d stat %d",
		peer_id, win_sz, tid, status);
}

/**
 * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Caches the FW-advertised link-id bit offset/width (used to extract
 * the link id from PPDU ids) when the indication is marked valid.
 *
 * Return: None
 */
static void
dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
{
	uint8_t msg_type, valid, bits, offset;

	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);

	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);

	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);

	if (valid) {
		soc->link_id_offset = offset;
		soc->link_id_bits = bits;
	}
}

#ifdef IPA_OPT_WIFI_DP
/**
 * dp_ipa_rx_cce_super_rule_setup_done_handler() - Handle CCE super
 * rule setup done indication from FW
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Dispatches reserve/install/release responses for the IPA optional
 * datapath filter rules and notifies IPA of the outcome.
 *
 * Return: None
 */
static void dp_ipa_rx_cce_super_rule_setup_done_handler(struct htt_soc *soc,
							uint32_t *msg_word)
{
	uint8_t pdev_id = 0;
	uint8_t resp_type = 0;
	uint8_t is_rules_enough = 0;
	uint8_t num_rules_avail = 0;
	int filter0_result = 0, filter1_result = 0;
	bool is_success = false;

	pdev_id = HTT_RX_CCE_SUPER_RULE_SETUP_DONE_PDEV_ID_GET(*msg_word);
	resp_type = HTT_RX_CCE_SUPER_RULE_SETUP_DONE_RESPONSE_TYPE_GET(
								*msg_word);
	dp_info("opt_dp:: cce_super_rule_rsp pdev_id: %d resp_type: %d",
		pdev_id, resp_type);

	switch (resp_type) {
	case HTT_RX_CCE_SUPER_RULE_SETUP_REQ_RESPONSE:
	{
		is_rules_enough =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_IS_RULE_ENOUGH_GET(
								*msg_word);
		num_rules_avail =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_AVAIL_RULE_NUM_GET(
								*msg_word);
		if (is_rules_enough == 1) {
			is_success = true;
			soc->stats.reserve_fail_cnt = 0;
		} else {
			is_success = false;
			soc->stats.reserve_fail_cnt++;
			if (soc->stats.reserve_fail_cnt >
					MAX_RESERVE_FAIL_ATTEMPT) {
				/*
				 * IPA will retry only after an hour by default
				 * after MAX_RESERVE_FAIL_ATTEMPT
				 */
				soc->stats.abort_count++;
				soc->stats.reserve_fail_cnt = 0;
				dp_info(
				  "opt_dp: Filter reserve failed max attempts");
			}
			dp_info("opt_dp:: Filter reserve failed. Rules avail %d",
				num_rules_avail);
		}
		dp_ipa_wdi_opt_dpath_notify_flt_rsvd(is_success);
		break;
	}
	case HTT_RX_CCE_SUPER_RULE_INSTALL_RESPONSE:
	{
		filter0_result =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_CFG_RESULT_0_GET(
								*msg_word);
		filter1_result =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_CFG_RESULT_1_GET(
								*msg_word);

		dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(filter0_result,
							   filter1_result);
		break;
	}
	case HTT_RX_CCE_SUPER_RULE_RELEASE_RESPONSE:
	{
		filter0_result =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_CFG_RESULT_0_GET(
								*msg_word);
		filter1_result =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_CFG_RESULT_1_GET(
								*msg_word);

		dp_ipa_wdi_opt_dpath_notify_flt_rlsd(filter0_result,
						     filter1_result);
		break;
	}
	default:
		dp_info("opt_dp:: Wrong Super rule setup response");
	};

	dp_info("opt_dp:: cce super rule resp type: %d, is_rules_enough: %d",
		resp_type, is_rules_enough);
	dp_info("num_rules_avail: %d, rslt0: %d, rslt1: %d",
		num_rules_avail, filter0_result, filter1_result);
}
#else
/* IPA optional datapath disabled: indication ignored */
static void dp_ipa_rx_cce_super_rule_setup_done_handler(struct htt_soc *soc,
							uint32_t *msg_word)
{
}
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_htt_peer_ext_evt() - Handle peer extended event indication
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Parses peer id, vdev id, logical link id (and its valid flag) plus
 * the deswizzled peer MAC address and forwards them to the RX path.
 *
 * Return: None
 */
static inline void
dp_htt_peer_ext_evt(struct htt_soc *soc, uint32_t *msg_word)
{
	struct dp_peer_ext_evt_info info;
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];

	info.peer_id = HTT_RX_PEER_EXTENDED_PEER_ID_GET(*msg_word);
	info.vdev_id = HTT_RX_PEER_EXTENDED_VDEV_ID_GET(*msg_word);
	info.link_id =
		HTT_RX_PEER_EXTENDED_LOGICAL_LINK_ID_GET(*(msg_word + 2));
	info.link_id_valid =
		HTT_RX_PEER_EXTENDED_LOGICAL_LINK_ID_VALID_GET(*(msg_word + 2));

	info.peer_mac_addr =
		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
					   &mac_addr_deswizzle_buf[0]);

	dp_htt_info("peer id %u, vdev id %u, link id %u, valid %u,peer_mac " QDF_MAC_ADDR_FMT,
		    info.peer_id, info.vdev_id, info.link_id,
		    info.link_id_valid, QDF_MAC_ADDR_REF(info.peer_mac_addr));

	dp_rx_peer_ext_evt(soc->dp_soc, &info);
}
#else
/* Feature disabled: peer extended events are ignored */
static inline void
dp_htt_peer_ext_evt(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif

/**
 * dp_htt_t2h_msg_handler() - HTT target-to-host message dispatcher
 * @context: opaque handle registered with HTC (a struct htt_soc *)
 * @pkt: HTC packet whose pPktContext carries the T2H message nbuf
 *
 * Reads the message type from the first payload word and dispatches
 * each T2H indication to its handler. The message buffer is freed here
 * unless a handler takes ownership (clears free_buf).
 *
 * Return: None
 */
void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation is expected during teardown; don't count it */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	htt_event_record(soc->htt_logger_handle,
			 msg_type, (uint8_t *)msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
	{
		dp_htt_bkp_event_alert(msg_word, soc);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		u_int8_t is_wds;
		struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;

		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
		hw_peer_id =
			HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
			(u_int8_t *) (msg_word+1),
			&mac_addr_deswizzle_buf[0]);
		/* NOTE(review): trailing " n" in this format string looks
		 * like a lost "\n" escape — confirm against upstream.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_DEBUG,
			  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
			  peer_id, vdev_id);

		/*
		 * check if peer already exists for this peer_id, if so
		 * this peer map event is in response for a wds peer add
		 * wmi command sent during wds source port learning.
		 * in this case just add the ast entry to the existing
		 * peer ast_list.
		 */
		is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
		dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
				       vdev_id, peer_mac_addr, 0,
				       is_wds);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	{
		u_int16_t peer_id;
		u_int8_t vdev_id;
		u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);

		dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
					 vdev_id, mac_addr, 0,
					 DP_PEER_WDS_COUNT_INVALID);
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND:
	{
		u_int16_t peer_id;
		enum cdp_sec_type sec_type;
		int is_unicast;

		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
		/* point to the first part of the Michael key */
		msg_word++;
		dp_rx_sec_ind_handler(
			soc->dp_soc, peer_id, sec_type, is_unicast,
			msg_word, msg_word + 2);
		break;
	}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
	{
		/* handler may keep the nbuf; it tells us via free_buf */
		free_buf =
			dp_monitor_ppdu_stats_ind_handler(soc,
							  msg_word,
							  htt_t2h_msg);
		break;
	}

	case HTT_T2H_MSG_TYPE_PKTLOG:
	{
		dp_pktlog_msg_handler(soc, msg_word);
		break;
	}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
	{
		/*
		 * HTC maintains runtime pm count for H2T messages that
		 * have a response msg from FW. This count ensures that
		 * in the case FW does not sent out the response or host
		 * did not process this indication runtime_put happens
		 * properly in the cleanup path.
		 */
		if (htc_dec_return_htt_runtime_cnt(soc->htc_soc) >= 0)
			htc_pm_runtime_put(soc->htc_soc);
		else
			soc->stats.htt_ver_req_put_skip++;
		soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
		soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			  "target uses HTT version %d.%d; host uses %d.%d",
			  soc->tgt_ver.major, soc->tgt_ver.minor,
			  HTT_CURRENT_VERSION_MAJOR,
			  HTT_CURRENT_VERSION_MINOR);
		if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_WARN,
				  "*** Incompatible host/target HTT versions!");
		}
		/* abort if the target is incompatible with the host */
		qdf_assert(soc->tgt_ver.major ==
			   HTT_CURRENT_VERSION_MAJOR);
		if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO_LOW,
				  "*** Warning: host/target HTT versions"
				  " are different, though compatible!");
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint16_t win_sz;

		/*
		 * Update REO Queue Desc with new values
		 */
		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);

		/*
		 * Window size needs to be incremented by 1
		 * since fw needs to represent a value of 256
		 * using just 8 bits
		 */
		dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
					tid, win_sz + 1);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint16_t win_sz;

		peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
		tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);

		/* extended variant carries the window size in word 1 */
		msg_word++;
		win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);

		dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
					tid, win_sz);
		break;
	}
	case HTT_T2H_PPDU_ID_FMT_IND:
	{
		dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
	{
		dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		bool is_wds;
		u_int16_t ast_hash;
		struct dp_ast_flow_override_info ast_flow_info = {0};

		peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
		hw_peer_id =
			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
		vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
		peer_mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
		is_wds =
			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
		ast_hash =
			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
		/*
		 * Update 4 ast_index per peer, ast valid mask
		 * and TID flow valid mask.
		 * AST valid mask is 3 bit field corresponds to
		 * ast_index[3:1]. ast_index 0 is always valid.
		 */
		ast_flow_info.ast_valid_mask =
			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
		ast_flow_info.ast_idx[0] = hw_peer_id;
		ast_flow_info.ast_flow_mask[0] =
			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
		ast_flow_info.ast_idx[1] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
		ast_flow_info.ast_flow_mask[1] =
			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
		ast_flow_info.ast_idx[2] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
		ast_flow_info.ast_flow_mask[2] =
			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
		ast_flow_info.ast_idx[3] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
		ast_flow_info.ast_flow_mask[3] =
			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
		/*
		 * TID valid mask is applicable only
		 * for HI and LOW priority flows.
		 * tid_valid_mas is 8 bit field corresponds
		 * to TID[7:0]
		 */
		ast_flow_info.tid_valid_low_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
		ast_flow_info.tid_valid_hi_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));

		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_DEBUG,
			  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
			  peer_id, vdev_id);

		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
			  ast_flow_info.ast_idx[0],
			  ast_flow_info.ast_idx[1],
			  ast_flow_info.ast_idx[2],
			  ast_flow_info.ast_idx[3]);

		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
				       hw_peer_id, vdev_id,
				       peer_mac_addr, ast_hash,
				       is_wds);

		/*
		 * Update ast indexes for flow override support
		 * Applicable only for non wds peers
		 */
		if (!soc->dp_soc->ast_offload_support)
			dp_peer_ast_index_flow_queue_map_create(
					soc->dp_soc, is_wds,
					peer_id, peer_mac_addr,
					&ast_flow_info);

		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *mac_addr;
		u_int16_t peer_id;
		u_int8_t vdev_id;
		u_int8_t is_wds;
		u_int32_t free_wds_count;

		peer_id =
			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
		mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
		is_wds =
			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
		free_wds_count =
			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));

		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
			  peer_id, vdev_id);

		dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
					 vdev_id, mac_addr,
					 is_wds, free_wds_count);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint8_t win_sz;
		QDF_STATUS status;

		peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
		tid = HTT_RX_DELBA_TID_GET(*msg_word);
		win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);

		status = dp_rx_delba_ind_handler(
			soc->dp_soc,
			peer_id, tid, win_sz);

		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
			  peer_id, win_sz, tid, status);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint16_t win_sz;
		QDF_STATUS status;

		peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
		tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);

		/* extended variant carries the window size in word 1 */
		msg_word++;
		win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);

		status = dp_rx_delba_ind_handler(soc->dp_soc,
						 peer_id, tid,
						 win_sz);

		dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
			peer_id, win_sz, tid, status);
		break;
	}
	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
	{
		uint16_t num_entries;
		uint32_t cmem_ba_lo;
		uint32_t cmem_ba_hi;

		num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
		cmem_ba_lo = *(msg_word + 1);
		cmem_ba_hi = *(msg_word + 2);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
			  num_entries, cmem_ba_lo, cmem_ba_hi);

		dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
					     cmem_ba_lo, cmem_ba_hi);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
	{
		dp_offload_ind_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		uint8_t is_wds;
		u_int16_t ast_hash = 0;

		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
		peer_mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));

		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
			    peer_id, vdev_id);

		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
				       hw_peer_id, vdev_id,
				       peer_mac_addr, ast_hash,
				       is_wds);

		break;
	}
	case HTT_T2H_MSG_TYPE_PRIMARY_LINK_PEER_MIGRATE_IND:
	{
		dp_htt_t2h_primary_link_migration(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
	{
		dp_htt_mlo_peer_map_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
	{
		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
	{
		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
	{
		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
	{
		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
							  htt_t2h_msg);
		break;
	}
	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
	{
		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_AST_OVERRIDE_INDEX_IND:
	{
		dp_sawf_dynamic_ast_update(soc, msg_word, htt_t2h_msg);
		break;
	}
	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
	{
		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_CCE_SUPER_RULE_SETUP_DONE:
	{
		dp_ipa_rx_cce_super_rule_setup_done_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_EXTENDED_EVENT:
	{
		dp_htt_peer_ext_evt(soc, msg_word);
		break;
	}
	default:
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}

/*
 * dp_htt_h2t_full() - EpSendFull callback: never drop queued H2T
 * messages; keep them on the HTC send queue.
 */
enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}

/*
 * dp_htt_hif_t2h_hp_callback() - High-priority HIF pipe receive hook;
 * wraps the nbuf in a synthetic HTC packet and reuses the normal T2H
 * message handler.
 */
QDF_STATUS
dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
{
	QDF_STATUS rc = QDF_STATUS_SUCCESS;
	HTC_PACKET htc_pkt;

	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
	htc_pkt.Status = QDF_STATUS_SUCCESS;
	htc_pkt.pPktContext = (void *)nbuf;
	dp_htt_t2h_msg_handler(context, &htc_pkt);

	return rc;
}

/**
 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
 * @soc: HTT SOC handle
 *
 * Return:
QDF_STATUS 4182 */ 4183 static QDF_STATUS 4184 htt_htc_soc_attach(struct htt_soc *soc) 4185 { 4186 struct htc_service_connect_req connect; 4187 struct htc_service_connect_resp response; 4188 QDF_STATUS status; 4189 struct dp_soc *dpsoc = soc->dp_soc; 4190 4191 qdf_mem_zero(&connect, sizeof(connect)); 4192 qdf_mem_zero(&response, sizeof(response)); 4193 4194 connect.pMetaData = NULL; 4195 connect.MetaDataLength = 0; 4196 connect.EpCallbacks.pContext = soc; 4197 connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete; 4198 connect.EpCallbacks.EpTxCompleteMultiple = NULL; 4199 connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler; 4200 4201 /* rx buffers currently are provided by HIF, not by EpRecvRefill */ 4202 connect.EpCallbacks.EpRecvRefill = NULL; 4203 4204 /* N/A, fill is done by HIF */ 4205 connect.EpCallbacks.RecvRefillWaterMark = 1; 4206 4207 connect.EpCallbacks.EpSendFull = dp_htt_h2t_full; 4208 /* 4209 * Specify how deep to let a queue get before htc_send_pkt will 4210 * call the EpSendFull function due to excessive send queue depth. 
4211 */ 4212 connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH; 4213 4214 /* disable flow control for HTT data message service */ 4215 connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; 4216 4217 /* connect to control service */ 4218 connect.service_id = HTT_DATA_MSG_SVC; 4219 4220 status = htc_connect_service(soc->htc_soc, &connect, &response); 4221 4222 if (status != QDF_STATUS_SUCCESS) 4223 return status; 4224 4225 soc->htc_endpoint = response.Endpoint; 4226 4227 hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint); 4228 4229 htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc); 4230 dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc, 4231 dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE); 4232 4233 return QDF_STATUS_SUCCESS; /* success */ 4234 } 4235 4236 void * 4237 htt_soc_initialize(struct htt_soc *htt_soc, 4238 struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 4239 HTC_HANDLE htc_soc, 4240 hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev) 4241 { 4242 struct htt_soc *soc = (struct htt_soc *)htt_soc; 4243 4244 soc->osdev = osdev; 4245 soc->ctrl_psoc = ctrl_psoc; 4246 soc->htc_soc = htc_soc; 4247 soc->hal_soc = hal_soc_hdl; 4248 4249 if (htt_htc_soc_attach(soc)) 4250 goto fail2; 4251 4252 return soc; 4253 4254 fail2: 4255 return NULL; 4256 } 4257 4258 void htt_soc_htc_dealloc(struct htt_soc *htt_handle) 4259 { 4260 htt_interface_logging_deinit(htt_handle->htt_logger_handle); 4261 htt_htc_misc_pkt_pool_free(htt_handle); 4262 htt_htc_pkt_pool_free(htt_handle); 4263 } 4264 4265 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc) 4266 { 4267 int i; 4268 4269 soc->htt_htc_pkt_freelist = NULL; 4270 /* pre-allocate some HTC_PACKET objects */ 4271 for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) { 4272 struct dp_htt_htc_pkt_union *pkt; 4273 pkt = qdf_mem_malloc(sizeof(*pkt)); 4274 if (!pkt) 4275 return QDF_STATUS_E_NOMEM; 4276 4277 htt_htc_pkt_free(soc, &pkt->u.pkt); 4278 } 4279 return QDF_STATUS_SUCCESS; 4280 } 4281 
/**
 * htt_soc_detach() - free per-pdev trace paths and the HTT SOC object
 * @htt_hdl: HTT SOC handle to destroy
 */
void htt_soc_detach(struct htt_soc *htt_hdl)
{
	int i;
	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		qdf_mem_free(htt_handle->pdevid_tt[i].umac_path);
		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_path);
	}

	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
	qdf_mem_free(htt_handle);

}

/**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @config_param_0: extra configuration parameters
 * @config_param_1: extra configuration parameters
 * @config_param_2: extra configuration parameters
 * @config_param_3: extra configuration parameters
 * @cookie_val: cookie value
 * @cookie_msb: msb of debug status cookie
 * @mac_id: mac number
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie_val, int cookie_msb,
		uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask = 0;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);

	pdev_mask = 1 << target_pdev_id;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to expand head for HTT_EXT_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);

	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);

	/* word 5
	 * NOTE(review): word 5 is skipped without being zeroed, so it
	 * carries whatever was in the nbuf data area -- confirm the FW
	 * ignores this word for EXT_STATS_REQ.
	 */
	msg_word++;

	/* word 6 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	/* Currently Using last 2 bits for pdev_id
	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
	 */
	cookie_msb = (cookie_msb | pdev->pdev_id);
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for FW response msg not guaranteed */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
#define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
#define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
#define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32

/**
 * dp_h2t_hw_vdev_stats_config_send() - enable/disable/reset HW-offloaded
 *                                      per-vdev txrx stats in FW
 * @dpsoc: DP SOC handle
 * @pdev_id: host pdev id, or INVALID_PDEV_ID for target pdev 0
 * @enable: enable periodic stats reporting
 * @reset: request a stats reset
 * @reset_bitmask: 64-bit mask of vdevs whose stats are to be reset
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
			   "HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer",
			   dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* interval is programmed in units of 8ms (duration >> 3) */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: lower 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: upper 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
#else
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
 * HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @tuple_mask: tuple configuration to report 3 tuple hash value in either
 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
 * @mac_id: mac id
 *
 * tuple_mask[1:0]:
 * 00 - Do not report 3 tuple hash value
 * 10 - Report 3 tuple hash value in toeplitz_2_or_4
 * 01 - Report 3 tuple hash value in flow_id_toeplitz
 * 11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
				     uint32_t tuple_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);

	/*
	 * Set the length of the message.
4595 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 4596 * separately during the below call to qdf_nbuf_push_head. 4597 * The contribution from the HTC header is added separately inside HTC. 4598 */ 4599 if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) { 4600 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 4601 "Failed to expand head for HTT_3TUPLE_CONFIG"); 4602 qdf_nbuf_free(msg); 4603 return QDF_STATUS_E_FAILURE; 4604 } 4605 4606 dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------", 4607 pdev->soc, tuple_mask, target_pdev_id); 4608 4609 msg_word = (uint32_t *)qdf_nbuf_data(msg); 4610 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 4611 htt_logger_bufp = (uint8_t *)msg_word; 4612 4613 *msg_word = 0; 4614 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG); 4615 HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id); 4616 4617 msg_word++; 4618 *msg_word = 0; 4619 HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask); 4620 HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask); 4621 4622 pkt = htt_htc_pkt_alloc(soc); 4623 if (!pkt) { 4624 qdf_nbuf_free(msg); 4625 return QDF_STATUS_E_NOMEM; 4626 } 4627 4628 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 4629 4630 SET_HTC_PACKET_INFO_TX( 4631 &pkt->htc_pkt, 4632 dp_htt_h2t_send_complete_free_netbuf, 4633 qdf_nbuf_data(msg), 4634 qdf_nbuf_len(msg), 4635 soc->htc_endpoint, 4636 /* tag for no FW response msg */ 4637 HTC_TX_PACKET_TAG_RUNTIME_PUT); 4638 4639 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 4640 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG, 4641 htt_logger_bufp); 4642 4643 return QDF_STATUS_SUCCESS; 4644 } 4645 4646 /* This macro will revert once proper HTT header will define for 4647 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file 4648 * */ 4649 #if defined(WDI_EVENT_ENABLE) 4650 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 4651 uint32_t 
		stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
			   , pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
			   , pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
					       stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
#endif

/**
 * dp_peer_update_inactive_time() - latch peer inactive time from FW
 *                                  stats TLVs
 * @pdev: DP PDEV handle
 * @tag_type: HTT stats TLV tag of @tag_buf
 * @tag_buf: TLV payload
 *
 * PEER_DETAILS stashes the sw_peer_id on the pdev; the subsequent
 * PEER_STATS_CMN TLV for that peer updates tx.inactive_time and signals
 * the fw_peer_stats_event waiter (skipped for the bss peer).
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf)
{
	struct dp_peer *peer = NULL;
	switch (tag_type) {
	case HTT_STATS_PEER_DETAILS_TAG:
	{
		htt_peer_details_tlv *dp_stats_buf =
			(htt_peer_details_tlv *)tag_buf;

		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
	}
	break;
	case HTT_STATS_PEER_STATS_CMN_TAG:
	{
		htt_peer_stats_cmn_tlv *dp_stats_buf =
			(htt_peer_stats_cmn_tlv *)tag_buf;

		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
					     DP_MOD_ID_HTT);

		if (peer && !peer->bss_peer) {
			peer->stats.tx.inactive_time =
				dp_stats_buf->inactive_time;
			qdf_event_set(&pdev->fw_peer_stats_event);
		}
		if (peer)
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
	}
	break;
	default:
		qdf_err("Invalid tag_type: %u", tag_type);
	}
}

/**
 * dp_htt_rx_flow_fst_setup() - send the RX flow search engine setup
 *                              message (base address, sizes, Toeplitz key)
 * @pdev: DP PDEV handle
 * @fse_setup_info: FST setup parameters
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
	uint8_t *htt_logger_bufp;
	u_int32_t *key;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
4798 */ 4799 if (!qdf_nbuf_put_tail(msg, 4800 sizeof(struct htt_h2t_msg_rx_fse_setup_t))) { 4801 qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg"); 4802 return QDF_STATUS_E_FAILURE; 4803 } 4804 4805 /* fill in the message contents */ 4806 msg_word = (u_int32_t *)qdf_nbuf_data(msg); 4807 4808 memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t)); 4809 /* rewind beyond alignment pad to get to the HTC header reserved area */ 4810 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 4811 htt_logger_bufp = (uint8_t *)msg_word; 4812 4813 *msg_word = 0; 4814 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG); 4815 4816 fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word; 4817 4818 HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id); 4819 4820 msg_word++; 4821 HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries); 4822 HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search); 4823 HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word, 4824 fse_setup_info->ip_da_sa_prefix); 4825 4826 msg_word++; 4827 HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word, 4828 fse_setup_info->base_addr_lo); 4829 msg_word++; 4830 HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word, 4831 fse_setup_info->base_addr_hi); 4832 4833 key = (u_int32_t *)fse_setup_info->hash_key; 4834 fse_setup->toeplitz31_0 = *key++; 4835 fse_setup->toeplitz63_32 = *key++; 4836 fse_setup->toeplitz95_64 = *key++; 4837 fse_setup->toeplitz127_96 = *key++; 4838 fse_setup->toeplitz159_128 = *key++; 4839 fse_setup->toeplitz191_160 = *key++; 4840 fse_setup->toeplitz223_192 = *key++; 4841 fse_setup->toeplitz255_224 = *key++; 4842 fse_setup->toeplitz287_256 = *key++; 4843 fse_setup->toeplitz314_288 = *key; 4844 4845 msg_word++; 4846 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0); 4847 msg_word++; 4848 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32); 4849 msg_word++; 4850 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, 
					fse_setup->toeplitz95_64);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
					  fse_setup->toeplitz314_288);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
			fse_setup_info->pdev_id);
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
				   (void *)fse_setup_info->hash_key,
				   fse_setup_info->hash_key_len);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

/**
 * dp_htt_rx_flow_fse_operation() - send an RX FSE cache operation to FW
 * @pdev: DP PDEV handle
 * @fse_op_info: operation (invalidate entry/full, enable, disable) and,
 *               for per-entry invalidation, the flow 5-tuple
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		/* per-entry invalidation: program the flow 5-tuple
		 * (src/dst IP in network byte order, ports, L4 protocol)
		 */
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(
		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

/**
 * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
 * @pdev: DP pdev handle
 * @fisa_config: Fisa config struct
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_fisa_config(struct dp_pdev *pdev,
		      struct dp_htt_rx_fisa_cfg *fisa_config)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
	uint8_t *htt_logger_bufp;
	uint32_t len;
	QDF_STATUS status;

	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
5083 */ 5084 if (!qdf_nbuf_put_tail(msg, 5085 sizeof(struct htt_h2t_msg_type_fisa_config_t))) { 5086 qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg"); 5087 qdf_nbuf_free(msg); 5088 return QDF_STATUS_E_FAILURE; 5089 } 5090 5091 /* fill in the message contents */ 5092 msg_word = (u_int32_t *)qdf_nbuf_data(msg); 5093 5094 memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t)); 5095 /* rewind beyond alignment pad to get to the HTC header reserved area */ 5096 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 5097 htt_logger_bufp = (uint8_t *)msg_word; 5098 5099 *msg_word = 0; 5100 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG); 5101 5102 htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word; 5103 5104 HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id); 5105 5106 msg_word++; 5107 HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1); 5108 HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf); 5109 5110 msg_word++; 5111 htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout; 5112 5113 pkt = htt_htc_pkt_alloc(soc); 5114 if (!pkt) { 5115 qdf_err("Fail to allocate dp_htt_htc_pkt buffer"); 5116 qdf_assert(0); 5117 qdf_nbuf_free(msg); 5118 return QDF_STATUS_E_RESOURCES; /* failure */ 5119 } 5120 5121 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 5122 5123 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 5124 dp_htt_h2t_send_complete_free_netbuf, 5125 qdf_nbuf_data(msg), 5126 qdf_nbuf_len(msg), 5127 soc->htc_endpoint, 5128 /* tag for no FW response msg */ 5129 HTC_TX_PACKET_TAG_RUNTIME_PUT); 5130 5131 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 5132 5133 status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG, 5134 htt_logger_bufp); 5135 5136 if (status == QDF_STATUS_SUCCESS) { 5137 dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u", 5138 fisa_config->pdev_id); 5139 } else { 5140 qdf_nbuf_free(msg); 5141 htt_htc_pkt_free(soc, pkt); 5142 } 5143 5144 
	return status;
}

#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
 * @soc: Data path SoC handle
 * @cfg: RxDMA and RxOLE PPE config
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
		sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0,
	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	/* word 0: message type + all PPE override knobs */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
	HTT_PPE_CFG_REO_DEST_IND_SET(
			*msg_word, cfg->reo_destination_indication);
	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
			*msg_word, cfg->multi_buffer_msdu_override_en);
	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
			*msg_word, cfg->intra_bss_override);
	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_raw_override);
	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_nwifi_override);
	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
			*msg_word, cfg->ip_frag_override);

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
	return status;
}
#endif /* WLAN_SUPPORT_PPEDS */

/**
 * dp_bk_pressure_stats_handler(): worker function to print back pressure
 * stats
 *
 * @context : argument to work function
 */
static void dp_bk_pressure_stats_handler(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_soc_srngs_state *soc_srngs_state = NULL;
	const char *ring_name;
	int i;
	struct dp_srng_ring_state *ring_state;
	bool empty_flag;

	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);

	/* Extract only first entry for printing in one work event */
	if (pdev->bkp_stats.queue_depth &&
	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
			     list_elem);
		pdev->bkp_stats.queue_depth--;
	}

	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);

	if (soc_srngs_state) {
		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
			       soc_srngs_state->seq_num);
		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
			ring_state = &soc_srngs_state->ring_state[i];
			ring_name = dp_srng_get_str_from_hal_ring_type
						(ring_state->ring_type);
			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
				       ring_name,
				       ring_state->sw_head,
				       ring_state->sw_tail);

			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
				       ring_name,
				       ring_state->hw_head,
				       ring_state->hw_tail);
		}

		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
			       soc_srngs_state->seq_num);
qdf_mem_free(soc_srngs_state); 5303 } 5304 dp_print_napi_stats(pdev->soc); 5305 5306 /* Schedule work again if queue is not empty */ 5307 if (!empty_flag) 5308 qdf_queue_work(0, pdev->bkp_stats.work_queue, 5309 &pdev->bkp_stats.work); 5310 } 5311 5312 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev) 5313 { 5314 struct dp_soc_srngs_state *ring_state, *ring_state_next; 5315 5316 if (!pdev->bkp_stats.work_queue) 5317 return; 5318 5319 qdf_flush_workqueue(0, pdev->bkp_stats.work_queue); 5320 qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue); 5321 qdf_flush_work(&pdev->bkp_stats.work); 5322 qdf_disable_work(&pdev->bkp_stats.work); 5323 qdf_spin_lock_bh(&pdev->bkp_stats.list_lock); 5324 TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list, 5325 list_elem, ring_state_next) { 5326 TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state, 5327 list_elem); 5328 qdf_mem_free(ring_state); 5329 } 5330 qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock); 5331 qdf_spinlock_destroy(&pdev->bkp_stats.list_lock); 5332 } 5333 5334 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev) 5335 { 5336 TAILQ_INIT(&pdev->bkp_stats.list); 5337 pdev->bkp_stats.seq_num = 0; 5338 pdev->bkp_stats.queue_depth = 0; 5339 5340 qdf_create_work(0, &pdev->bkp_stats.work, 5341 dp_bk_pressure_stats_handler, pdev); 5342 5343 pdev->bkp_stats.work_queue = 5344 qdf_alloc_unbound_workqueue("dp_bkp_work_queue"); 5345 if (!pdev->bkp_stats.work_queue) 5346 goto fail; 5347 5348 qdf_spinlock_create(&pdev->bkp_stats.list_lock); 5349 return QDF_STATUS_SUCCESS; 5350 5351 fail: 5352 dp_htt_alert("BKP stats attach failed"); 5353 qdf_flush_work(&pdev->bkp_stats.work); 5354 qdf_disable_work(&pdev->bkp_stats.work); 5355 return QDF_STATUS_E_FAILURE; 5356 } 5357 5358 #ifdef DP_UMAC_HW_RESET_SUPPORT 5359 QDF_STATUS dp_htt_umac_reset_send_setup_cmd( 5360 struct dp_soc *soc, 5361 const struct dp_htt_umac_reset_setup_cmd_params *setup_params) 5362 { 5363 struct htt_soc *htt_handle = soc->htt_handle; 5364 uint32_t len; 5365 qdf_nbuf_t 
msg; 5366 u_int32_t *msg_word; 5367 QDF_STATUS status; 5368 uint8_t *htt_logger_bufp; 5369 struct dp_htt_htc_pkt *pkt; 5370 5371 len = HTT_MSG_BUF_SIZE( 5372 HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES); 5373 5374 msg = qdf_nbuf_alloc(soc->osdev, 5375 len, 5376 /* reserve room for the HTC header */ 5377 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 5378 4, 5379 TRUE); 5380 if (!msg) 5381 return QDF_STATUS_E_NOMEM; 5382 5383 /* 5384 * Set the length of the message. 5385 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 5386 * separately during the below call to qdf_nbuf_push_head. 5387 * The contribution from the HTC header is added separately inside HTC. 5388 */ 5389 if (!qdf_nbuf_put_tail( 5390 msg, HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES)) { 5391 dp_htt_err("Failed to expand head"); 5392 qdf_nbuf_free(msg); 5393 return QDF_STATUS_E_FAILURE; 5394 } 5395 5396 /* fill in the message contents */ 5397 msg_word = (uint32_t *)qdf_nbuf_data(msg); 5398 5399 /* Rewind beyond alignment pad to get to the HTC header reserved area */ 5400 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 5401 htt_logger_bufp = (uint8_t *)msg_word; 5402 5403 qdf_mem_zero(msg_word, 5404 HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES); 5405 5406 HTT_H2T_MSG_TYPE_SET( 5407 *msg_word, 5408 HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP); 5409 HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_T2H_MSG_METHOD_SET( 5410 *msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling); 5411 HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_H2T_MSG_METHOD_SET( 5412 *msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling); 5413 5414 msg_word++; 5415 *msg_word = setup_params->msi_data; 5416 5417 msg_word++; 5418 *msg_word = sizeof(htt_umac_hang_recovery_msg_shmem_t); 5419 5420 msg_word++; 5421 *msg_word = setup_params->shmem_addr_low; 5422 5423 msg_word++; 5424 *msg_word = setup_params->shmem_addr_high; 5425 5426 pkt = htt_htc_pkt_alloc(htt_handle); 5427 if (!pkt) 
{ 5428 qdf_err("Fail to allocate dp_htt_htc_pkt buffer"); 5429 qdf_assert(0); 5430 qdf_nbuf_free(msg); 5431 return QDF_STATUS_E_NOMEM; 5432 } 5433 5434 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 5435 5436 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 5437 dp_htt_h2t_send_complete_free_netbuf, 5438 qdf_nbuf_data(msg), 5439 qdf_nbuf_len(msg), 5440 htt_handle->htc_endpoint, 5441 /* tag for no FW response msg */ 5442 HTC_TX_PACKET_TAG_RUNTIME_PUT); 5443 5444 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 5445 5446 status = DP_HTT_SEND_HTC_PKT( 5447 htt_handle, pkt, 5448 HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP, 5449 htt_logger_bufp); 5450 5451 if (QDF_IS_STATUS_ERROR(status)) { 5452 qdf_nbuf_free(msg); 5453 htt_htc_pkt_free(htt_handle, pkt); 5454 return status; 5455 } 5456 5457 dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP sent"); 5458 return status; 5459 } 5460 5461 QDF_STATUS dp_htt_umac_reset_send_start_pre_reset_cmd( 5462 struct dp_soc *soc, bool is_initiator, bool is_umac_hang) 5463 { 5464 struct htt_soc *htt_handle = soc->htt_handle; 5465 uint32_t len; 5466 qdf_nbuf_t msg; 5467 u_int32_t *msg_word; 5468 QDF_STATUS status; 5469 uint8_t *htt_logger_bufp; 5470 struct dp_htt_htc_pkt *pkt; 5471 5472 len = HTT_MSG_BUF_SIZE( 5473 HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_BYTES); 5474 5475 msg = qdf_nbuf_alloc(soc->osdev, 5476 len, 5477 /* reserve room for the HTC header */ 5478 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 5479 4, 5480 TRUE); 5481 if (!msg) 5482 return QDF_STATUS_E_NOMEM; 5483 5484 /* 5485 * Set the length of the message. 5486 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 5487 * separately during the below call to qdf_nbuf_push_head. 5488 * The contribution from the HTC header is added separately inside HTC. 
5489 */ 5490 if (!qdf_nbuf_put_tail( 5491 msg, HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_BYTES)) { 5492 dp_htt_err("Failed to expand head"); 5493 qdf_nbuf_free(msg); 5494 return QDF_STATUS_E_FAILURE; 5495 } 5496 5497 /* fill in the message contents */ 5498 msg_word = (uint32_t *)qdf_nbuf_data(msg); 5499 5500 /* Rewind beyond alignment pad to get to the HTC header reserved area */ 5501 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 5502 htt_logger_bufp = (uint8_t *)msg_word; 5503 5504 qdf_mem_zero(msg_word, 5505 HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_BYTES); 5506 5507 HTT_H2T_MSG_TYPE_SET( 5508 *msg_word, 5509 HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_SOC_START_PRE_RESET); 5510 5511 HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_IS_INITIATOR_SET( 5512 *msg_word, is_initiator); 5513 5514 HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_IS_UMAC_HANG_SET( 5515 *msg_word, is_umac_hang); 5516 5517 pkt = htt_htc_pkt_alloc(htt_handle); 5518 if (!pkt) { 5519 qdf_err("Fail to allocate dp_htt_htc_pkt buffer"); 5520 qdf_assert(0); 5521 qdf_nbuf_free(msg); 5522 return QDF_STATUS_E_NOMEM; 5523 } 5524 5525 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 5526 5527 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 5528 dp_htt_h2t_send_complete_free_netbuf, 5529 qdf_nbuf_data(msg), 5530 qdf_nbuf_len(msg), 5531 htt_handle->htc_endpoint, 5532 /* tag for no FW response msg */ 5533 HTC_TX_PACKET_TAG_RUNTIME_PUT); 5534 5535 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 5536 5537 status = DP_HTT_SEND_HTC_PKT( 5538 htt_handle, pkt, 5539 HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_SOC_START_PRE_RESET, 5540 htt_logger_bufp); 5541 5542 if (QDF_IS_STATUS_ERROR(status)) { 5543 qdf_nbuf_free(msg); 5544 htt_htc_pkt_free(htt_handle, pkt); 5545 return status; 5546 } 5547 5548 dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_SOC_START_PRE_RESET sent"); 5549 return status; 5550 } 5551 #endif 5552