1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <htt.h> 20 #include <hal_hw_headers.h> 21 #include <hal_api.h> 22 #include "dp_peer.h" 23 #include "dp_types.h" 24 #include "dp_internal.h" 25 #include "dp_rx.h" 26 #include "htt_stats.h" 27 #include "htt_ppdu_stats.h" 28 #include "dp_htt.h" 29 #ifdef WIFI_MONITOR_SUPPORT 30 #include <dp_mon.h> 31 #endif 32 #include "qdf_mem.h" /* qdf_mem_malloc,free */ 33 #include "cdp_txrx_cmn_struct.h" 34 35 #ifdef FEATURE_PERPKT_INFO 36 #include "dp_ratetable.h" 37 #endif 38 #include <qdf_module.h> 39 40 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE 41 42 #define HTT_HTC_PKT_POOL_INIT_SIZE 64 43 44 #define HTT_MSG_BUF_SIZE(msg_bytes) \ 45 ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING) 46 47 #define HTT_PID_BIT_MASK 0x3 48 49 #define DP_EXT_MSG_LENGTH 2048 50 #define HTT_HEADER_LEN 16 51 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16 52 53 #define HTT_SHIFT_UPPER_TIMESTAMP 32 54 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000 55 56 #define HTT_HTC_PKT_STATUS_SUCCESS \ 57 ((pkt->htc_pkt.Status != QDF_STATUS_E_CANCELED) && \ 58 (pkt->htc_pkt.Status != QDF_STATUS_E_RESOURCES)) 59 60 /* 61 * htt_htc_pkt_alloc() - 
Allocate an HTC packet buffer, reusing a freelist entry when possible
 * @soc: HTT SOC handle
 *
 * Pops a recycled entry from soc->htt_htc_pkt_freelist under the HTT TX
 * mutex; only when the freelist is empty is a fresh heap allocation made
 * (done outside the lock).
 *
 * Return: Pointer to htc packet buffer, or NULL on allocation failure
 */
static struct dp_htt_htc_pkt *
htt_htc_pkt_alloc(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt = NULL;

	/* Try to recycle an entry from the freelist first */
	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	if (soc->htt_htc_pkt_freelist) {
		pkt = soc->htt_htc_pkt_freelist;
		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	/* Freelist was empty: allocate a new packet from the heap */
	if (!pkt)
		pkt = qdf_mem_malloc(sizeof(*pkt));

	if (!pkt)
		return NULL;

	/* Reset the HTC magic cookie of the (possibly recycled) packet */
	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);

	return &pkt->u.pkt; /* not actually a dereference */
}

/*
 * htt_htc_pkt_free() - Return an HTC packet buffer to the freelist
 * @soc: HTT SOC handle
 * @pkt: packet to recycle
 *
 * The packet is not released to the heap; it is pushed onto the
 * freelist (under the HTT TX mutex) for reuse by htt_htc_pkt_alloc().
 */
static void
htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
		(struct dp_htt_htc_pkt_union *)pkt;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	/* Clear the cookie so this entry is no longer seen as in-flight */
	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
	u_pkt->u.next = soc->htt_htc_pkt_freelist;
	soc->htt_htc_pkt_freelist = u_pkt;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}

/*
 * htt_htc_pkt_pool_free() - Release every entry of the HTC packet freelist
 * @soc: HTT SOC handle
 *
 * NOTE(review): walks the freelist without taking the HTT TX mutex -
 * presumably only called during teardown when no concurrent users
 * remain; confirm at call sites.
 */
void
htt_htc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	pkt = soc->htt_htc_pkt_freelist;
	while (pkt) {
		next = pkt->u.next;
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_freelist = NULL;
}

#ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST

/* Misc-list tracking is compiled out: adding a packet is a no-op */
static void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
}

#else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no.
of pkts in list to retain; entries beyond this are freed
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list: free every entry past @level */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/*
			 * pkt was just freed; clear it so the prev pointer
			 * below does not end up referencing freed memory,
			 * and terminate the retained portion of the list.
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}

/*
 * htt_htc_misc_pkt_list_add() - Add pkt to misc list
 * @soc: HTT SOC handle
 * @pkt: pkt to be added to list
 *
 * Prepends @pkt to the misclist, then trims the list down to the
 * current HTC tx queue depth plus DP_HTT_HTC_PKT_MISCLIST_SIZE, since
 * no more packets than that can be outstanding.
 */
static void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
		(struct dp_htt_htc_pkt_union *)pkt;
	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
							pkt->htc_pkt.Endpoint)
				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	if (soc->htt_htc_pkt_misclist) {
		u_pkt->u.next = soc->htt_htc_pkt_misclist;
		soc->htt_htc_pkt_misclist = u_pkt;
	} else {
		soc->htt_htc_pkt_misclist = u_pkt;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	/* only ce pipe size + tx_queue_depth could possibly be in use
	 * free older packets in the misclist
	 */
	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
}

#endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/**
 * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
 * @soc: HTT SOC handle
 * @pkt: pkt to be send
 * @cmd: command to be recorded in dp htt logger
 * @buf: Pointer to buffer needs to be recorded for above cmd
 *
 * Return: QDF_STATUS returned by the HTC send
 */
static inline
QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
			       struct dp_htt_htc_pkt *pkt,
			       uint8_t cmd, uint8_t *buf)
{
	QDF_STATUS status;

	/* record the command in the HTT logger before sending */
	htt_command_record(soc->htt_logger_handle, cmd, buf);

	status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
	if (status == QDF_STATUS_SUCCESS && HTT_HTC_PKT_STATUS_SUCCESS)
		/* track the sent pkt (a no-op when the misclist is
		 * compiled out via ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST)
		 */
		htt_htc_misc_pkt_list_add(soc, pkt);
	else
		soc->stats.fail_count++;
	return status;
}

/*
 * htt_htc_misc_pkt_pool_free() - free pkts in misc list
 * @soc: HTT SOC handle
 *
 * Unmaps and frees the network buffer of every misclist entry that
 * still carries the HTC magic cookie, then drops the whole list.
 * NOTE(review): entries whose cookie does not match are skipped and
 * counted, not freed here - confirm they are reclaimed elsewhere.
 */
static void
htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;

	while (pkt) {
		next = pkt->u.next;
		/* skip entries whose cookie was already cleared */
		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
		    HTC_PACKET_MAGIC_COOKIE) {
			pkt = next;
			soc->stats.skip_count++;
			continue;
		}
		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

		soc->stats.htc_pkt_free++;
		dp_htt_info("%pK: Pkt free count %d",
			    soc->dp_soc, soc->stats.htc_pkt_free);

		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
	dp_info("HTC Packets, fail count = %d, skip count = %d",
		soc->stats.fail_count, soc->stats.skip_count);
}

/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr: Target MAC
 * @buffer: Output buffer
 *
 * Return: pointer to the deswizzled MAC address bytes
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone.
	 */
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	/* MAC bytes 4-5 live in the second (also swizzled) u_int32_t word */
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}

/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc: SOC handle (unused)
 * @status: Completion status (unused)
 * @netbuf: HTT buffer to free
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}

#ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context: Opaque context (HTT SOC handle)
 * @htc_pkt: HTC packet
 *
 * Frees the message netbuf and recycles the packet descriptor onto
 * the HTT freelist.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	/* free the htt_htc_pkt / HTC_PACKET object */
	qdf_nbuf_free(netbuf);
	htt_htc_pkt_free(soc, htt_pkt);
}

#else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/*
 * dp_htt_h2t_send_complete() - H2T completion
handler 336 * * @context: Opaque context (HTT SOC handle) 337 * * @htc_pkt: HTC packet 338 * */ 339 static void 340 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) 341 { 342 void (*send_complete_part2)( 343 void *soc, QDF_STATUS status, qdf_nbuf_t msdu); 344 struct htt_soc *soc = (struct htt_soc *) context; 345 struct dp_htt_htc_pkt *htt_pkt; 346 qdf_nbuf_t netbuf; 347 348 send_complete_part2 = htc_pkt->pPktContext; 349 350 htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt); 351 352 /* process (free or keep) the netbuf that held the message */ 353 netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext; 354 /* 355 * adf sendcomplete is required for windows only 356 */ 357 /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */ 358 if (send_complete_part2){ 359 send_complete_part2( 360 htt_pkt->soc_ctxt, htc_pkt->Status, netbuf); 361 } 362 /* free the htt_htc_pkt / HTC_PACKET object */ 363 htt_htc_pkt_free(soc, htt_pkt); 364 } 365 366 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */ 367 368 /* 369 * htt_h2t_ver_req_msg() - Send HTT version request message to target 370 * @htt_soc: HTT SOC handle 371 * 372 * Return: 0 on success; error code on failure 373 */ 374 static int htt_h2t_ver_req_msg(struct htt_soc *soc) 375 { 376 struct dp_htt_htc_pkt *pkt; 377 qdf_nbuf_t msg; 378 uint32_t *msg_word; 379 QDF_STATUS status; 380 381 msg = qdf_nbuf_alloc( 382 soc->osdev, 383 HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES), 384 /* reserve room for the HTC header */ 385 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 386 if (!msg) 387 return QDF_STATUS_E_NOMEM; 388 389 /* 390 * Set the length of the message. 391 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 392 * separately during the below call to qdf_nbuf_push_head. 393 * The contribution from the HTC header is added separately inside HTC. 
394 */ 395 if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) { 396 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 397 "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg", 398 __func__); 399 return QDF_STATUS_E_FAILURE; 400 } 401 402 /* fill in the message contents */ 403 msg_word = (u_int32_t *) qdf_nbuf_data(msg); 404 405 /* rewind beyond alignment pad to get to the HTC header reserved area */ 406 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 407 408 *msg_word = 0; 409 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ); 410 411 pkt = htt_htc_pkt_alloc(soc); 412 if (!pkt) { 413 qdf_nbuf_free(msg); 414 return QDF_STATUS_E_FAILURE; 415 } 416 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 417 418 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 419 dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg), 420 qdf_nbuf_len(msg), soc->htc_endpoint, 421 HTC_TX_PACKET_TAG_RTPM_PUT_RC); 422 423 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 424 status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ, 425 NULL); 426 427 if (status != QDF_STATUS_SUCCESS) { 428 qdf_nbuf_free(msg); 429 htt_htc_pkt_free(soc, pkt); 430 } 431 432 return status; 433 } 434 435 /* 436 * htt_srng_setup() - Send SRNG setup message to target 437 * @htt_soc: HTT SOC handle 438 * @mac_id: MAC Id 439 * @hal_srng: Opaque HAL SRNG pointer 440 * @hal_ring_type: SRNG ring type 441 * 442 * Return: 0 on success; error code on failure 443 */ 444 int htt_srng_setup(struct htt_soc *soc, int mac_id, 445 hal_ring_handle_t hal_ring_hdl, 446 int hal_ring_type) 447 { 448 struct dp_htt_htc_pkt *pkt; 449 qdf_nbuf_t htt_msg; 450 uint32_t *msg_word; 451 struct hal_srng_params srng_params; 452 qdf_dma_addr_t hp_addr, tp_addr; 453 uint32_t ring_entry_size = 454 hal_srng_get_entrysize(soc->hal_soc, hal_ring_type); 455 int htt_ring_type, htt_ring_id; 456 uint8_t *htt_logger_bufp; 457 int target_pdev_id; 458 int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, 
						 mac_id);
	QDF_STATUS status;

	/* Sizes should be set in 4-byte words */
	ring_entry_size = ring_entry_size >> 2;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);

	/* map the HAL ring type/id onto the HTT ring id and direction */
	switch (hal_ring_type) {
	case RXDMA_BUF:
#ifdef QCA_HOST2FW_RXBUF_RING
		/* host2fw rx buffer rings are SW-to-SW from HTT's view */
		if (srng_params.ring_id ==
		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
			htt_ring_type = HTT_SW_TO_SW_RING;
#ifdef IPA_OFFLOAD
		} else if (srng_params.ring_id ==
		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
			htt_ring_type = HTT_SW_TO_SW_RING;
#endif
#else
		/* otherwise the host fills the RXDMA buffer ring directly;
		 * the expected ring id depends on the lmac this pdev maps to
		 */
		if (srng_params.ring_id ==
			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			htt_ring_type = HTT_SW_TO_HW_RING;
#endif
		} else if (srng_params.ring_id ==
#ifdef IPA_OFFLOAD
			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
#else
			(HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
#endif
			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: Ring %d currently not supported",
				  __func__, srng_params.ring_id);
			goto fail1;
		}

		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
			hal_ring_type, srng_params.ring_id, htt_ring_id,
			(uint64_t)hp_addr,
			(uint64_t)tp_addr);
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case
RXDMA_MONITOR_STATUS: 521 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; 522 htt_ring_type = HTT_SW_TO_HW_RING; 523 break; 524 case RXDMA_MONITOR_DST: 525 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; 526 htt_ring_type = HTT_HW_TO_SW_RING; 527 break; 528 case RXDMA_MONITOR_DESC: 529 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 530 htt_ring_type = HTT_SW_TO_HW_RING; 531 break; 532 case RXDMA_DST: 533 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; 534 htt_ring_type = HTT_HW_TO_SW_RING; 535 break; 536 537 default: 538 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 539 "%s: Ring currently not supported", __func__); 540 goto fail1; 541 } 542 543 /* 544 * Set the length of the message. 545 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 546 * separately during the below call to qdf_nbuf_push_head. 547 * The contribution from the HTC header is added separately inside HTC. 548 */ 549 if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) { 550 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 551 "%s: Failed to expand head for SRING_SETUP msg", 552 __func__); 553 return QDF_STATUS_E_FAILURE; 554 } 555 556 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 557 558 /* rewind beyond alignment pad to get to the HTC header reserved area */ 559 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 560 561 /* word 0 */ 562 *msg_word = 0; 563 htt_logger_bufp = (uint8_t *)msg_word; 564 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP); 565 target_pdev_id = 566 dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id); 567 568 if ((htt_ring_type == HTT_SW_TO_HW_RING) || 569 (htt_ring_type == HTT_HW_TO_SW_RING)) 570 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id); 571 else 572 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id); 573 574 dp_info("mac_id %d", mac_id); 575 HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type); 576 /* TODO: Discuss with FW on changing this to unique ID and using 577 * htt_ring_type to send the type of ring 578 */ 579 
HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);

	/* word 1: ring base address, low 32 bits */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
		srng_params.ring_base_paddr & 0xffffffff);

	/* word 2: ring base address, high 32 bits */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)srng_params.ring_base_paddr >> 32);

	/* word 3: entry size (4-byte words), ring size and misc flags */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
		(ring_entry_size * srng_params.num_entries));
	dp_info("entry_size %d", ring_entry_size);
	dp_info("num_entries %d", srng_params.num_entries);
	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
						*msg_word, 1);
	/* propagate the SRNG swap flags to the target */
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));

	/* word 4: head pointer remote base address, low 32 bits */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
		hp_addr & 0xffffffff);

	/* word 5: head pointer remote base address, high 32 bits */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)hp_addr >> 32);

	/* word 6: tail pointer remote base address, low 32 bits */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
		tp_addr & 0xffffffff);

	/* word 7: tail pointer remote base address, high 32 bits */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)tp_addr >> 32);

	/* word 8: MSI address, low 32 bits */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
		srng_params.msi_addr & 0xffffffff);
/* word 9: MSI address, high 32 bits */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
		(uint64_t)(srng_params.msi_addr) >> 32);

	/* word 10: MSI data, converted to little endian */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
		qdf_cpu_to_le32(srng_params.msi_data));

	/* word 11: interrupt batch counter (entries scaled by entry size)
	 * and interrupt timer threshold
	 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
		srng_params.intr_batch_cntr_thres_entries *
		ring_entry_size);
	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
		srng_params.intr_timer_thres_us >> 3);

	/* word 12: low threshold interrupt, only when enabled for this ring */
	msg_word++;
	*msg_word = 0;
	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
		/* TODO: Setting low threshold to 1/8th of ring size - see
		 * if this needs to be configurable
		 */
		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
			srng_params.low_threshold);
	}
	/* "response_required" field should be set if a HTT response message is
	 * required after setting up the ring.
 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt)
		goto fail1;

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
				     htt_logger_bufp);

	/* on send failure the completion callback will not run: clean up */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}

qdf_export_symbol(htt_srng_setup);

#ifdef QCA_SUPPORT_FULL_MON
/**
 * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
 *
 * @htt_soc: HTT Soc handle
 * @pdev_id: Radio id
 * @config: enable/disable configuration
 *
 * Return: Success when HTT message is sent, error on failure
 */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
				 HTT_MSG_BUF_SIZE(
				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
				 /* reserve room for the HTC header */
				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
				 4,
				 TRUE);
	if (!htt_msg)
		return QDF_STATUS_E_FAILURE;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
 */
	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for RX Ring Cfg msg",
			  __func__);
		goto fail1;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0: message type and target pdev (mac) id */
	*msg_word = 0;
	htt_logger_bufp = (uint8_t *)msg_word;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
			*msg_word, DP_SW2HW_MACID(pdev_id));

	msg_word++;
	*msg_word = 0;
	/* word 1: enable/zero-mpdu/non-zero-mpdu flags and release rings */
	if (config == DP_FULL_MON_ENABLE) {
		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
	} else if (config == DP_FULL_MON_DISABLE) {
		/* As per MAC team's suggestion, while disabling full monitor
		 * mode, set the 'en' bit to true in the full monitor mode
		 * register.
775 */ 776 HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true); 777 HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false); 778 HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false); 779 HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2); 780 } 781 782 pkt = htt_htc_pkt_alloc(soc); 783 if (!pkt) { 784 qdf_err("HTC packet allocation failed"); 785 goto fail1; 786 } 787 788 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 789 790 SET_HTC_PACKET_INFO_TX( 791 &pkt->htc_pkt, 792 dp_htt_h2t_send_complete_free_netbuf, 793 qdf_nbuf_data(htt_msg), 794 qdf_nbuf_len(htt_msg), 795 soc->htc_endpoint, 796 HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */ 797 798 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); 799 qdf_debug("config: %d", config); 800 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE, 801 htt_logger_bufp); 802 return QDF_STATUS_SUCCESS; 803 fail1: 804 qdf_nbuf_free(htt_msg); 805 return QDF_STATUS_E_FAILURE; 806 } 807 808 qdf_export_symbol(htt_h2t_full_mon_cfg); 809 #else 810 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc, 811 uint8_t pdev_id, 812 enum dp_full_mon_config config) 813 { 814 return 0; 815 } 816 817 qdf_export_symbol(htt_h2t_full_mon_cfg); 818 #endif 819 820 /* 821 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter 822 * config message to target 823 * @htt_soc: HTT SOC handle 824 * @pdev_id: WIN- PDEV Id, MCL- mac id 825 * @hal_srng: Opaque HAL SRNG pointer 826 * @hal_ring_type: SRNG ring type 827 * @ring_buf_size: SRNG buffer size 828 * @htt_tlv_filter: Rx SRNG TLV and filter setting 829 * Return: 0 on success; error code on failure 830 */ 831 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id, 832 hal_ring_handle_t hal_ring_hdl, 833 int hal_ring_type, int ring_buf_size, 834 struct htt_rx_ring_tlv_filter *htt_tlv_filter) 835 { 836 struct htt_soc *soc = (struct htt_soc *)htt_soc; 837 struct dp_htt_htc_pkt *pkt; 838 qdf_nbuf_t htt_msg; 839 uint32_t *msg_word; 840 
struct hal_srng_params srng_params; 841 uint32_t htt_ring_type, htt_ring_id; 842 uint32_t tlv_filter; 843 uint8_t *htt_logger_bufp; 844 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx; 845 uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx); 846 int target_pdev_id; 847 QDF_STATUS status; 848 849 htt_msg = qdf_nbuf_alloc(soc->osdev, 850 HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ), 851 /* reserve room for the HTC header */ 852 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 853 if (!htt_msg) 854 goto fail0; 855 856 hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params); 857 858 switch (hal_ring_type) { 859 case RXDMA_BUF: 860 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 861 htt_ring_type = HTT_SW_TO_HW_RING; 862 break; 863 case RXDMA_MONITOR_BUF: 864 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; 865 htt_ring_type = HTT_SW_TO_HW_RING; 866 break; 867 case RXDMA_MONITOR_STATUS: 868 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; 869 htt_ring_type = HTT_SW_TO_HW_RING; 870 break; 871 case RXDMA_MONITOR_DST: 872 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; 873 htt_ring_type = HTT_HW_TO_SW_RING; 874 break; 875 case RXDMA_MONITOR_DESC: 876 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 877 htt_ring_type = HTT_SW_TO_HW_RING; 878 break; 879 case RXDMA_DST: 880 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; 881 htt_ring_type = HTT_HW_TO_SW_RING; 882 break; 883 884 default: 885 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 886 "%s: Ring currently not supported", __func__); 887 goto fail1; 888 } 889 890 /* 891 * Set the length of the message. 892 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 893 * separately during the below call to qdf_nbuf_push_head. 894 * The contribution from the HTC header is added separately inside HTC. 
895 */ 896 if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) { 897 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 898 "%s: Failed to expand head for RX Ring Cfg msg", 899 __func__); 900 goto fail1; /* failure */ 901 } 902 903 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 904 905 /* rewind beyond alignment pad to get to the HTC header reserved area */ 906 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 907 908 /* word 0 */ 909 htt_logger_bufp = (uint8_t *)msg_word; 910 *msg_word = 0; 911 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG); 912 913 /* 914 * pdev_id is indexed from 0 whereas mac_id is indexed from 1 915 * SW_TO_SW and SW_TO_HW rings are unaffected by this 916 */ 917 target_pdev_id = 918 dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id); 919 920 if (htt_ring_type == HTT_SW_TO_SW_RING || 921 htt_ring_type == HTT_SW_TO_HW_RING) 922 HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word, 923 target_pdev_id); 924 925 /* TODO: Discuss with FW on changing this to unique ID and using 926 * htt_ring_type to send the type of ring 927 */ 928 HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id); 929 930 HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word, 931 !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); 932 933 HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word, 934 htt_tlv_filter->offset_valid); 935 936 if (mon_drop_th > 0) 937 HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word, 938 1); 939 else 940 HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word, 941 0); 942 943 /* word 1 */ 944 msg_word++; 945 *msg_word = 0; 946 HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word, 947 ring_buf_size); 948 949 /* word 2 */ 950 msg_word++; 951 *msg_word = 0; 952 953 if (htt_tlv_filter->enable_fp) { 954 /* TYPE: MGMT */ 955 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 956 FP, MGMT, 0000, 957 (htt_tlv_filter->fp_mgmt_filter & 958 FILTER_MGMT_ASSOC_REQ) ? 
1 : 0); 959 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 960 FP, MGMT, 0001, 961 (htt_tlv_filter->fp_mgmt_filter & 962 FILTER_MGMT_ASSOC_RES) ? 1 : 0); 963 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 964 FP, MGMT, 0010, 965 (htt_tlv_filter->fp_mgmt_filter & 966 FILTER_MGMT_REASSOC_REQ) ? 1 : 0); 967 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 968 FP, MGMT, 0011, 969 (htt_tlv_filter->fp_mgmt_filter & 970 FILTER_MGMT_REASSOC_RES) ? 1 : 0); 971 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 972 FP, MGMT, 0100, 973 (htt_tlv_filter->fp_mgmt_filter & 974 FILTER_MGMT_PROBE_REQ) ? 1 : 0); 975 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 976 FP, MGMT, 0101, 977 (htt_tlv_filter->fp_mgmt_filter & 978 FILTER_MGMT_PROBE_RES) ? 1 : 0); 979 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 980 FP, MGMT, 0110, 981 (htt_tlv_filter->fp_mgmt_filter & 982 FILTER_MGMT_TIM_ADVT) ? 1 : 0); 983 /* reserved */ 984 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 985 MGMT, 0111, 986 (htt_tlv_filter->fp_mgmt_filter & 987 FILTER_MGMT_RESERVED_7) ? 1 : 0); 988 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 989 FP, MGMT, 1000, 990 (htt_tlv_filter->fp_mgmt_filter & 991 FILTER_MGMT_BEACON) ? 1 : 0); 992 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 993 FP, MGMT, 1001, 994 (htt_tlv_filter->fp_mgmt_filter & 995 FILTER_MGMT_ATIM) ? 1 : 0); 996 } 997 998 if (htt_tlv_filter->enable_md) { 999 /* TYPE: MGMT */ 1000 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1001 MD, MGMT, 0000, 1002 (htt_tlv_filter->md_mgmt_filter & 1003 FILTER_MGMT_ASSOC_REQ) ? 1 : 0); 1004 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1005 MD, MGMT, 0001, 1006 (htt_tlv_filter->md_mgmt_filter & 1007 FILTER_MGMT_ASSOC_RES) ? 1 : 0); 1008 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1009 MD, MGMT, 0010, 1010 (htt_tlv_filter->md_mgmt_filter & 1011 FILTER_MGMT_REASSOC_REQ) ? 
1 : 0); 1012 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1013 MD, MGMT, 0011, 1014 (htt_tlv_filter->md_mgmt_filter & 1015 FILTER_MGMT_REASSOC_RES) ? 1 : 0); 1016 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1017 MD, MGMT, 0100, 1018 (htt_tlv_filter->md_mgmt_filter & 1019 FILTER_MGMT_PROBE_REQ) ? 1 : 0); 1020 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1021 MD, MGMT, 0101, 1022 (htt_tlv_filter->md_mgmt_filter & 1023 FILTER_MGMT_PROBE_RES) ? 1 : 0); 1024 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1025 MD, MGMT, 0110, 1026 (htt_tlv_filter->md_mgmt_filter & 1027 FILTER_MGMT_TIM_ADVT) ? 1 : 0); 1028 /* reserved */ 1029 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 1030 MGMT, 0111, 1031 (htt_tlv_filter->md_mgmt_filter & 1032 FILTER_MGMT_RESERVED_7) ? 1 : 0); 1033 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1034 MD, MGMT, 1000, 1035 (htt_tlv_filter->md_mgmt_filter & 1036 FILTER_MGMT_BEACON) ? 1 : 0); 1037 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1038 MD, MGMT, 1001, 1039 (htt_tlv_filter->md_mgmt_filter & 1040 FILTER_MGMT_ATIM) ? 1 : 0); 1041 } 1042 1043 if (htt_tlv_filter->enable_mo) { 1044 /* TYPE: MGMT */ 1045 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1046 MO, MGMT, 0000, 1047 (htt_tlv_filter->mo_mgmt_filter & 1048 FILTER_MGMT_ASSOC_REQ) ? 1 : 0); 1049 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1050 MO, MGMT, 0001, 1051 (htt_tlv_filter->mo_mgmt_filter & 1052 FILTER_MGMT_ASSOC_RES) ? 1 : 0); 1053 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1054 MO, MGMT, 0010, 1055 (htt_tlv_filter->mo_mgmt_filter & 1056 FILTER_MGMT_REASSOC_REQ) ? 1 : 0); 1057 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1058 MO, MGMT, 0011, 1059 (htt_tlv_filter->mo_mgmt_filter & 1060 FILTER_MGMT_REASSOC_RES) ? 1 : 0); 1061 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1062 MO, MGMT, 0100, 1063 (htt_tlv_filter->mo_mgmt_filter & 1064 FILTER_MGMT_PROBE_REQ) ? 
1 : 0); 1065 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1066 MO, MGMT, 0101, 1067 (htt_tlv_filter->mo_mgmt_filter & 1068 FILTER_MGMT_PROBE_RES) ? 1 : 0); 1069 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1070 MO, MGMT, 0110, 1071 (htt_tlv_filter->mo_mgmt_filter & 1072 FILTER_MGMT_TIM_ADVT) ? 1 : 0); 1073 /* reserved */ 1074 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 1075 MGMT, 0111, 1076 (htt_tlv_filter->mo_mgmt_filter & 1077 FILTER_MGMT_RESERVED_7) ? 1 : 0); 1078 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1079 MO, MGMT, 1000, 1080 (htt_tlv_filter->mo_mgmt_filter & 1081 FILTER_MGMT_BEACON) ? 1 : 0); 1082 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, 1083 MO, MGMT, 1001, 1084 (htt_tlv_filter->mo_mgmt_filter & 1085 FILTER_MGMT_ATIM) ? 1 : 0); 1086 } 1087 1088 /* word 3 */ 1089 msg_word++; 1090 *msg_word = 0; 1091 1092 if (htt_tlv_filter->enable_fp) { 1093 /* TYPE: MGMT */ 1094 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1095 FP, MGMT, 1010, 1096 (htt_tlv_filter->fp_mgmt_filter & 1097 FILTER_MGMT_DISASSOC) ? 1 : 0); 1098 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1099 FP, MGMT, 1011, 1100 (htt_tlv_filter->fp_mgmt_filter & 1101 FILTER_MGMT_AUTH) ? 1 : 0); 1102 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1103 FP, MGMT, 1100, 1104 (htt_tlv_filter->fp_mgmt_filter & 1105 FILTER_MGMT_DEAUTH) ? 1 : 0); 1106 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1107 FP, MGMT, 1101, 1108 (htt_tlv_filter->fp_mgmt_filter & 1109 FILTER_MGMT_ACTION) ? 1 : 0); 1110 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1111 FP, MGMT, 1110, 1112 (htt_tlv_filter->fp_mgmt_filter & 1113 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); 1114 /* reserved*/ 1115 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, 1116 MGMT, 1111, 1117 (htt_tlv_filter->fp_mgmt_filter & 1118 FILTER_MGMT_RESERVED_15) ? 
1 : 0); 1119 } 1120 1121 if (htt_tlv_filter->enable_md) { 1122 /* TYPE: MGMT */ 1123 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1124 MD, MGMT, 1010, 1125 (htt_tlv_filter->md_mgmt_filter & 1126 FILTER_MGMT_DISASSOC) ? 1 : 0); 1127 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1128 MD, MGMT, 1011, 1129 (htt_tlv_filter->md_mgmt_filter & 1130 FILTER_MGMT_AUTH) ? 1 : 0); 1131 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1132 MD, MGMT, 1100, 1133 (htt_tlv_filter->md_mgmt_filter & 1134 FILTER_MGMT_DEAUTH) ? 1 : 0); 1135 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1136 MD, MGMT, 1101, 1137 (htt_tlv_filter->md_mgmt_filter & 1138 FILTER_MGMT_ACTION) ? 1 : 0); 1139 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1140 MD, MGMT, 1110, 1141 (htt_tlv_filter->md_mgmt_filter & 1142 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); 1143 } 1144 1145 if (htt_tlv_filter->enable_mo) { 1146 /* TYPE: MGMT */ 1147 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1148 MO, MGMT, 1010, 1149 (htt_tlv_filter->mo_mgmt_filter & 1150 FILTER_MGMT_DISASSOC) ? 1 : 0); 1151 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1152 MO, MGMT, 1011, 1153 (htt_tlv_filter->mo_mgmt_filter & 1154 FILTER_MGMT_AUTH) ? 1 : 0); 1155 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1156 MO, MGMT, 1100, 1157 (htt_tlv_filter->mo_mgmt_filter & 1158 FILTER_MGMT_DEAUTH) ? 1 : 0); 1159 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1160 MO, MGMT, 1101, 1161 (htt_tlv_filter->mo_mgmt_filter & 1162 FILTER_MGMT_ACTION) ? 1 : 0); 1163 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, 1164 MO, MGMT, 1110, 1165 (htt_tlv_filter->mo_mgmt_filter & 1166 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); 1167 /* reserved*/ 1168 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, 1169 MGMT, 1111, 1170 (htt_tlv_filter->mo_mgmt_filter & 1171 FILTER_MGMT_RESERVED_15) ? 
1 : 0); 1172 } 1173 1174 /* word 4 */ 1175 msg_word++; 1176 *msg_word = 0; 1177 1178 if (htt_tlv_filter->enable_fp) { 1179 /* TYPE: CTRL */ 1180 /* reserved */ 1181 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1182 CTRL, 0000, 1183 (htt_tlv_filter->fp_ctrl_filter & 1184 FILTER_CTRL_RESERVED_1) ? 1 : 0); 1185 /* reserved */ 1186 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1187 CTRL, 0001, 1188 (htt_tlv_filter->fp_ctrl_filter & 1189 FILTER_CTRL_RESERVED_2) ? 1 : 0); 1190 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1191 CTRL, 0010, 1192 (htt_tlv_filter->fp_ctrl_filter & 1193 FILTER_CTRL_TRIGGER) ? 1 : 0); 1194 /* reserved */ 1195 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1196 CTRL, 0011, 1197 (htt_tlv_filter->fp_ctrl_filter & 1198 FILTER_CTRL_RESERVED_4) ? 1 : 0); 1199 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1200 CTRL, 0100, 1201 (htt_tlv_filter->fp_ctrl_filter & 1202 FILTER_CTRL_BF_REP_POLL) ? 1 : 0); 1203 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1204 CTRL, 0101, 1205 (htt_tlv_filter->fp_ctrl_filter & 1206 FILTER_CTRL_VHT_NDP) ? 1 : 0); 1207 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1208 CTRL, 0110, 1209 (htt_tlv_filter->fp_ctrl_filter & 1210 FILTER_CTRL_FRAME_EXT) ? 1 : 0); 1211 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1212 CTRL, 0111, 1213 (htt_tlv_filter->fp_ctrl_filter & 1214 FILTER_CTRL_CTRLWRAP) ? 1 : 0); 1215 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1216 CTRL, 1000, 1217 (htt_tlv_filter->fp_ctrl_filter & 1218 FILTER_CTRL_BA_REQ) ? 1 : 0); 1219 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 1220 CTRL, 1001, 1221 (htt_tlv_filter->fp_ctrl_filter & 1222 FILTER_CTRL_BA) ? 1 : 0); 1223 } 1224 1225 if (htt_tlv_filter->enable_md) { 1226 /* TYPE: CTRL */ 1227 /* reserved */ 1228 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1229 CTRL, 0000, 1230 (htt_tlv_filter->md_ctrl_filter & 1231 FILTER_CTRL_RESERVED_1) ? 
1 : 0); 1232 /* reserved */ 1233 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1234 CTRL, 0001, 1235 (htt_tlv_filter->md_ctrl_filter & 1236 FILTER_CTRL_RESERVED_2) ? 1 : 0); 1237 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1238 CTRL, 0010, 1239 (htt_tlv_filter->md_ctrl_filter & 1240 FILTER_CTRL_TRIGGER) ? 1 : 0); 1241 /* reserved */ 1242 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1243 CTRL, 0011, 1244 (htt_tlv_filter->md_ctrl_filter & 1245 FILTER_CTRL_RESERVED_4) ? 1 : 0); 1246 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1247 CTRL, 0100, 1248 (htt_tlv_filter->md_ctrl_filter & 1249 FILTER_CTRL_BF_REP_POLL) ? 1 : 0); 1250 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1251 CTRL, 0101, 1252 (htt_tlv_filter->md_ctrl_filter & 1253 FILTER_CTRL_VHT_NDP) ? 1 : 0); 1254 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1255 CTRL, 0110, 1256 (htt_tlv_filter->md_ctrl_filter & 1257 FILTER_CTRL_FRAME_EXT) ? 1 : 0); 1258 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1259 CTRL, 0111, 1260 (htt_tlv_filter->md_ctrl_filter & 1261 FILTER_CTRL_CTRLWRAP) ? 1 : 0); 1262 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1263 CTRL, 1000, 1264 (htt_tlv_filter->md_ctrl_filter & 1265 FILTER_CTRL_BA_REQ) ? 1 : 0); 1266 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 1267 CTRL, 1001, 1268 (htt_tlv_filter->md_ctrl_filter & 1269 FILTER_CTRL_BA) ? 1 : 0); 1270 } 1271 1272 if (htt_tlv_filter->enable_mo) { 1273 /* TYPE: CTRL */ 1274 /* reserved */ 1275 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1276 CTRL, 0000, 1277 (htt_tlv_filter->mo_ctrl_filter & 1278 FILTER_CTRL_RESERVED_1) ? 1 : 0); 1279 /* reserved */ 1280 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1281 CTRL, 0001, 1282 (htt_tlv_filter->mo_ctrl_filter & 1283 FILTER_CTRL_RESERVED_2) ? 
1 : 0); 1284 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1285 CTRL, 0010, 1286 (htt_tlv_filter->mo_ctrl_filter & 1287 FILTER_CTRL_TRIGGER) ? 1 : 0); 1288 /* reserved */ 1289 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1290 CTRL, 0011, 1291 (htt_tlv_filter->mo_ctrl_filter & 1292 FILTER_CTRL_RESERVED_4) ? 1 : 0); 1293 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1294 CTRL, 0100, 1295 (htt_tlv_filter->mo_ctrl_filter & 1296 FILTER_CTRL_BF_REP_POLL) ? 1 : 0); 1297 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1298 CTRL, 0101, 1299 (htt_tlv_filter->mo_ctrl_filter & 1300 FILTER_CTRL_VHT_NDP) ? 1 : 0); 1301 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1302 CTRL, 0110, 1303 (htt_tlv_filter->mo_ctrl_filter & 1304 FILTER_CTRL_FRAME_EXT) ? 1 : 0); 1305 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1306 CTRL, 0111, 1307 (htt_tlv_filter->mo_ctrl_filter & 1308 FILTER_CTRL_CTRLWRAP) ? 1 : 0); 1309 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1310 CTRL, 1000, 1311 (htt_tlv_filter->mo_ctrl_filter & 1312 FILTER_CTRL_BA_REQ) ? 1 : 0); 1313 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 1314 CTRL, 1001, 1315 (htt_tlv_filter->mo_ctrl_filter & 1316 FILTER_CTRL_BA) ? 1 : 0); 1317 } 1318 1319 /* word 5 */ 1320 msg_word++; 1321 *msg_word = 0; 1322 if (htt_tlv_filter->enable_fp) { 1323 /* TYPE: CTRL */ 1324 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1325 CTRL, 1010, 1326 (htt_tlv_filter->fp_ctrl_filter & 1327 FILTER_CTRL_PSPOLL) ? 1 : 0); 1328 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1329 CTRL, 1011, 1330 (htt_tlv_filter->fp_ctrl_filter & 1331 FILTER_CTRL_RTS) ? 1 : 0); 1332 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1333 CTRL, 1100, 1334 (htt_tlv_filter->fp_ctrl_filter & 1335 FILTER_CTRL_CTS) ? 1 : 0); 1336 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1337 CTRL, 1101, 1338 (htt_tlv_filter->fp_ctrl_filter & 1339 FILTER_CTRL_ACK) ? 
1 : 0); 1340 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1341 CTRL, 1110, 1342 (htt_tlv_filter->fp_ctrl_filter & 1343 FILTER_CTRL_CFEND) ? 1 : 0); 1344 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1345 CTRL, 1111, 1346 (htt_tlv_filter->fp_ctrl_filter & 1347 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1348 /* TYPE: DATA */ 1349 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1350 DATA, MCAST, 1351 (htt_tlv_filter->fp_data_filter & 1352 FILTER_DATA_MCAST) ? 1 : 0); 1353 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1354 DATA, UCAST, 1355 (htt_tlv_filter->fp_data_filter & 1356 FILTER_DATA_UCAST) ? 1 : 0); 1357 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 1358 DATA, NULL, 1359 (htt_tlv_filter->fp_data_filter & 1360 FILTER_DATA_NULL) ? 1 : 0); 1361 } 1362 1363 if (htt_tlv_filter->enable_md) { 1364 /* TYPE: CTRL */ 1365 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1366 CTRL, 1010, 1367 (htt_tlv_filter->md_ctrl_filter & 1368 FILTER_CTRL_PSPOLL) ? 1 : 0); 1369 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1370 CTRL, 1011, 1371 (htt_tlv_filter->md_ctrl_filter & 1372 FILTER_CTRL_RTS) ? 1 : 0); 1373 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1374 CTRL, 1100, 1375 (htt_tlv_filter->md_ctrl_filter & 1376 FILTER_CTRL_CTS) ? 1 : 0); 1377 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1378 CTRL, 1101, 1379 (htt_tlv_filter->md_ctrl_filter & 1380 FILTER_CTRL_ACK) ? 1 : 0); 1381 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1382 CTRL, 1110, 1383 (htt_tlv_filter->md_ctrl_filter & 1384 FILTER_CTRL_CFEND) ? 1 : 0); 1385 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1386 CTRL, 1111, 1387 (htt_tlv_filter->md_ctrl_filter & 1388 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1389 /* TYPE: DATA */ 1390 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1391 DATA, MCAST, 1392 (htt_tlv_filter->md_data_filter & 1393 FILTER_DATA_MCAST) ? 
1 : 0); 1394 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1395 DATA, UCAST, 1396 (htt_tlv_filter->md_data_filter & 1397 FILTER_DATA_UCAST) ? 1 : 0); 1398 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 1399 DATA, NULL, 1400 (htt_tlv_filter->md_data_filter & 1401 FILTER_DATA_NULL) ? 1 : 0); 1402 } 1403 1404 if (htt_tlv_filter->enable_mo) { 1405 /* TYPE: CTRL */ 1406 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1407 CTRL, 1010, 1408 (htt_tlv_filter->mo_ctrl_filter & 1409 FILTER_CTRL_PSPOLL) ? 1 : 0); 1410 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1411 CTRL, 1011, 1412 (htt_tlv_filter->mo_ctrl_filter & 1413 FILTER_CTRL_RTS) ? 1 : 0); 1414 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1415 CTRL, 1100, 1416 (htt_tlv_filter->mo_ctrl_filter & 1417 FILTER_CTRL_CTS) ? 1 : 0); 1418 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1419 CTRL, 1101, 1420 (htt_tlv_filter->mo_ctrl_filter & 1421 FILTER_CTRL_ACK) ? 1 : 0); 1422 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1423 CTRL, 1110, 1424 (htt_tlv_filter->mo_ctrl_filter & 1425 FILTER_CTRL_CFEND) ? 1 : 0); 1426 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1427 CTRL, 1111, 1428 (htt_tlv_filter->mo_ctrl_filter & 1429 FILTER_CTRL_CFEND_CFACK) ? 1 : 0); 1430 /* TYPE: DATA */ 1431 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1432 DATA, MCAST, 1433 (htt_tlv_filter->mo_data_filter & 1434 FILTER_DATA_MCAST) ? 1 : 0); 1435 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1436 DATA, UCAST, 1437 (htt_tlv_filter->mo_data_filter & 1438 FILTER_DATA_UCAST) ? 1 : 0); 1439 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 1440 DATA, NULL, 1441 (htt_tlv_filter->mo_data_filter & 1442 FILTER_DATA_NULL) ? 
1 : 0); 1443 } 1444 1445 /* word 6 */ 1446 msg_word++; 1447 *msg_word = 0; 1448 tlv_filter = 0; 1449 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START, 1450 htt_tlv_filter->mpdu_start); 1451 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START, 1452 htt_tlv_filter->msdu_start); 1453 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET, 1454 htt_tlv_filter->packet); 1455 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END, 1456 htt_tlv_filter->msdu_end); 1457 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END, 1458 htt_tlv_filter->mpdu_end); 1459 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER, 1460 htt_tlv_filter->packet_header); 1461 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION, 1462 htt_tlv_filter->attention); 1463 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START, 1464 htt_tlv_filter->ppdu_start); 1465 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END, 1466 htt_tlv_filter->ppdu_end); 1467 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS, 1468 htt_tlv_filter->ppdu_end_user_stats); 1469 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, 1470 PPDU_END_USER_STATS_EXT, 1471 htt_tlv_filter->ppdu_end_user_stats_ext); 1472 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE, 1473 htt_tlv_filter->ppdu_end_status_done); 1474 /* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/ 1475 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED, 1476 htt_tlv_filter->header_per_msdu); 1477 1478 HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter); 1479 1480 msg_word++; 1481 *msg_word = 0; 1482 if (htt_tlv_filter->offset_valid) { 1483 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word, 1484 htt_tlv_filter->rx_packet_offset); 1485 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word, 1486 htt_tlv_filter->rx_header_offset); 1487 1488 msg_word++; 1489 *msg_word = 0; 1490 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word, 
htt_tlv_filter->rx_mpdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
			htt_tlv_filter->rx_mpdu_start_offset);

		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
			htt_tlv_filter->rx_msdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
			htt_tlv_filter->rx_msdu_start_offset);

		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
			htt_tlv_filter->rx_attn_offset);
		msg_word++;
		*msg_word = 0;
	} else {
		/* no offsets supplied: skip over the four offset words */
		msg_word += 4;
		*msg_word = 0;
	}

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
								mon_drop_th);

	/* "response_required" field should be set if a HTT response message is
	 * required after setting up the ring.
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt)
		goto fail1;

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		/* send failed: both the nbuf and the HTC packet are ours to
		 * release
		 */
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}

qdf_export_symbol(htt_h2t_rx_ring_cfg);

#if defined(HTT_STATS_ENABLE)
/**
 * dp_send_htt_stat_resp() - deliver one HTT EXT stats segment via WDI
 * @htt_stats: htt stats context holding the remaining message length
 * @soc: DP SOC handle
 * @htt_msg: network buffer carrying the stats segment
 *
 * Reads the pdev id from the cookie word, clamps the event length to
 * DP_EXT_MSG_LENGTH and raises WDI_EVENT_HTT_STATS.  The buffer is freed
 * here because the WDI handler makes its own copy of the payload.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
					       struct dp_soc *soc, qdf_nbuf_t htt_msg)

{
	uint32_t pdev_id;
	uint32_t *msg_word = NULL;
	uint32_t msg_remain_len = 0;

	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);

	/*COOKIE MSB*/
	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;

	/* stats message length + 16 size of HTT header*/
	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
				 (uint32_t)DP_EXT_MSG_LENGTH);

	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
			     msg_word, msg_remain_len,
			     WDI_NO_VAL, pdev_id);

	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
	}
	/* Need to be freed here as WDI handler will
	 * make a copy of pkt to send data to application
	 */
	qdf_nbuf_free(htt_msg);
	return QDF_STATUS_SUCCESS;
}
#else
/* HTT stats delivery disabled at build time */
static inline QDF_STATUS
dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif

#ifdef HTT_STATS_DEBUGFS_SUPPORT
/* dp_htt_stats_dbgfs_send_msg() - Function to send htt data to upper layer
 * @pdev: dp pdev handle
 * @msg_word: HTT msg
 * @msg_len: Length of HTT msg sent
 *
 * Return: none
 */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
	struct htt_dbgfs_cfg dbgfs_cfg;
	int done = 0;

	/* send 5th word of HTT msg to upper layer */
	dbgfs_cfg.msg_word = (msg_word + 4);
	dbgfs_cfg.m = pdev->dbgfs_cfg->m;

	/* stats message length + 16 size of HTT header*/
	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);

	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
							     (msg_len - HTT_HEADER_LEN));

	/* Get TLV Done bit from 4th msg word */
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
	if (done) {
		/* last segment: wake the waiter blocked on the debugfs read */
		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
				   , pdev->soc);
	}
}
#else
/* debugfs stats support not compiled in */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
#endif /* HTT_STATS_DEBUGFS_SUPPORT */

/**
 * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
 * @htt_stats: htt stats info
 *
 * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
 * contains sub messages which are identified by a TLV header.
 * In this function we will process the stream of T2H messages and read all the
 * TLV contained in the message.
 *
 * The following cases have been taken care of
 * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
 *	   In this case the buffer will contain multiple tlvs.
 * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
 *	   Only one tlv will be contained in the HTT message and this tag
 *	   will extend onto the next buffer.
 * Case 3: When the buffer is the continuation of the previous message
 * Case 4: tlv length is 0.
 *	   which will indicate the end of message
 *
 * return: void
 */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					   struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	uint8_t *tlv_buf_head = NULL;	/* scratch buffer for a split TLV */
	uint8_t *tlv_buf_tail = NULL;	/* write cursor into tlv_buf_head */
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val = 0;
	int cookie_msb = 0;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
	       != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
			    == QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
						    htt_stats->msg_len);
			qdf_nbuf_free(htt_msg);
			continue;
		}

		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
			copy_stats = true;

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
					 (uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			if (tlv_remain_len == 0) {
				/* Case 4: zero-length TLV ends the stream */
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length excludes its own header; account for it */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
						     (uint8_t *)msg_word,
						     tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
							  QDF_TRACE_LEVEL_ERROR,
							  "Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
					     msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* drop the current buffer and everything still queued */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
	       != NULL)
		qdf_nbuf_free(htt_msg);
}

/**
 * htt_t2h_stats_handler() - work handler for queued HTT EXT stats
 * @context: opaque pointer to the DP SOC
 *
 * Pulls one completed stats stream (terminated by a buffer with the TLV
 * Done bit set) off soc->htt_stats.msg under soc->htt_stats.lock, reschedules
 * the work if more streams remain, and processes the pulled stream.
 *
 * Return: void
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}

/**
 * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
 * @soc: DP SOC handle
 * @htt_t2h_msg: HTT message nbuf
 *
 * return:void
 */
static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
					    qdf_nbuf_t htt_t2h_msg)
{
	uint8_t done;
	qdf_nbuf_t msg_copy;
	uint32_t *msg_word;

	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
	msg_word = msg_word + 3;
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);

	/*
	 * HTT EXT stats response comes as stream of TLVs which span over
	 * multiple T2H messages.
	 * The first message will carry length of the response.
	 * For rest of the messages length will be zero.
	 *
	 * Clone the T2H message buffer and store it in a list to process
	 * it later.
	 *
	 * The original T2H message buffers gets freed in the T2H HTT event
	 * handler
	 */
	msg_copy = qdf_nbuf_clone(htt_t2h_msg);

	if (!msg_copy) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "T2H messge clone failed for HTT EXT STATS");
		goto error;
	}

	qdf_spin_lock_bh(&soc->htt_stats.lock);
	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
	/*
	 * Done bit signifies that this is the last T2H buffer in the stream of
	 * HTT EXT STATS message
	 */
	if (done) {
		soc->htt_stats.num_stats++;
		qdf_sched_work(0, &soc->htt_stats.work);
	}
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	return;

error:
	/* clone failed: discard every partially-accumulated segment */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
			!= NULL) {
		qdf_nbuf_free(msg_copy);
	}
	soc->htt_stats.num_stats = 0;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);
	return;
}

/*
 * htt_soc_attach_target() - SOC level HTT setup
 * @htt_soc: HTT SOC handle
 *
 * Sends the HTT version request message to the target.
 *
 * Return: 0 on success; error code on failure
 */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;

	return htt_h2t_ver_req_msg(soc);
}

/* htt_set_htc_handle() - store the HTC handle in the HTT SOC */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}

/* htt_get_htc_handle() - return the HTC handle stored in the HTT SOC */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}

/**
 * htt_soc_attach() - allocate and initialize the HTT SOC object
 * @soc: DP SOC handle
 * @htc_handle: HTC handle to associate with the HTT SOC
 *
 * Allocates per-pdev umac/lmac ring-timestamp tables (initialized to -1,
 * i.e. "never printed") and initializes the TX mutex.  On any allocation
 * failure everything allocated so far is released.
 *
 * Return: pointer to the new htt_soc, or NULL on allocation failure
 */
struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
{
	int i;
	int j;
	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
	struct htt_soc *htt_soc = NULL;

	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
	if (!htt_soc) {
		dp_err("HTT attach failed");
		return NULL;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		htt_soc->pdevid_tt[i].umac_ttt =
qdf_mem_malloc(alloc_size);
		if (!htt_soc->pdevid_tt[i].umac_ttt)
			break;
		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
			break;
		}
		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
	}
	if (i != MAX_PDEV_CNT) {
		/* partial allocation failure: unwind fully-allocated pdevs */
		for (j = 0; j < i; j++) {
			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
		}
		qdf_mem_free(htt_soc);
		return NULL;
	}

	htt_soc->dp_soc = soc;
	htt_soc->htc_soc = htc_handle;
	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);

	return htt_soc;
}

#if defined(WDI_EVENT_ENABLE) && \
	!defined(REMOVE_PKT_LOG)
/*
 * dp_pktlog_msg_handler() - Pktlog msg handler
 * @htt_soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Return: None
 */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	uint32_t *pl_hdr;

	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	/* pktlog header starts right after the HTT message header word */
	pl_hdr = (msg_word + 1);
	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
			     pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);
}
#else
/* pktlog not compiled in */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
#endif

/*
 * time_allow_print() - time allow print
 * @htt_ring_tt: ring_id array of timestamps
 * @ring_id: ring_id (index)
 *
 * Return: 1 for successfully saving timestamp in array
 * and 0 for timestamp falling within 2 seconds after last one
 */
static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
{
	unsigned long tstamp;
	unsigned long delta;

	tstamp = qdf_get_system_timestamp();

	if (!htt_ring_tt)
		return 0; //unable to print backpressure messages

	/* -1 sentinel: first event on this ring, always allow */
	if (htt_ring_tt[ring_id] == -1) {
		htt_ring_tt[ring_id] = tstamp;
		return 1;
	}
	delta = tstamp - htt_ring_tt[ring_id];
	/* throttle: allow at most one print per 2000 ms per ring */
	if (delta >= 2000) {
		htt_ring_tt[ring_id] = tstamp;
		return 1;
	}

	return 0;
}

/*
 * dp_htt_alert_print() - emit a two-line backpressure alert for one ring
 * @msg_type: HTT T2H message type that triggered the alert
 * @pdev: DP pdev the event belongs to
 * @ring_id: ring index reported by firmware
 * @hp_idx: head pointer index
 * @tp_idx: tail pointer index
 * @bkp_time: backpressure duration in ms
 * @ring_stype: human-readable ring type string
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}

/**
 * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
 * @soc: DP_SOC handle
 * @pdev: DP pdev handle (unused here; kept for call-site symmetry)
 * @srng: DP_SRNG handle
 * @ring_type: srng src/dst ring
 * @state: output; filled with SW/HW head-tail snapshot and ring type
 *
 * Return: QDF_STATUS_SUCCESS when @state was filled,
 *         QDF_STATUS_E_INVAL when any required handle is missing
 */
static QDF_STATUS
dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
				struct dp_pdev *pdev,
				struct dp_srng *srng,
				enum hal_ring_type ring_type,
				struct dp_srng_ring_state *state)
{
	struct hal_soc *hal_soc;

	if (!soc || !srng || !srng->hal_srng || !state)
		return QDF_STATUS_E_INVAL;

	hal_soc = (struct hal_soc *)soc->hal_soc;

	/* NOTE(review): sw_tail is passed before sw_head — confirm this
	 * matches the (tailp, headp) parameter order of hal_get_sw_hptp().
	 */
	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
			&state->sw_head);

	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
			&state->hw_tail, ring_type);

	state->ring_type = ring_type;

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_MONITOR_PKT_SUPPORT
/*
 * dp_queue_mon_ring_stats() - snapshot monitor-destination ring states
 * @pdev: DP pdev handle
 * @lmac_id: lmac index whose monitor rings are sampled
 * @num_srng: in/out running index into soc_srngs_state->ring_state[]
 * @soc_srngs_state: aggregate snapshot being built by the caller
 */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
QDF_STATUS status; 2112 2113 if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) { 2114 status = dp_get_srng_ring_state_from_hal 2115 (pdev->soc, pdev, 2116 &pdev->soc->rxdma_mon_buf_ring[lmac_id], 2117 RXDMA_MONITOR_BUF, 2118 &soc_srngs_state->ring_state[*num_srng]); 2119 2120 if (status == QDF_STATUS_SUCCESS) 2121 qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS); 2122 2123 status = dp_get_srng_ring_state_from_hal 2124 (pdev->soc, pdev, 2125 &pdev->soc->rxdma_mon_dst_ring[lmac_id], 2126 RXDMA_MONITOR_DST, 2127 &soc_srngs_state->ring_state[*num_srng]); 2128 2129 if (status == QDF_STATUS_SUCCESS) 2130 qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS); 2131 2132 status = dp_get_srng_ring_state_from_hal 2133 (pdev->soc, pdev, 2134 &pdev->soc->rxdma_mon_desc_ring[lmac_id], 2135 RXDMA_MONITOR_DESC, 2136 &soc_srngs_state->ring_state[*num_srng]); 2137 2138 if (status == QDF_STATUS_SUCCESS) 2139 qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS); 2140 } 2141 } 2142 #else 2143 static void 2144 dp_queue_mon_ring_stats(struct dp_pdev *pdev, 2145 int lmac_id, uint32_t *num_srng, 2146 struct dp_soc_srngs_state *soc_srngs_state) 2147 { 2148 } 2149 #endif 2150 2151 /** 2152 * dp_queue_srng_ring_stats(): Print pdev hal level ring stats 2153 * @pdev: DP_pdev handle 2154 * 2155 * Return: void 2156 */ 2157 static void dp_queue_ring_stats(struct dp_pdev *pdev) 2158 { 2159 uint32_t i; 2160 int mac_id; 2161 int lmac_id; 2162 uint32_t j = 0; 2163 struct dp_soc_srngs_state * soc_srngs_state = NULL; 2164 QDF_STATUS status; 2165 2166 soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state)); 2167 if (!soc_srngs_state) { 2168 dp_htt_alert("Memory alloc failed for back pressure event"); 2169 return; 2170 } 2171 2172 status = dp_get_srng_ring_state_from_hal 2173 (pdev->soc, pdev, 2174 &pdev->soc->reo_exception_ring, 2175 REO_EXCEPTION, 2176 &soc_srngs_state->ring_state[j]); 2177 2178 if (status == QDF_STATUS_SUCCESS) 2179 qdf_assert_always(++j < DP_MAX_SRNGS); 2180 2181 status = 
dp_get_srng_ring_state_from_hal 2182 (pdev->soc, pdev, 2183 &pdev->soc->reo_reinject_ring, 2184 REO_REINJECT, 2185 &soc_srngs_state->ring_state[j]); 2186 2187 if (status == QDF_STATUS_SUCCESS) 2188 qdf_assert_always(++j < DP_MAX_SRNGS); 2189 2190 status = dp_get_srng_ring_state_from_hal 2191 (pdev->soc, pdev, 2192 &pdev->soc->reo_cmd_ring, 2193 REO_CMD, 2194 &soc_srngs_state->ring_state[j]); 2195 2196 if (status == QDF_STATUS_SUCCESS) 2197 qdf_assert_always(++j < DP_MAX_SRNGS); 2198 2199 status = dp_get_srng_ring_state_from_hal 2200 (pdev->soc, pdev, 2201 &pdev->soc->reo_status_ring, 2202 REO_STATUS, 2203 &soc_srngs_state->ring_state[j]); 2204 2205 if (status == QDF_STATUS_SUCCESS) 2206 qdf_assert_always(++j < DP_MAX_SRNGS); 2207 2208 status = dp_get_srng_ring_state_from_hal 2209 (pdev->soc, pdev, 2210 &pdev->soc->rx_rel_ring, 2211 WBM2SW_RELEASE, 2212 &soc_srngs_state->ring_state[j]); 2213 2214 if (status == QDF_STATUS_SUCCESS) 2215 qdf_assert_always(++j < DP_MAX_SRNGS); 2216 2217 status = dp_get_srng_ring_state_from_hal 2218 (pdev->soc, pdev, 2219 &pdev->soc->tcl_cmd_credit_ring, 2220 TCL_CMD_CREDIT, 2221 &soc_srngs_state->ring_state[j]); 2222 2223 if (status == QDF_STATUS_SUCCESS) 2224 qdf_assert_always(++j < DP_MAX_SRNGS); 2225 2226 status = dp_get_srng_ring_state_from_hal 2227 (pdev->soc, pdev, 2228 &pdev->soc->tcl_status_ring, 2229 TCL_STATUS, 2230 &soc_srngs_state->ring_state[j]); 2231 2232 if (status == QDF_STATUS_SUCCESS) 2233 qdf_assert_always(++j < DP_MAX_SRNGS); 2234 2235 status = dp_get_srng_ring_state_from_hal 2236 (pdev->soc, pdev, 2237 &pdev->soc->wbm_desc_rel_ring, 2238 SW2WBM_RELEASE, 2239 &soc_srngs_state->ring_state[j]); 2240 2241 if (status == QDF_STATUS_SUCCESS) 2242 qdf_assert_always(++j < DP_MAX_SRNGS); 2243 2244 for (i = 0; i < MAX_REO_DEST_RINGS; i++) { 2245 status = dp_get_srng_ring_state_from_hal 2246 (pdev->soc, pdev, 2247 &pdev->soc->reo_dest_ring[i], 2248 REO_DST, 2249 &soc_srngs_state->ring_state[j]); 2250 2251 if (status == 
QDF_STATUS_SUCCESS) 2252 qdf_assert_always(++j < DP_MAX_SRNGS); 2253 } 2254 2255 for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) { 2256 status = dp_get_srng_ring_state_from_hal 2257 (pdev->soc, pdev, 2258 &pdev->soc->tcl_data_ring[i], 2259 TCL_DATA, 2260 &soc_srngs_state->ring_state[j]); 2261 2262 if (status == QDF_STATUS_SUCCESS) 2263 qdf_assert_always(++j < DP_MAX_SRNGS); 2264 } 2265 2266 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) { 2267 status = dp_get_srng_ring_state_from_hal 2268 (pdev->soc, pdev, 2269 &pdev->soc->tx_comp_ring[i], 2270 WBM2SW_RELEASE, 2271 &soc_srngs_state->ring_state[j]); 2272 2273 if (status == QDF_STATUS_SUCCESS) 2274 qdf_assert_always(++j < DP_MAX_SRNGS); 2275 } 2276 2277 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id); 2278 status = dp_get_srng_ring_state_from_hal 2279 (pdev->soc, pdev, 2280 &pdev->soc->rx_refill_buf_ring 2281 [lmac_id], 2282 RXDMA_BUF, 2283 &soc_srngs_state->ring_state[j]); 2284 2285 if (status == QDF_STATUS_SUCCESS) 2286 qdf_assert_always(++j < DP_MAX_SRNGS); 2287 2288 status = dp_get_srng_ring_state_from_hal 2289 (pdev->soc, pdev, 2290 &pdev->rx_refill_buf_ring2, 2291 RXDMA_BUF, 2292 &soc_srngs_state->ring_state[j]); 2293 2294 if (status == QDF_STATUS_SUCCESS) 2295 qdf_assert_always(++j < DP_MAX_SRNGS); 2296 2297 2298 for (i = 0; i < MAX_RX_MAC_RINGS; i++) { 2299 dp_get_srng_ring_state_from_hal 2300 (pdev->soc, pdev, 2301 &pdev->rx_mac_buf_ring[i], 2302 RXDMA_BUF, 2303 &soc_srngs_state->ring_state[j]); 2304 2305 if (status == QDF_STATUS_SUCCESS) 2306 qdf_assert_always(++j < DP_MAX_SRNGS); 2307 } 2308 2309 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { 2310 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 2311 mac_id, pdev->pdev_id); 2312 2313 dp_queue_mon_ring_stats(pdev, lmac_id, &j, 2314 soc_srngs_state); 2315 2316 status = dp_get_srng_ring_state_from_hal 2317 (pdev->soc, pdev, 2318 &pdev->soc->rxdma_mon_status_ring[lmac_id], 2319 RXDMA_MONITOR_STATUS, 2320 
&soc_srngs_state->ring_state[j]); 2321 2322 if (status == QDF_STATUS_SUCCESS) 2323 qdf_assert_always(++j < DP_MAX_SRNGS); 2324 } 2325 2326 for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { 2327 lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 2328 i, pdev->pdev_id); 2329 2330 status = dp_get_srng_ring_state_from_hal 2331 (pdev->soc, pdev, 2332 &pdev->soc->rxdma_err_dst_ring 2333 [lmac_id], 2334 RXDMA_DST, 2335 &soc_srngs_state->ring_state[j]); 2336 2337 if (status == QDF_STATUS_SUCCESS) 2338 qdf_assert_always(++j < DP_MAX_SRNGS); 2339 } 2340 soc_srngs_state->max_ring_id = j; 2341 2342 qdf_spin_lock_bh(&pdev->bkp_stats.list_lock); 2343 2344 soc_srngs_state->seq_num = pdev->bkp_stats.seq_num; 2345 TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state, 2346 list_elem); 2347 pdev->bkp_stats.seq_num++; 2348 qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock); 2349 2350 qdf_queue_work(0, pdev->bkp_stats.work_queue, 2351 &pdev->bkp_stats.work); 2352 } 2353 2354 /* 2355 * dp_htt_bkp_event_alert() - htt backpressure event alert 2356 * @msg_word: htt packet context 2357 * @htt_soc: HTT SOC handle 2358 * 2359 * Return: after attempting to print stats 2360 */ 2361 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc) 2362 { 2363 u_int8_t ring_type; 2364 u_int8_t pdev_id; 2365 uint8_t target_pdev_id; 2366 u_int8_t ring_id; 2367 u_int16_t hp_idx; 2368 u_int16_t tp_idx; 2369 u_int32_t bkp_time; 2370 enum htt_t2h_msg_type msg_type; 2371 struct dp_soc *dpsoc; 2372 struct dp_pdev *pdev; 2373 struct dp_htt_timestamp *radio_tt; 2374 2375 if (!soc) 2376 return; 2377 2378 dpsoc = (struct dp_soc *)soc->dp_soc; 2379 msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word); 2380 ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word); 2381 target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word); 2382 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, 2383 target_pdev_id); 2384 if (pdev_id >= MAX_PDEV_CNT) { 2385 dp_htt_debug("%pK: pdev id %d is invalid", soc, 
			     pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
	radio_tt = &soc->pdevid_tt[pdev_id];

	switch (ring_type) {
	case HTT_SW_RING_TYPE_UMAC:
		/* time_allow_print() rate-limits to one alert / 2s / ring */
		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
			return;
		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
		break;
	case HTT_SW_RING_TYPE_LMAC:
		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
			return;
		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
		break;
	default:
		/* unknown ring type: always print, never throttled */
		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
				   bkp_time, "UNKNOWN");
		break;
	}

	/* Snapshot all SRNG states and hand off to the stats worker */
	dp_queue_ring_stats(pdev);
}

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/*
 * dp_offload_ind_handler() - offload msg handler
 * @htt_soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Forwards the offloaded-TX-data indication to WDI subscribers after
 * mapping the target pdev id to the host pdev id.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
			     pdev_id);
}
#else
/* Stub when packet capture v2 is compiled out */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif

/*
 * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
 * @context: Opaque context (HTT SOC handle)
 * @pkt: HTC packet
 */
/*
 * Dispatches every T2H HTT indication received over HTC to the matching
 * data-path handler. Ownership of the nbuf stays here unless a case
 * clears free_buf (currently only PPDU_STATS_IND may keep the buffer).
 */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation is an orderly teardown, not an HTC error */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	htt_event_record(soc->htt_logger_handle,
			 msg_type, (uint8_t *)msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
	{
		dp_htt_bkp_event_alert(msg_word, soc);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		u_int8_t is_wds;
		struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;

		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
		hw_peer_id =
			HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
			(u_int8_t *) (msg_word+1),
			&mac_addr_deswizzle_buf[0]);
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
			  peer_id, vdev_id);

		/*
		 * check if peer already exists for this peer_id, if so
		 * this peer map event is in response for a wds peer add
		 * wmi command sent during wds source port learning.
		 * in this case just add the ast entry to the existing
		 * peer ast_list.
		 */
		is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
		dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
				       vdev_id, peer_mac_addr, 0,
				       is_wds);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	{
		u_int16_t peer_id;
		u_int8_t vdev_id;
		/* v1 unmap carries no MAC; pass an all-zero address */
		u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);

		dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
					 vdev_id, mac_addr, 0,
					 DP_PEER_WDS_COUNT_INVALID);
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND:
	{
		u_int16_t peer_id;
		enum cdp_sec_type sec_type;
		int is_unicast;

		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
		/* point to the first part of the Michael key */
		msg_word++;
		dp_rx_sec_ind_handler(
			soc->dp_soc, peer_id, sec_type, is_unicast,
			msg_word, msg_word + 2);
		break;
	}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
	{
		/* handler may take ownership of the nbuf (clones etc.);
		 * it tells us via the return whether to free it here
		 */
		free_buf =
			dp_monitor_ppdu_stats_ind_handler(soc,
							  msg_word,
							  htt_t2h_msg);
		break;
	}

	case HTT_T2H_MSG_TYPE_PKTLOG:
	{
		dp_pktlog_msg_handler(soc, msg_word);
		break;
	}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
	{
		/*
		 * HTC maintains runtime pm count for H2T messages that
		 * have a response msg from FW. This count ensures that
		 * in the case FW does not sent out the response or host
		 * did not process this indication runtime_put happens
		 * properly in the cleanup path.
		 */
		if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
			htc_pm_runtime_put(soc->htc_soc);
		else
			soc->stats.htt_ver_req_put_skip++;
		soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
		soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			  "target uses HTT version %d.%d; host uses %d.%d",
			  soc->tgt_ver.major, soc->tgt_ver.minor,
			  HTT_CURRENT_VERSION_MAJOR,
			  HTT_CURRENT_VERSION_MINOR);
		if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_WARN,
				  "*** Incompatible host/target HTT versions!");
		}
		/* abort if the target is incompatible with the host */
		qdf_assert(soc->tgt_ver.major ==
			   HTT_CURRENT_VERSION_MAJOR);
		if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO_LOW,
				  "*** Warning: host/target HTT versions"
				  " are different, though compatible!");
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint8_t win_sz;
		uint16_t status;
		struct dp_peer *peer;

		/*
		 * Update REO Queue Desc with new values
		 */
		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
		peer = dp_peer_get_ref_by_id(soc->dp_soc, peer_id,
					     DP_MOD_ID_HTT);

		/*
		 * Window size needs to be incremented by 1
		 * since fw needs to represent a value of 256
		 * using just 8 bits
		 */
		if (peer) {
			status = dp_addba_requestprocess_wifi3(
				(struct cdp_soc_t *)soc->dp_soc,
				peer->mac_addr.raw, peer->vdev->vdev_id,
				0, tid, 0, win_sz + 1, 0xffff);

			/*
			 * If PEER_LOCK_REF_PROTECT enbled dec ref
			 * which is inc by dp_peer_get_ref_by_id
			 */
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  FL("PeerID %d BAW %d TID %d stat %d"),
				  peer_id, win_sz, tid, status);

		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Peer not found peer id %d"),
				  peer_id);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
	{
		dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		bool is_wds;
		u_int16_t ast_hash;
		struct dp_ast_flow_override_info ast_flow_info;

		qdf_mem_set(&ast_flow_info, 0,
			    sizeof(struct dp_ast_flow_override_info));

		peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
		hw_peer_id =
			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
		vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
		peer_mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
		is_wds =
			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
		ast_hash =
			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
		/*
		 * Update 4 ast_index per peer, ast valid mask
		 * and TID flow valid mask.
		 * AST valid mask is 3 bit field corresponds to
		 * ast_index[3:1]. ast_index 0 is always valid.
		 */
		ast_flow_info.ast_valid_mask =
			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
		ast_flow_info.ast_idx[0] = hw_peer_id;
		ast_flow_info.ast_flow_mask[0] =
			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
		ast_flow_info.ast_idx[1] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
		ast_flow_info.ast_flow_mask[1] =
			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
		ast_flow_info.ast_idx[2] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
		ast_flow_info.ast_flow_mask[2] =
			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
		ast_flow_info.ast_idx[3] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
		ast_flow_info.ast_flow_mask[3] =
			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
		/*
		 * TID valid mask is applicable only
		 * for HI and LOW priority flows.
		 * tid_valid_mas is 8 bit field corresponds
		 * to TID[7:0]
		 */
		ast_flow_info.tid_valid_low_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
		ast_flow_info.tid_valid_hi_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));

		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
			  peer_id, vdev_id);

		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
			  ast_flow_info.ast_idx[0],
			  ast_flow_info.ast_idx[1],
			  ast_flow_info.ast_idx[2],
			  ast_flow_info.ast_idx[3]);

		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
				       hw_peer_id, vdev_id,
				       peer_mac_addr, ast_hash,
				       is_wds);

		/*
		 * Update ast indexes for flow override support
		 * Applicable only for non wds peers
		 */
		dp_peer_ast_index_flow_queue_map_create(
				soc->dp_soc, is_wds,
				peer_id, peer_mac_addr,
				&ast_flow_info);

		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *mac_addr;
		u_int16_t peer_id;
		u_int8_t vdev_id;
		u_int8_t is_wds;
		u_int32_t free_wds_count;

		peer_id =
			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
		mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
		is_wds =
			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
		free_wds_count =
			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));

		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
			  peer_id, vdev_id);

		dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
					 vdev_id, mac_addr,
					 is_wds, free_wds_count);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint8_t win_sz;
		QDF_STATUS status;

		peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
		tid = HTT_RX_DELBA_TID_GET(*msg_word);
		win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);

		status = dp_rx_delba_ind_handler(
			soc->dp_soc,
			peer_id, tid, win_sz);

		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
			  peer_id, win_sz, tid, status);
		break;
	}
	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
	{
		uint16_t num_entries;
		uint32_t cmem_ba_lo;
		uint32_t cmem_ba_hi;

		num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
		cmem_ba_lo = *(msg_word + 1);
		cmem_ba_hi = *(msg_word + 2);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
			  num_entries, cmem_ba_lo, cmem_ba_hi);

		dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
					     cmem_ba_lo, cmem_ba_hi);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
	{
		dp_offload_ind_handler(soc, msg_word);
		break;
	}
	default:
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}

/*
 * dp_htt_h2t_full() - Send full handler (called from HTC)
 * @context: Opaque context (HTT SOC handle)
 * @pkt: HTC packet
 *
 * Return: enum htc_send_full_action
 */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* never drop H2T messages on queue-full; let HTC keep queueing */
	return HTC_SEND_FULL_KEEP;
}

/*
 * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
 * @context: Opaque context (HTT SOC handle)
 * @nbuf: nbuf containing T2H message
 * @pipe_id: HIF pipe ID
 *
 * Return: QDF_STATUS
 *
 * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
 * will be used for packet log and other high-priority HTT messages.
Proper
 * HTC connection to be added later once required FW changes are available
 */
static QDF_STATUS
dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
{
	QDF_STATUS rc = QDF_STATUS_SUCCESS;
	HTC_PACKET htc_pkt;

	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
	/* Wrap the raw HIF nbuf in a synthetic HTC packet so the normal
	 * T2H dispatcher can be reused unmodified.
	 */
	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
	htc_pkt.Status = QDF_STATUS_SUCCESS;
	htc_pkt.pPktContext = (void *)nbuf;
	dp_htt_t2h_msg_handler(context, &htc_pkt);

	return rc;
}

/*
 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
 * @htt_soc: HTT SOC handle
 *
 * Connects the HTT data message service to HTC, records the assigned
 * endpoint and registers the high-priority T2H HIF pipe callback.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
htt_htc_soc_attach(struct htt_soc *soc)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;
	struct dp_soc *dpsoc = soc->dp_soc;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;

	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = HTT_DATA_MSG_SVC;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	if (status != QDF_STATUS_SUCCESS)
		return status;

	soc->htc_endpoint = response.Endpoint;

	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);

	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
				    dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);

	return QDF_STATUS_SUCCESS; /* success */
}

/*
 * htt_soc_initialize() - SOC level HTT initialization
 * @htt_soc: Opaque htt SOC handle
 * @ctrl_psoc: Opaque ctrl SOC handle
 * @htc_soc: SOC level HTC handle
 * @hal_soc: Opaque HAL SOC handle
 * @osdev: QDF device
 *
 * Return: HTT handle on success; NULL on failure
 */
void *
htt_soc_initialize(struct htt_soc *htt_soc,
		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		   HTC_HANDLE htc_soc,
		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;

	soc->osdev = osdev;
	soc->ctrl_psoc = ctrl_psoc;
	soc->htc_soc = htc_soc;
	soc->hal_soc = hal_soc_hdl;

	if (htt_htc_soc_attach(soc))
		goto fail2;

	return soc;

fail2:
	return NULL;
}

/* htt_soc_htc_dealloc() - release HTC-related HTT resources (logger,
 * misc pkt pool and HTC packet freelist)
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}

/*
 * htt_soc_htc_prealloc() - HTC memory prealloc
 * @htt_soc: SOC level HTT handle
 *
 * Return:
QDF_STATUS_SUCCESS on Success or 2962 * QDF_STATUS_E_NOMEM on allocation failure 2963 */ 2964 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc) 2965 { 2966 int i; 2967 2968 soc->htt_htc_pkt_freelist = NULL; 2969 /* pre-allocate some HTC_PACKET objects */ 2970 for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) { 2971 struct dp_htt_htc_pkt_union *pkt; 2972 pkt = qdf_mem_malloc(sizeof(*pkt)); 2973 if (!pkt) 2974 return QDF_STATUS_E_NOMEM; 2975 2976 htt_htc_pkt_free(soc, &pkt->u.pkt); 2977 } 2978 return QDF_STATUS_SUCCESS; 2979 } 2980 2981 /* 2982 * htt_soc_detach() - Free SOC level HTT handle 2983 * @htt_hdl: HTT SOC handle 2984 */ 2985 void htt_soc_detach(struct htt_soc *htt_hdl) 2986 { 2987 int i; 2988 struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl; 2989 2990 for (i = 0; i < MAX_PDEV_CNT; i++) { 2991 qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt); 2992 qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt); 2993 } 2994 2995 HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex); 2996 qdf_mem_free(htt_handle); 2997 2998 } 2999 3000 /** 3001 * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW 3002 * @pdev: DP PDEV handle 3003 * @stats_type_upload_mask: stats type requested by user 3004 * @config_param_0: extra configuration parameters 3005 * @config_param_1: extra configuration parameters 3006 * @config_param_2: extra configuration parameters 3007 * @config_param_3: extra configuration parameters 3008 * @mac_id: mac number 3009 * 3010 * return: QDF STATUS 3011 */ 3012 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, 3013 uint32_t stats_type_upload_mask, uint32_t config_param_0, 3014 uint32_t config_param_1, uint32_t config_param_2, 3015 uint32_t config_param_3, int cookie_val, int cookie_msb, 3016 uint8_t mac_id) 3017 { 3018 struct htt_soc *soc = pdev->soc->htt_handle; 3019 struct dp_htt_htc_pkt *pkt; 3020 qdf_nbuf_t msg; 3021 uint32_t *msg_word; 3022 uint8_t pdev_mask = 0; 3023 uint8_t *htt_logger_bufp; 3024 int mac_for_pdev; 3025 int 
target_pdev_id; 3026 QDF_STATUS status; 3027 3028 msg = qdf_nbuf_alloc( 3029 soc->osdev, 3030 HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ), 3031 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 3032 3033 if (!msg) 3034 return QDF_STATUS_E_NOMEM; 3035 3036 /*TODO:Add support for SOC stats 3037 * Bit 0: SOC Stats 3038 * Bit 1: Pdev stats for pdev id 0 3039 * Bit 2: Pdev stats for pdev id 1 3040 * Bit 3: Pdev stats for pdev id 2 3041 */ 3042 mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id); 3043 target_pdev_id = 3044 dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev); 3045 3046 pdev_mask = 1 << target_pdev_id; 3047 3048 /* 3049 * Set the length of the message. 3050 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 3051 * separately during the below call to qdf_nbuf_push_head. 3052 * The contribution from the HTC header is added separately inside HTC. 3053 */ 3054 if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) { 3055 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3056 "Failed to expand head for HTT_EXT_STATS"); 3057 qdf_nbuf_free(msg); 3058 return QDF_STATUS_E_FAILURE; 3059 } 3060 3061 dp_htt_tx_stats_info("%pK: cookie <-> %d\n config_param_0 %u\n" 3062 "config_param_1 %u\n config_param_2 %u\n" 3063 "config_param_4 %u\n -------------", 3064 pdev->soc, cookie_val, 3065 config_param_0, 3066 config_param_1, config_param_2, config_param_3); 3067 3068 msg_word = (uint32_t *) qdf_nbuf_data(msg); 3069 3070 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 3071 htt_logger_bufp = (uint8_t *)msg_word; 3072 *msg_word = 0; 3073 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ); 3074 HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask); 3075 HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask); 3076 3077 /* word 1 */ 3078 msg_word++; 3079 *msg_word = 0; 3080 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0); 3081 3082 /* word 2 */ 3083 msg_word++; 3084 
*msg_word = 0; 3085 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1); 3086 3087 /* word 3 */ 3088 msg_word++; 3089 *msg_word = 0; 3090 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2); 3091 3092 /* word 4 */ 3093 msg_word++; 3094 *msg_word = 0; 3095 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3); 3096 3097 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0); 3098 3099 /* word 5 */ 3100 msg_word++; 3101 3102 /* word 6 */ 3103 msg_word++; 3104 *msg_word = 0; 3105 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val); 3106 3107 /* word 7 */ 3108 msg_word++; 3109 *msg_word = 0; 3110 /* Currently Using last 2 bits for pdev_id 3111 * For future reference, reserving 3 bits in cookie_msb for pdev_id 3112 */ 3113 cookie_msb = (cookie_msb | pdev->pdev_id); 3114 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb); 3115 3116 pkt = htt_htc_pkt_alloc(soc); 3117 if (!pkt) { 3118 qdf_nbuf_free(msg); 3119 return QDF_STATUS_E_NOMEM; 3120 } 3121 3122 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 3123 3124 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 3125 dp_htt_h2t_send_complete_free_netbuf, 3126 qdf_nbuf_data(msg), qdf_nbuf_len(msg), 3127 soc->htc_endpoint, 3128 /* tag for FW response msg not guaranteed */ 3129 HTC_TX_PACKET_TAG_RUNTIME_PUT); 3130 3131 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 3132 status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ, 3133 htt_logger_bufp); 3134 3135 if (status != QDF_STATUS_SUCCESS) { 3136 qdf_nbuf_free(msg); 3137 htt_htc_pkt_free(soc, pkt); 3138 } 3139 3140 return status; 3141 } 3142 3143 /** 3144 * dp_h2t_3tuple_config_send(): function to contruct 3 tuple configuration 3145 * HTT message to pass to FW 3146 * @pdev: DP PDEV handle 3147 * @tuple_mask: tuple configuration to report 3 tuple hash value in either 3148 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV. 
3149 * 3150 * tuple_mask[1:0]: 3151 * 00 - Do not report 3 tuple hash value 3152 * 10 - Report 3 tuple hash value in toeplitz_2_or_4 3153 * 01 - Report 3 tuple hash value in flow_id_toeplitz 3154 * 11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz 3155 * 3156 * return: QDF STATUS 3157 */ 3158 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, 3159 uint32_t tuple_mask, uint8_t mac_id) 3160 { 3161 struct htt_soc *soc = pdev->soc->htt_handle; 3162 struct dp_htt_htc_pkt *pkt; 3163 qdf_nbuf_t msg; 3164 uint32_t *msg_word; 3165 uint8_t *htt_logger_bufp; 3166 int mac_for_pdev; 3167 int target_pdev_id; 3168 3169 msg = qdf_nbuf_alloc( 3170 soc->osdev, 3171 HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES), 3172 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 3173 3174 if (!msg) 3175 return QDF_STATUS_E_NOMEM; 3176 3177 mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id); 3178 target_pdev_id = 3179 dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev); 3180 3181 /* 3182 * Set the length of the message. 3183 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 3184 * separately during the below call to qdf_nbuf_push_head. 3185 * The contribution from the HTC header is added separately inside HTC. 
3186 */ 3187 if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) { 3188 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3189 "Failed to expand head for HTT_3TUPLE_CONFIG"); 3190 qdf_nbuf_free(msg); 3191 return QDF_STATUS_E_FAILURE; 3192 } 3193 3194 dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------", 3195 pdev->soc, tuple_mask, target_pdev_id); 3196 3197 msg_word = (uint32_t *)qdf_nbuf_data(msg); 3198 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 3199 htt_logger_bufp = (uint8_t *)msg_word; 3200 3201 *msg_word = 0; 3202 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG); 3203 HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id); 3204 3205 msg_word++; 3206 *msg_word = 0; 3207 HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask); 3208 HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask); 3209 3210 pkt = htt_htc_pkt_alloc(soc); 3211 if (!pkt) { 3212 qdf_nbuf_free(msg); 3213 return QDF_STATUS_E_NOMEM; 3214 } 3215 3216 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 3217 3218 SET_HTC_PACKET_INFO_TX( 3219 &pkt->htc_pkt, 3220 dp_htt_h2t_send_complete_free_netbuf, 3221 qdf_nbuf_data(msg), 3222 qdf_nbuf_len(msg), 3223 soc->htc_endpoint, 3224 /* tag for no FW response msg */ 3225 HTC_TX_PACKET_TAG_RUNTIME_PUT); 3226 3227 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 3228 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG, 3229 htt_logger_bufp); 3230 3231 return QDF_STATUS_SUCCESS; 3232 } 3233 3234 /* This macro will revert once proper HTT header will define for 3235 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file 3236 * */ 3237 #if defined(WDI_EVENT_ENABLE) 3238 /** 3239 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW 3240 * @pdev: DP PDEV handle 3241 * @stats_type_upload_mask: stats type requested by user 3242 * @mac_id: Mac id number 3243 * 3244 * return: QDF STATUS 3245 */ 3246 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct 
dp_pdev *pdev,
				     uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
			   , pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
			   , pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area;
	 * msg_word still points at the start of the HTT payload
	 */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* single-word message: msg type + pdev mask + requested TLV bitmask */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
					       stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	/* on send failure ownership is not transferred; free both here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
#endif

/**
 * dp_peer_update_inactive_time() - update a peer's tx inactive time from
 * HTT stats TLVs
 * @pdev: DP PDEV handle; caches the sw_peer_id between TLVs
 * @tag_type: HTT stats TLV tag
 * @tag_buf: TLV payload
 *
 * HTT_STATS_PEER_DETAILS_TAG only latches the sw_peer_id into pdev; the
 * subsequent HTT_STATS_PEER_STATS_CMN_TAG uses that cached id to look up
 * the peer and record its inactive time.
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf)
{
	struct dp_peer *peer = NULL;
	switch (tag_type) {
	case HTT_STATS_PEER_DETAILS_TAG:
	{
		htt_peer_details_tlv *dp_stats_buf =
			(htt_peer_details_tlv *)tag_buf;

		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
	}
	break;
	case HTT_STATS_PEER_STATS_CMN_TAG:
	{
		htt_peer_stats_cmn_tlv *dp_stats_buf =
			(htt_peer_stats_cmn_tlv
*)tag_buf; 3349 3350 peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id, 3351 DP_MOD_ID_HTT); 3352 3353 if (peer && !peer->bss_peer) { 3354 peer->stats.tx.inactive_time = 3355 dp_stats_buf->inactive_time; 3356 qdf_event_set(&pdev->fw_peer_stats_event); 3357 } 3358 if (peer) 3359 dp_peer_unref_delete(peer, DP_MOD_ID_HTT); 3360 } 3361 break; 3362 default: 3363 qdf_err("Invalid tag_type"); 3364 } 3365 } 3366 3367 /** 3368 * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW 3369 * @pdev: DP pdev handle 3370 * @fse_setup_info: FST setup parameters 3371 * 3372 * Return: Success when HTT message is sent, error on failure 3373 */ 3374 QDF_STATUS 3375 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev, 3376 struct dp_htt_rx_flow_fst_setup *fse_setup_info) 3377 { 3378 struct htt_soc *soc = pdev->soc->htt_handle; 3379 struct dp_htt_htc_pkt *pkt; 3380 qdf_nbuf_t msg; 3381 u_int32_t *msg_word; 3382 struct htt_h2t_msg_rx_fse_setup_t *fse_setup; 3383 uint8_t *htt_logger_bufp; 3384 u_int32_t *key; 3385 QDF_STATUS status; 3386 3387 msg = qdf_nbuf_alloc( 3388 soc->osdev, 3389 HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)), 3390 /* reserve room for the HTC header */ 3391 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 3392 3393 if (!msg) 3394 return QDF_STATUS_E_NOMEM; 3395 3396 /* 3397 * Set the length of the message. 3398 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 3399 * separately during the below call to qdf_nbuf_push_head. 3400 * The contribution from the HTC header is added separately inside HTC. 
3401 */ 3402 if (!qdf_nbuf_put_tail(msg, 3403 sizeof(struct htt_h2t_msg_rx_fse_setup_t))) { 3404 qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg"); 3405 return QDF_STATUS_E_FAILURE; 3406 } 3407 3408 /* fill in the message contents */ 3409 msg_word = (u_int32_t *)qdf_nbuf_data(msg); 3410 3411 memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t)); 3412 /* rewind beyond alignment pad to get to the HTC header reserved area */ 3413 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 3414 htt_logger_bufp = (uint8_t *)msg_word; 3415 3416 *msg_word = 0; 3417 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG); 3418 3419 fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word; 3420 3421 HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id); 3422 3423 msg_word++; 3424 HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries); 3425 HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search); 3426 HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word, 3427 fse_setup_info->ip_da_sa_prefix); 3428 3429 msg_word++; 3430 HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word, 3431 fse_setup_info->base_addr_lo); 3432 msg_word++; 3433 HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word, 3434 fse_setup_info->base_addr_hi); 3435 3436 key = (u_int32_t *)fse_setup_info->hash_key; 3437 fse_setup->toeplitz31_0 = *key++; 3438 fse_setup->toeplitz63_32 = *key++; 3439 fse_setup->toeplitz95_64 = *key++; 3440 fse_setup->toeplitz127_96 = *key++; 3441 fse_setup->toeplitz159_128 = *key++; 3442 fse_setup->toeplitz191_160 = *key++; 3443 fse_setup->toeplitz223_192 = *key++; 3444 fse_setup->toeplitz255_224 = *key++; 3445 fse_setup->toeplitz287_256 = *key++; 3446 fse_setup->toeplitz314_288 = *key; 3447 3448 msg_word++; 3449 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0); 3450 msg_word++; 3451 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32); 3452 msg_word++; 3453 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, 
					fse_setup->toeplitz95_64);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
					  fse_setup->toeplitz314_288);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
			fse_setup_info->pdev_id);
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
				   (void *)fse_setup_info->hash_key,
				   fse_setup_info->hash_key_len);
	} else {
		/* send failed: ownership stays with us, free both */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

/**
 * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
 * add/del a flow in HW
 * @pdev: DP pdev handle
 * @fse_op_info: Flow entry parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	/* single-entry invalidate carries the full 5-tuple of the flow;
	 * IP words are converted to network byte order for FW
	 */
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(
		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

/**
 * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
 * @pdev: DP pdev handle
 * @fisa_config: FISA configuration parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_fisa_config(struct dp_pdev *pdev,
		      struct dp_htt_rx_fisa_cfg *fisa_config)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
	uint8_t *htt_logger_bufp;
	uint32_t len;
	QDF_STATUS status;

	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);

	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;

	/* NOTE(review): this reads pdev_id back out of the just-zeroed
	 * message buffer rather than using fisa_config->pdev_id (which the
	 * success log below prints) — looks like it always programs 0;
	 * confirm against the HTT spec / firmware expectations.
	 */
	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);

	msg_word++;
	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);

	msg_word++;
	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
			fisa_config->pdev_id);
	} else {
		/* send failed: ownership stays with us, free both */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

/**
 * dp_bk_pressure_stats_handler(): worker function to print back pressure
 * stats
 *
 * @context : argument to work function (the struct dp_pdev *)
 */
static void dp_bk_pressure_stats_handler(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_soc_srngs_state *soc_srngs_state, *soc_srngs_state_next;
	const char *ring_name;
	int i;
	struct dp_srng_ring_state *ring_state;

	/* local list head; snapshots are moved here so the pdev list lock
	 * is held only for the splice, not while printing
	 */
	TAILQ_HEAD(, dp_soc_srngs_state) soc_srngs_state_list;

	TAILQ_INIT(&soc_srngs_state_list);
	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
	TAILQ_CONCAT(&soc_srngs_state_list, &pdev->bkp_stats.list,
		     list_elem);
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);

	TAILQ_FOREACH_SAFE(soc_srngs_state, &soc_srngs_state_list,
			   list_elem, soc_srngs_state_next) {
		TAILQ_REMOVE(&soc_srngs_state_list, soc_srngs_state,
			     list_elem);

		DP_PRINT_STATS("### START BKP stats for seq_num %u ###",
			       soc_srngs_state->seq_num);
		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
			ring_state = &soc_srngs_state->ring_state[i];
			ring_name = dp_srng_get_str_from_hal_ring_type
						(ring_state->ring_type);
			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
				       ring_name,
				       ring_state->sw_head,
				       ring_state->sw_tail);

			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
				       ring_name,
				       ring_state->hw_head,
				       ring_state->hw_tail);
		}

		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
			       soc_srngs_state->seq_num);
		/* each snapshot is owned by this worker once unlinked */
		qdf_mem_free(soc_srngs_state);
	}
	dp_print_napi_stats(pdev->soc);
}

/*
 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
 * processing
 * @pdev: Datapath PDEV handle
 *
 */
void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
{
	struct dp_soc_srngs_state *ring_state, *ring_state_next;

	/* nothing to tear down if attach never succeeded */
	if (!pdev->bkp_stats.work_queue)
		return;

	/* quiesce the worker before draining the pending snapshot list */
	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
	qdf_flush_work(&pdev->bkp_stats.work);
	qdf_disable_work(&pdev->bkp_stats.work);
	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
			   list_elem, ring_state_next) {
		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
			     list_elem);
		qdf_mem_free(ring_state);
	}
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
}

/*
 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
 * processing
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *         QDF_STATUS_E_NOMEM: Error
 */
QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
{
	TAILQ_INIT(&pdev->bkp_stats.list);
	pdev->bkp_stats.seq_num = 0;

	qdf_create_work(0, &pdev->bkp_stats.work,
			dp_bk_pressure_stats_handler, pdev);

	pdev->bkp_stats.work_queue =
		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
	if (!pdev->bkp_stats.work_queue)
		goto fail;

	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
	return QDF_STATUS_SUCCESS;

fail:
	dp_htt_alert("BKP stats attach failed");
	qdf_flush_work(&pdev->bkp_stats.work);
	qdf_disable_work(&pdev->bkp_stats.work);
	return QDF_STATUS_E_FAILURE;
}