1 /* 2 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <htt.h> 20 #include <hal_api.h> 21 #include "dp_htt.h" 22 #include "dp_peer.h" 23 #include "dp_types.h" 24 #include "dp_internal.h" 25 #include "dp_rx_mon.h" 26 27 #define HTT_HTC_PKT_POOL_INIT_SIZE 64 28 29 #define HTT_MSG_BUF_SIZE(msg_bytes) \ 30 ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING) 31 32 /* 33 * htt_htc_pkt_alloc() - Allocate HTC packet buffer 34 * @htt_soc: HTT SOC handle 35 * 36 * Return: Pointer to htc packet buffer 37 */ 38 static struct dp_htt_htc_pkt * 39 htt_htc_pkt_alloc(struct htt_soc *soc) 40 { 41 struct dp_htt_htc_pkt_union *pkt = NULL; 42 43 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 44 if (soc->htt_htc_pkt_freelist) { 45 pkt = soc->htt_htc_pkt_freelist; 46 soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next; 47 } 48 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 49 50 if (pkt == NULL) 51 pkt = qdf_mem_malloc(sizeof(*pkt)); 52 return &pkt->u.pkt; /* not actually a dereference */ 53 } 54 55 /* 56 * htt_htc_pkt_free() - Free HTC packet buffer 57 * @htt_soc: HTT SOC handle 58 */ 59 static void 60 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) 61 { 62 struct 
dp_htt_htc_pkt_union *u_pkt = 63 (struct dp_htt_htc_pkt_union *)pkt; 64 65 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); 66 u_pkt->u.next = soc->htt_htc_pkt_freelist; 67 soc->htt_htc_pkt_freelist = u_pkt; 68 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); 69 } 70 71 /* 72 * htt_htc_pkt_pool_free() - Free HTC packet pool 73 * @htt_soc: HTT SOC handle 74 */ 75 static void 76 htt_htc_pkt_pool_free(struct htt_soc *soc) 77 { 78 struct dp_htt_htc_pkt_union *pkt, *next; 79 pkt = soc->htt_htc_pkt_freelist; 80 while (pkt) { 81 next = pkt->u.next; 82 qdf_mem_free(pkt); 83 pkt = next; 84 } 85 soc->htt_htc_pkt_freelist = NULL; 86 } 87 88 /* 89 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianess differ 90 * @tgt_mac_addr: Target MAC 91 * @buffer: Output buffer 92 */ 93 static u_int8_t * 94 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer) 95 { 96 #ifdef BIG_ENDIAN_HOST 97 /* 98 * The host endianness is opposite of the target endianness. 99 * To make u_int32_t elements come out correctly, the target->host 100 * upload has swizzled the bytes in each u_int32_t element of the 101 * message. 102 * For byte-array message fields like the MAC address, this 103 * upload swizzling puts the bytes in the wrong order, and needs 104 * to be undone. 105 */ 106 buffer[0] = tgt_mac_addr[3]; 107 buffer[1] = tgt_mac_addr[2]; 108 buffer[2] = tgt_mac_addr[1]; 109 buffer[3] = tgt_mac_addr[0]; 110 buffer[4] = tgt_mac_addr[7]; 111 buffer[5] = tgt_mac_addr[6]; 112 return buffer; 113 #else 114 /* 115 * The host endianness matches the target endianness - 116 * we can use the mac addr directly from the message buffer. 
117 */ 118 return tgt_mac_addr; 119 #endif 120 } 121 122 /* 123 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer 124 * @soc: SOC handle 125 * @status: Completion status 126 * @netbuf: HTT buffer 127 */ 128 static void 129 dp_htt_h2t_send_complete_free_netbuf( 130 void *soc, A_STATUS status, qdf_nbuf_t netbuf) 131 { 132 qdf_nbuf_free(netbuf); 133 } 134 135 /* 136 * dp_htt_h2t_send_complete() - H2T completion handler 137 * @context: Opaque context (HTT SOC handle) 138 * @htc_pkt: HTC packet 139 */ 140 static void 141 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) 142 { 143 void (*send_complete_part2)( 144 void *soc, A_STATUS status, qdf_nbuf_t msdu); 145 struct htt_soc *soc = (struct htt_soc *) context; 146 struct dp_htt_htc_pkt *htt_pkt; 147 qdf_nbuf_t netbuf; 148 149 send_complete_part2 = htc_pkt->pPktContext; 150 151 htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt); 152 153 /* process (free or keep) the netbuf that held the message */ 154 netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext; 155 /* 156 * adf sendcomplete is required for windows only 157 */ 158 /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */ 159 if (send_complete_part2 != NULL) { 160 send_complete_part2( 161 htt_pkt->soc_ctxt, htc_pkt->Status, netbuf); 162 } 163 /* free the htt_htc_pkt / HTC_PACKET object */ 164 htt_htc_pkt_free(soc, htt_pkt); 165 } 166 167 /* 168 * htt_h2t_ver_req_msg() - Send HTT version request message to target 169 * @htt_soc: HTT SOC handle 170 * 171 * Return: 0 on success; error code on failure 172 */ 173 static int htt_h2t_ver_req_msg(struct htt_soc *soc) 174 { 175 struct dp_htt_htc_pkt *pkt; 176 qdf_nbuf_t msg; 177 uint32_t *msg_word; 178 179 msg = qdf_nbuf_alloc( 180 soc->osdev, 181 HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES), 182 /* reserve room for the HTC header */ 183 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 184 if (!msg) 185 return QDF_STATUS_E_NOMEM; 186 187 /* 188 * Set the length of the message. 
189 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 190 * separately during the below call to qdf_nbuf_push_head. 191 * The contribution from the HTC header is added separately inside HTC. 192 */ 193 if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) { 194 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 195 "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg\n", 196 __func__); 197 return QDF_STATUS_E_FAILURE; 198 } 199 200 /* fill in the message contents */ 201 msg_word = (u_int32_t *) qdf_nbuf_data(msg); 202 203 /* rewind beyond alignment pad to get to the HTC header reserved area */ 204 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); 205 206 *msg_word = 0; 207 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ); 208 209 pkt = htt_htc_pkt_alloc(soc); 210 if (!pkt) { 211 qdf_nbuf_free(msg); 212 return QDF_STATUS_E_FAILURE; 213 } 214 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 215 216 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, 217 dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg), 218 qdf_nbuf_len(msg), soc->htc_endpoint, 219 1); /* tag - not relevant here */ 220 221 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); 222 htc_send_pkt(soc->htc_soc, &pkt->htc_pkt); 223 return 0; 224 } 225 226 /* 227 * htt_srng_setup() - Send SRNG setup message to target 228 * @htt_soc: HTT SOC handle 229 * @mac_id: MAC Id 230 * @hal_srng: Opaque HAL SRNG pointer 231 * @hal_ring_type: SRNG ring type 232 * 233 * Return: 0 on success; error code on failure 234 */ 235 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng, 236 int hal_ring_type) 237 { 238 struct htt_soc *soc = (struct htt_soc *)htt_soc; 239 struct dp_htt_htc_pkt *pkt; 240 qdf_nbuf_t htt_msg; 241 uint32_t *msg_word; 242 struct hal_srng_params srng_params; 243 qdf_dma_addr_t hp_addr, tp_addr; 244 uint32_t ring_entry_size = 245 hal_srng_get_entrysize(soc->hal_soc, hal_ring_type); 246 int htt_ring_type, htt_ring_id; 247 248 /* Sizes should be set in 
4-byte words */ 249 ring_entry_size = ring_entry_size >> 2; 250 251 htt_msg = qdf_nbuf_alloc(soc->osdev, 252 HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ), 253 /* reserve room for the HTC header */ 254 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 255 if (!htt_msg) 256 goto fail0; 257 258 hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params); 259 hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng); 260 tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng); 261 262 switch (hal_ring_type) { 263 case RXDMA_BUF: 264 #ifdef QCA_HOST2FW_RXBUF_RING 265 if (srng_params.ring_id == 266 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF)) { 267 htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; 268 htt_ring_type = HTT_SW_TO_SW_RING; 269 #else 270 if (srng_params.ring_id == 271 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF + 272 (mac_id * HAL_MAX_RINGS_PER_LMAC))) { 273 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 274 htt_ring_type = HTT_SW_TO_HW_RING; 275 #endif 276 } else if (srng_params.ring_id == 277 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF + 278 (mac_id * HAL_MAX_RINGS_PER_LMAC))) { 279 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 280 htt_ring_type = HTT_SW_TO_HW_RING; 281 } else { 282 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 283 "%s: Ring %d currently not supported\n", 284 __func__, srng_params.ring_id); 285 goto fail1; 286 } 287 288 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 289 "%s: ring_type %d ring_id %d\n", 290 __func__, hal_ring_type, srng_params.ring_id); 291 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 292 "%s: hp_addr 0x%llx tp_addr 0x%llx\n", 293 __func__, (uint64_t)hp_addr, (uint64_t)tp_addr); 294 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 295 "%s: htt_ring_id %d\n", __func__, htt_ring_id); 296 break; 297 case RXDMA_MONITOR_BUF: 298 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; 299 htt_ring_type = HTT_SW_TO_HW_RING; 300 break; 301 case RXDMA_MONITOR_STATUS: 302 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; 303 htt_ring_type = HTT_SW_TO_HW_RING; 304 break; 305 case RXDMA_MONITOR_DST: 306 
htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; 307 htt_ring_type = HTT_HW_TO_SW_RING; 308 break; 309 case RXDMA_MONITOR_DESC: 310 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 311 htt_ring_type = HTT_SW_TO_HW_RING; 312 break; 313 314 default: 315 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 316 "%s: Ring currently not supported\n", __func__); 317 goto fail1; 318 } 319 320 /* 321 * Set the length of the message. 322 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 323 * separately during the below call to qdf_nbuf_push_head. 324 * The contribution from the HTC header is added separately inside HTC. 325 */ 326 if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) { 327 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 328 "%s: Failed to expand head for SRING_SETUP msg\n", 329 __func__); 330 return QDF_STATUS_E_FAILURE; 331 } 332 333 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 334 335 /* rewind beyond alignment pad to get to the HTC header reserved area */ 336 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 337 338 /* word 0 */ 339 *msg_word = 0; 340 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP); 341 342 if (htt_ring_type == HTT_SW_TO_HW_RING) 343 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, 344 DP_SW2HW_MACID(mac_id)); 345 else 346 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id); 347 348 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 349 "%s: mac_id %d\n", __func__, mac_id); 350 HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type); 351 /* TODO: Discuss with FW on changing this to unique ID and using 352 * htt_ring_type to send the type of ring 353 */ 354 HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id); 355 356 /* word 1 */ 357 msg_word++; 358 *msg_word = 0; 359 HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word, 360 srng_params.ring_base_paddr & 0xffffffff); 361 362 /* word 2 */ 363 msg_word++; 364 *msg_word = 0; 365 HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word, 366 (uint64_t)srng_params.ring_base_paddr >> 32); 
367 368 /* word 3 */ 369 msg_word++; 370 *msg_word = 0; 371 HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size); 372 HTT_SRING_SETUP_RING_SIZE_SET(*msg_word, 373 (ring_entry_size * srng_params.num_entries)); 374 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 375 "%s: entry_size %d\n", __func__, 376 ring_entry_size); 377 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 378 "%s: num_entries %d\n", __func__, 379 srng_params.num_entries); 380 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 381 "%s: ring_size %d\n", __func__, 382 (ring_entry_size * srng_params.num_entries)); 383 if (htt_ring_type == HTT_SW_TO_HW_RING) 384 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET( 385 *msg_word, 1); 386 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word, 387 !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); 388 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word, 389 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); 390 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word, 391 !!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP)); 392 393 /* word 4 */ 394 msg_word++; 395 *msg_word = 0; 396 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, 397 hp_addr & 0xffffffff); 398 399 /* word 5 */ 400 msg_word++; 401 *msg_word = 0; 402 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, 403 (uint64_t)hp_addr >> 32); 404 405 /* word 6 */ 406 msg_word++; 407 *msg_word = 0; 408 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, 409 tp_addr & 0xffffffff); 410 411 /* word 7 */ 412 msg_word++; 413 *msg_word = 0; 414 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, 415 (uint64_t)tp_addr >> 32); 416 417 /* word 8 */ 418 msg_word++; 419 *msg_word = 0; 420 HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word, 421 srng_params.msi_addr & 0xffffffff); 422 423 /* word 9 */ 424 msg_word++; 425 *msg_word = 0; 426 HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word, 427 (uint64_t)(srng_params.msi_addr) >> 32); 428 429 /* 
word 10 */ 430 msg_word++; 431 *msg_word = 0; 432 HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word, 433 srng_params.msi_data); 434 435 /* word 11 */ 436 msg_word++; 437 *msg_word = 0; 438 HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word, 439 srng_params.intr_batch_cntr_thres_entries * 440 ring_entry_size); 441 HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word, 442 srng_params.intr_timer_thres_us >> 3); 443 444 /* word 12 */ 445 msg_word++; 446 *msg_word = 0; 447 if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) { 448 /* TODO: Setting low threshold to 1/8th of ring size - see 449 * if this needs to be configurable 450 */ 451 HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word, 452 srng_params.low_threshold); 453 } 454 /* "response_required" field should be set if a HTT response message is 455 * required after setting up the ring. 456 */ 457 pkt = htt_htc_pkt_alloc(soc); 458 if (!pkt) 459 goto fail1; 460 461 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 462 463 SET_HTC_PACKET_INFO_TX( 464 &pkt->htc_pkt, 465 dp_htt_h2t_send_complete_free_netbuf, 466 qdf_nbuf_data(htt_msg), 467 qdf_nbuf_len(htt_msg), 468 soc->htc_endpoint, 469 1); /* tag - not relevant here */ 470 471 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); 472 htc_send_pkt(soc->htc_soc, &pkt->htc_pkt); 473 474 return QDF_STATUS_SUCCESS; 475 476 fail1: 477 qdf_nbuf_free(htt_msg); 478 fail0: 479 return QDF_STATUS_E_FAILURE; 480 } 481 482 /* 483 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter 484 * config message to target 485 * @htt_soc: HTT SOC handle 486 * @pdev_id: PDEV Id 487 * @hal_srng: Opaque HAL SRNG pointer 488 * @hal_ring_type: SRNG ring type 489 * @ring_buf_size: SRNG buffer size 490 * @htt_tlv_filter: Rx SRNG TLV and filter setting 491 * Return: 0 on success; error code on failure 492 */ 493 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng, 494 int hal_ring_type, int ring_buf_size, 495 struct htt_rx_ring_tlv_filter *htt_tlv_filter) 496 { 497 struct htt_soc 
*soc = (struct htt_soc *)htt_soc; 498 struct dp_htt_htc_pkt *pkt; 499 qdf_nbuf_t htt_msg; 500 uint32_t *msg_word; 501 struct hal_srng_params srng_params; 502 uint32_t htt_ring_type, htt_ring_id; 503 uint32_t tlv_filter; 504 505 htt_msg = qdf_nbuf_alloc(soc->osdev, 506 HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ), 507 /* reserve room for the HTC header */ 508 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); 509 if (!htt_msg) 510 goto fail0; 511 512 hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params); 513 514 switch (hal_ring_type) { 515 case RXDMA_BUF: 516 #if QCA_HOST2FW_RXBUF_RING 517 htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; 518 htt_ring_type = HTT_SW_TO_SW_RING; 519 #else 520 htt_ring_id = HTT_RXDMA_HOST_BUF_RING; 521 htt_ring_type = HTT_SW_TO_HW_RING; 522 #endif 523 break; 524 case RXDMA_MONITOR_BUF: 525 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; 526 htt_ring_type = HTT_SW_TO_HW_RING; 527 break; 528 case RXDMA_MONITOR_STATUS: 529 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; 530 htt_ring_type = HTT_SW_TO_HW_RING; 531 break; 532 case RXDMA_MONITOR_DST: 533 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; 534 htt_ring_type = HTT_HW_TO_SW_RING; 535 break; 536 case RXDMA_MONITOR_DESC: 537 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; 538 htt_ring_type = HTT_SW_TO_HW_RING; 539 break; 540 541 default: 542 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 543 "%s: Ring currently not supported\n", __func__); 544 goto fail1; 545 } 546 547 /* 548 * Set the length of the message. 549 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added 550 * separately during the below call to qdf_nbuf_push_head. 551 * The contribution from the HTC header is added separately inside HTC. 
552 */ 553 if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) { 554 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 555 "%s: Failed to expand head for RX Ring Cfg msg\n", 556 __func__); 557 goto fail1; /* failure */ 558 } 559 560 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); 561 562 /* rewind beyond alignment pad to get to the HTC header reserved area */ 563 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); 564 565 /* word 0 */ 566 *msg_word = 0; 567 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG); 568 HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word, pdev_id); 569 /* TODO: Discuss with FW on changing this to unique ID and using 570 * htt_ring_type to send the type of ring 571 */ 572 HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id); 573 574 HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word, 575 !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); 576 577 HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word, 578 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); 579 580 /* word 1 */ 581 msg_word++; 582 *msg_word = 0; 583 HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word, 584 ring_buf_size); 585 586 /* word 2 */ 587 msg_word++; 588 *msg_word = 0; 589 590 if (htt_tlv_filter->enable_fp) { 591 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 592 MGMT, 0000, 1); 593 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 594 MGMT, 0001, 1); 595 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 596 MGMT, 0010, 1); 597 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 598 MGMT, 0011, 1); 599 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 600 MGMT, 0100, 1); 601 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 602 MGMT, 0101, 1); 603 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 604 MGMT, 0110, 1); 605 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 606 MGMT, 1000, 1); 607 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, 608 MGMT, 1001, 1); 609 } 
610 611 if (htt_tlv_filter->enable_md) { 612 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 613 MGMT, 0000, 1); 614 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 615 MGMT, 0001, 1); 616 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 617 MGMT, 0010, 1); 618 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 619 MGMT, 0011, 1); 620 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 621 MGMT, 0100, 1); 622 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 623 MGMT, 0101, 1); 624 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 625 MGMT, 0110, 1); 626 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 627 MGMT, 1000, 1); 628 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, 629 MGMT, 1001, 1); 630 } 631 632 if (htt_tlv_filter->enable_mo) { 633 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 634 MGMT, 0000, 1); 635 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 636 MGMT, 0001, 1); 637 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 638 MGMT, 0010, 1); 639 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 640 MGMT, 0011, 1); 641 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 642 MGMT, 0100, 1); 643 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 644 MGMT, 0101, 1); 645 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 646 MGMT, 0110, 1); 647 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 648 MGMT, 1000, 1); 649 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, 650 MGMT, 1001, 1); 651 } 652 /* word 3 */ 653 msg_word++; 654 *msg_word = 0; 655 656 if (htt_tlv_filter->enable_fp) { 657 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, 658 MGMT, 1010, 1); 659 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, 660 MGMT, 1011, 1); 661 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, 662 MGMT, 1100, 1); 663 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, 664 MGMT, 1101, 1); 665 
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, 666 MGMT, 1110, 1); 667 } 668 669 if (htt_tlv_filter->enable_md) { 670 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 671 MGMT, 1010, 1); 672 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 673 MGMT, 1011, 1); 674 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 675 MGMT, 1100, 1); 676 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 677 MGMT, 1101, 1); 678 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, 679 MGMT, 1110, 1); 680 } 681 682 if (htt_tlv_filter->enable_mo) { 683 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, 684 MGMT, 1010, 1); 685 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, 686 MGMT, 1011, 1); 687 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, 688 MGMT, 1100, 1); 689 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, 690 MGMT, 1101, 1); 691 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, 692 MGMT, 1110, 1); 693 } 694 695 /* word 4 */ 696 msg_word++; 697 *msg_word = 0; 698 699 if (htt_tlv_filter->enable_fp) { 700 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 701 CTRL, 0111, 1); 702 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 703 CTRL, 1000, 1); 704 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, 705 CTRL, 1001, 1); 706 } 707 708 if (htt_tlv_filter->enable_md) { 709 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 710 CTRL, 0111, 1); 711 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 712 CTRL, 1000, 1); 713 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, 714 CTRL, 1001, 1); 715 } 716 717 if (htt_tlv_filter->enable_mo) { 718 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 719 CTRL, 0111, 1); 720 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 721 CTRL, 1000, 1); 722 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, 723 CTRL, 1001, 1); 724 } 725 726 /* word 5 */ 727 msg_word++; 728 *msg_word = 0; 729 if 
(htt_tlv_filter->enable_fp) { 730 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 731 CTRL, 1010, 1); 732 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 733 CTRL, 1011, 1); 734 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 735 CTRL, 1100, 1); 736 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 737 CTRL, 1101, 1); 738 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 739 CTRL, 1110, 1); 740 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 741 CTRL, 1111, 1); 742 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 743 CTRL, 1111, 1); 744 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 745 DATA, MCAST, 1); 746 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 747 DATA, UCAST, 1); 748 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, 749 DATA, NULL, 1); 750 } 751 752 if (htt_tlv_filter->enable_md) { 753 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 754 CTRL, 1010, 1); 755 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 756 CTRL, 1011, 1); 757 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 758 CTRL, 1100, 1); 759 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 760 CTRL, 1101, 1); 761 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 762 CTRL, 1110, 1); 763 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 764 CTRL, 1111, 1); 765 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 766 CTRL, 1111, 1); 767 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 768 DATA, MCAST, 1); 769 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 770 DATA, UCAST, 1); 771 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, 772 DATA, NULL, 1); 773 } 774 if (htt_tlv_filter->enable_mo) { 775 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 776 CTRL, 1010, 1); 777 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 778 CTRL, 1011, 1); 779 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 780 CTRL, 1100, 1); 781 
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 782 CTRL, 1101, 1); 783 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 784 CTRL, 1110, 1); 785 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 786 CTRL, 1111, 1); 787 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 788 CTRL, 1111, 1); 789 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 790 DATA, MCAST, 1); 791 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 792 DATA, UCAST, 1); 793 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, 794 DATA, NULL, 1); 795 } 796 797 /* word 6 */ 798 msg_word++; 799 *msg_word = 0; 800 tlv_filter = 0; 801 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START, 802 htt_tlv_filter->mpdu_start); 803 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START, 804 htt_tlv_filter->msdu_start); 805 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET, 806 htt_tlv_filter->packet); 807 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END, 808 htt_tlv_filter->msdu_end); 809 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END, 810 htt_tlv_filter->mpdu_end); 811 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER, 812 htt_tlv_filter->packet_header); 813 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION, 814 htt_tlv_filter->ppdu_end_status_done); 815 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START, 816 htt_tlv_filter->ppdu_start); 817 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END, 818 htt_tlv_filter->ppdu_end); 819 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS, 820 htt_tlv_filter->ppdu_end_user_stats); 821 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, 822 PPDU_END_USER_STATS_EXT, 823 htt_tlv_filter->ppdu_end_user_stats_ext); 824 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE, 825 htt_tlv_filter->ppdu_end_status_done); 826 827 HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter); 828 829 /* "response_required" 
field should be set if a HTT response message is 830 * required after setting up the ring. 831 */ 832 pkt = htt_htc_pkt_alloc(soc); 833 if (!pkt) 834 goto fail1; 835 836 pkt->soc_ctxt = NULL; /* not used during send-done callback */ 837 838 SET_HTC_PACKET_INFO_TX( 839 &pkt->htc_pkt, 840 dp_htt_h2t_send_complete_free_netbuf, 841 qdf_nbuf_data(htt_msg), 842 qdf_nbuf_len(htt_msg), 843 soc->htc_endpoint, 844 1); /* tag - not relevant here */ 845 846 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); 847 htc_send_pkt(soc->htc_soc, &pkt->htc_pkt); 848 return QDF_STATUS_SUCCESS; 849 850 fail1: 851 qdf_nbuf_free(htt_msg); 852 fail0: 853 return QDF_STATUS_E_FAILURE; 854 } 855 856 /* 857 * htt_soc_attach_target() - SOC level HTT setup 858 * @htt_soc: HTT SOC handle 859 * 860 * Return: 0 on success; error code on failure 861 */ 862 int htt_soc_attach_target(void *htt_soc) 863 { 864 struct htt_soc *soc = (struct htt_soc *)htt_soc; 865 866 return htt_h2t_ver_req_msg(soc); 867 } 868 869 870 /* 871 * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler 872 * @context: Opaque context (HTT SOC handle) 873 * @pkt: HTC packet 874 */ 875 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt) 876 { 877 struct htt_soc *soc = (struct htt_soc *) context; 878 qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext; 879 u_int32_t *msg_word; 880 enum htt_t2h_msg_type msg_type; 881 882 /* check for successful message reception */ 883 if (pkt->Status != A_OK) { 884 if (pkt->Status != A_ECANCELED) 885 soc->stats.htc_err_cnt++; 886 887 qdf_nbuf_free(htt_t2h_msg); 888 return; 889 } 890 891 /* TODO: Check if we should pop the HTC/HTT header alignment padding */ 892 893 msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg); 894 msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word); 895 switch (msg_type) { 896 case HTT_T2H_MSG_TYPE_PEER_MAP: 897 { 898 u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN]; 899 u_int8_t *peer_mac_addr; 900 u_int16_t peer_id; 901 u_int16_t hw_peer_id; 902 
u_int8_t vdev_id; 903 904 peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word); 905 hw_peer_id = 906 HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2)); 907 vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word); 908 peer_mac_addr = htt_t2h_mac_addr_deswizzle( 909 (u_int8_t *) (msg_word+1), 910 &mac_addr_deswizzle_buf[0]); 911 QDF_TRACE(QDF_MODULE_ID_TXRX, 912 QDF_TRACE_LEVEL_INFO, 913 "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n", 914 peer_id, vdev_id); 915 916 dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id, 917 vdev_id, peer_mac_addr); 918 break; 919 } 920 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 921 { 922 u_int16_t peer_id; 923 peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word); 924 925 dp_rx_peer_unmap_handler(soc->dp_soc, peer_id); 926 break; 927 } 928 case HTT_T2H_MSG_TYPE_SEC_IND: 929 { 930 u_int16_t peer_id; 931 enum htt_sec_type sec_type; 932 int is_unicast; 933 934 peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word); 935 sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word); 936 is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word); 937 /* point to the first part of the Michael key */ 938 msg_word++; 939 dp_rx_sec_ind_handler( 940 soc->dp_soc, peer_id, sec_type, is_unicast, 941 msg_word, msg_word + 2); 942 break; 943 } 944 #ifdef notyet 945 #ifndef REMOVE_PKT_LOG 946 case HTT_T2H_MSG_TYPE_PKTLOG: 947 { 948 u_int32_t *pl_hdr; 949 pl_hdr = (msg_word + 1); 950 wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc, 951 pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL); 952 break; 953 } 954 #endif 955 #endif /* notyet */ 956 case HTT_T2H_MSG_TYPE_VERSION_CONF: 957 { 958 soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word); 959 soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word); 960 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 961 "target uses HTT version %d.%d; host uses %d.%d\n", 962 soc->tgt_ver.major, soc->tgt_ver.minor, 963 HTT_CURRENT_VERSION_MAJOR, 964 HTT_CURRENT_VERSION_MINOR); 965 if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) { 966 
QDF_TRACE(QDF_MODULE_ID_TXRX, 967 QDF_TRACE_LEVEL_ERROR, 968 "*** Incompatible host/target HTT versions!\n"); 969 } 970 /* abort if the target is incompatible with the host */ 971 qdf_assert(soc->tgt_ver.major == 972 HTT_CURRENT_VERSION_MAJOR); 973 if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) { 974 QDF_TRACE(QDF_MODULE_ID_TXRX, 975 QDF_TRACE_LEVEL_WARN, 976 "*** Warning: host/target HTT versions" 977 " are different, though compatible!\n"); 978 } 979 break; 980 } 981 case HTT_T2H_MSG_TYPE_RX_ADDBA: 982 { 983 uint16_t peer_id; 984 uint8_t tid; 985 uint8_t win_sz; 986 uint16_t status; 987 struct dp_peer *peer; 988 989 /* 990 * Update REO Queue Desc with new values 991 */ 992 peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word); 993 tid = HTT_RX_ADDBA_TID_GET(*msg_word); 994 win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word); 995 peer = dp_peer_find_by_id(soc->dp_soc, peer_id); 996 997 /* 998 * Window size needs to be incremented by 1 999 * since fw needs to represent a value of 256 1000 * using just 8 bits 1001 */ 1002 if (peer) { 1003 status = dp_addba_requestprocess_wifi3(peer, 1004 0, tid, 0, win_sz + 1, 0xffff); 1005 QDF_TRACE(QDF_MODULE_ID_TXRX, 1006 QDF_TRACE_LEVEL_INFO, 1007 FL("PeerID %d BAW %d TID %d stat %d\n"), 1008 peer_id, win_sz, tid, status); 1009 1010 } else { 1011 QDF_TRACE(QDF_MODULE_ID_TXRX, 1012 QDF_TRACE_LEVEL_ERROR, 1013 FL("Peer not found peer id %d\n"), 1014 peer_id); 1015 } 1016 break; 1017 } 1018 1019 1020 default: 1021 break; 1022 }; 1023 1024 /* Free the indication buffer */ 1025 qdf_nbuf_free(htt_t2h_msg); 1026 } 1027 1028 /* 1029 * dp_htt_h2t_full() - Send full handler (called from HTC) 1030 * @context: Opaque context (HTT SOC handle) 1031 * @pkt: HTC packet 1032 * 1033 * Return: enum htc_send_full_action 1034 */ 1035 static enum htc_send_full_action 1036 dp_htt_h2t_full(void *context, HTC_PACKET *pkt) 1037 { 1038 return HTC_SEND_FULL_KEEP; 1039 } 1040 1041 /* 1042 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC 1043 * 
 * @htt_soc: HTT SOC handle
 *
 * Connects the HTT data message service to HTC, registering the
 * send-complete, receive and queue-full callbacks, and stores the
 * resulting endpoint in the SOC.
 *
 * Return: 0 on success; error code on failure
 */
static int
htt_htc_soc_attach(struct htt_soc *soc)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	A_STATUS status;

	qdf_mem_set(&connect, sizeof(connect), 0);
	qdf_mem_set(&response, sizeof(response), 0);

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;

	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to the HTT data message service (the earlier comment
	 * said "control service", but HTT_DATA_MSG_SVC is requested here)
	 */
	connect.service_id = HTT_DATA_MSG_SVC;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	if (status != A_OK)
		return QDF_STATUS_E_FAILURE;

	soc->htc_endpoint = response.Endpoint;

	/* NOTE: returns 0 (success) from an int function while the failure
	 * path returns a QDF_STATUS value; callers only test for non-zero
	 */
	return 0; /* success */
}

/*
 * htt_soc_attach() - SOC level HTT initialization
 * @dp_soc: Opaque Data path SOC handle
 * @osif_soc: Opaque OSIF SOC handle
 * @htc_soc: SOC level HTC handle
 * @hal_soc: Opaque HAL SOC handle
 * @osdev: QDF device
 *
 * Allocates and fills the HTT SOC object, connects the HTT service to
 * HTC and pre-allocates a pool of HTC packet wrappers onto the
 * freelist. On failure everything allocated here is released.
 *
 * Return: HTT handle on success; NULL on failure
 */
void *
htt_soc_attach(void *dp_soc, void *osif_soc, HTC_HANDLE htc_soc,
	void *hal_soc, qdf_device_t osdev)
{
	struct htt_soc *soc;
	int i;

	soc = qdf_mem_malloc(sizeof(*soc));

	if (!soc)
		goto fail1;

	soc->osdev = osdev;
	soc->osif_soc = osif_soc;
	soc->dp_soc = dp_soc;
	soc->htc_soc = htc_soc;
	soc->hal_soc = hal_soc;

	/* TODO: See if any NSS related context is required in htt_soc */

	soc->htt_htc_pkt_freelist = NULL;

	/* NOTE(review): the HTC connect below registers callbacks before
	 * htt_tx_mutex is initialized; confirm no H2T/T2H traffic can
	 * touch the pkt freelist before HTT_TX_MUTEX_INIT runs
	 */
	if (htt_htc_soc_attach(soc))
		goto fail2;

	/* TODO: See if any Rx data specific initialization is required. For
	 * MCL use cases, the data will be received as single packet and
	 * should not require any descriptor or reorder handling
	 */

	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);

	/* pre-allocate some HTC_PACKET objects */
	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
		struct dp_htt_htc_pkt_union *pkt;

		pkt = qdf_mem_malloc(sizeof(*pkt));
		if (!pkt)
			break; /* partial pool is acceptable; not an error */

		/* seed the freelist through the regular free path */
		htt_htc_pkt_free(soc, &pkt->u.pkt);
	}

	return soc;

fail2:
	qdf_mem_free(soc);

fail1:
	return NULL;
}


/*
 * htt_soc_detach() - Detach SOC level HTT
 * @htt_soc: HTT SOC handle
 *
 * Frees the HTC packet freelist, destroys the tx mutex and releases
 * the HTT SOC object allocated by htt_soc_attach(). Presumably the
 * caller guarantees no HTT traffic is in flight — verify at call sites.
 */
void
htt_soc_detach(void *htt_soc)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;

	htt_htc_pkt_pool_free(soc);
	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
	qdf_mem_free(soc);
}