/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "ce_api.h"
#include "ce_internal.h"
#include "ce_main.h"
#include "ce_reg.h"
#include "hif.h"
#include "hif_debug.h"
#include "hif_io32.h"
#include "qdf_lock.h"
#include "hif_main.h"
#include "hif_napi.h"
#include "qdf_module.h"
#include "regtable.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 *
 * Target TX hash result: toeplitz_hash_result
 */
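
/*
 * Illustrative usage sketch (comment only, not compiled): how a caller
 * such as HIF/HTC might drive a CE pair, assuming an nbuf whose DMA
 * address has already been mapped. The variable names are hypothetical;
 * the enqueue and send entry points correspond to the legacy
 * implementations later in this file.
 *
 *	// Destination side: keep anonymous receive buffers posted; the
 *	// per_recv_context (here the nbuf) is echoed back on completion.
 *	ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, nbuf_paddr);
 *
 *	// Source side: post one buffer; flags may request gather or
 *	// byte-swap behavior (see ce_send_nolock_legacy() below),
 *	// here flags = 0 and user_flags = 0.
 *	ce_send(ce_hdl, (void *)nbuf, nbuf_paddr, nbytes, transfer_id, 0, 0);
 */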

/* NB: Modeled after ce_completed_send_next */
/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

#define SLOTS_PER_DATAPATH_TX 2

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdu: msdu to be sent
 * @transfer_id: transfer_id
 * @download_len: packet download length
 *
 * Assumption: Called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Return: No. of packets that could be sent
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;
	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
	bool ok_to_send = true;

	/*
	 * Create a log assuming the call will go through, and if not, we would
	 * add an error trace as well.
	 * Please add the same failure log for any additional error paths.
	 */
	DPTRACE(qdf_dp_trace(msdu,
			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(msdu),
			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	/*
	 * Request runtime PM resume if it has already suspended and make
	 * sure there is no PCIe link access.
	 */
	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_CE) != 0)
		ok_to_send = false;

	if (ok_to_send) {
		Q_TARGET_ACCESS_BEGIN(scn);
		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	}

	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < SLOTS_PER_DATAPATH_TX)) {
		hif_err_rl("Source ring full, required %d, available %d",
			   SLOTS_PER_DATAPATH_TX,
			   CE_RING_DELTA(nentries_mask, write_index,
					 sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		if (ok_to_send)
			Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		DPTRACE(qdf_dp_trace(NULL,
				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     NULL, 0, QDF_TX));

		return 0;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		download_len -= shadow_src_desc->nbytes;
		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					       CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~CE_DESC_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload */
		shadow_src_desc->nbytes = download_len;
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;

		hif_record_ce_desc_event(scn, ce_state->id, type,
					 (union ce_desc *)src_desc,
				src_ring->per_transfer_context[write_index],
					 write_index, shadow_src_desc->nbytes);

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(msdu),
				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	if (ok_to_send) {
		if (qdf_likely(ce_state->state == CE_RUNNING)) {
			type = FAST_TX_WRITE_INDEX_UPDATE;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			Q_TARGET_ACCESS_END(scn);
		} else {
			ce_state->state = CE_PENDING;
		}
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_CE);
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/* sent 1 packet */
	return 1;
}

/**
 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
 * @ce_state: handle to copy engine state
 * @cmpl_msdus: Rx msdus
 * @num_cmpls: number of Rx msdus
 * @ctrl_addr: CE control address
 *
 * Return: None
 */
static void ce_fastpath_rx_handle(struct CE_state *ce_state,
				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
				  uint32_t ctrl_addr)
{
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t write_index;

	qdf_spin_unlock(&ce_state->ce_index_lock);
	ce_state->fastpath_handler(ce_state->context, cmpl_msdus, num_cmpls);
	qdf_spin_lock(&ce_state->ce_index_lock);

	/* Update Destination Ring Write Index */
	write_index = dest_ring->write_index;
	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_WRITE_INDEX_UPDATE,
				 NULL, NULL, write_index, 0);

	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}
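
/*
 * Worked example (illustrative): the ring index macros rely on the entry
 * count being a power of two, so nentries_mask = nentries - 1 and all
 * index arithmetic wraps for free (assuming the usual mask-based
 * definitions in ce_internal.h). With nentries = 512 (mask 0x1FF):
 *
 *	CE_RING_IDX_INCR(0x1FF, 511)   -> 0   (wraps around)
 *	CE_RING_IDX_ADD(0x1FF, 510, 4) -> 2
 *	CE_RING_DELTA(0x1FF, 5, 2)     -> (2 - 5) & 0x1FF = 509
 *
 * The fullness checks compare against sw_index - 1, so one entry is
 * always kept unused; this keeps an empty ring and a full ring
 * distinguishable.
 */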

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 *
 * 1) Go through the CE ring, and find the completions
 * 2) For valid completions retrieve context (nbuf) from per_transfer_context[]
 * 3) Unmap buffer & accumulate in an array.
 * 4) Call message handler when array is full or when exiting the handler
 *
 * Return: void
 */
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t sw_index = dest_ring->sw_index;
	uint32_t nbytes;
	qdf_nbuf_t nbuf;
	dma_addr_t paddr;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	uint32_t nbuf_cmpl_idx = 0;
	unsigned int more_comp_cnt = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

more_data:
	for (;;) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
						 sw_index);

		/*
		 * The following 2 reads are from non-cached memory
		 */
		nbytes = dest_desc->nbytes;

		/* If completion is invalid, break */
		if (qdf_unlikely(nbytes == 0))
			break;

		/*
		 * Build the nbuf list from valid completions
		 */
		nbuf = dest_ring->per_transfer_context[sw_index];

		/*
		 * No lock is needed here, since this is the only thread
		 * that accesses the sw_index
		 */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);

		/*
		 * CAREFUL : Uncached write, but still less expensive,
		 * since most modern caches use "write-combining" to
		 * flush multiple cache-writes all at once.
		 */
		dest_desc->nbytes = 0;

		/*
		 * Per our understanding this is not required on our
		 * platform, since we are doing the same cache invalidation
		 * operation on the same buffer twice in succession,
		 * without any modification to this buffer by the CPU in
		 * between. However, this code with 2 syncs in succession
		 * has been undergoing some testing at a customer site,
		 * and has shown no problems so far. We would like to
		 * validate with the customer that this sync is really not
		 * required, before we remove it completely.
		 */
		paddr = QDF_NBUF_CB_PADDR(nbuf);

		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
						(skb_end_pointer(nbuf) -
						 (nbuf)->data),
						DMA_FROM_DEVICE);

		qdf_nbuf_put_tail(nbuf, nbytes);

		qdf_assert_always(nbuf->data);

		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
			hif_get_rx_ctx_id(ce_state->id, hif_hdl);
		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;

		/*
		 * we are not posting the buffers back; instead we are
		 * reusing the buffers
		 */
		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
			hif_record_ce_desc_event(scn, ce_state->id,
						 FAST_RX_SOFTWARE_INDEX_UPDATE,
						 NULL, NULL, sw_index, 0);
			dest_ring->sw_index = sw_index;
			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
					      nbuf_cmpl_idx, ctrl_addr);

			ce_state->receive_count += nbuf_cmpl_idx;
			if (qdf_unlikely(hif_ce_service_should_yield(
						scn, ce_state))) {
				ce_state->force_break = 1;
				qdf_atomic_set(&ce_state->rx_pending, 1);
				return;
			}

			nbuf_cmpl_idx = 0;
			more_comp_cnt = 0;
		}
	}

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	dest_ring->sw_index = sw_index;

	/*
	 * If there are not enough completions to fill the array,
	 * just call the message handler here
	 */
	if (nbuf_cmpl_idx) {
		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
				      nbuf_cmpl_idx, ctrl_addr);

		ce_state->receive_count += nbuf_cmpl_idx;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			qdf_atomic_set(&ce_state->rx_pending, 1);
			return;
		}

		/* check for more packets after upper layer processing */
		nbuf_cmpl_idx = 0;
		more_comp_cnt = 0;
		goto more_data;
	}

	hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());

	qdf_atomic_set(&ce_state->rx_pending, 0);
	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					   HOST_IS_COPY_COMPLETE_MASK);
	} else {
		hif_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_data;
		} else {
			hif_err("Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				nentries_mask,
				ce_state->dest_ring->sw_index,
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
		}
	}
#ifdef NAPI_YIELD_BUDGET_BASED
	/*
	 * Caution : Before you modify this code, please refer to the
	 * hif_napi_poll function to understand how napi_complete gets
	 * called and make the necessary changes. Force break has to be
	 * done till WIN disables the interrupt at source.
	 */
	ce_state->force_break = 1;
#endif
}
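
/*
 * Note on the fastpath Rx recycle model used above (illustrative,
 * hypothetical handler name): completed buffers are handed to the
 * registered fastpath_handler and then reused in place rather than freed
 * and re-posted. A minimal sketch of the contract a handler is expected
 * to honor:
 *
 *	static void my_fastpath_handler(void *ctx, qdf_nbuf_t *bufs,
 *					uint32_t n)
 *	{
 *		// consume bufs[0..n-1], then reset each nbuf and sync it
 *		// back for the device so the same ring entry can be
 *		// completed into again
 *	}
 *
 * ce_fastpath_rx_handle() then advances the destination write index by n,
 * which re-arms those ring entries.
 */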

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return scn->fastpath_mode_on;
}
#else
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

static QDF_STATUS
ce_send_nolock_legacy(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      qdf_dma_addr_t buffer,
		      uint32_t nbytes,
		      uint32_t transfer_id,
		      uint32_t flags,
		      uint32_t user_flags)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);

#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) &
				   CE_RING_BASE_ADDR_HIGH_MASK);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->target_int_disable = 0;
		shadow_src_desc->host_int_disable = 0;

		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;
		ce_validate_nbytes(nbytes, CE_state);

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (shadow_src_desc->gather) {
			event_type = HIF_TX_GATHER_DESC_POST;
		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
			event_type = HIF_TX_DESC_SOFTWARE_POST;
			CE_state->state = CE_PENDING;
		} else {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)shadow_src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

static QDF_STATUS
ce_sendlist_send_legacy(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	QDF_STATUS status = QDF_STATUS_E_NOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
					scn, CE_state->ctrl_addr);
		Q_TARGET_ACCESS_END(scn);
	}

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_legacy(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
					       (qdf_dma_addr_t)item->data,
					       item->u.nbytes,
					       transfer_id, item->flags,
					       item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
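
/*
 * Illustrative sendlist construction (comment only): a two-fragment
 * gather send, e.g. an HTC header plus payload, assuming the ce_sendlist
 * helpers declared in ce_api.h and already-mapped DMA addresses. Every
 * item but the last is posted internally with CE_SEND_FLAG_GATHER, so
 * the target sees a single logical transfer.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, payload_paddr, payload_len, 0, 0);
 *	ce_sendlist_send(ce_hdl, (void *)msdu, &sendlist, transfer_id);
 */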

/**
 * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static QDF_STATUS
ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
			   void *per_recv_context, qdf_dma_addr_t buffer)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_IO;
	}

	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) &
				   CE_RING_BASE_ADDR_HIGH_MASK);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id,
					 HIF_RX_DESC_POST,
					 (union ce_desc *)dest_desc,
					 per_recv_context,
					 write_index, 0);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else
		status = QDF_STATUS_E_FAILURE;

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

static unsigned int
ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

static unsigned int
ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}
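
/*
 * Worked example (illustrative): "entries done" is the mask-based
 * distance from the software index to the hardware read index. With
 * nentries_mask = 0x1FF, sw_index = 500 and read_index = 4, the hardware
 * has wrapped and CE_RING_DELTA(0x1FF, 500, 4) = (4 - 500) & 0x1FF = 16,
 * i.e. sixteen completed entries are waiting to be reaped.
 */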

static QDF_STATUS
ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *flagsp)
{
	QDF_STATUS status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory reads from non-cacheable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
				 (union ce_desc *)dest_desc,
				 dest_ring->per_transfer_context[sw_index],
				 sw_index, 0);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	if (per_transfer_contextp) {
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];
	}
	dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}
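
/*
 * Typical consumer pattern (illustrative, hypothetical caller): the
 * nolock variant above is meant to be driven in a loop under
 * ce_index_lock until it stops returning success, e.g.:
 *
 *	qdf_spin_lock(&CE_state->ce_index_lock);
 *	while (ce_completed_recv_next_nolock_legacy(CE_state, &ce_ctx,
 *						    &xfer_ctx, &paddr,
 *						    &nbytes, &id, &flags) ==
 *	       QDF_STATUS_SUCCESS) {
 *		// hand (xfer_ctx, nbytes, flags) to the recv callback
 *	}
 *	qdf_spin_unlock(&CE_state->ce_index_lock);
 */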

/* NB: Modeled after ce_completed_recv_next_nolock */
static QDF_STATUS
ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->recv_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];
		}
		dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static QDF_STATUS
ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *sw_idx,
				     unsigned int *hw_idx,
				     uint32_t *toeplitz_hash_result)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
				HIF_TX_DESC_COMPLETION,
				(union ce_desc *)shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
				sw_index, shadow_src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}
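
/*
 * Note (illustrative numbers): the cached hw_index avoids a DDR read per
 * completion. If sw_index == hw_index == 7, the cache may simply be
 * stale, so hw_index is refreshed from the DDR copy of the read index;
 * only if it still reads 7 is the ring truly drained. The 0xffffffff
 * check above guards against a dead target returning all-ones.
 */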

static QDF_STATUS
ce_cancel_send_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
				    int disable_copy_compl_intr)
{
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		hif_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	if ((!disable_copy_compl_intr) &&
	    (CE_state->send_cb || CE_state->recv_cb))
		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);

	if (CE_state->watermark_cb)
		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
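
/*
 * The resulting mask state, for reference (derived from the logic above):
 *
 *	disable_copy_compl_intr  send_cb/recv_cb  copy-complete interrupt
 *	0                        at least one     enabled
 *	0                        none             disabled
 *	1                        don't care       disabled
 *
 * The watermark interrupt is enabled iff a watermark_cb is installed.
 */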

#ifdef QCA_WIFI_WCN6450
int ce_enqueue_desc(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		    unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;
	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;

	/*
	 * Create a log assuming the call will go through, and if not, we would
	 * add an error trace as well.
	 * Please add the same failure log for any additional error paths.
	 */
	DPTRACE(qdf_dp_trace(msdu,
			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(msdu),
			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);

	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < SLOTS_PER_DATAPATH_TX)) {
		hif_err_rl("Source ring full, required %d, available %d",
			   SLOTS_PER_DATAPATH_TX,
			   CE_RING_DELTA(nentries_mask, write_index,
					 sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);

		DPTRACE(qdf_dp_trace(NULL,
				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     NULL, 0, QDF_TX));

		return -ENOSPC;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		download_len -= shadow_src_desc->nbytes;
		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					       CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~CE_DESC_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload */
		shadow_src_desc->nbytes = download_len;
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;

		hif_record_ce_desc_event(scn, ce_state->id, type,
					 (union ce_desc *)src_desc,
				src_ring->per_transfer_context[write_index],
					 write_index, shadow_src_desc->nbytes);

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(msdu),
				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	return 0;
}

static void ce_legacy_msi_param_setup(struct hif_softc *scn, uint32_t ctrl_addr,
				      uint32_t ce_id, struct CE_attr *attr)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	uint32_t tmp;
	int ret;
	int irq_id;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret) {
		hif_debug("%s: failed to get user msi assignment ret %d",
			  __func__, ret);
		return;
	}

	irq_id = scn->int_assignment->msi_idx[ce_id];
	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, addr_low);
	tmp = CE_MSI_ADDR_HIGH_GET(scn, ctrl_addr);
	tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
	tmp |= (addr_high & CE_RING_BASE_ADDR_HIGH_MASK);
	CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, tmp);
	CE_MSI_DATA_SET(scn, ctrl_addr, irq_id + msi_data_start);
	CE_MSI_EN_SET(scn, ctrl_addr);
}

static void ce_legacy_src_intr_thres_setup(struct hif_softc *scn,
					   uint32_t ctrl_addr,
					   struct CE_attr *attr,
					   uint32_t timer_thrs,
					   uint32_t count_thrs)
{
	uint32_t tmp;

	tmp = CE_CHANNEL_SRC_BATCH_TIMER_INT_SETUP_GET(scn, ctrl_addr);

	if (count_thrs) {
		tmp &= ~CE_SRC_BATCH_COUNTER_THRESH_MASK;
		tmp |= ((count_thrs << CE_SRC_BATCH_COUNTER_THRESH_LSB) &
			CE_SRC_BATCH_COUNTER_THRESH_MASK);
	}

	if (timer_thrs) {
		tmp &= ~CE_SRC_BATCH_TIMER_THRESH_MASK;
		tmp |= ((timer_thrs << CE_SRC_BATCH_TIMER_THRESH_LSB) &
			CE_SRC_BATCH_TIMER_THRESH_MASK);
	}

	CE_CHANNEL_SRC_BATCH_TIMER_INT_SETUP(scn, ctrl_addr, tmp);
	CE_CHANNEL_SRC_TIMER_BATCH_INT_EN(scn, ctrl_addr);
}
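
/*
 * Worked example (illustrative mask/LSB values, not the real register
 * layout): the read-modify-write above packs both thresholds into one
 * setup register. If the counter field were bits [15:0] (LSB 0) and the
 * timer field bits [31:16] (LSB 16), then for count_thrs = 0x20 and
 * timer_thrs = 0x10:
 *
 *	tmp = (tmp & ~0x0000FFFF) | 0x20;          // counter threshold
 *	tmp = (tmp & ~0xFFFF0000) | (0x10 << 16);  // timer threshold
 *
 * The actual field positions come from the CE_*_BATCH_*_THRESH_MASK/_LSB
 * definitions in the register headers.
 */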

static void ce_legacy_dest_intr_thres_setup(struct hif_softc *scn,
					    uint32_t ctrl_addr,
					    struct CE_attr *attr,
					    uint32_t timer_thrs,
					    uint32_t count_thrs)
{
	uint32_t tmp;

	tmp = CE_CHANNEL_DST_BATCH_TIMER_INT_SETUP_GET(scn, ctrl_addr);

	if (count_thrs) {
		tmp &= ~CE_DST_BATCH_COUNTER_THRESH_MASK;
		tmp |= ((count_thrs << CE_DST_BATCH_COUNTER_THRESH_LSB) &
			CE_DST_BATCH_COUNTER_THRESH_MASK);
	}

	if (timer_thrs) {
		tmp &= ~CE_DST_BATCH_TIMER_THRESH_MASK;
		tmp |= ((timer_thrs << CE_DST_BATCH_TIMER_THRESH_LSB) &
			CE_DST_BATCH_TIMER_THRESH_MASK);
	}

	CE_CHANNEL_DST_BATCH_TIMER_INT_SETUP(scn, ctrl_addr, tmp);
	CE_CHANNEL_DST_TIMER_BATCH_INT_EN(scn, ctrl_addr);
}
#else
static void ce_legacy_msi_param_setup(struct hif_softc *scn, uint32_t ctrl_addr,
				      uint32_t ce_id, struct CE_attr *attr)
{
}

static void ce_legacy_src_intr_thres_setup(struct hif_softc *scn,
					   uint32_t ctrl_addr,
					   struct CE_attr *attr,
					   uint32_t timer_thrs,
					   uint32_t count_thrs)
{
}

static void ce_legacy_dest_intr_thres_setup(struct hif_softc *scn,
					    uint32_t ctrl_addr,
					    struct CE_attr *attr,
					    uint32_t timer_thrs,
					    uint32_t count_thrs)
{
}
#endif /* QCA_WIFI_WCN6450 */

static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				     struct CE_ring_state *src_ring,
				     struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;
	uint32_t timer_thrs;
	uint32_t count_thrs;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);

	src_ring->hw_index =
		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	src_ring->sw_index = src_ring->hw_index;
	src_ring->write_index =
		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = src_ring->base_addr_CE_space;
	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				  (uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if SR_BA_ADDRESS_HIGH register exists */
	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(scn, ctrl_addr);
		tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
		dma_addr =
			((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK) | tmp;
		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					       ctrl_addr, (uint32_t)dma_addr);
	}
	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
	/* Enable source ring byte swap for big endian host */
	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		/* In 8us units */
		timer_thrs = CE_SRC_BATCH_TIMER_THRESHOLD >> 3;
		/* Batch counter threshold in Dword units */
		count_thrs = (CE_SRC_BATCH_COUNTER_THRESHOLD *
			      (sizeof(struct CE_src_desc) >> 2));
		ce_legacy_msi_param_setup(scn, ctrl_addr, ce_id, attr);
		ce_legacy_src_intr_thres_setup(scn, ctrl_addr, attr,
					       timer_thrs, count_thrs);
	}
}
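
/*
 * Worked example (illustrative threshold value): the batch timer register
 * counts in 8 us units, hence the ">> 3" above converting from
 * microseconds. If CE_SRC_BATCH_TIMER_THRESHOLD were 4000 us, the
 * programmed value would be 4000 >> 3 = 500 ticks, i.e. at most ~4 ms of
 * coalescing latency when traffic is trickling in.
 */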

static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *dest_ring,
				      struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;
	uint32_t timer_thrs;
	uint32_t count_thrs;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);
	dest_ring->sw_index =
		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dest_ring->write_index =
		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = dest_ring->base_addr_CE_space;
	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				   (uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if DR_BA_ADDRESS_HIGH exists */
	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, ctrl_addr);
		tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
		dma_addr =
			((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK) | tmp;
		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
	}

	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
#ifdef BIG_ENDIAN_HOST
	/* Enable Dest ring byte swap for big endian host */
	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		/* In 8us units */
		timer_thrs = CE_DST_BATCH_TIMER_THRESHOLD >> 3;
		/* Batch counter threshold in Dword units */
		count_thrs = CE_DST_BATCH_COUNTER_THRESHOLD;

		ce_legacy_msi_param_setup(scn, ctrl_addr, ce_id, attr);
		ce_legacy_dest_intr_thres_setup(scn, ctrl_addr, attr,
						timer_thrs, count_thrs);
	}
}

static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct CE_src_desc);
	case CE_RING_DEST:
		return sizeof(struct CE_dest_desc);
	case CE_RING_STATUS:
		qdf_assert(0);
		return 0;
	default:
		return 0;
	}
}

static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
				uint32_t ce_id, struct CE_ring_state *ring,
				struct CE_attr *attr)
{
	int status = Q_TARGET_ACCESS_BEGIN(scn);

	if (status < 0)
		goto out;

	switch (ring_type) {
	case CE_RING_SRC:
		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
	default:
		qdf_assert(0);
		break;
	}

	Q_TARGET_ACCESS_END(scn);
out:
	return status;
}

static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	*num_shadow_registers_configured = 0;
	*shadow_config = NULL;
}

static bool ce_check_int_watermark(struct CE_state *CE_state,
				   unsigned int *flags)
{
	uint32_t ce_int_status;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
	if (ce_int_status & CE_WATERMARK_MASK) {
		/* Convert HW IS bits to software flags */
		*flags =
			(ce_int_status & CE_WATERMARK_MASK) >>
			CE_WM_SHFT;
		return true;
	}

	return false;
}

void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx) { }
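
/*
 * Worked example (illustrative bit layout): CE_WM_SHFT converts the raw
 * IS_*_RING_*_WATERMARK_MASK status bits into CE_WM_FLAG_* software flags
 * by a single right shift in ce_check_int_watermark() above. E.g. if the
 * hardware watermark bits occupied [4:1] of the interrupt status and the
 * masked value read 0x0A, the reported flags would be 0x0A >> 1 = 0x05.
 */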

#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info_legacy(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non zero for failure
 */
static
int ce_get_index_info_legacy(struct hif_softc *scn, void *ce_state,
			     struct ce_index *info)
{
	struct CE_state *state = (struct CE_state *)ce_state;

	info->id = state->id;
	if (state->src_ring) {
		info->u.legacy_info.sw_index = state->src_ring->sw_index;
		info->u.legacy_info.write_index = state->src_ring->write_index;
	} else if (state->dest_ring) {
		info->u.legacy_info.sw_index = state->dest_ring->sw_index;
		info->u.legacy_info.write_index = state->dest_ring->write_index;
	}

	return 0;
}
#endif

#ifdef CONFIG_SHADOW_V3
static void ce_prepare_shadow_register_v3_cfg_legacy(struct hif_softc *scn,
			struct pld_shadow_reg_v3_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	hif_get_shadow_reg_config_v3(scn, shadow_config,
				     num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		hif_err("shadow register configuration already constructed");
		return;
	}

	hif_preare_shadow_register_cfg_v3(scn);
	hif_get_shadow_reg_config_v3(scn, shadow_config,
				     num_shadow_registers_configured);
}
#endif

struct ce_ops ce_service_legacy = {
	.ce_get_desc_size = ce_get_desc_size_legacy,
	.ce_ring_setup = ce_ring_setup_legacy,
	.ce_sendlist_send = ce_sendlist_send_legacy,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
	.ce_cancel_send_next = ce_cancel_send_next_legacy,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
	.ce_send_nolock = ce_send_nolock_legacy,
	.watermark_int = ce_check_int_watermark,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_legacy,
#ifdef HIF_CE_LOG_INFO
	.ce_get_index_info = ce_get_index_info_legacy,
#endif
#ifdef CONFIG_SHADOW_V3
	.ce_prepare_shadow_register_v3_cfg =
		ce_prepare_shadow_register_v3_cfg_legacy,
#endif
};

struct ce_ops *ce_services_legacy(void)
{
	return &ce_service_legacy;
}

qdf_export_symbol(ce_services_legacy);

void ce_service_legacy_init(void)
{
	ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy);
}
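
/*
 * Usage note (illustrative): the service table above is reached through
 * the ce_ops indirection rather than called directly. After
 * ce_service_legacy_init() registers the constructor, the HIF layer
 * fetches the ops for the target generation and dispatches through them,
 * e.g.:
 *
 *	struct ce_ops *ops = ce_services_legacy();
 *
 *	ring_desc_sz = ops->ce_get_desc_size(CE_RING_SRC);
 */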