1 /* 2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "ce_api.h" 21 #include "ce_internal.h" 22 #include "ce_main.h" 23 #include "ce_reg.h" 24 #include "hif.h" 25 #include "hif_debug.h" 26 #include "hif_io32.h" 27 #include "qdf_lock.h" 28 #include "hif_main.h" 29 #include "hif_napi.h" 30 #include "qdf_module.h" 31 #include "regtable.h" 32 33 /* 34 * Support for Copy Engine hardware, which is mainly used for 35 * communication between Host and Target over a PCIe interconnect. 36 */ 37 38 /* 39 * A single CopyEngine (CE) comprises two "rings": 40 * a source ring 41 * a destination ring 42 * 43 * Each ring consists of a number of descriptors which specify 44 * an address, length, and meta-data. 45 * 46 * Typically, one side of the PCIe interconnect (Host or Target) 47 * controls one ring and the other side controls the other ring. 48 * The source side chooses when to initiate a transfer and it 49 * chooses what to send (buffer address, length). 
The destination 50 * side keeps a supply of "anonymous receive buffers" available and 51 * it handles incoming data as it arrives (when the destination 52 * receives an interrupt). 53 * 54 * The sender may send a simple buffer (address/length) or it may 55 * send a small list of buffers. When a small list is sent, hardware 56 * "gathers" these and they end up in a single destination buffer 57 * with a single interrupt. 58 * 59 * There are several "contexts" managed by this layer -- more, it 60 * may seem -- than should be needed. These are provided mainly for 61 * maximum flexibility and especially to facilitate a simpler HIF 62 * implementation. There are per-CopyEngine recv, send, and watermark 63 * contexts. These are supplied by the caller when a recv, send, 64 * or watermark handler is established and they are echoed back to 65 * the caller when the respective callbacks are invoked. There is 66 * also a per-transfer context supplied by the caller when a buffer 67 * (or sendlist) is sent and when a buffer is enqueued for recv. 68 * These per-transfer contexts are echoed back to the caller when 69 * the buffer is sent/received. 
 * Target TX hash result toeplitz_hash_result
 */

/* NB: Modeled after ce_completed_send_next */
/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
/*
 * ce_buffer_addr_hi_set() - store the upper bits of a 64-bit DMA address
 * @shadow_src_desc: shadow copy of the source-ring descriptor being built
 * @dma_addr: 64-bit bus address of the buffer
 * @user_flags: per-packet flag bits to merge into the descriptor word
 *
 * Keeps bits 32..36 of the address in buffer_addr_hi, ORs them into
 * user_flags, and rewrites the descriptor's second 32-bit word via
 * memcpy (avoids a bit-field read-modify-write on uncached memory).
 */
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
/* Non-QCA_WIFI_3_0 descriptors carry no high address bits: no-op */
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

/* One data-path TX frame consumes two source-ring slots:
 * HTC/HTT header descriptor + payload descriptor (see ce_send_fast).
 */
#define SLOTS_PER_DATAPATH_TX 2

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdu: msdu to be sent
 * @transfer_id: transfer_id
 * @download_len: packet download length
 *
 * Assumption : Called with an array of MSDU's
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory
 * 3. Write index to h/w
 *
 * Return: No.
 * of packets that could be sent
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;
	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
	bool ok_to_send = true;

	/*
	 * Create a log assuming the call will go through, and if not, we would
	 * add an error trace as well.
	 * Please add the same failure log for any additional error paths.
	 */
	DPTRACE(qdf_dp_trace(msdu,
			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(msdu),
			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	/*
	 * Request runtime PM resume if it has already suspended and make
	 * sure there is no PCIe link access.
	 */
	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_CE) != 0)
		ok_to_send = false;

	if (ok_to_send) {
		Q_TARGET_ACCESS_BEGIN(scn);
		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	}

	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	/* A frame needs two slots (header + payload); bail out if the
	 * source ring cannot take both.
	 */
	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < SLOTS_PER_DATAPATH_TX)) {
		hif_err_rl("Source ring full, required %d, available %d",
			   SLOTS_PER_DATAPATH_TX,
			   CE_RING_DELTA(nentries_mask, write_index,
					 sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		if (ok_to_send)
			Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		DPTRACE(qdf_dp_trace(NULL,
				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     NULL, 0, QDF_TX));

		return 0;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		/* remaining download length is what the payload slot carries */
		download_len -= shadow_src_desc->nbytes;
		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					       CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload, capped to frag size */
		shadow_src_desc->nbytes = download_len;
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;

		hif_record_ce_desc_event(scn, ce_state->id, type,
					 (union ce_desc *)src_desc,
				src_ring->per_transfer_context[write_index],
					 write_index, shadow_src_desc->nbytes);

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(msdu),
				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	if (ok_to_send) {
		if (qdf_likely(ce_state->state == CE_RUNNING)) {
			type = FAST_TX_WRITE_INDEX_UPDATE;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			Q_TARGET_ACCESS_END(scn);
		} else {
			/* CE not running: defer the h/w write-index update */
			ce_state->state = CE_PENDING;
		}
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_CE);
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/* sent 1 packet */
	return 1;
}

/**
 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
 * @ce_state: handle to copy engine state
 * @cmpl_msdus: Rx msdus
 * @num_cmpls: number of Rx msdus
 * @ctrl_addr: CE control address
 *
 * Return: None
 */
static void ce_fastpath_rx_handle(struct
CE_state *ce_state,
				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
				  uint32_t ctrl_addr)
{
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t write_index;

	/* Invoke the registered fastpath handler without the index lock
	 * held; it is re-taken before touching the ring indices below.
	 */
	qdf_spin_unlock(&ce_state->ce_index_lock);
	ce_state->fastpath_handler(ce_state->context, cmpl_msdus, num_cmpls);
	qdf_spin_lock(&ce_state->ce_index_lock);

	/* Update Destination Ring Write Index */
	write_index = dest_ring->write_index;
	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_WRITE_INDEX_UPDATE,
				 NULL, NULL, write_index, 0);

	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 * 1) Go through the CE ring, and find the completions
 * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
 * 3) Unmap buffer & accumulate in an array.
 * 4) Call message handler when array is full or when exiting the handler
 *
 * Return: void
 */

void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t sw_index = dest_ring->sw_index;
	uint32_t nbytes;
	qdf_nbuf_t nbuf;
	dma_addr_t paddr;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	uint32_t nbuf_cmpl_idx = 0;
	unsigned int more_comp_cnt = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

more_data:
	for (;;) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
						 sw_index);

		/*
		 * The following 2 reads are from non-cached memory
		 */
		nbytes = dest_desc->nbytes;

		/* If completion is invalid, break */
		if (qdf_unlikely(nbytes == 0))
			break;

		/*
		 * Build the nbuf list from valid completions
		 */
		nbuf = dest_ring->per_transfer_context[sw_index];

		/*
		 * No lock is needed here, since this is the only thread
		 * that accesses the sw_index
		 */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);

		/*
		 * CAREFUL : Uncached write, but still less expensive,
		 * since most modern caches use "write-combining" to
		 * flush multiple cache-writes all at once.
		 */
		dest_desc->nbytes = 0;

		/*
		 * Per our understanding this is not required on our
		 * since we are doing the same cache invalidation
		 * operation on the same buffer twice in succession,
		 * without any modification to this buffer by CPU in
		 * between.
		 * However, this code with 2 syncs in succession has
		 * been undergoing some testing at a customer site,
		 * and seemed to be showing no problems so far. Would
		 * like to validate from the customer, that this line
		 * is really not required, before we remove this line
		 * completely.
		 */
		paddr = QDF_NBUF_CB_PADDR(nbuf);

		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
						(skb_end_pointer(nbuf) -
						 (nbuf)->data),
						DMA_FROM_DEVICE);

		qdf_nbuf_put_tail(nbuf, nbytes);

		qdf_assert_always(nbuf->data);

		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
			hif_get_rx_ctx_id(ce_state->id, hif_hdl);
		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;

		/*
		 * we are not posting the buffers back instead
		 * reusing the buffers
		 */
		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
			hif_record_ce_desc_event(scn, ce_state->id,
						 FAST_RX_SOFTWARE_INDEX_UPDATE,
						 NULL, NULL, sw_index, 0);
			dest_ring->sw_index = sw_index;
			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
					      nbuf_cmpl_idx, ctrl_addr);

			ce_state->receive_count += nbuf_cmpl_idx;
			if (qdf_unlikely(hif_ce_service_should_yield(
						scn, ce_state))) {
				ce_state->force_break = 1;
				qdf_atomic_set(&ce_state->rx_pending, 1);
				return;
			}

			nbuf_cmpl_idx = 0;
			more_comp_cnt = 0;
		}
	}

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	dest_ring->sw_index = sw_index;

	/*
	 * If there are not enough completions to fill the array,
	 * just call the message handler here
	 */
	if (nbuf_cmpl_idx) {
		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
				      nbuf_cmpl_idx, ctrl_addr);

		ce_state->receive_count += nbuf_cmpl_idx;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			qdf_atomic_set(&ce_state->rx_pending, 1);
			return;
		}

		/* check for more packets after upper layer processing */
		nbuf_cmpl_idx = 0;
		more_comp_cnt = 0;
		goto more_data;
	}

	hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());

	qdf_atomic_set(&ce_state->rx_pending, 0);
	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					   HOST_IS_COPY_COMPLETE_MASK);
	} else {
		hif_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	/* Re-check for completions that raced with the interrupt clear,
	 * bounded by CE_TXRX_COMP_CHECK_THRESHOLD to avoid spinning forever.
	 */
	if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_data;
		} else {
			hif_err("Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				nentries_mask,
				ce_state->dest_ring->sw_index,
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
		}
	}
#ifdef NAPI_YIELD_BUDGET_BASED
	/*
	 * Caution : Before you modify this code, please refer hif_napi_poll
	 * function to understand how napi_complete gets called and make the
	 * necessary changes. Force break has to be done till WIN disables the
	 * interrupt at source
	 */
	ce_state->force_break = 1;
#endif
}

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return scn->fastpath_mode_on;
}
#else
/* Fastpath disabled: Rx fastpath service is a no-op stub */
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * ce_send_nolock_legacy() - post one buffer to the CE source ring
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back on send completion
 * @buffer: DMA address of the payload
 * @nbytes: number of bytes to send
 * @transfer_id: meta data written into the descriptor
 * @flags: CE_SEND_FLAG_* bits (gather, byte-swap disable)
 * @user_flags: per-packet flag bits (folded into the high-address word
 *              on QCA_WIFI_3_0)
 *
 * No locking is done here; callers (e.g. ce_sendlist_send_legacy) hold
 * the CE index lock.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when the
 *         ring is full or target access cannot be started.
 */
static QDF_STATUS
ce_send_nolock_legacy(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      qdf_dma_addr_t buffer,
		      uint32_t nbytes,
		      uint32_t transfer_id,
		      uint32_t flags,
		      uint32_t user_flags)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->target_int_disable = 0;
		shadow_src_desc->host_int_disable = 0;

		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;
		ce_validate_nbytes(nbytes, CE_state);

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (shadow_src_desc->gather) {
			event_type = HIF_TX_GATHER_DESC_POST;
		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
			event_type = HIF_TX_DESC_SOFTWARE_POST;
			CE_state->state = CE_PENDING;
		} else {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)shadow_src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

/*
 * ce_sendlist_send_legacy() - post a gather list of buffers as one transfer
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context attached to the final item
 * @sendlist: list of buffers to send
 * @transfer_id: meta data written into each descriptor
 *
 * All items except the last are posted with CE_SEND_FLAG_GATHER so the
 * hardware treats them as a single transfer. Succeeds only if the ring
 * can take the entire list at once.
 */
static QDF_STATUS
ce_sendlist_send_legacy(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	QDF_STATUS status = QDF_STATUS_E_NOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
			scn, CE_state->ctrl_addr);
		Q_TARGET_ACCESS_END(scn);
	}

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types?
			 */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_legacy(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
					       (qdf_dma_addr_t) item->data,
					       item->u.nbytes,
					       transfer_id, item->flags,
					       item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/**
 * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static QDF_STATUS
ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
			   void *per_recv_context, qdf_dma_addr_t buffer)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_IO;
	}

	/* htt_rx_data CEs in fastpath mode may post even when the ring
	 * delta is 0 because fastpath reuses the same buffers.
	 */
	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		/* nbytes == 0 marks the descriptor as not yet completed */
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id,
					 HIF_RX_DESC_POST,
					 (union ce_desc *)dest_desc,
					 per_recv_context,
					 write_index, 0);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else
		status = QDF_STATUS_E_FAILURE;

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * ce_send_entries_done_nolock_legacy() - number of sends completed by the
 * target but not yet processed by the host (delta between sw_index and
 * the hardware read index). Caller handles locking and target access.
 */
static unsigned int
ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

/*
 * ce_recv_entries_done_nolock_legacy() - number of receive completions
 * pending on the destination ring. Caller handles locking and target
 * access.
 */
static unsigned int
ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

/*
 * ce_completed_recv_next_nolock_legacy() - reap the next completed
 * destination descriptor, returning its buffer, length, transfer id and
 * flags through the out-pointers. Caller handles locking.
 */
static QDF_STATUS
ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *flagsp)
{
	QDF_STATUS status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc
		*dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cachable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
				 (union ce_desc *)dest_desc,
				 dest_ring->per_transfer_context[sw_index],
				 sw_index, 0);

	/* mark the descriptor free for re-posting */
	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	if (per_transfer_contextp) {
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];
	}
	dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
static QDF_STATUS
ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->recv_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];
		}
		dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static QDF_STATUS
ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *sw_idx,
				     unsigned int *hw_idx,
				     uint32_t *toeplitz_hash_result)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	/* 0xffffffff is the value read back when the target is unreachable */
	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
					 HIF_TX_DESC_COMPLETION,
					 (union ce_desc *)shadow_src_desc,
					 src_ring->per_transfer_context[sw_index],
					 sw_index, shadow_src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}

/*
 * ce_cancel_send_next_legacy() - reclaim the next unsent source-ring
 * entry (used when tearing down or flushing pending sends); returns its
 * buffer, length, transfer id and context through the out-pointers.
 */
static QDF_STATUS
ce_cancel_send_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
1085 * 1086 * Called with target_lock held. 1087 */ 1088 static void 1089 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state, 1090 int disable_copy_compl_intr) 1091 { 1092 uint32_t ctrl_addr = CE_state->ctrl_addr; 1093 struct hif_softc *scn = CE_state->scn; 1094 1095 CE_state->disable_copy_compl_intr = disable_copy_compl_intr; 1096 1097 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1098 return; 1099 1100 if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { 1101 hif_err_rl("%s: target access is not allowed", __func__); 1102 return; 1103 } 1104 1105 if ((!disable_copy_compl_intr) && 1106 (CE_state->send_cb || CE_state->recv_cb)) 1107 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr); 1108 else 1109 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); 1110 1111 if (CE_state->watermark_cb) 1112 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr); 1113 else 1114 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr); 1115 Q_TARGET_ACCESS_END(scn); 1116 } 1117 1118 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id, 1119 struct CE_ring_state *src_ring, 1120 struct CE_attr *attr) 1121 { 1122 uint32_t ctrl_addr; 1123 uint64_t dma_addr; 1124 1125 QDF_ASSERT(ce_id < scn->ce_count); 1126 ctrl_addr = CE_BASE_ADDRESS(ce_id); 1127 1128 src_ring->hw_index = 1129 CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr); 1130 src_ring->sw_index = src_ring->hw_index; 1131 src_ring->write_index = 1132 CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr); 1133 dma_addr = src_ring->base_addr_CE_space; 1134 CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr, 1135 (uint32_t)(dma_addr & 0xFFFFFFFF)); 1136 1137 /* if SR_BA_ADDRESS_HIGH register exists */ 1138 if (is_register_supported(SR_BA_ADDRESS_HIGH)) { 1139 uint32_t tmp; 1140 1141 tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET( 1142 scn, ctrl_addr); 1143 tmp &= ~0x1F; 1144 dma_addr = ((dma_addr >> 32) & 0x1F) | tmp; 1145 CE_SRC_RING_BASE_ADDR_HIGH_SET(scn, 1146 ctrl_addr, (uint32_t)dma_addr); 1147 } 1148 CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries); 1149 
CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max); 1150 #ifdef BIG_ENDIAN_HOST 1151 /* Enable source ring byte swap for big endian host */ 1152 CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1); 1153 #endif 1154 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0); 1155 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries); 1156 } 1157 1158 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id, 1159 struct CE_ring_state *dest_ring, 1160 struct CE_attr *attr) 1161 { 1162 uint32_t ctrl_addr; 1163 uint64_t dma_addr; 1164 1165 QDF_ASSERT(ce_id < scn->ce_count); 1166 ctrl_addr = CE_BASE_ADDRESS(ce_id); 1167 dest_ring->sw_index = 1168 CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr); 1169 dest_ring->write_index = 1170 CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr); 1171 dma_addr = dest_ring->base_addr_CE_space; 1172 CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr, 1173 (uint32_t)(dma_addr & 0xFFFFFFFF)); 1174 1175 /* if DR_BA_ADDRESS_HIGH exists */ 1176 if (is_register_supported(DR_BA_ADDRESS_HIGH)) { 1177 uint32_t tmp; 1178 1179 tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, 1180 ctrl_addr); 1181 tmp &= ~0x1F; 1182 dma_addr = ((dma_addr >> 32) & 0x1F) | tmp; 1183 CE_DEST_RING_BASE_ADDR_HIGH_SET(scn, 1184 ctrl_addr, (uint32_t)dma_addr); 1185 } 1186 1187 CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries); 1188 #ifdef BIG_ENDIAN_HOST 1189 /* Enable Dest ring byte swap for big endian host */ 1190 CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1); 1191 #endif 1192 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0); 1193 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries); 1194 } 1195 1196 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type) 1197 { 1198 switch (ring_type) { 1199 case CE_RING_SRC: 1200 return sizeof(struct CE_src_desc); 1201 case CE_RING_DEST: 1202 return sizeof(struct CE_dest_desc); 1203 case CE_RING_STATUS: 1204 qdf_assert(0); 1205 return 0; 1206 default: 1207 return 0; 1208 } 1209 1210 return 0; 1211 } 1212 1213 static 
int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type, 1214 uint32_t ce_id, struct CE_ring_state *ring, 1215 struct CE_attr *attr) 1216 { 1217 int status = Q_TARGET_ACCESS_BEGIN(scn); 1218 1219 if (status < 0) 1220 goto out; 1221 1222 switch (ring_type) { 1223 case CE_RING_SRC: 1224 ce_legacy_src_ring_setup(scn, ce_id, ring, attr); 1225 break; 1226 case CE_RING_DEST: 1227 ce_legacy_dest_ring_setup(scn, ce_id, ring, attr); 1228 break; 1229 case CE_RING_STATUS: 1230 default: 1231 qdf_assert(0); 1232 break; 1233 } 1234 1235 Q_TARGET_ACCESS_END(scn); 1236 out: 1237 return status; 1238 } 1239 1240 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn, 1241 struct pld_shadow_reg_v2_cfg **shadow_config, 1242 int *num_shadow_registers_configured) 1243 { 1244 *num_shadow_registers_configured = 0; 1245 *shadow_config = NULL; 1246 } 1247 1248 static bool ce_check_int_watermark(struct CE_state *CE_state, 1249 unsigned int *flags) 1250 { 1251 uint32_t ce_int_status; 1252 uint32_t ctrl_addr = CE_state->ctrl_addr; 1253 struct hif_softc *scn = CE_state->scn; 1254 1255 ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr); 1256 if (ce_int_status & CE_WATERMARK_MASK) { 1257 /* Convert HW IS bits to software flags */ 1258 *flags = 1259 (ce_int_status & CE_WATERMARK_MASK) >> 1260 CE_WM_SHFT; 1261 return true; 1262 } 1263 1264 return false; 1265 } 1266 1267 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx) { } 1268 1269 #ifdef HIF_CE_LOG_INFO 1270 /** 1271 * ce_get_index_info_legacy(): Get CE index info 1272 * @scn: HIF Context 1273 * @ce_state: CE opaque handle 1274 * @info: CE info 1275 * 1276 * Return: 0 for success and non zero for failure 1277 */ 1278 static 1279 int ce_get_index_info_legacy(struct hif_softc *scn, void *ce_state, 1280 struct ce_index *info) 1281 { 1282 struct CE_state *state = (struct CE_state *)ce_state; 1283 1284 info->id = state->id; 1285 if (state->src_ring) { 1286 info->u.legacy_info.sw_index = 
state->src_ring->sw_index; 1287 info->u.legacy_info.write_index = state->src_ring->write_index; 1288 } else if (state->dest_ring) { 1289 info->u.legacy_info.sw_index = state->dest_ring->sw_index; 1290 info->u.legacy_info.write_index = state->dest_ring->write_index; 1291 } 1292 1293 return 0; 1294 } 1295 #endif 1296 1297 struct ce_ops ce_service_legacy = { 1298 .ce_get_desc_size = ce_get_desc_size_legacy, 1299 .ce_ring_setup = ce_ring_setup_legacy, 1300 .ce_sendlist_send = ce_sendlist_send_legacy, 1301 .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy, 1302 .ce_revoke_recv_next = ce_revoke_recv_next_legacy, 1303 .ce_cancel_send_next = ce_cancel_send_next_legacy, 1304 .ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy, 1305 .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy, 1306 .ce_send_nolock = ce_send_nolock_legacy, 1307 .watermark_int = ce_check_int_watermark, 1308 .ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy, 1309 .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy, 1310 .ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy, 1311 .ce_prepare_shadow_register_v2_cfg = 1312 ce_prepare_shadow_register_v2_cfg_legacy, 1313 #ifdef HIF_CE_LOG_INFO 1314 .ce_get_index_info = 1315 ce_get_index_info_legacy, 1316 #endif 1317 }; 1318 1319 struct ce_ops *ce_services_legacy() 1320 { 1321 return &ce_service_legacy; 1322 } 1323 1324 qdf_export_symbol(ce_services_legacy); 1325 1326 void ce_service_legacy_init(void) 1327 { 1328 ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy); 1329 } 1330