/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "ce_api.h"
#include "ce_internal.h"
#include "ce_main.h"
#include "ce_reg.h"
#include "hif.h"
#include "hif_debug.h"
#include "hif_io32.h"
#include "qdf_lock.h"
#include "hif_main.h"
#include "hif_napi.h"
#include "qdf_module.h"
#include "regtable.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it may
 * seem, than should be needed. These are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
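 *
 * As a concrete example of the "gather" behaviour described above,
 * ce_send_fast() below posts a single MSDU as two source descriptors:
 * the first carries the HTC/HTT header fragment with the gather bit
 * set, the second carries the data payload with gather cleared, so
 * the Target coalesces both fragments into one destination buffer and
 * raises a single completion.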
 * Target TX hash result: toeplitz_hash_result
 */

/* NB: Modeled after ce_completed_send_next */
/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

#define SLOTS_PER_DATAPATH_TX 2

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdu: msdu to be sent
 * @transfer_id: transfer_id
 * @download_len: packet download length
 *
 * Assumption: called with an array of MSDUs
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Return: No. of packets that could be sent
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;
	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
	bool ok_to_send = true;

	/*
	 * Create a log assuming the call will go through, and if not, we
	 * would add an error trace as well.
	 * Please add the same failure log for any additional error paths.
	 */
	DPTRACE(qdf_dp_trace(msdu,
			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(msdu),
			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	/*
	 * Request runtime PM resume if it has already suspended and make
	 * sure there is no PCIe link access.
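	 * If the resume request returns non-zero, ok_to_send stays false:
	 * the descriptors below are still written to the ring in host
	 * memory, but the hardware write index register is not updated on
	 * this path.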
	 */
	if (hif_pm_runtime_get(hif_hdl) != 0)
		ok_to_send = false;

	if (ok_to_send) {
		Q_TARGET_ACCESS_BEGIN(scn);
		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	}

	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
				       sw_index - 1) < SLOTS_PER_DATAPATH_TX)) {
		hif_err_rl("Source ring full, required %d, available %d",
			   SLOTS_PER_DATAPATH_TX,
			   CE_RING_DELTA(nentries_mask, write_index,
					 sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		if (ok_to_send)
			Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		DPTRACE(qdf_dp_trace(NULL,
				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     NULL, 0, QDF_TX));

		return 0;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		hif_pm_runtime_get_noresume(hif_hdl);

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) &
			     DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		download_len -= shadow_src_desc->nbytes;
		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					       CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload */
		shadow_src_desc->nbytes = download_len;
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;

		hif_record_ce_desc_event(scn, ce_state->id, type,
					 (union ce_desc *)src_desc,
				src_ring->per_transfer_context[write_index],
					 write_index, shadow_src_desc->nbytes);

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(msdu),
				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	if (ok_to_send) {
		if (qdf_likely(ce_state->state == CE_RUNNING)) {
			type = FAST_TX_WRITE_INDEX_UPDATE;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			Q_TARGET_ACCESS_END(scn);
		} else {
			ce_state->state = CE_PENDING;
		}
		hif_pm_runtime_put(hif_hdl);
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/* sent 1 packet */
	return 1;
}

/**
 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
 * @ce_state: handle to copy engine state
 * @cmpl_msdus: Rx msdus
 * @num_cmpls: number of Rx msdus
 * @ctrl_addr: CE control address
 *
 * Return: None
 */
static void ce_fastpath_rx_handle(struct CE_state *ce_state,
				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
				  uint32_t ctrl_addr)
{
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t write_index;

	qdf_spin_unlock(&ce_state->ce_index_lock);
	ce_state->fastpath_handler(ce_state->context, cmpl_msdus, num_cmpls);
	qdf_spin_lock(&ce_state->ce_index_lock);

	/* Update Destination Ring Write Index */
	write_index = dest_ring->write_index;
	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_WRITE_INDEX_UPDATE,
				 NULL, NULL, write_index, 0);

	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 * 1) Go through the CE ring, and find the completions
 * 2) For valid completions retrieve context (nbuf) from per_transfer_context[]
 * 3) Unmap buffer & accumulate in an array.
 * 4) Call message handler when array is full or when exiting the handler
 *
 * Return: void
 */

void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t sw_index = dest_ring->sw_index;
	uint32_t nbytes;
	qdf_nbuf_t nbuf;
	dma_addr_t paddr;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	uint32_t nbuf_cmpl_idx = 0;
	unsigned int more_comp_cnt = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

more_data:
	for (;;) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
						 sw_index);

		/*
		 * The following 2 reads are from non-cached memory
		 */
		nbytes = dest_desc->nbytes;

		/* If completion is invalid, break */
		if (qdf_unlikely(nbytes == 0))
			break;

		/*
		 * Build the nbuf list from valid completions
		 */
		nbuf = dest_ring->per_transfer_context[sw_index];

		/*
		 * No lock is needed here, since this is the only thread
		 * that accesses the sw_index
		 */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);

		/*
		 * CAREFUL: Uncached write, but still less expensive,
		 * since most modern caches use "write-combining" to
		 * flush multiple cache-writes all at once.
		 */
		dest_desc->nbytes = 0;

		/*
		 * Per our understanding this is not required on our
		 * platform, since we are doing the same cache invalidation
		 * operation on the same buffer twice in succession,
		 * without any modification to this buffer by the CPU in
		 * between.
		 * However, this code with 2 syncs in succession has
		 * been undergoing some testing at a customer site,
		 * and seemed to be showing no problems so far. We would
		 * like to confirm with the customer that this line really
		 * is not required, before we remove it completely.
		 */
		paddr = QDF_NBUF_CB_PADDR(nbuf);

		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
						(skb_end_pointer(nbuf) -
						 (nbuf)->data),
						DMA_FROM_DEVICE);

		qdf_nbuf_put_tail(nbuf, nbytes);

		qdf_assert_always(nbuf->data);

		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
			hif_get_rx_ctx_id(ce_state->id, hif_hdl);
		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;

		/*
		 * We are not posting the buffers back; instead we reuse
		 * the buffers.
		 */
		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
			hif_record_ce_desc_event(scn, ce_state->id,
						 FAST_RX_SOFTWARE_INDEX_UPDATE,
						 NULL, NULL, sw_index, 0);
			dest_ring->sw_index = sw_index;
			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
					      nbuf_cmpl_idx, ctrl_addr);

			ce_state->receive_count += nbuf_cmpl_idx;
			if (qdf_unlikely(hif_ce_service_should_yield(
						scn, ce_state))) {
				ce_state->force_break = 1;
				qdf_atomic_set(&ce_state->rx_pending, 1);
				return;
			}

			nbuf_cmpl_idx = 0;
			more_comp_cnt = 0;
		}
	}

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	dest_ring->sw_index = sw_index;

	/*
	 * If there are not enough completions to fill the array,
	 * just call the message handler here
	 */
	if (nbuf_cmpl_idx) {
		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
				      nbuf_cmpl_idx, ctrl_addr);

		ce_state->receive_count += nbuf_cmpl_idx;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			qdf_atomic_set(&ce_state->rx_pending, 1);
			return;
		}

		/* check for more packets after upper layer processing */
		nbuf_cmpl_idx = 0;
		more_comp_cnt = 0;
		goto more_data;
	}

	hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());

	qdf_atomic_set(&ce_state->rx_pending, 0);
	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					   HOST_IS_COPY_COMPLETE_MASK);
	} else {
		hif_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_data;
		} else {
			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				  __func__, nentries_mask,
				  ce_state->dest_ring->sw_index,
				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
		}
	}
#ifdef NAPI_YIELD_BUDGET_BASED
	/*
	 * Caution: Before you modify this code, please refer to the
	 * hif_napi_poll function to understand how napi_complete gets
	 * called and make the necessary changes. Force break has to be
	 * done till WIN disables the interrupt at source.
	 */
	ce_state->force_break = 1;
#endif
}

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return scn->fastpath_mode_on;
}
#else
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

static int
ce_send_nolock_legacy(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      qdf_dma_addr_t buffer,
		      uint32_t nbytes,
		      uint32_t transfer_id,
		      uint32_t flags,
		      uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->target_int_disable = 0;
		shadow_src_desc->host_int_disable = 0;

		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;
		ce_validate_nbytes(nbytes, CE_state);

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (shadow_src_desc->gather) {
			event_type = HIF_TX_GATHER_DESC_POST;
		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
			event_type = HIF_TX_DESC_SOFTWARE_POST;
			CE_state->state = CE_PENDING;
		} else {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)shadow_src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

static int
ce_sendlist_send_legacy(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
			scn, CE_state->ctrl_addr);
		Q_TARGET_ACCESS_END(scn);
	}

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_legacy(copyeng,
				CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t)item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
					       (qdf_dma_addr_t)item->data,
					       item->u.nbytes,
					       transfer_id, item->flags,
					       item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/**
 * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
static int
ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
			   void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
					 (union ce_desc *)dest_desc,
					 per_recv_context,
					 write_index, 0);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else
		status = QDF_STATUS_E_FAILURE;

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

static unsigned int
ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

static unsigned int
ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

static int
ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we avoid
	 * extra memory reads from non-cacheable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
				 (union ce_desc *)dest_desc,
				 dest_ring->per_transfer_context[sw_index],
				 sw_index, 0);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	if (per_transfer_contextp) {
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];
	}
	dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
static QDF_STATUS
ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->recv_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];
		}
		dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *sw_idx,
				     unsigned int *hw_idx,
				     uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
					 HIF_TX_DESC_COMPLETION,
					 (union ce_desc *)shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
					 sw_index, shadow_src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}

static QDF_STATUS
ce_cancel_send_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
				    int disable_copy_compl_intr)
{
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		hif_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	if ((!disable_copy_compl_intr) &&
	    (CE_state->send_cb || CE_state->recv_cb))
		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);

	if (CE_state->watermark_cb)
		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}

static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				     struct CE_ring_state *src_ring,
				     struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);

	src_ring->hw_index =
		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	src_ring->sw_index = src_ring->hw_index;
	src_ring->write_index =
		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = src_ring->base_addr_CE_space;
	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				  (uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if SR_BA_ADDRESS_HIGH register exists */
	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(scn, ctrl_addr);
		tmp &= ~0x1F;
		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn, ctrl_addr,
					       (uint32_t)dma_addr);
	}
	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
	/* Enable source ring byte swap for big endian host */
	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
}

static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *dest_ring,
				      struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);
	dest_ring->sw_index =
		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dest_ring->write_index =
		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = dest_ring->base_addr_CE_space;
	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				   (uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if DR_BA_ADDRESS_HIGH exists */
	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, ctrl_addr);
		tmp &= ~0x1F;
		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn, ctrl_addr,
						(uint32_t)dma_addr);
	}

	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
#ifdef BIG_ENDIAN_HOST
	/* Enable Dest ring byte swap for big endian host */
	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
}

static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct CE_src_desc);
	case CE_RING_DEST:
		return sizeof(struct CE_dest_desc);
	case CE_RING_STATUS:
		qdf_assert(0);
		return 0;
	default:
		return 0;
	}

	return 0;
}

static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
				uint32_t ce_id, struct CE_ring_state *ring,
				struct CE_attr *attr)
{
	int status = Q_TARGET_ACCESS_BEGIN(scn);

	if (status < 0)
		goto out;

	switch (ring_type) {
	case CE_RING_SRC:
		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
	default:
		qdf_assert(0);
		break;
	}

	Q_TARGET_ACCESS_END(scn);
out:
	return status;
}

static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	*num_shadow_registers_configured = 0;
	*shadow_config = NULL;
}

static bool ce_check_int_watermark(struct CE_state *CE_state,
				   unsigned int *flags)
{
	uint32_t ce_int_status;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
	if (ce_int_status & CE_WATERMARK_MASK) {
		/* Convert HW IS bits to software flags */
		*flags =
			(ce_int_status & CE_WATERMARK_MASK) >>
			CE_WM_SHFT;
		return true;
	}

	return false;
}

struct ce_ops ce_service_legacy = {
	.ce_get_desc_size = ce_get_desc_size_legacy,
	.ce_ring_setup = ce_ring_setup_legacy,
	.ce_sendlist_send = ce_sendlist_send_legacy,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
	.ce_cancel_send_next = ce_cancel_send_next_legacy,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
	.ce_send_nolock = ce_send_nolock_legacy,
	.watermark_int = ce_check_int_watermark,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_legacy,
};

struct ce_ops *ce_services_legacy(void)
{
	return &ce_service_legacy;
}

qdf_export_symbol(ce_services_legacy);

void ce_service_legacy_init(void)
{
	ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy);
}
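
/*
 * Illustrative usage (a sketch, not code that belongs to this service):
 * the table returned by ce_services_legacy() is normally reached through
 * hif_state->ce_services, e.g.
 *
 *	struct ce_ops *ops = hif_state->ce_services;
 *	unsigned int done;
 *
 *	done = ops->ce_recv_entries_done_nolock(scn, ce_state);
 *
 * assuming hif_state, scn and ce_state come from the usual HIF attach
 * path, as in ce_per_engine_service_fast() above.
 */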