/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"
#include "hif.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *	a source ring
 *	a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */
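/*
 * Illustrative sketch only (not part of this driver): how a HIF-level
 * caller might use the gather/sendlist path described above, assuming the
 * ce_sendlist_init()/ce_sendlist_buf_add()/ce_sendlist_send() helpers from
 * ce_api.h. The CE handle, fragment addresses/lengths and transfer_id are
 * hypothetical placeholders.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0, 0);
 *	ce_sendlist_send(ce_tx_hdl, nbuf, &sendlist, transfer_id);
 *
 * Hardware gathers the fragments into a single destination buffer; nbuf is
 * the per-transfer context and is echoed back on send completion so the
 * caller can unmap and free it.
 */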
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
	int record_index;
	struct hif_ce_desc_event *event;
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id >= CE_COUNT_MAX)
		return;

	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	hif_clear_ce_desc_debug_data(event);

	event->type = type;
	event->time = qdf_get_log_timestamp();
	event->cpu_id = qdf_get_cpu();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     hal_get_entrysize_from_srng(hal_ring));

	if (hal_ring)
		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
				&event->current_hp);

	event->memory = memory;
	event->index = index;

	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
		hif_ce_desc_record_rx_paddr(scn, event, memory);

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
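/*
 * Worked example of the 40-bit DMA address split used by CE_ADDR_COPY()
 * above and by ce_send_nolock_srng() below (the address value is
 * illustrative):
 *
 *	dma_addr       = 0x3FD2C41000 (40-bit address)
 *	buffer_addr_lo = dma_addr & 0xFFFFFFFF      = 0xD2C41000
 *	buffer_addr_hi = (dma_addr >> 32) & 0xFF    = 0x3F
 *
 * The completion path reassembles the same address as
 * buffer_addr_lo + ((buffer_addr_hi & 0xFF) << 32).
 */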
static QDF_STATUS
ce_send_nolock_srng(struct CE_handle *copyeng,
		    void *per_transfer_context,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t transfer_id,
		    uint32_t flags,
		    uint32_t user_flags)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					    false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
							src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even though
		 * the register has already been written to.
		 */
		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
					      (union ce_srng_desc *)src_desc,
					      per_transfer_context,
					      src_ring->write_index, nbytes,
					      src_ring->srng_ctx);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}
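/*
 * Readability note for the byte_swap computation in ce_send_nolock_srng():
 * both operands of the bitwise '&' are 0/1 comparison results, so the
 * expression is equivalent to the following sketch (local name is
 * illustrative):
 *
 *	bool swap = (CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA) &&
 *		    !(flags & CE_SEND_FLAG_SWAP_DISABLE);
 *
 *	src_desc->byte_swap = swap;
 */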
static QDF_STATUS
ce_sendlist_send_srng(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	QDF_STATUS status = QDF_STATUS_E_NOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					     (qdf_dma_addr_t)item->data,
					     item->u.nbytes,
					     transfer_id, item->flags,
					     item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static QDF_STATUS
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
			 void *per_recv_context, qdf_dma_addr_t buffer)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_desc *dest_desc = NULL;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_IO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
				    dest_ring->srng_ctx, false) > 0)) {
		dest_desc = hal_srng_src_get_next(scn->hal_soc,
						  dest_ring->srng_ctx);

		if (!dest_desc) {
			status = QDF_STATUS_E_FAILURE;
		} else {
			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		dest_desc = NULL;
		status = QDF_STATUS_E_FAILURE;
	}

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_POST,
				      (union ce_srng_desc *)dest_desc,
				      per_recv_context,
				      dest_ring->write_index, 0,
				      dest_ring->srng_ctx);

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				      status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return count;
}
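/*
 * Illustrative sketch only: a HIF client keeps the destination ring posted
 * with anonymous receive buffers through the ce_recv_buf_enqueue() entry
 * point, which lands in ce_recv_buf_enqueue_srng() above on srng targets.
 * The CE handle, buffer size and mapping step are hypothetical placeholders.
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
 *	qdf_dma_addr_t paddr;
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 *	... DMA-map nbuf and read its physical address into paddr ...
 *	if (ce_recv_buf_enqueue(ce_rx_hdl, nbuf, paddr) != QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free(nbuf);
 */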
/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static QDF_STATUS
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *flagsp)
{
	QDF_STATUS status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status = NULL;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_peek(scn->hal_soc, status_ring->srng_ctx);
	if (!dest_status) {
		status = QDF_STATUS_E_FAILURE;
		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
		goto done;
	}

	/*
	 * By copying the dest_desc_info element to local memory, we can
	 * avoid extra memory reads from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		uint32_t hp, tp;

		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		hal_get_sw_hptp(scn->hal_soc, status_ring->srng_ctx,
				&tp, &hp);
		hif_info_rl("No data to reap, hp %d tp %d", hp, tp);
		status = QDF_STATUS_E_FAILURE;
		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
		goto done;
	}

	/*
	 * Move the tail pointer since nbytes is non-zero and
	 * this entry is processed.
	 */
	hal_srng_dst_get_next(scn->hal_soc, status_ring->srng_ctx);

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_REAP,
				      NULL,
				      dest_ring->
				      per_transfer_context[sw_index],
				      dest_ring->sw_index, nbytes,
				      dest_ring->srng_ctx);

done:
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_STATUS_RING_REAP,
				      (union ce_srng_desc *)dest_status,
				      NULL,
				      -1, 0,
				      status_ring->srng_ctx);

	return status;
}
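/*
 * The sw_index/write_index updates in this file rely on ring sizes being a
 * power of two, so CE_RING_IDX_INCR() can wrap with a simple mask. Worked
 * example, assuming the usual ((idx + 1) & mask) definition from
 * ce_internal.h and an illustrative 512-entry ring (nentries_mask = 511):
 *
 *	CE_RING_IDX_INCR(511, 10)  = (10 + 1)  & 511 = 11
 *	CE_RING_IDX_INCR(511, 511) = (511 + 1) & 511 = 0   (wrap-around)
 */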
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (!dest_ring->per_transfer_context[sw_index])
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}
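/*
 * Illustrative sketch only: ce_revoke_recv_next() (backed by
 * ce_revoke_recv_next_srng() above on srng targets) is typically used to
 * drain still-posted receive buffers at shutdown. The NULL per-transfer
 * context provides the loop's end condition. Handle names are hypothetical.
 *
 *	void *per_ce_ctx;
 *	qdf_nbuf_t nbuf;
 *	qdf_dma_addr_t paddr;
 *
 *	while (ce_revoke_recv_next(ce_rx_hdl, &per_ce_ctx,
 *				   (void **)&nbuf, &paddr) ==
 *	       QDF_STATUS_SUCCESS) {
 *		... unmap and free nbuf ...
 *	}
 */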
/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static QDF_STATUS
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *sw_idx,
				   unsigned int *hw_idx,
				   uint32_t *toeplitz_hash_result)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_TX_DESC_COMPLETION,
					      (union ce_srng_desc *)src_desc,
					      src_ring->
					      per_transfer_context[swi],
					      swi, src_desc->nbytes,
					      src_ring->srng_ctx);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}
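/*
 * Illustrative sketch only: a send-completion reap loop as a caller might
 * write it, assuming the locked ce_completed_send_next() wrapper from
 * ce_api.h takes the same argument list as the _nolock variant above. Each
 * iteration returns the original per-transfer context (here an nbuf) so it
 * can be unmapped and freed.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int nbytes, id, sw_idx, hw_idx;
 *	uint32_t hash;
 *
 *	while (ce_completed_send_next(ce_tx_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *				      &nbytes, &id, &sw_idx, &hw_idx,
 *				      &hash) == QDF_STATUS_SUCCESS) {
 *		... unmap and free the nbuf held in xfer_ctx ...
 *	}
 */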
/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp,
			 qdf_dma_addr_t *bufferp,
			 unsigned int *nbytesp,
			 unsigned int *transfer_idp,
			 uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
						  src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
				  int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/* TODO */
	return false;
}

static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
}

static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
					  struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;
	int irq_id;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	irq_id = scn->int_assignment->msi_idx[ce_id];
	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = irq_id + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	hif_debug("ce_id %d irq_id %d, msi_addr %pK, msi_data %d", ce_id,
		  irq_id, (void *)ring_params->msi_addr, ring_params->msi_data);
}
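/*
 * Worked example for ce_srng_msi_ring_params_setup() above (all values are
 * illustrative): with pld_get_msi_address() returning addr_low 0x35400000
 * and addr_high 0x0, and a CE-to-MSI mapping of msi_data_start = 40 and
 * int_assignment->msi_idx[ce_id] = 2, the ring is programmed with
 *
 *	msi_addr = ((uint64_t)0x0 << 32) | 0x35400000 = 0x35400000
 *	msi_data = 2 + 40                             = 42
 *
 * so the hardware writes msi_data to msi_addr when raising the interrupt
 * for this copy engine.
 */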
static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us.
	 * A default value of 0 disables the timer.
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer.
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}

/**
 * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination ring to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one. The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
					struct CE_ring_state *dest_ring,
					struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}

static void ce_srng_dest_ring_setup(struct hif_softc *scn,
				    uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	hif_debug("ce_id: %d", ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
	}

	/* Dest ring is also a source ring (the host posts rx buffers to it) */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}
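/*
 * Worked example for ce_srng_initialize_dest_timer_interrupt_war() above
 * (ring size is illustrative): with a 512-entry destination ring,
 *
 *	num_buffers_when_fully_posted = 512 - 2 = 510
 *	low_threshold                 = 510 - 1 = 509
 *
 * so the low-threshold interrupt fires as soon as the ring drops below
 * fully posted, prompting the host to service the status ring even though
 * the status ring's own timer interrupt is unusable on these chips.
 */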
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * ce_status_ring_config_int_threshold() - configure ce status ring interrupt
 *                                         thresholds
 * @scn: hif handle
 * @ring_params: ce srng params
 *
 * Return: None
 */
static inline
void ce_status_ring_config_int_threshold(struct hif_softc *scn,
					 struct hal_srng_params *ring_params)
{
	ring_params->intr_timer_thres_us =
			scn->ini_cfg.ce_status_ring_timer_threshold;
	ring_params->intr_batch_cntr_thres_entries =
			scn->ini_cfg.ce_status_ring_batch_count_threshold;
}
#else
static inline
void ce_status_ring_config_int_threshold(struct hif_softc *scn,
					 struct hal_srng_params *ring_params)
{
	ring_params->intr_timer_thres_us = 0x1000;
	ring_params->intr_batch_cntr_thres_entries = 0x1;
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */

static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *status_ring,
				      struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("ce_id: %d", ce_id);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		ce_status_ring_config_int_threshold(scn, &ring_params);
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}

static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			      uint32_t ce_id, struct CE_ring_state *ring,
			      struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}

static void ce_ring_cleanup_srng(struct hif_softc *scn,
				 struct CE_state *CE_state,
				 uint8_t ring_type)
{
	hal_ring_handle_t hal_srng = NULL;

	switch (ring_type) {
	case CE_RING_SRC:
		hal_srng = (hal_ring_handle_t)CE_state->src_ring->srng_ctx;
		break;
	case CE_RING_DEST:
		hal_srng = (hal_ring_handle_t)CE_state->dest_ring->srng_ctx;
		break;
	case CE_RING_STATUS:
		hal_srng = (hal_ring_handle_t)CE_state->status_ring->srng_ctx;
		break;
	}

	if (hal_srng)
		hal_srng_cleanup(scn->hal_soc, hal_srng);
}

static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}
static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	if (!scn->hal_soc) {
		hif_err("hal not initialized: not initializing shadow config");
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		hif_err("hal shadow register configuration already constructed");

		/* return with original configuration */
		return;
	}
	hal_construct_srng_shadow_regs(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);
	hal_set_shadow_regs(scn->hal_soc);
	hal_construct_shadow_regs(scn->hal_soc);
	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}

#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info_srng(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non zero for failure
 */
static
int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state,
			   struct ce_index *info)
{
	struct CE_state *CE_state = (struct CE_state *)ce_state;
	uint32_t tp, hp;

	info->id = CE_state->id;
	if (CE_state->src_ring) {
		hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.tp = tp;
		info->u.srng_info.hp = hp;
	} else if (CE_state->dest_ring && CE_state->status_ring) {
		hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.status_tp = tp;
		info->u.srng_info.status_hp = hp;
		hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.tp = tp;
		info->u.srng_info.hp = hp;
	}

	return 0;
}
#endif

static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_srng_cleanup = ce_ring_cleanup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
#ifdef HIF_CE_LOG_INFO
	.ce_get_index_info =
		ce_get_index_info_srng,
#endif
};

struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);

void ce_service_srng_init(void)
{
	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
}
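/*
 * Illustrative sketch only: how the srng service table returned above can
 * be consumed. ce_service_srng_init() registers the constructor with the
 * CE service framework; an attach path (handle names below are
 * hypothetical) then dispatches through the ops table, e.g.:
 *
 *	struct ce_ops *ops = ce_services_srng();
 *	uint32_t src_desc_sz = ops->ce_get_desc_size(CE_RING_SRC);
 *
 *	ops->ce_ring_setup(scn, CE_RING_SRC, ce_id, src_ring, attr);
 */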