/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (when more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);
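/*
 * Usage sketch (illustrative only, not part of the API): a minimal send
 * completion handler matching the ce_send_cb signature above. The handler
 * name and my_free_tx_buffer() are hypothetical stand-ins for caller code.
 *
 *	static void my_send_done(struct CE_handle *copyeng,
 *				 void *per_ce_send_context,
 *				 void *per_transfer_send_context,
 *				 qdf_dma_addr_t buffer,
 *				 unsigned int nbytes,
 *				 unsigned int transfer_id,
 *				 unsigned int sw_index,
 *				 unsigned int hw_index,
 *				 uint32_t toeplitz_hash_result)
 *	{
 *		// per_transfer_send_context is the value passed to ce_send()
 *		my_free_tx_buffer(per_transfer_send_context, buffer, nbytes);
 *	}
 */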
/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
			   void *per_CE_recv_context,
			   void *per_transfer_recv_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   - space is available and/or running short in a source ring
 *   - buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW  2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW  8
#define CE_HTT_TX_CE         4

/**
 * ce_service_srng_init() - Initialization routine for CE services
 *                          in SRNG-based targets
 *
 * Return: None
 */
void ce_service_srng_init(void);

/**
 * ce_service_legacy_init() - Initialization routine for CE services
 *                            in legacy targets
 *
 * Return: None
 */
void ce_service_legacy_init(void);

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng     - which copy engine to use
 *   buffer      - address of buffer
 *   nbytes      - number of bytes to send
 *   transfer_id - arbitrary ID; reflected to destination
 *   flags       - CE_SEND_FLAG_* values
 * Returns QDF_STATUS.
 *
 * Note: If no flags are specified, the CE's default data swap mode
 * is used.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
QDF_STATUS ce_send(struct CE_handle *copyeng,
		   void *per_transfer_send_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags,
		   unsigned int user_flags);

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len);
#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
				qdf_nbuf_t msdu,
				uint32_t transfer_id,
				uint32_t len,
				uint32_t sendhead);

QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl,
			  qdf_nbuf_t msdu,
			  uint32_t transfer_id,
			  uint32_t len);

/*
 * Register a Send Callback function.
 * The callback is invoked as soon as the contents of a send have
 * reached the destination, unless disable_interrupts is requested.
 * In that case, the callback is invoked when the send status is
 * polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);
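/*
 * Usage sketch (illustrative only): register the completion handler once
 * per copy engine, then post buffers. my_send_done, my_pipe_ctx,
 * my_xfer_ctx and paddr are hypothetical caller-owned names.
 *
 *	// 0: keep completion interrupts enabled
 *	ce_send_cb_register(copyeng, my_send_done, my_pipe_ctx, 0);
 *
 *	if (ce_send(copyeng, my_xfer_ctx, paddr, nbytes,
 *		    transfer_id, 0, 0) != QDF_STATUS_SUCCESS) {
 *		// Source ring full: back off and retry later
 *	}
 */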
/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/**
 * ce_sendlist_buf_add() - Append a simple buffer (address/length) to a
 *                         sendlist
 * @sendlist: Sendlist
 * @buffer: buffer
 * @nbytes: number of bytes to append
 * @flags: CE_SEND_FLAG_* values (OR-ed with internal flags)
 * @user_flags: user flags
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_sendlist_buf_add(struct ce_sendlist *sendlist,
			       qdf_dma_addr_t buffer,
			       unsigned int nbytes,
			       uint32_t flags,
			       uint32_t user_flags);

/**
 * ce_sendlist_send() - Queue a "sendlist" of buffers to be sent using
 *                      gather to a single anonymous destination buffer
 * @copyeng: which copy engine to use
 * @per_transfer_send_context: Per transfer send context
 * @sendlist: list of simple buffers to send using gather
 * @transfer_id: arbitrary ID; reflected to destination
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_sendlist_send(struct CE_handle *copyeng,
			    void *per_transfer_send_context,
			    struct ce_sendlist *sendlist,
			    unsigned int transfer_id);
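/*
 * Usage sketch (illustrative only): gather two fragments into one
 * transfer. The sendlist lives on the caller's stack; hdr_paddr and
 * payload_paddr are hypothetical DMA addresses.
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	if (ce_sendlist_send(copyeng, my_xfer_ctx, &sl, transfer_id) !=
 *	    QDF_STATUS_SUCCESS) {
 *		// Not enough source ring descriptors for the gather
 *	}
 */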
/*==================Recv=====================================================*/

/**
 * ce_recv_buf_enqueue() - Make a buffer available to receive. The buffer
 *                         must be at least of a minimal size appropriate for
 *                         this copy engine (src_sz_max attribute).
 * @copyeng: which copy engine to use
 * @per_transfer_recv_context: context passed back to caller's recv_cb
 * @buffer: address of buffer in CE space
 *
 * Implementation note: Pushes a buffer to Dest ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_recv_buf_enqueue(struct CE_handle *copyeng,
			       void *per_transfer_recv_context,
			       qdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * The callback is invoked as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * The callback is invoked as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			      CE_watermark_cb fn_ptr,
			      void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of a copy
 * engine.
 */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For a source ring, this does not imply that destination-side
 * buffers are available; it merely indicates descriptor space
 * in the source ring.
 *
 * For a destination ring, this does not imply that previously
 * received buffers have been processed; it merely indicates
 * descriptor space in the destination ring.
 *
 * Mainly for use with the CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/**
 * ce_completed_recv_next() - Supply data for the next completed unprocessed
 *                            receive descriptor.
 * @copyeng: which copy engine to use
 * @per_CE_contextp: CE context
 * @per_transfer_contextp: Transfer context
 * @bufferp: buffer pointer
 * @nbytesp: number of bytes
 * @transfer_idp: transfer ID
 * @flagsp: flags
 *
 * For use:
 *   - with a CE Watermark callback,
 *   - in a recv_cb function when processing buf_lists,
 *   - in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_completed_recv_next(struct CE_handle *copyeng,
				  void **per_CE_contextp,
				  void **per_transfer_contextp,
				  qdf_dma_addr_t *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp);

/**
 * ce_completed_send_next() - Supply data for the next completed unprocessed
 *                            send descriptor.
 * @copyeng: which copy engine to use
 * @per_CE_contextp: CE context
 * @per_transfer_contextp: Transfer context
 * @bufferp: buffer pointer
 * @nbytesp: number of bytes
 * @transfer_idp: transfer ID
 * @sw_idx: SW index
 * @hw_idx: HW index
 * @toeplitz_hash_result: toeplitz hash result
 *
 * For use:
 *   - with a CE Watermark callback,
 *   - in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_completed_send_next(struct CE_handle *copyeng,
				  void **per_CE_contextp,
				  void **per_transfer_contextp,
				  qdf_dma_addr_t *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *sw_idx,
				  unsigned int *hw_idx,
				  uint32_t *toeplitz_hash_result);
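/*
 * Usage sketch (illustrative only): reaping completed receives from a
 * watermark callback (or a polled context) instead of per-buffer recv_cb
 * invocations. All my_* names are hypothetical caller-owned helpers.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t buf;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ce_completed_recv_next(copyeng, &ce_ctx, &xfer_ctx, &buf,
 *				      &nbytes, &id, &flags) ==
 *	       QDF_STATUS_SUCCESS) {
 *		my_deliver_rx(xfer_ctx, buf, nbytes, flags);
 *		// Typically re-post a fresh buffer to keep the ring full
 *		ce_recv_buf_enqueue(copyeng, my_new_ctx, my_new_paddr);
 *	}
 */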
/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/

/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/**
 * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
 * @scn: HIF context
 * @ctrl_addr: CE control register address
 * @write_index: write index
 *
 * Return: None
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP		0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA		0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS	0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR		0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL		0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG			0x20 /* Diag CE */
#define CE_ATTR_INIT_ON_DEMAND		0x40 /* Initialized on demand */
#define CE_ATTR_HI_TASKLET		0x80 /* HI_TASKLET CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags: CE_ATTR_* values
 * @priority: TBD
 * @src_nentries: #entries in source ring - Must be a power of 2
 * @src_sz_max: Max source send size for this CE. This is also the minimum
 *              size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved: Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};
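/*
 * Usage sketch (illustrative only): a plausible attribute block passed to
 * ce_init(). The ring sizes and max send size below are made-up values;
 * real configurations come from the per-target CE configuration tables.
 *
 *	struct CE_attr attr = {
 *		.flags         = 0,
 *		.priority      = 0,
 *		.src_nentries  = 512,	// must be a power of 2
 *		.src_sz_max    = 2048,	// also the min recv buffer size
 *		.dest_nentries = 512,	// must be a power of 2
 *	};
 *
 *	struct CE_handle *copyeng = ce_init(scn, CE_id, &attr);
 */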
/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send_next() reports success
 * for each completed fragment, with CE_SENDLIST_ITEM_CTXT as the transfer
 * context for all but the last. The upper layer can use this to identify
 * the status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED	0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED	0x0001 /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE	0x0002 /* for shared IRQs */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine should release these resources to the microcontroller.
 * The microcontroller needs:
 *   - Copy engine source descriptor base address
 *   - Copy engine source descriptor size
 *   - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
				       qdf_shared_mem_t **ce_sr,
				       uint32_t *ce_sr_ring_size,
				       qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);

/* Forward declaration */
struct CE_ring_state;
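/*
 * Usage sketch (illustrative only): how a caller might pick the service
 * vtable for the current target generation. This mirrors the split between
 * ce_services_srng() and ce_services_legacy(); the local variable name is
 * hypothetical.
 *
 *	struct ce_ops *ops = ce_srng_based(scn) ?
 *				ce_services_srng() : ce_services_legacy();
 */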
struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
			     uint32_t ce_id, struct CE_ring_state *ring,
			     struct CE_attr *attr);
	void (*ce_srng_cleanup)(struct hif_softc *scn,
				struct CE_state *CE_state, uint8_t ring_type);
	QDF_STATUS (*ce_send_nolock)(struct CE_handle *copyeng,
				     void *per_transfer_context,
				     qdf_dma_addr_t buffer,
				     uint32_t nbytes,
				     uint32_t transfer_id,
				     uint32_t flags,
				     uint32_t user_flags);
	QDF_STATUS (*ce_sendlist_send)(struct CE_handle *copyeng,
				       void *per_transfer_context,
				       struct ce_sendlist *sendlist,
				       unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp,
					  unsigned int *nbytesp,
					  unsigned int *transfer_idp,
					  uint32_t *toeplitz_hash_result);
	QDF_STATUS (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
					  void *per_recv_context,
					  qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	QDF_STATUS (*ce_completed_recv_next_nolock)(
			struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *flagsp);
	QDF_STATUS (*ce_completed_send_next_nolock)(
			struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *sw_idx,
			unsigned int *hw_idx,
			uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
					     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured);
	int (*ce_get_index_info)(struct hif_softc *scn, void *ce_state,
				 struct ce_index *info);
#ifdef CONFIG_SHADOW_V3
	void (*ce_prepare_shadow_register_v3_cfg)(struct hif_softc *scn,
			struct pld_shadow_reg_v3_cfg **shadow_config,
			int *num_shadow_registers_configured);
#endif
#ifdef FEATURE_DIRECT_LINK
	QDF_STATUS (*ce_set_irq_config_by_ceid)(struct hif_softc *scn,
						uint8_t ce_id, uint64_t addr,
						uint32_t data);
	uint16_t (*ce_get_direct_link_dest_buffers)(struct hif_softc *scn,
						    uint64_t **dma_addr,
						    uint32_t *buf_size);
	QDF_STATUS (*ce_get_direct_link_ring_info)(struct hif_softc *scn,
			struct hif_direct_link_ce_info *info,
			uint8_t max_ce_info_len);
#endif
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);

/**
 * ce_engine_service_reg() - service a CE via the regular interrupt path
 * @scn: hif_context
 * @CE_id: Copy engine ID
 *
 * Called from ce_per_engine_service and goes through the regular interrupt
 * handling that does not involve the WLAN fast path feature.
 *
 * Return: void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id);

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 *
 * Return: void
 */
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);

#endif /* __COPY_ENGINE_API_H__ */