/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * buffer is the address of the first byte of the first buffer
 * sent (if more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
			   void *per_CE_recv_context,
			   void *per_transfer_recv_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int flags);
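/*
 * Example (illustrative sketch only, not part of this API): a minimal
 * send-completion handler matching the ce_send_cb signature. The names
 * my_send_done and my_layer_ctx are placeholders chosen by the caller.
 *
 *   static void my_send_done(struct CE_handle *copyeng,
 *                            void *per_ce_send_context,
 *                            void *per_transfer_send_context,
 *                            qdf_dma_addr_t buffer,
 *                            unsigned int nbytes,
 *                            unsigned int transfer_id,
 *                            unsigned int sw_index,
 *                            unsigned int hw_index,
 *                            uint32_t toeplitz_hash_result)
 *   {
 *           ... unmap and free the buffer described by
 *               per_transfer_send_context ...
 *   }
 *
 * The handler is registered once per copy engine with
 * ce_send_cb_register() (declared below).
 */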
/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *     space is available and/or running short in a source ring
 *     buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to allow upper layers to
 * throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8
#define CE_HTT_TX_CE 4

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng     - which copy engine to use
 *   buffer      - address of buffer
 *   nbytes      - number of bytes to send
 *   transfer_id - arbitrary ID; reflected to destination
 *   flags       - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use the CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
	    void *per_transfer_send_context,
	    qdf_dma_addr_t buffer,
	    unsigned int nbytes,
	    unsigned int transfer_id,
	    unsigned int flags,
	    unsigned int user_flags);

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len);
#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
				qdf_nbuf_t msdu,
				uint32_t transfer_id,
				uint32_t len,
				uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
			  qdf_nbuf_t msdu,
			  uint32_t transfer_id,
			  uint32_t len);

/*
 * Register a Send Callback function.
 * This function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested. In this case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
			qdf_dma_addr_t buffer,
			unsigned int nbytes,
			/* OR-ed with internal flags */
			uint32_t flags,
			uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 *   copyeng     - which copy engine to use
 *   sendlist    - list of simple buffers to send using gather
 *   transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		     void *per_transfer_send_context,
		     struct ce_sendlist *sendlist,
		     unsigned int transfer_id);
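/*
 * Example (illustrative sketch only, not part of this API): gathering two
 * fragments into one transfer. The frag0/frag1 addresses and lengths are
 * placeholders owned by the caller; error handling is omitted for brevity.
 *
 *   struct ce_sendlist sl;
 *
 *   ce_sendlist_init(&sl);
 *   ce_sendlist_buf_add(&sl, frag0_paddr, frag0_len, 0, 0);
 *   ce_sendlist_buf_add(&sl, frag1_paddr, frag1_len, 0, 0);
 *   if (ce_sendlist_send(copyeng, pkt_ctx, &sl, transfer_id) != 0)
 *           ... source ring full or other error ...
 *
 * Completions for the intermediate fragment carry CE_SENDLIST_ITEM_CTXT
 * as their transfer context (see below); the last fragment carries pkt_ctx.
 */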
/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 *   copyeng                   - which copy engine to use
 *   per_transfer_recv_context - context passed back to caller's recv_cb
 *   buffer                    - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			qdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			      CE_watermark_cb fn_ptr,
			      void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of a copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For a source ring, this does not imply that destination-side
 * buffers are available; it merely indicates descriptor space
 * in the source ring.
 *
 * For a destination ring, this does not imply that previously
 * received buffers have been processed; it merely indicates
 * descriptor space in the destination ring.
 *
 * Mainly for use with the CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);
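/*
 * Example (illustrative sketch only, not part of this API): priming a
 * destination ring at startup. rx_ctx[], rx_paddr[] and nbufs are
 * placeholders owned by the caller, and my_recv_done is assumed to be a
 * handler with the CE_recv_cb signature.
 *
 *   ce_recv_cb_register(copyeng, my_recv_done, my_layer_ctx, 0);
 *   for (i = 0; i < nbufs && ce_recv_entries_avail(copyeng); i++)
 *           if (ce_recv_buf_enqueue(copyeng, rx_ctx[i], rx_paddr[i]) != 0)
 *                   break;
 *
 * Each posted buffer must be at least src_sz_max bytes
 * (see struct CE_attr below).
 */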
/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For a source ring, the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For a destination ring, the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *     with the CE Watermark callback,
 *     in a recv_cb function when processing buf_lists,
 *     in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *     with the CE Watermark callback,
 *     in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);
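/*
 * Example (illustrative sketch only, not part of this API): clean shutdown
 * of one copy engine after Target DMA has been stopped. The local
 * variables and buffer-release steps are placeholders.
 *
 *   void *ce_ctx, *xfer_ctx;
 *   qdf_dma_addr_t paddr;
 *   unsigned int len, id;
 *   uint32_t hash;
 *
 *   while (ce_revoke_recv_next(copyeng, &ce_ctx, &xfer_ctx,
 *                              &paddr) == QDF_STATUS_SUCCESS)
 *           ... unmap and free the revoked rx buffer ...
 *   while (ce_cancel_send_next(copyeng, &ce_ctx, &xfer_ctx, &paddr,
 *                              &len, &id, &hash) == QDF_STATUS_SUCCESS)
 *           ... unmap and free the cancelled tx buffer ...
 *   ce_fini(copyeng);
 */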
/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG 0x20 /* Diag CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags: CE_ATTR_* values
 * @priority: TBD
 * @src_nentries: #entries in source ring - Must be a power of 2
 * @src_sz_max: Max source send size for this CE. This is also the minimum
 *              size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved: Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};
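/*
 * Example (illustrative sketch only, not part of this API): configuring
 * and bringing up one copy engine. The ring sizes and buffer size are
 * arbitrary placeholders, not recommended values; both nentries fields
 * must be powers of 2.
 *
 *   struct CE_attr attr = {
 *           .flags         = 0,
 *           .src_nentries  = 32,
 *           .src_sz_max    = 2048,
 *           .dest_nentries = 32,
 *   };
 *   struct CE_handle *copyeng = ce_init(scn, CE_id, &attr);
 *
 *   if (!copyeng)
 *           ... handle initialization failure ...
 */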
/*
 * When using ce_sendlist_send() to transfer multiple buffer fragments,
 * the transfer context of each fragment, except the last one, is filled
 * with CE_SENDLIST_ITEM_CTXT. The send completion reports success for
 * each completed fragment, with the transfer context set to
 * CE_SENDLIST_ITEM_CTXT for all but the last fragment. The upper layer
 * can use this to identify the status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001 /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002 /* for shared IRQ's */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine should release these resources to the microcontroller.
 * The microcontroller needs
 * - Copy engine source descriptor base address
 * - Copy engine source descriptor size
 * - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
				       qdf_shared_mem_t **ce_sr,
				       uint32_t *ce_sr_ring_size,
				       qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		(scn->pkt_stats.hif_pipe_no_resrc_count) += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);

/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
			     uint32_t ce_id, struct CE_ring_state *ring,
			     struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			      void *per_transfer_context,
			      qdf_dma_addr_t buffer,
			      uint32_t nbytes,
			      uint32_t transfer_id,
			      uint32_t flags,
			      uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
				void *per_transfer_context,
				struct ce_sendlist *sendlist,
				unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp,
					  unsigned int *nbytesp,
					  unsigned int *transfer_idp,
					  uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
				   void *per_recv_context,
				   qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *sw_idx,
					     unsigned int *hw_idx,
					     uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
					     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured);
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);

#endif /* __COPY_ENGINE_API_H__ */