/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_INTERNAL_H__
#define __COPY_ENGINE_INTERNAL_H__

#include <hif.h>		/* A_TARGET_WRITE */

/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
	CE_PENDING,
};

enum ol_ath_hif_ce_ecodes {
	CE_RING_DELTA_FAIL = 0
};

struct CE_src_desc;

/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;	/* cached copy */
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	unsigned int hw_index;		/* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;	/* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;	/* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	void *srng_ctx;
	void **per_transfer_context;

	/* HAL CE ring type */
	uint32_t hal_ring_type;
	/* ring memory prealloc */
	uint8_t is_ring_prealloc;

	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
};

/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;
	unsigned int id;
	unsigned int attr_flags;	/* CE_ATTR_* */
	uint32_t ctrl_addr;		/* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;
	void *context;
#endif /* WLAN_FEATURE_FASTPATH */
	qdf_work_t oom_allocation_work;

	ce_send_cb send_cb;
	void *send_context;

	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	CE_watermark_cb watermark_cb;
	void *wm_context;

	/* Record the state of the copy compl interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;

	qdf_spinlock_t ce_index_lock;
#ifdef CE_TASKLET_SCHEDULE_ON_FULL
	qdf_spinlock_t ce_interrupt_lock;
#endif
	/* Flag to indicate whether to break out the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* CE service start time in nanoseconds */
	unsigned long long ce_service_start_time;
	/* Num Of Receive Buffers handled for one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;
	qdf_timer_t poll_timer;

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	qdf_lro_ctx_t lro_data;

	void (*service)(struct hif_softc *scn, int CE_id);
#ifdef WLAN_TRACEPOINTS
	/* CE tasklet sched time in nanoseconds */
	unsigned long long ce_tasklet_sched_time;
#endif
};

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CLOCK_OVERRIDE 0x2

#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif

#ifdef QCA_WIFI_3_0
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1, /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};

struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1, /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
#else
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t meta_data:12,
		 target_int_disable:1,
		 host_int_disable:1,
		 byte_swap:1,
		 gather:1,
		 nbytes:16;
#else

	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};

struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t meta_data:12,
		 target_int_disable:1,
		 host_int_disable:1,
		 byte_swap:1,
		 gather:1,
		 nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
#endif /* QCA_WIFI_3_0 */

struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

#define CE_SENDLIST_ITEMS_MAX 12

/**
 * union ce_desc - unified data type for ce descriptors
 * @src_desc: source ring descriptor
 * @dest_desc: destination ring descriptor
 *
 * Both src and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};

/**
 * union ce_srng_desc - unified data type for ce srng descriptors
 * @src_desc: ce srng Source ring descriptor
 * @dest_desc: ce srng destination ring descriptor
 * @dest_status_desc: ce srng status ring descriptor
 */
union ce_srng_desc {
	struct ce_srng_src_desc src_desc;
	struct ce_srng_dest_desc dest_desc;
	struct ce_srng_dest_status_desc dest_status_desc;
};

/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
 *	index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recorded when dropping a write to
 *	the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: write index update on the resume path
 *	(NOTE(review): inferred from enumerator name - confirm)
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
 * @HIF_CE_REAP_EXIT: records when we process completion outside of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are re-enabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: record the packet when nbuf fails to allocate
 * @HIF_RX_NBUF_MAP_FAILURE: record the packet when dma map fails
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: record the packet when enqueue to ce fails
 * @HIF_CE_SRC_RING_BUFFER_POST: record the packet when buffer is posted to
 *	ce src ring
 * @HIF_CE_DEST_RING_BUFFER_POST: record the packet when buffer is posted to
 *	ce dst ring
 * @HIF_CE_DEST_RING_BUFFER_REAP: record the packet when buffer is reaped from
 *	ce dst ring
 * @HIF_CE_DEST_STATUS_RING_REAP: record the packet when status ring is reaped
 * @HIF_RX_DESC_PRE_NBUF_ALLOC: record the packet before nbuf allocation
 * @HIF_RX_DESC_PRE_NBUF_MAP: record the packet before nbuf map
 * @HIF_RX_DESC_POST_NBUF_MAP: record the packet after nbuf map
 * @HIF_EVENT_TYPE_MAX: sentinel marking the number of event types; not a
 *	real event
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,

	HIF_CE_SRC_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_REAP,
	HIF_CE_DEST_STATUS_RING_REAP,

	HIF_RX_DESC_PRE_NBUF_ALLOC,
	HIF_RX_DESC_PRE_NBUF_MAP,
	HIF_RX_DESC_POST_NBUF_MAP,

	HIF_EVENT_TYPE_MAX,
};

void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);

enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};

/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter. The latter is the actual structure
 * definition and it is only used internally. The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	unsigned int num_items;
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;	/* e.g. buffer or desc list */
		union {
			unsigned int nbytes;	/* simple buffer */
			unsigned int ndesc;	/* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};

bool hif_ce_service_should_yield(struct hif_softc *scn, struct CE_state
				 *ce_state);

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif

/* which ring of a CE? */
#define CE_RING_SRC 0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER 0x00010000	/* Use Gather */

/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);

/**
 * hif_get_fw_diag_ce_id() - gets the copy engine id used for FW diag
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id);

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#ifndef HIF_CE_HISTORY_MAX
#define HIF_CE_HISTORY_MAX 1024
#endif

#define CE_DEBUG_MAX_DATA_BUF_SIZE 64

580 /** 581 * struct hif_ce_desc_event - structure for detailing a ce event 582 * @index: location of the descriptor in the ce ring; 583 * @type: what the event was 584 * @time: when it happened 585 * @current_hp: holds the current ring hp value 586 * @current_tp: holds the current ring tp value 587 * @descriptor: descriptor enqueued or dequeued 588 * @memory: virtual address that was used 589 * @dma_addr: physical/iova address based on smmu status 590 * @dma_to_phy: physical address from iova address 591 * @virt_to_phy: physical address from virtual address 592 * @actual_data_len: length of the data 593 * @data: data pointed by descriptor 594 */ 595 struct hif_ce_desc_event { 596 int index; 597 enum hif_ce_event_type type; 598 uint64_t time; 599 int cpu_id; 600 #ifdef HELIUMPLUS 601 union ce_desc descriptor; 602 #else 603 uint32_t current_hp; 604 uint32_t current_tp; 605 union ce_srng_desc descriptor; 606 #endif 607 void *memory; 608 609 #ifdef HIF_RECORD_PADDR 610 /* iova/pa based on smmu status */ 611 qdf_dma_addr_t dma_addr; 612 /* store pa from iova address */ 613 qdf_dma_addr_t dma_to_phy; 614 /* store pa */ 615 qdf_dma_addr_t virt_to_phy; 616 #endif /* HIF_RECORD_ADDR */ 617 618 #ifdef HIF_CE_DEBUG_DATA_BUF 619 size_t actual_data_len; 620 uint8_t *data; 621 #endif /* HIF_CE_DEBUG_DATA_BUF */ 622 }; 623 #else 624 struct hif_ce_desc_event; 625 #endif /*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/ 626 627 /** 628 * get_next_record_index() - get the next record index 629 * @table_index: atomic index variable to increment 630 * @array_size: array size of the circular buffer 631 * 632 * Increment the atomic index and reserve the value. 633 * Takes care of buffer wrap. 634 * Guaranteed to be thread safe as long as fewer than array_size contexts 635 * try to access the array. If there are more than array_size contexts 636 * trying to access the array, full locking of the recording process would 637 * be needed to have sane logging. 
638 */ 639 int get_next_record_index(qdf_atomic_t *table_index, int array_size); 640 641 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 642 /** 643 * hif_record_ce_srng_desc_event() - Record data pointed by the CE descriptor 644 * @scn: structure detailing a ce event 645 * @ce_id: length of the data 646 * @type: event_type 647 * @descriptor: ce src/dest/status ring descriptor 648 * @memory: nbuf 649 * @index: current sw/write index 650 * @len: len of the buffer 651 * @hal_ring: ce hw ring 652 * 653 * Return: None 654 */ 655 void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id, 656 enum hif_ce_event_type type, 657 union ce_srng_desc *descriptor, 658 void *memory, int index, 659 int len, void *hal_ring); 660 661 /** 662 * hif_clear_ce_desc_debug_data() - Clear the contents of hif_ce_desc_event 663 * upto data field before reusing it. 664 * 665 * @event: record every CE event 666 * 667 * Return: None 668 */ 669 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event); 670 #else 671 static inline 672 void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id, 673 enum hif_ce_event_type type, 674 union ce_srng_desc *descriptor, 675 void *memory, int index, 676 int len, void *hal_ring) 677 { 678 } 679 680 static inline 681 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event) 682 { 683 } 684 #endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */ 685 686 #ifdef HIF_CE_DEBUG_DATA_BUF 687 /** 688 * hif_ce_desc_data_record() - Record data pointed by the CE descriptor 689 * @event: structure detailing a ce event 690 * @len: length of the data 691 * Return: 692 */ 693 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len); 694 695 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id); 696 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id); 697 #else 698 static inline 699 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, 
uint32_t ce_id) 700 { 701 return QDF_STATUS_SUCCESS; 702 } 703 704 static inline 705 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { } 706 707 static inline 708 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len) 709 { 710 } 711 #endif /*HIF_CE_DEBUG_DATA_BUF*/ 712 713 #ifdef HIF_CONFIG_SLUB_DEBUG_ON 714 /** 715 * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors 716 * @nbytes: nbytes value being written into a send descriptor 717 * @ce_state: context of the copy engine 718 719 * nbytes should be non-zero and less than max configured for the copy engine 720 * 721 * Return: none 722 */ 723 static inline void ce_validate_nbytes(uint32_t nbytes, 724 struct CE_state *ce_state) 725 { 726 if (nbytes <= 0 || nbytes > ce_state->src_sz_max) 727 QDF_BUG(0); 728 } 729 #else 730 static inline void ce_validate_nbytes(uint32_t nbytes, 731 struct CE_state *ce_state) 732 { 733 } 734 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */ 735 736 #if defined(HIF_RECORD_PADDR) 737 /** 738 * hif_ce_desc_record_rx_paddr() - record physical address for IOMMU 739 * IOVA addr and MMU virtual addr for Rx 740 * @scn: hif_softc 741 * @nbuf: buffer posted to fw 742 * 743 * record physical address for ce_event_type HIF_RX_DESC_POST and 744 * HIF_RX_DESC_COMPLETION 745 * 746 * Return: none 747 */ 748 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn, 749 struct hif_ce_desc_event *event, 750 qdf_nbuf_t nbuf); 751 #else 752 static inline 753 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn, 754 struct hif_ce_desc_event *event, 755 qdf_nbuf_t nbuf) 756 { 757 } 758 #endif /* HIF_RECORD_PADDR */ 759 #endif /* __COPY_ENGINE_INTERNAL_H__ */ 760