1 /* 2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hif.h" 21 #include "hif_io32.h" 22 #include "ce_api.h" 23 #include "ce_main.h" 24 #include "ce_internal.h" 25 #include "ce_reg.h" 26 #include "qdf_lock.h" 27 #include "regtable.h" 28 #include "hif_main.h" 29 #include "hif_debug.h" 30 #include "hif_napi.h" 31 #include "qdf_module.h" 32 #include <qdf_tracepoint.h> 33 34 #ifdef IPA_OFFLOAD 35 #ifdef QCA_WIFI_3_0 36 #define CE_IPA_RING_INIT(ce_desc) \ 37 do { \ 38 ce_desc->gather = 0; \ 39 ce_desc->enable_11h = 0; \ 40 ce_desc->meta_data_low = 0; \ 41 ce_desc->packet_result_offset = 64; \ 42 ce_desc->toeplitz_hash_enable = 0; \ 43 ce_desc->addr_y_search_disable = 0; \ 44 ce_desc->addr_x_search_disable = 0; \ 45 ce_desc->misc_int_disable = 0; \ 46 ce_desc->target_int_disable = 0; \ 47 ce_desc->host_int_disable = 0; \ 48 ce_desc->dest_byte_swap = 0; \ 49 ce_desc->byte_swap = 0; \ 50 ce_desc->type = 2; \ 51 ce_desc->tx_classify = 1; \ 52 ce_desc->buffer_addr_hi = 0; \ 53 ce_desc->meta_data = 0; \ 54 ce_desc->nbytes = 128; \ 55 } while (0) 56 #else 57 #define CE_IPA_RING_INIT(ce_desc) \ 58 do { \ 59 ce_desc->byte_swap = 0; \ 60 ce_desc->nbytes = 60; \ 61 ce_desc->gather = 0; \ 62 } while (0) 63 #endif /* QCA_WIFI_3_0 */ 64 #endif /* IPA_OFFLOAD */ 65 66 static int war1_allow_sleep; 67 /* io32 write workaround */ 68 static int hif_ce_war1; 69 70 /** 71 * hif_ce_war_disable() - disable ce war globally 72 */ 73 void hif_ce_war_disable(void) 74 { 75 hif_ce_war1 = 0; 76 } 77 78 /** 79 * hif_ce_war_enable() - enable ce war globally 80 */ 81 void hif_ce_war_enable(void) 82 { 83 hif_ce_war1 = 1; 84 } 85 86 /* 87 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be 88 * checked here 89 */ 90 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 91 92 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1) 93 #define CE_DEBUG_DATA_PER_ROW 16 94 95 static const char *ce_event_type_to_str(enum hif_ce_event_type type); 96 97 int get_next_record_index(qdf_atomic_t *table_index, int array_size) 98 { 99 int record_index = qdf_atomic_inc_return(table_index); 100 101 if (record_index == array_size) 102 qdf_atomic_sub(array_size, table_index); 103 104 while (record_index >= array_size) 105 record_index -= array_size; 106 107 return record_index; 108 } 109 110 qdf_export_symbol(get_next_record_index); 111 112 #ifdef HIF_CE_DEBUG_DATA_BUF 113 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len) 114 { 115 uint8_t *data = NULL; 116 117 if (!event->data) { 118 hif_err_rl("No ce debug memory allocated"); 119 return; 120 } 121 122 if (event->memory && len > 0) 123 data =
qdf_nbuf_data((qdf_nbuf_t)event->memory); 124 125 event->actual_data_len = 0; 126 qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE); 127 128 if (data && len > 0) { 129 qdf_mem_copy(event->data, data, 130 ((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ? 131 len : CE_DEBUG_MAX_DATA_BUF_SIZE)); 132 event->actual_data_len = len; 133 } 134 } 135 136 qdf_export_symbol(hif_ce_desc_data_record); 137 138 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event) 139 { 140 qdf_mem_zero(event, 141 offsetof(struct hif_ce_desc_event, data)); 142 } 143 144 qdf_export_symbol(hif_clear_ce_desc_debug_data); 145 #else 146 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event) 147 { 148 qdf_mem_zero(event, sizeof(struct hif_ce_desc_event)); 149 } 150 151 qdf_export_symbol(hif_clear_ce_desc_debug_data); 152 #endif /* HIF_CE_DEBUG_DATA_BUF */ 153 154 #if defined(HIF_RECORD_PADDR) 155 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn, 156 struct hif_ce_desc_event *event, 157 qdf_nbuf_t memory) 158 { 159 if (memory) { 160 event->dma_addr = QDF_NBUF_CB_PADDR(memory); 161 event->dma_to_phy = qdf_mem_paddr_from_dmaaddr( 162 scn->qdf_dev, 163 event->dma_addr); 164 165 event->virt_to_phy = 166 virt_to_phys(qdf_nbuf_data(memory)); 167 } 168 } 169 #endif /* HIF_RECORD_PADDR */ 170 171 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) 172 { 173 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 174 struct ce_desc_hist *ce_hist; 175 struct latest_evt_history *evt; 176 int i; 177 178 if (!scn) 179 return; 180 181 ce_hist = &scn->hif_ce_desc_hist; 182 183 for (i = 0; i < HIF_CE_MAX_LATEST_HIST; i++) { 184 if (!ce_hist->enable[i + HIF_CE_MAX_LATEST_HIST]) 185 continue; 186 187 evt = &ce_hist->latest_evt[i]; 188 hif_info_high("CE_id:%d cpu_id:%d irq_entry:0x%llx tasklet_entry:0x%llx tasklet_resched:0x%llx tasklet_exit:0x%llx ce_work:0x%llx hp:%x tp:%x", 189 (i + HIF_CE_MAX_LATEST_HIST), evt->cpu_id, 190 evt->irq_entry_ts, evt->bh_entry_ts, 191 evt->bh_resched_ts, evt->bh_exit_ts, 192 evt->bh_work_ts, evt->ring_hp, evt->ring_tp); 193 } 194 } 195 196 void hif_record_latest_evt(struct ce_desc_hist *ce_hist, 197 uint8_t type, 198 int ce_id, uint64_t time, 199 uint32_t hp, uint32_t tp) 200 { 201 struct latest_evt_history *latest_evt; 202 203 if (ce_id != 2 && ce_id != 3) 204 return; 205 206 latest_evt = &ce_hist->latest_evt[ce_id - HIF_CE_MAX_LATEST_HIST]; 207 208 switch (type) { 209 case HIF_IRQ_EVENT: 210 latest_evt->irq_entry_ts = time; 211 latest_evt->cpu_id = qdf_get_cpu(); 212 break; 213 case HIF_CE_TASKLET_ENTRY: 214 latest_evt->bh_entry_ts = time; 215 break; 216 case HIF_CE_TASKLET_RESCHEDULE: 217 latest_evt->bh_resched_ts = time; 218 break; 219 case HIF_CE_TASKLET_EXIT: 220 latest_evt->bh_exit_ts = time; 221 break; 222 case HIF_TX_DESC_COMPLETION: 223 case HIF_CE_DEST_STATUS_RING_REAP: 224 latest_evt->bh_work_ts = time; 225 latest_evt->ring_hp = hp; 226 latest_evt->ring_tp = tp; 227 break; 228 default: 229 break; 230 } 231 } 232 233 /** 234 * hif_record_ce_desc_event() - record ce descriptor events 235 * @scn: hif_softc 236 * @ce_id: which ce is the event occurring on 237 * @type: what happened 238 * @descriptor: pointer to the descriptor posted/completed 239 * @memory: virtual address of buffer related to the descriptor 240 * @index: index that the descriptor was/will be at.
241 * @len: length of the data associated with the descriptor 242 */ 243 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id, 244 enum hif_ce_event_type type, 245 union ce_desc *descriptor, 246 void *memory, int index, 247 int len) 248 { 249 int record_index; 250 struct hif_ce_desc_event *event; 251 252 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 253 struct hif_ce_desc_event *hist_ev = NULL; 254 255 if (ce_id < CE_COUNT_MAX) 256 hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id]; 257 else 258 return; 259 263 if (!ce_hist->enable[ce_id]) 264 return; 265 266 if (!hist_ev) 267 return; 268 269 record_index = get_next_record_index( 270 &ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX); 271 272 event = &hist_ev[record_index]; 273 274 hif_clear_ce_desc_debug_data(event); 275 276 event->type = type; 277 event->time = qdf_get_log_timestamp(); 278 event->cpu_id = qdf_get_cpu(); 279 280 if (descriptor) 281 qdf_mem_copy(&event->descriptor, descriptor, 282 sizeof(union ce_desc)); 283 284 event->memory = memory; 285 event->index = index; 286 287 if (event->type == HIF_RX_DESC_POST || 288 event->type == HIF_RX_DESC_COMPLETION) 289 hif_ce_desc_record_rx_paddr(scn, event, memory); 290 291 if (ce_hist->data_enable[ce_id]) 292 hif_ce_desc_data_record(event, len); 293 294 hif_record_latest_evt(ce_hist, type, ce_id, event->time, 0, 0); 295 } 296 qdf_export_symbol(hif_record_ce_desc_event); 297 298 /** 299 * ce_init_ce_desc_event_log() - initialize the ce event log 300 * @scn: HIF context 301 * @ce_id: copy engine id for which we are initializing the log 302 * @size: size of array to dedicate 303 * 304 * Currently the passed size is ignored in favor of a precompiled value. 305 */ 306 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size) 307 { 308 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 309 qdf_atomic_init(&ce_hist->history_index[ce_id]); 310 qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]); 311 } 312 313 /** 314 * ce_deinit_ce_desc_event_log() - deinitialize the ce event log 315 * @scn: HIF context 316 * @ce_id: copy engine id for which we are deinitializing the log 317 * 318 */ 319 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id) 320 { 321 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 322 323 qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]); 324 } 325 326 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ 327 void hif_record_ce_desc_event(struct hif_softc *scn, 328 int ce_id, enum hif_ce_event_type type, 329 union ce_desc *descriptor, void *memory, 330 int index, int len) 331 { 332 } 333 qdf_export_symbol(hif_record_ce_desc_event); 334 335 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, 336 int size) 337 { 338 } 339 340 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id) 341 { 342 } 343 #endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ 344 345 #ifdef NAPI_YIELD_BUDGET_BASED 346 bool hif_ce_service_should_yield(struct hif_softc *scn, 347 struct CE_state *ce_state) 348 { 349 bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count); 350 351 /* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes 352 * beyond MAX_NUM_OF_RECEIVES, to avoid a NAPI budget calculation issue. This 353 * can happen in fast path handling as processing is happening in 354 * batches.
355 */ 356 if (yield) 357 ce_state->receive_count = MAX_NUM_OF_RECEIVES; 358 359 return yield; 360 } 361 #else 362 /** 363 * hif_ce_service_should_yield() - return true if the service is hogging the cpu 364 * @scn: hif context 365 * @ce_state: context of the copy engine being serviced 366 * 367 * Return: true if the service should yield 368 */ 369 bool hif_ce_service_should_yield(struct hif_softc *scn, 370 struct CE_state *ce_state) 371 { 372 bool yield, time_limit_reached, rxpkt_thresh_reached = 0; 373 374 time_limit_reached = qdf_time_sched_clock() > 375 ce_state->ce_service_yield_time ? 1 : 0; 376 377 if (!time_limit_reached) 378 rxpkt_thresh_reached = hif_max_num_receives_reached 379 (scn, ce_state->receive_count); 380 381 /* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes 382 * beyond MAX_NUM_OF_RECEIVES for NAPI backet calculation issue. This 383 * can happen in fast path handling as processing is happening in 384 * batches. 385 */ 386 if (rxpkt_thresh_reached) 387 ce_state->receive_count = MAX_NUM_OF_RECEIVES; 388 389 yield = time_limit_reached || rxpkt_thresh_reached; 390 391 if (yield && 392 ce_state->htt_rx_data && 393 hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) { 394 hif_napi_update_yield_stats(ce_state, 395 time_limit_reached, 396 rxpkt_thresh_reached); 397 } 398 399 return yield; 400 } 401 qdf_export_symbol(hif_ce_service_should_yield); 402 #endif 403 404 void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush) 405 { 406 struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; 407 struct CE_ring_state *src_ring = ce_state->src_ring; 408 struct hif_softc *scn = ce_state->scn; 409 410 if (force_flush) 411 ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT); 412 413 if (ce_ring_get_clear_event(src_ring, CE_RING_FLUSH_EVENT)) { 414 qdf_spin_lock_bh(&ce_state->ce_index_lock); 415 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, 416 src_ring->write_index); 417 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 418 419 src_ring->last_flush_ts = qdf_get_log_timestamp(); 420 hif_debug("flushed"); 421 } 422 } 423 424 /* Make sure this wrapper is called under ce_index_lock */ 425 void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl, 426 int coalesce) 427 { 428 struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; 429 struct CE_ring_state *src_ring = ce_state->src_ring; 430 struct hif_softc *scn = ce_state->scn; 431 432 if (!coalesce) 433 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, 434 src_ring->write_index); 435 } 436 437 /* 438 * Guts of ce_send, used by both ce_send and ce_sendlist_send. 439 * The caller takes responsibility for any needed locking. 440 */ 441 442 void war_ce_src_ring_write_idx_set(struct hif_softc *scn, 443 u32 ctrl_addr, unsigned int write_index) 444 { 445 if (hif_ce_war1) { 446 void __iomem *indicator_addr; 447 448 indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS; 449 450 if (!war1_allow_sleep 451 && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) { 452 hif_write32_mb(scn, indicator_addr, 453 (CDC_WAR_MAGIC_STR | write_index)); 454 } else { 455 unsigned long irq_flags; 456 457 local_irq_save(irq_flags); 458 hif_write32_mb(scn, indicator_addr, 1); 459 460 /* 461 * PCIE write waits for ACK in IPQ8K, there is no 462 * need to read back value. 
463 */ 464 (void)hif_read32_mb(scn, indicator_addr); 465 /* conservative */ 466 (void)hif_read32_mb(scn, indicator_addr); 467 468 CE_SRC_RING_WRITE_IDX_SET(scn, 469 ctrl_addr, write_index); 470 471 hif_write32_mb(scn, indicator_addr, 0); 472 local_irq_restore(irq_flags); 473 } 474 } else { 475 CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index); 476 } 477 } 478 479 qdf_export_symbol(war_ce_src_ring_write_idx_set); 480 481 QDF_STATUS 482 ce_send(struct CE_handle *copyeng, 483 void *per_transfer_context, 484 qdf_dma_addr_t buffer, 485 uint32_t nbytes, 486 uint32_t transfer_id, 487 uint32_t flags, 488 uint32_t user_flag) 489 { 490 struct CE_state *CE_state = (struct CE_state *)copyeng; 491 QDF_STATUS status; 492 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); 493 494 qdf_spin_lock_bh(&CE_state->ce_index_lock); 495 status = hif_state->ce_services->ce_send_nolock(copyeng, 496 per_transfer_context, buffer, nbytes, 497 transfer_id, flags, user_flag); 498 qdf_spin_unlock_bh(&CE_state->ce_index_lock); 499 500 return status; 501 } 502 qdf_export_symbol(ce_send); 503 504 unsigned int ce_sendlist_sizeof(void) 505 { 506 return sizeof(struct ce_sendlist); 507 } 508 509 void ce_sendlist_init(struct ce_sendlist *sendlist) 510 { 511 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist; 512 513 sl->num_items = 0; 514 } 515 516 QDF_STATUS 517 ce_sendlist_buf_add(struct ce_sendlist *sendlist, 518 qdf_dma_addr_t buffer, 519 uint32_t nbytes, 520 uint32_t flags, 521 uint32_t user_flags) 522 { 523 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist; 524 unsigned int num_items = sl->num_items; 525 struct ce_sendlist_item *item; 526 527 if (num_items >= CE_SENDLIST_ITEMS_MAX) { 528 QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX); 529 return QDF_STATUS_E_RESOURCES; 530 } 531 532 item = &sl->item[num_items]; 533 item->send_type = CE_SIMPLE_BUFFER_TYPE; 534 item->data = buffer; 535 item->u.nbytes = nbytes; 536 item->flags = flags; 537 item->user_flags = user_flags; 538 sl->num_items = num_items + 1; 539 return QDF_STATUS_SUCCESS; 540 } 541 542 QDF_STATUS 543 ce_sendlist_send(struct CE_handle *copyeng, 544 void *per_transfer_context, 545 struct ce_sendlist *sendlist, unsigned int transfer_id) 546 { 547 struct CE_state *CE_state = (struct CE_state *)copyeng; 548 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); 549 550 return hif_state->ce_services->ce_sendlist_send(copyeng, 551 per_transfer_context, sendlist, transfer_id); 552 } 553 554 #ifndef AH_NEED_TX_DATA_SWAP 555 #define AH_NEED_TX_DATA_SWAP 0 556 #endif 557 558 /** 559 * ce_batch_send() - sends a bunch of msdus at once 560 * @ce_tx_hdl : pointer to CE handle 561 * @msdu : list of msdus to be sent 562 * @transfer_id : transfer id 563 * @len : Downloaded length 564 * @sendhead : sendhead 565 * 566 * Assumption : Called with a list of MSDUs 567 * Function: 568 * For each msdu in the list 569 * 1. Send each msdu 570 * 2. Increment write index accordingly.
571 * 572 * Return: list of msdus not sent 573 */ 574 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu, 575 uint32_t transfer_id, u_int32_t len, uint32_t sendhead) 576 { 577 struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; 578 struct hif_softc *scn = ce_state->scn; 579 struct CE_ring_state *src_ring = ce_state->src_ring; 580 u_int32_t ctrl_addr = ce_state->ctrl_addr; 581 /* A_target_id_t targid = TARGID(scn);*/ 582 583 uint32_t nentries_mask = src_ring->nentries_mask; 584 uint32_t sw_index, write_index; 585 586 struct CE_src_desc *src_desc_base = 587 (struct CE_src_desc *)src_ring->base_addr_owner_space; 588 uint32_t *src_desc; 589 590 struct CE_src_desc lsrc_desc = {0}; 591 int deltacount = 0; 592 qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext; 593 594 DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr); 595 sw_index = src_ring->sw_index; 596 write_index = src_ring->write_index; 597 598 deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1); 599 600 while (msdu) { 601 tempnext = qdf_nbuf_next(msdu); 602 603 if (deltacount < 2) { 604 if (sendhead) 605 return msdu; 606 hif_err("Out of descriptors"); 607 src_ring->write_index = write_index; 608 war_ce_src_ring_write_idx_set(scn, ctrl_addr, 609 write_index); 610 611 sw_index = src_ring->sw_index; 612 write_index = src_ring->write_index; 613 614 deltacount = CE_RING_DELTA(nentries_mask, write_index, 615 sw_index-1); 616 if (!freelist) { 617 freelist = msdu; 618 hfreelist = msdu; 619 } else { 620 qdf_nbuf_set_next(freelist, msdu); 621 freelist = msdu; 622 } 623 qdf_nbuf_set_next(msdu, NULL); 624 msdu = tempnext; 625 continue; 626 } 627 628 src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, 629 write_index); 630 631 src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0); 632 633 lsrc_desc.meta_data = transfer_id; 634 if (len > msdu->len) 635 len = msdu->len; 636 lsrc_desc.nbytes = len; 637 /* Data packet is a byte stream, so disable byte swap */ 638 lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP; 639 lsrc_desc.gather = 0; /* For the last one, gather is not set */ 640 641 src_desc[1] = ((uint32_t *)&lsrc_desc)[1]; 642 643 644 src_ring->per_transfer_context[write_index] = msdu; 645 write_index = CE_RING_IDX_INCR(nentries_mask, write_index); 646 647 if (sendhead) 648 break; 649 qdf_nbuf_set_next(msdu, NULL); 650 msdu = tempnext; 651 652 } 653 654 655 src_ring->write_index = write_index; 656 war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index); 657 658 return hfreelist; 659 } 660 661 /** 662 * ce_update_tx_ring() - Advance sw index. 663 * @ce_tx_hdl : pointer to CE handle 664 * @num_htt_cmpls : htt completions received. 665 * 666 * Function: 667 * Increment the value of sw index of src ring 668 * according to number of htt completions 669 * received.
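 * Note: when DATA_CE_SW_INDEX_NO_INLINE_UPDATE is not defined this compiles to an empty stub, the assumption being that the sw index is then updated inline in the completion path.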
670 * 671 * Return: void 672 */ 673 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE 674 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls) 675 { 676 struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; 677 struct CE_ring_state *src_ring = ce_state->src_ring; 678 uint32_t nentries_mask = src_ring->nentries_mask; 679 /* 680 * Advance the s/w index: 681 * This effectively simulates completing the CE ring descriptors 682 */ 683 src_ring->sw_index = 684 CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index, 685 num_htt_cmpls); 686 } 687 #else 688 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls) 689 {} 690 #endif 691 692 /** 693 * ce_send_single() - sends 694 * @ce_tx_hdl : pointer to CE handle 695 * @msdu : msdu to be sent 696 * @transfer_id : transfer id 697 * @len : Downloaded length 698 * 699 * Function: 700 * 1. Send one msdu 701 * 2. Increment write index of src ring accordinlgy. 702 * 703 * Return: QDF_STATUS: CE sent status 704 */ 705 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu, 706 uint32_t transfer_id, u_int32_t len) 707 { 708 struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; 709 struct hif_softc *scn = ce_state->scn; 710 struct CE_ring_state *src_ring = ce_state->src_ring; 711 uint32_t ctrl_addr = ce_state->ctrl_addr; 712 /*A_target_id_t targid = TARGID(scn);*/ 713 714 uint32_t nentries_mask = src_ring->nentries_mask; 715 uint32_t sw_index, write_index; 716 717 struct CE_src_desc *src_desc_base = 718 (struct CE_src_desc *)src_ring->base_addr_owner_space; 719 uint32_t *src_desc; 720 721 struct CE_src_desc lsrc_desc = {0}; 722 enum hif_ce_event_type event_type; 723 724 DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr); 725 sw_index = src_ring->sw_index; 726 write_index = src_ring->write_index; 727 728 if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, 729 sw_index-1) < 1)) { 730 hif_err("ce send fail %d %d %d", nentries_mask, 731 write_index, sw_index); 732 return QDF_STATUS_E_RESOURCES; 733 } 734 735 src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index); 736 737 src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0); 738 739 lsrc_desc.meta_data = transfer_id; 740 lsrc_desc.nbytes = len; 741 /* Data packet is a byte stream, so disable byte swap */ 742 lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP; 743 lsrc_desc.gather = 0; /* For the last one, gather is not set */ 744 745 src_desc[1] = ((uint32_t *)&lsrc_desc)[1]; 746 747 748 src_ring->per_transfer_context[write_index] = msdu; 749 750 if (((struct CE_src_desc *)src_desc)->gather) 751 event_type = HIF_TX_GATHER_DESC_POST; 752 else if (qdf_unlikely(ce_state->state != CE_RUNNING)) 753 event_type = HIF_TX_DESC_SOFTWARE_POST; 754 else 755 event_type = HIF_TX_DESC_POST; 756 757 hif_record_ce_desc_event(scn, ce_state->id, event_type, 758 (union ce_desc *)src_desc, msdu, 759 write_index, len); 760 761 write_index = CE_RING_IDX_INCR(nentries_mask, write_index); 762 763 src_ring->write_index = write_index; 764 765 war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index); 766 767 return QDF_STATUS_SUCCESS; 768 } 769 770 /** 771 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine 772 * @copyeng: copy engine handle 773 * @per_recv_context: virtual address of the nbuf 774 * @buffer: physical address of the nbuf 775 * 776 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued 777 */ 778 QDF_STATUS 779 ce_recv_buf_enqueue(struct CE_handle *copyeng, 780 void *per_recv_context, qdf_dma_addr_t buffer) 781 { 782 struct CE_state *CE_state 
= (struct CE_state *)copyeng; 783 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); 784 785 return hif_state->ce_services->ce_recv_buf_enqueue(copyeng, 786 per_recv_context, buffer); 787 } 788 qdf_export_symbol(ce_recv_buf_enqueue); 789 790 void 791 ce_send_watermarks_set(struct CE_handle *copyeng, 792 unsigned int low_alert_nentries, 793 unsigned int high_alert_nentries) 794 { 795 struct CE_state *CE_state = (struct CE_state *)copyeng; 796 uint32_t ctrl_addr = CE_state->ctrl_addr; 797 struct hif_softc *scn = CE_state->scn; 798 799 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries); 800 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries); 801 } 802 803 void 804 ce_recv_watermarks_set(struct CE_handle *copyeng, 805 unsigned int low_alert_nentries, 806 unsigned int high_alert_nentries) 807 { 808 struct CE_state *CE_state = (struct CE_state *)copyeng; 809 uint32_t ctrl_addr = CE_state->ctrl_addr; 810 struct hif_softc *scn = CE_state->scn; 811 812 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 813 low_alert_nentries); 814 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, 815 high_alert_nentries); 816 } 817 818 unsigned int ce_send_entries_avail(struct CE_handle *copyeng) 819 { 820 struct CE_state *CE_state = (struct CE_state *)copyeng; 821 struct CE_ring_state *src_ring = CE_state->src_ring; 822 unsigned int nentries_mask = src_ring->nentries_mask; 823 unsigned int sw_index; 824 unsigned int write_index; 825 826 qdf_spin_lock(&CE_state->ce_index_lock); 827 sw_index = src_ring->sw_index; 828 write_index = src_ring->write_index; 829 qdf_spin_unlock(&CE_state->ce_index_lock); 830 831 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); 832 } 833 834 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng) 835 { 836 struct CE_state *CE_state = (struct CE_state *)copyeng; 837 struct CE_ring_state *dest_ring = CE_state->dest_ring; 838 unsigned int nentries_mask = dest_ring->nentries_mask; 839 unsigned int sw_index; 840 unsigned int write_index; 841 842 qdf_spin_lock(&CE_state->ce_index_lock); 843 sw_index = dest_ring->sw_index; 844 write_index = dest_ring->write_index; 845 qdf_spin_unlock(&CE_state->ce_index_lock); 846 847 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); 848 } 849 850 /* 851 * Guts of ce_completed_recv_next. 852 * The caller takes responsibility for any necessary locking. 
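 * (The ce_completed_recv_next() wrapper below takes ce_index_lock itself and delegates to the target-specific ce_completed_recv_next_nolock service.)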
853 */ 854 QDF_STATUS 855 ce_completed_recv_next(struct CE_handle *copyeng, 856 void **per_CE_contextp, 857 void **per_transfer_contextp, 858 qdf_dma_addr_t *bufferp, 859 unsigned int *nbytesp, 860 unsigned int *transfer_idp, unsigned int *flagsp) 861 { 862 struct CE_state *CE_state = (struct CE_state *)copyeng; 863 QDF_STATUS status; 864 struct hif_softc *scn = CE_state->scn; 865 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 866 struct ce_ops *ce_services; 867 868 ce_services = hif_state->ce_services; 869 qdf_spin_lock_bh(&CE_state->ce_index_lock); 870 status = 871 ce_services->ce_completed_recv_next_nolock(CE_state, 872 per_CE_contextp, per_transfer_contextp, bufferp, 873 nbytesp, transfer_idp, flagsp); 874 qdf_spin_unlock_bh(&CE_state->ce_index_lock); 875 876 return status; 877 } 878 879 QDF_STATUS 880 ce_revoke_recv_next(struct CE_handle *copyeng, 881 void **per_CE_contextp, 882 void **per_transfer_contextp, qdf_dma_addr_t *bufferp) 883 { 884 struct CE_state *CE_state = (struct CE_state *)copyeng; 885 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); 886 887 return hif_state->ce_services->ce_revoke_recv_next(copyeng, 888 per_CE_contextp, per_transfer_contextp, bufferp); 889 } 890 891 QDF_STATUS 892 ce_cancel_send_next(struct CE_handle *copyeng, 893 void **per_CE_contextp, 894 void **per_transfer_contextp, 895 qdf_dma_addr_t *bufferp, 896 unsigned int *nbytesp, 897 unsigned int *transfer_idp, 898 uint32_t *toeplitz_hash_result) 899 { 900 struct CE_state *CE_state = (struct CE_state *)copyeng; 901 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); 902 903 return hif_state->ce_services->ce_cancel_send_next 904 (copyeng, per_CE_contextp, per_transfer_contextp, 905 bufferp, nbytesp, transfer_idp, toeplitz_hash_result); 906 } 907 qdf_export_symbol(ce_cancel_send_next); 908 909 QDF_STATUS 910 ce_completed_send_next(struct CE_handle *copyeng, 911 void **per_CE_contextp, 912 void **per_transfer_contextp, 913 qdf_dma_addr_t *bufferp, 914 unsigned int *nbytesp, 915 unsigned int *transfer_idp, 916 unsigned int *sw_idx, 917 unsigned int *hw_idx, 918 unsigned int *toeplitz_hash_result) 919 { 920 struct CE_state *CE_state = (struct CE_state *)copyeng; 921 struct hif_softc *scn = CE_state->scn; 922 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 923 struct ce_ops *ce_services; 924 QDF_STATUS status; 925 926 ce_services = hif_state->ce_services; 927 qdf_spin_lock_bh(&CE_state->ce_index_lock); 928 status = 929 ce_services->ce_completed_send_next_nolock(CE_state, 930 per_CE_contextp, per_transfer_contextp, 931 bufferp, nbytesp, transfer_idp, sw_idx, 932 hw_idx, toeplitz_hash_result); 933 qdf_spin_unlock_bh(&CE_state->ce_index_lock); 934 935 return status; 936 } 937 938 #ifdef ATH_11AC_TXCOMPACT 939 /* CE engine descriptor reap 940 * Similar to ce_per_engine_service , Only difference is ce_per_engine_service 941 * does receive and reaping of completed descriptor , 942 * This function only handles reaping of Tx complete descriptor. 943 * The Function is called from threshold reap poll routine 944 * hif_send_complete_check so should not contain receive functionality 945 * within it . 
946 */ 947 948 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id) 949 { 950 void *CE_context; 951 void *transfer_context; 952 qdf_dma_addr_t buf; 953 unsigned int nbytes; 954 unsigned int id; 955 unsigned int sw_idx, hw_idx; 956 uint32_t toeplitz_hash_result; 957 struct CE_state *CE_state = scn->ce_id_to_state[ce_id]; 958 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 959 960 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 961 return; 962 963 hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY, 964 NULL, NULL, 0, 0); 965 966 /* Since this function is called from both user context and 967 * tasklet context the spinlock has to lock the bottom halves. 968 * This fix assumes that ATH_11AC_TXCOMPACT flag is always 969 * enabled in TX polling mode. If this is not the case, more 970 * bottom halve spin lock changes are needed. Due to data path 971 * performance concern, after internal discussion we've decided 972 * to make minimum change, i.e., only address the issue occurred 973 * in this function. The possible negative effect of this minimum 974 * change is that, in the future, if some other function will also 975 * be opened to let the user context to use, those cases need to be 976 * addressed by change spin_lock to spin_lock_bh also. 977 */ 978 979 qdf_spin_lock_bh(&CE_state->ce_index_lock); 980 981 if (CE_state->send_cb) { 982 { 983 struct ce_ops *ce_services = hif_state->ce_services; 984 /* Pop completed send buffers and call the 985 * registered send callback for each 986 */ 987 while (ce_services->ce_completed_send_next_nolock 988 (CE_state, &CE_context, 989 &transfer_context, &buf, 990 &nbytes, &id, &sw_idx, &hw_idx, 991 &toeplitz_hash_result) == 992 QDF_STATUS_SUCCESS) { 993 if (ce_id != CE_HTT_H2T_MSG) { 994 qdf_spin_unlock_bh( 995 &CE_state->ce_index_lock); 996 CE_state->send_cb( 997 (struct CE_handle *) 998 CE_state, CE_context, 999 transfer_context, buf, 1000 nbytes, id, sw_idx, hw_idx, 1001 toeplitz_hash_result); 1002 qdf_spin_lock_bh( 1003 &CE_state->ce_index_lock); 1004 } else { 1005 struct HIF_CE_pipe_info *pipe_info = 1006 (struct HIF_CE_pipe_info *) 1007 CE_context; 1008 1009 qdf_spin_lock_bh(&pipe_info-> 1010 completion_freeq_lock); 1011 pipe_info->num_sends_allowed++; 1012 qdf_spin_unlock_bh(&pipe_info-> 1013 completion_freeq_lock); 1014 } 1015 } 1016 } 1017 } 1018 1019 qdf_spin_unlock_bh(&CE_state->ce_index_lock); 1020 1021 hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT, 1022 NULL, NULL, 0, 0); 1023 Q_TARGET_ACCESS_END(scn); 1024 } 1025 1026 #endif /*ATH_11AC_TXCOMPACT */ 1027 1028 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST 1029 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode) 1030 { 1031 // QDF_IS_EPPING_ENABLED is pre lithium feature 1032 // CE4 completion is enabled only lithium and later 1033 // so no need to check for EPPING 1034 return true; 1035 } 1036 1037 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */ 1038 1039 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode) 1040 { 1041 if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode)) 1042 return true; 1043 else 1044 return false; 1045 } 1046 1047 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */ 1048 1049 /* 1050 * ce_engine_service_reg: 1051 * 1052 * Called from ce_per_engine_service and goes through the regular interrupt 1053 * handling that does not involve the WLAN fast path feature. 
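 * Runs with CE_state->ce_index_lock held (taken by ce_per_engine_service); the lock is dropped around each registered completion callback and re-acquired afterwards.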
1054 * 1055 * Returns void 1056 */ 1057 void ce_engine_service_reg(struct hif_softc *scn, int CE_id) 1058 { 1059 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1060 uint32_t ctrl_addr = CE_state->ctrl_addr; 1061 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1062 void *CE_context; 1063 void *transfer_context; 1064 qdf_dma_addr_t buf; 1065 unsigned int nbytes; 1066 unsigned int id; 1067 unsigned int flags; 1068 unsigned int more_comp_cnt = 0; 1069 unsigned int more_snd_comp_cnt = 0; 1070 unsigned int sw_idx, hw_idx; 1071 uint32_t toeplitz_hash_result; 1072 uint32_t mode = hif_get_conparam(scn); 1073 1074 more_completions: 1075 if (CE_state->recv_cb) { 1076 1077 /* Pop completed recv buffers and call 1078 * the registered recv callback for each 1079 */ 1080 while (hif_state->ce_services->ce_completed_recv_next_nolock 1081 (CE_state, &CE_context, &transfer_context, 1082 &buf, &nbytes, &id, &flags) == 1083 QDF_STATUS_SUCCESS) { 1084 qdf_spin_unlock(&CE_state->ce_index_lock); 1085 CE_state->recv_cb((struct CE_handle *)CE_state, 1086 CE_context, transfer_context, buf, 1087 nbytes, id, flags); 1088 1089 qdf_spin_lock(&CE_state->ce_index_lock); 1090 /* 1091 * EV #112693 - 1092 * [Peregrine][ES1][WB342][Win8x86][Performance] 1093 * BSoD_0x133 occurred in VHT80 UDP_DL 1094 * Break out DPC by force if number of loops in 1095 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES 1096 * to avoid spending too long time in 1097 * DPC for each interrupt handling. Schedule another 1098 * DPC to avoid data loss if we had taken 1099 * force-break action before apply to Windows OS 1100 * only currently, Linux/MAC os can expand to their 1101 * platform if necessary 1102 */ 1103 1104 /* Break the receive processes by 1105 * force if force_break set up 1106 */ 1107 if (qdf_unlikely(CE_state->force_break)) { 1108 qdf_atomic_set(&CE_state->rx_pending, 1); 1109 return; 1110 } 1111 } 1112 } 1113 1114 /* 1115 * Attention: We may experience potential infinite loop for below 1116 * While Loop during Sending Stress test. 
1117 * Resolve the same way as Receive Case (Refer to EV #112693) 1118 */ 1119 1120 if (CE_state->send_cb) { 1121 /* Pop completed send buffers and call 1122 * the registered send callback for each 1123 */ 1124 1125 #ifdef ATH_11AC_TXCOMPACT 1126 while (hif_state->ce_services->ce_completed_send_next_nolock 1127 (CE_state, &CE_context, 1128 &transfer_context, &buf, &nbytes, 1129 &id, &sw_idx, &hw_idx, 1130 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { 1131 1132 if (check_ce_id_and_epping_enabled(CE_id, mode)) { 1133 qdf_spin_unlock(&CE_state->ce_index_lock); 1134 CE_state->send_cb((struct CE_handle *)CE_state, 1135 CE_context, transfer_context, 1136 buf, nbytes, id, sw_idx, 1137 hw_idx, toeplitz_hash_result); 1138 qdf_spin_lock(&CE_state->ce_index_lock); 1139 } else { 1140 struct HIF_CE_pipe_info *pipe_info = 1141 (struct HIF_CE_pipe_info *)CE_context; 1142 1143 qdf_spin_lock_bh(&pipe_info-> 1144 completion_freeq_lock); 1145 pipe_info->num_sends_allowed++; 1146 qdf_spin_unlock_bh(&pipe_info-> 1147 completion_freeq_lock); 1148 } 1149 } 1150 #else /*ATH_11AC_TXCOMPACT */ 1151 while (hif_state->ce_services->ce_completed_send_next_nolock 1152 (CE_state, &CE_context, 1153 &transfer_context, &buf, &nbytes, 1154 &id, &sw_idx, &hw_idx, 1155 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { 1156 qdf_spin_unlock(&CE_state->ce_index_lock); 1157 CE_state->send_cb((struct CE_handle *)CE_state, 1158 CE_context, transfer_context, buf, 1159 nbytes, id, sw_idx, hw_idx, 1160 toeplitz_hash_result); 1161 qdf_spin_lock(&CE_state->ce_index_lock); 1162 } 1163 #endif /*ATH_11AC_TXCOMPACT */ 1164 } 1165 1166 more_watermarks: 1167 if (CE_state->misc_cbs) { 1168 if (CE_state->watermark_cb && 1169 hif_state->ce_services->watermark_int(CE_state, 1170 &flags)) { 1171 qdf_spin_unlock(&CE_state->ce_index_lock); 1172 /* Convert HW IS bits to software flags */ 1173 CE_state->watermark_cb((struct CE_handle *)CE_state, 1174 CE_state->wm_context, flags); 1175 qdf_spin_lock(&CE_state->ce_index_lock); 1176 } 1177 } 1178 1179 /* 1180 * Clear the misc interrupts (watermark) that were handled above, 1181 * and that will be checked again below. 1182 * Clear and check for copy-complete interrupts again, just in case 1183 * more copy completions happened while the misc interrupts were being 1184 * handled. 1185 */ 1186 if (!ce_srng_based(scn) && !CE_state->msi_supported) { 1187 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) { 1188 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr, 1189 CE_WATERMARK_MASK | 1190 HOST_IS_COPY_COMPLETE_MASK); 1191 } else { 1192 qdf_atomic_set(&CE_state->rx_pending, 0); 1193 hif_err_rl("%s: target access is not allowed", 1194 __func__); 1195 return; 1196 } 1197 } 1198 1199 /* 1200 * Now that per-engine interrupts are cleared, verify that 1201 * no recv interrupts arrive while processing send interrupts, 1202 * and no recv or send interrupts happened while processing 1203 * misc interrupts.Go back and check again.Keep checking until 1204 * we find no more events to process. 
1205 */ 1206 if (CE_state->recv_cb && 1207 hif_state->ce_services->ce_recv_entries_done_nolock(scn, 1208 CE_state)) { 1209 if (QDF_IS_EPPING_ENABLED(mode) || 1210 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) { 1211 goto more_completions; 1212 } else { 1213 if (!ce_srng_based(scn) && 1214 !CE_state->batch_intr_supported) { 1215 hif_err_rl( 1216 "Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x", 1217 CE_state->id, 1218 CE_state->dest_ring->nentries_mask, 1219 CE_state->dest_ring->sw_index, 1220 CE_DEST_RING_READ_IDX_GET(scn, 1221 CE_state->ctrl_addr)); 1222 } 1223 } 1224 } 1225 1226 if (CE_state->send_cb && 1227 hif_state->ce_services->ce_send_entries_done_nolock(scn, 1228 CE_state)) { 1229 if (QDF_IS_EPPING_ENABLED(mode) || 1230 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) { 1231 goto more_completions; 1232 } else { 1233 if (!ce_srng_based(scn) && 1234 !CE_state->batch_intr_supported) { 1235 hif_err_rl( 1236 "Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x", 1237 CE_state->id, 1238 CE_state->src_ring->nentries_mask, 1239 CE_state->src_ring->sw_index, 1240 CE_state->src_ring->hw_index, 1241 CE_state->src_ring->write_index, 1242 CE_SRC_RING_READ_IDX_GET(scn, 1243 CE_state->ctrl_addr)); 1244 } 1245 } 1246 } 1247 1248 if (CE_state->misc_cbs && CE_state->watermark_cb) { 1249 if (hif_state->ce_services->watermark_int(CE_state, &flags)) 1250 goto more_watermarks; 1251 } 1252 1253 qdf_atomic_set(&CE_state->rx_pending, 0); 1254 } 1255 1256 #ifdef WLAN_TRACEPOINTS 1257 /** 1258 * ce_trace_tasklet_sched_latency() - Trace ce tasklet scheduling 1259 * latency 1260 * @ce_state: CE context 1261 * 1262 * Return: None 1263 */ 1264 static inline 1265 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state) 1266 { 1267 qdf_trace_dp_ce_tasklet_sched_latency(ce_state->id, 1268 ce_state->ce_service_start_time - 1269 ce_state->ce_tasklet_sched_time); 1270 } 1271 #else 1272 static inline 1273 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state) 1274 { 1275 } 1276 #endif 1277 1278 /* 1279 * Guts of interrupt handler for per-engine interrupts on a particular CE. 1280 * 1281 * Invokes registered callbacks for recv_complete, 1282 * send_complete, and watermarks. 
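 * The work done per call is bounded: ce_per_engine_service() computes a yield deadline (ce_service_yield_time), and the service routine is expected to stop once hif_ce_service_should_yield() reports that the deadline or receive budget has been reached.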
1283 * 1284 * Returns: number of messages processed 1285 */ 1286 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id) 1287 { 1288 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1289 1290 if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data)) 1291 return CE_state->receive_count; 1292 1293 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { 1294 hif_err("[premature rc=0]"); 1295 return 0; /* no work done */ 1296 } 1297 1298 /* Clear force_break flag and re-initialize receive_count to 0 */ 1299 CE_state->receive_count = 0; 1300 CE_state->force_break = 0; 1301 CE_state->ce_service_start_time = qdf_time_sched_clock(); 1302 CE_state->ce_service_yield_time = 1303 CE_state->ce_service_start_time + 1304 hif_get_ce_service_max_yield_time( 1305 (struct hif_opaque_softc *)scn); 1306 1307 ce_trace_tasklet_sched_latency(CE_state); 1308 1309 qdf_spin_lock(&CE_state->ce_index_lock); 1310 1311 CE_state->service(scn, CE_id); 1312 1313 qdf_spin_unlock(&CE_state->ce_index_lock); 1314 1315 if (Q_TARGET_ACCESS_END(scn) < 0) 1316 hif_err("<--[premature rc=%d]", CE_state->receive_count); 1317 return CE_state->receive_count; 1318 } 1319 qdf_export_symbol(ce_per_engine_service); 1320 1321 /* 1322 * Handler for per-engine interrupts on ALL active CEs. 1323 * This is used in cases where the system is sharing a 1324 * single interrupt for all CEs 1325 */ 1326 1327 void ce_per_engine_service_any(int irq, struct hif_softc *scn) 1328 { 1329 int CE_id; 1330 uint32_t intr_summary; 1331 1332 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1333 return; 1334 1335 if (!qdf_atomic_read(&scn->tasklet_from_intr)) { 1336 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { 1337 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1338 1339 if (qdf_atomic_read(&CE_state->rx_pending)) { 1340 qdf_atomic_set(&CE_state->rx_pending, 0); 1341 ce_per_engine_service(scn, CE_id); 1342 } 1343 } 1344 1345 Q_TARGET_ACCESS_END(scn); 1346 return; 1347 } 1348 1349 intr_summary = CE_INTERRUPT_SUMMARY(scn); 1350 1351 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) { 1352 if (intr_summary & (1 << CE_id)) 1353 intr_summary &= ~(1 << CE_id); 1354 else 1355 continue; /* no intr pending on this CE */ 1356 1357 ce_per_engine_service(scn, CE_id); 1358 } 1359 1360 Q_TARGET_ACCESS_END(scn); 1361 } 1362 1363 /*Iterate the CE_state list and disable the compl interrupt 1364 * if it has been registered already. 1365 */ 1366 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn) 1367 { 1368 int CE_id; 1369 1370 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1371 return; 1372 1373 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { 1374 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1375 uint32_t ctrl_addr = CE_state->ctrl_addr; 1376 1377 /* if the interrupt is currently enabled, disable it */ 1378 if (!CE_state->disable_copy_compl_intr 1379 && (CE_state->send_cb || CE_state->recv_cb)) 1380 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); 1381 1382 if (CE_state->watermark_cb) 1383 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr); 1384 } 1385 Q_TARGET_ACCESS_END(scn); 1386 } 1387 1388 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn) 1389 { 1390 int CE_id; 1391 1392 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1393 return; 1394 1395 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { 1396 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1397 uint32_t ctrl_addr = CE_state->ctrl_addr; 1398 1399 /* 1400 * If the CE is supposed to have copy complete interrupts 1401 * enabled (i.e. 
there is a callback registered, and the 1402 * "disable" flag is not set), then re-enable the interrupt. 1403 */ 1404 if (!CE_state->disable_copy_compl_intr 1405 && (CE_state->send_cb || CE_state->recv_cb)) 1406 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr); 1407 1408 if (CE_state->watermark_cb) 1409 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr); 1410 } 1411 Q_TARGET_ACCESS_END(scn); 1412 } 1413 1414 /** 1415 * ce_send_cb_register(): register completion handler 1416 * @copyeng: CE_state representing the ce we are adding the behavior to 1417 * @fn_ptr: callback that the ce should use when processing tx completions 1418 * @ce_send_context: context to pass back in the callback 1419 * @disable_interrupts: whether completion interrupts should be disabled 1420 * 1421 * Caller should guarantee that no transactions are in progress before 1422 * switching the callback function. 1423 * 1424 * Registers the send context before the fn pointer so that if the cb is valid, 1425 * the context is valid. 1426 * 1427 * Beware that currently this function will enable completion interrupts. 1428 */ 1429 void 1430 ce_send_cb_register(struct CE_handle *copyeng, 1431 ce_send_cb fn_ptr, 1432 void *ce_send_context, int disable_interrupts) 1433 { 1434 struct CE_state *CE_state = (struct CE_state *)copyeng; 1435 struct hif_softc *scn; 1436 struct HIF_CE_state *hif_state; 1437 1438 if (!CE_state) { 1439 hif_err("Error CE state = NULL"); 1440 return; 1441 } 1442 scn = CE_state->scn; 1443 hif_state = HIF_GET_CE_STATE(scn); 1444 if (!hif_state) { 1445 hif_err("Error HIF state = NULL"); 1446 return; 1447 } 1448 CE_state->send_context = ce_send_context; 1449 CE_state->send_cb = fn_ptr; 1450 hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, 1451 disable_interrupts); 1452 } 1453 qdf_export_symbol(ce_send_cb_register); 1454 1455 /** 1456 * ce_recv_cb_register(): register completion handler 1457 * @copyeng: CE_state representing the ce we are adding the behavior to 1458 * @fn_ptr: callback that the ce should use when processing rx completions 1459 * @CE_recv_context: context to pass back in the callback 1460 * @disable_interrupts: whether completion interrupts should be disabled 1461 * 1462 * Registers the recv context before the fn pointer so that if the cb is valid, 1463 * the context is valid. 1464 * 1465 * Caller should guarantee that no transactions are in progress before 1466 * switching the callback function.
1467 */ 1468 void 1469 ce_recv_cb_register(struct CE_handle *copyeng, 1470 CE_recv_cb fn_ptr, 1471 void *CE_recv_context, int disable_interrupts) 1472 { 1473 struct CE_state *CE_state = (struct CE_state *)copyeng; 1474 struct hif_softc *scn; 1475 struct HIF_CE_state *hif_state; 1476 1477 if (!CE_state) { 1478 hif_err("ERROR CE state = NULL"); 1479 return; 1480 } 1481 scn = CE_state->scn; 1482 hif_state = HIF_GET_CE_STATE(scn); 1483 if (!hif_state) { 1484 hif_err("Error HIF state = NULL"); 1485 return; 1486 } 1487 CE_state->recv_context = CE_recv_context; 1488 CE_state->recv_cb = fn_ptr; 1489 hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, 1490 disable_interrupts); 1491 } 1492 qdf_export_symbol(ce_recv_cb_register); 1493 1494 /** 1495 * ce_watermark_cb_register(): register completion handler 1496 * @copyeng: CE_state representing the ce we are adding the behavior to 1497 * @fn_ptr: callback that the ce should use when processing watermark events 1498 * @CE_wm_context: context to pass back in the callback 1499 * 1500 * Caller should guarantee that no watermark events are being processed before 1501 * switching the callback function. 1502 */ 1503 void 1504 ce_watermark_cb_register(struct CE_handle *copyeng, 1505 CE_watermark_cb fn_ptr, void *CE_wm_context) 1506 { 1507 struct CE_state *CE_state = (struct CE_state *)copyeng; 1508 struct hif_softc *scn = CE_state->scn; 1509 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1510 1511 CE_state->watermark_cb = fn_ptr; 1512 CE_state->wm_context = CE_wm_context; 1513 hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, 1514 0); 1515 if (fn_ptr) 1516 CE_state->misc_cbs = 1; 1517 } 1518 1519 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT 1520 void 1521 ce_register_custom_cb(struct CE_handle *copyeng, void (*custom_cb)(void *), 1522 void *custom_cb_context) 1523 { 1524 struct CE_state *CE_state = (struct CE_state *)copyeng; 1525 1526 CE_state->custom_cb = custom_cb; 1527 CE_state->custom_cb_context = custom_cb_context; 1528 qdf_atomic_init(&CE_state->custom_cb_pending); 1529 } 1530 1531 void 1532 ce_unregister_custom_cb(struct CE_handle *copyeng) 1533 { 1534 struct CE_state *CE_state = (struct CE_state *)copyeng; 1535 1536 qdf_assert_always(!qdf_atomic_read(&CE_state->custom_cb_pending)); 1537 CE_state->custom_cb = NULL; 1538 CE_state->custom_cb_context = NULL; 1539 } 1540 1541 void 1542 ce_enable_custom_cb(struct CE_handle *copyeng) 1543 { 1544 struct CE_state *CE_state = (struct CE_state *)copyeng; 1545 int32_t custom_cb_pending; 1546 1547 qdf_assert_always(CE_state->custom_cb); 1548 qdf_assert_always(CE_state->custom_cb_context); 1549 1550 custom_cb_pending = qdf_atomic_inc_return(&CE_state->custom_cb_pending); 1551 qdf_assert_always(custom_cb_pending >= 1); 1552 } 1553 1554 void 1555 ce_disable_custom_cb(struct CE_handle *copyeng) 1556 { 1557 struct CE_state *CE_state = (struct CE_state *)copyeng; 1558 1559 qdf_assert_always(CE_state->custom_cb); 1560 qdf_assert_always(CE_state->custom_cb_context); 1561 1562 qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending); 1563 } 1564 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */ 1565 1566 bool ce_get_rx_pending(struct hif_softc *scn) 1567 { 1568 int CE_id; 1569 1570 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { 1571 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1572 1573 if (qdf_atomic_read(&CE_state->rx_pending)) 1574 return true; 1575 } 1576 1577 return false; 1578 } 1579 1580 /** 1581 * ce_check_rx_pending() - ce_check_rx_pending 1582 * @CE_state: context of the copy engine 
to check 1583 * 1584 * Return: true if the per-engine service 1585 * didn't process all the rx descriptors. 1586 */ 1587 bool ce_check_rx_pending(struct CE_state *CE_state) 1588 { 1589 if (qdf_atomic_read(&CE_state->rx_pending)) 1590 return true; 1591 else 1592 return false; 1593 } 1594 qdf_export_symbol(ce_check_rx_pending); 1595 1596 #ifdef IPA_OFFLOAD 1597 #ifdef QCN7605_SUPPORT 1598 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state) 1599 { 1600 u_int32_t ctrl_addr = CE_state->ctrl_addr; 1601 struct hif_softc *scn = CE_state->scn; 1602 qdf_dma_addr_t wr_index_addr; 1603 1604 wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr); 1605 return wr_index_addr; 1606 } 1607 #else 1608 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state) 1609 { 1610 struct hif_softc *scn = CE_state->scn; 1611 qdf_dma_addr_t wr_index_addr; 1612 1613 wr_index_addr = CE_BASE_ADDRESS(CE_state->id) + 1614 SR_WR_INDEX_ADDRESS; 1615 return wr_index_addr; 1616 } 1617 #endif 1618 1619 /** 1620 * ce_ipa_get_resource() - get uc resource on copyengine 1621 * @ce: copyengine context 1622 * @ce_sr: copyengine source ring resource info 1623 * @ce_sr_ring_size: copyengine source ring size 1624 * @ce_reg_paddr: copyengine register physical address 1625 * 1626 * Copy engine should release resource to micro controller. 1627 * Micro controller needs: 1628 * - Copy engine source descriptor base address 1629 * - Copy engine source descriptor size 1630 * - PCI BAR address to access copy engine register 1631 * 1632 * Return: None 1633 */ 1634 void ce_ipa_get_resource(struct CE_handle *ce, 1635 qdf_shared_mem_t **ce_sr, 1636 uint32_t *ce_sr_ring_size, 1637 qdf_dma_addr_t *ce_reg_paddr) 1638 { 1639 struct CE_state *CE_state = (struct CE_state *)ce; 1640 uint32_t ring_loop; 1641 struct CE_src_desc *ce_desc; 1642 qdf_dma_addr_t phy_mem_base; 1643 struct hif_softc *scn = CE_state->scn; 1644 1645 if (CE_UNUSED == CE_state->state) { 1646 *qdf_mem_get_dma_addr_ptr(scn->qdf_dev, 1647 &CE_state->scn->ipa_ce_ring->mem_info) = 0; 1648 *ce_sr_ring_size = 0; 1649 return; 1650 } 1651 1652 /* Update default value for descriptor */ 1653 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries; 1654 ring_loop++) { 1655 ce_desc = (struct CE_src_desc *) 1656 ((char *)CE_state->src_ring->base_addr_owner_space + 1657 ring_loop * (sizeof(struct CE_src_desc))); 1658 CE_IPA_RING_INIT(ce_desc); 1659 } 1660 1661 /* Get BAR address */ 1662 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base); 1663 1664 *ce_sr = CE_state->scn->ipa_ce_ring; 1665 *ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries * 1666 sizeof(struct CE_src_desc)); 1667 *ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state); 1668 1669 } 1670 1671 #endif /* IPA_OFFLOAD */ 1672 1673 #ifdef HIF_CE_DEBUG_DATA_BUF 1674 /** 1675 * hif_dump_desc_data_buf() - dump the data recorded for a ce descriptor event 1676 * @buf: buffer to copy to 1677 * @pos: Current position till which the buf is filled 1678 * @data: Data to be copied 1679 * @data_len: Length of the data to be copied * * Return: updated position in @buf 1680 */ 1681 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos, 1682 uint8_t *data, uint32_t data_len) 1683 { 1684 pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n", 1685 CE_DEBUG_MAX_DATA_BUF_SIZE); 1686 1687 if ((data_len > 0) && data) { 1688 if (data_len < 16) { 1689 hex_dump_to_buffer(data, 1690 CE_DEBUG_DATA_PER_ROW, 1691 16, 1, buf + pos, 1692 (ssize_t)PAGE_SIZE - pos, 1693 false); 1694 pos += CE_DEBUG_PRINT_BUF_SIZE(data_len); 1695 pos += snprintf(buf +
pos, PAGE_SIZE - pos, "\n"); 1696 } else { 1697 uint32_t rows = (data_len / 16) + 1; 1698 uint32_t row = 0; 1699 1700 for (row = 0; row < rows; row++) { 1701 hex_dump_to_buffer(data + (row * 16), 1702 CE_DEBUG_DATA_PER_ROW, 1703 16, 1, buf + pos, 1704 (ssize_t)PAGE_SIZE 1705 - pos, false); 1706 pos += 1707 CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW); 1708 pos += snprintf(buf + pos, PAGE_SIZE - pos, 1709 "\n"); 1710 } 1711 } 1712 } 1713 1714 return pos; 1715 } 1716 #endif 1717 1718 /* 1719 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked 1720 * for defined here 1721 */ 1722 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 1723 static const char *ce_event_type_to_str(enum hif_ce_event_type type) 1724 { 1725 switch (type) { 1726 case HIF_RX_DESC_POST: 1727 return "HIF_RX_DESC_POST"; 1728 case HIF_RX_DESC_COMPLETION: 1729 return "HIF_RX_DESC_COMPLETION"; 1730 case HIF_TX_GATHER_DESC_POST: 1731 return "HIF_TX_GATHER_DESC_POST"; 1732 case HIF_TX_DESC_POST: 1733 return "HIF_TX_DESC_POST"; 1734 case HIF_TX_DESC_SOFTWARE_POST: 1735 return "HIF_TX_DESC_SOFTWARE_POST"; 1736 case HIF_TX_DESC_COMPLETION: 1737 return "HIF_TX_DESC_COMPLETION"; 1738 case FAST_RX_WRITE_INDEX_UPDATE: 1739 return "FAST_RX_WRITE_INDEX_UPDATE"; 1740 case FAST_RX_SOFTWARE_INDEX_UPDATE: 1741 return "FAST_RX_SOFTWARE_INDEX_UPDATE"; 1742 case FAST_TX_WRITE_INDEX_UPDATE: 1743 return "FAST_TX_WRITE_INDEX_UPDATE"; 1744 case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: 1745 return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE"; 1746 case FAST_TX_SOFTWARE_INDEX_UPDATE: 1747 return "FAST_TX_SOFTWARE_INDEX_UPDATE"; 1748 case RESUME_WRITE_INDEX_UPDATE: 1749 return "RESUME_WRITE_INDEX_UPDATE"; 1750 case HIF_IRQ_EVENT: 1751 return "HIF_IRQ_EVENT"; 1752 case HIF_CE_TASKLET_ENTRY: 1753 return "HIF_CE_TASKLET_ENTRY"; 1754 case HIF_CE_TASKLET_RESCHEDULE: 1755 return "HIF_CE_TASKLET_RESCHEDULE"; 1756 case HIF_CE_TASKLET_EXIT: 1757 return "HIF_CE_TASKLET_EXIT"; 1758 case HIF_CE_REAP_ENTRY: 1759 return "HIF_CE_REAP_ENTRY"; 1760 case HIF_CE_REAP_EXIT: 1761 return "HIF_CE_REAP_EXIT"; 1762 case NAPI_SCHEDULE: 1763 return "NAPI_SCHEDULE"; 1764 case NAPI_POLL_ENTER: 1765 return "NAPI_POLL_ENTER"; 1766 case NAPI_COMPLETE: 1767 return "NAPI_COMPLETE"; 1768 case NAPI_POLL_EXIT: 1769 return "NAPI_POLL_EXIT"; 1770 case HIF_RX_NBUF_ALLOC_FAILURE: 1771 return "HIF_RX_NBUF_ALLOC_FAILURE"; 1772 case HIF_RX_NBUF_MAP_FAILURE: 1773 return "HIF_RX_NBUF_MAP_FAILURE"; 1774 case HIF_RX_NBUF_ENQUEUE_FAILURE: 1775 return "HIF_RX_NBUF_ENQUEUE_FAILURE"; 1776 default: 1777 return "invalid"; 1778 } 1779 } 1780 1781 /** 1782 * hif_dump_desc_event() - record ce descriptor events 1783 * @scn: HIF context 1784 * @buf: Buffer to which to be copied 1785 */ 1786 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf) 1787 { 1788 struct hif_ce_desc_event *event; 1789 uint64_t secs, usecs; 1790 ssize_t len = 0; 1791 struct ce_desc_hist *ce_hist = NULL; 1792 struct hif_ce_desc_event *hist_ev = NULL; 1793 1794 if (!scn) 1795 return -EINVAL; 1796 1797 ce_hist = &scn->hif_ce_desc_hist; 1798 1799 if (ce_hist->hist_id >= CE_COUNT_MAX || 1800 ce_hist->hist_index >= HIF_CE_HISTORY_MAX) { 1801 qdf_print("Invalid values"); 1802 return -EINVAL; 1803 } 1804 1805 hist_ev = 1806 (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id]; 1807 1808 if (!hist_ev) { 1809 qdf_print("Low Memory"); 1810 return -EINVAL; 1811 } 1812 1813 event = &hist_ev[ce_hist->hist_index]; 1814 1815 qdf_log_timestamp_to_secs(event->time, &secs, &usecs); 1816 1817 len += 
snprintf(buf, PAGE_SIZE - len, 1818 "\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK", 1819 secs, usecs, ce_hist->hist_id, 1820 ce_event_type_to_str(event->type), 1821 event->index, event->memory); 1822 #ifdef HIF_CE_DEBUG_DATA_BUF 1823 len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu", 1824 event->actual_data_len); 1825 #endif 1826 1827 len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: "); 1828 1829 hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc), 1830 16, 1, buf + len, 1831 (ssize_t)PAGE_SIZE - len, false); 1832 len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc)); 1833 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1834 1835 #ifdef HIF_CE_DEBUG_DATA_BUF 1836 if (ce_hist->data_enable[ce_hist->hist_id]) 1837 len = hif_dump_desc_data_buf(buf, len, event->data, 1838 (event->actual_data_len < 1839 CE_DEBUG_MAX_DATA_BUF_SIZE) ? 1840 event->actual_data_len : 1841 CE_DEBUG_MAX_DATA_BUF_SIZE); 1842 #endif /* HIF_CE_DEBUG_DATA_BUF */ 1843 1844 len += snprintf(buf + len, PAGE_SIZE - len, "END\n"); 1845 1846 return len; 1847 } 1848 1849 /* 1850 * hif_input_desc_trace_buf_index() - 1851 * API to get the CE id and CE debug storage buffer index 1852 * 1853 * @scn: HIF context 1854 * @buf: input data from the user 1855 * @size: size of the input 1856 * 1857 * Return: @size on success, -EINVAL on invalid input 1858 */ 1859 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn, 1860 const char *buf, size_t size) 1861 { 1862 struct ce_desc_hist *ce_hist = NULL; 1863 1864 if (!scn) 1865 return -EINVAL; 1866 1867 ce_hist = &scn->hif_ce_desc_hist; 1868 1869 if (!size) { 1870 qdf_nofl_err("%s: Invalid input buffer.", __func__); 1871 return -EINVAL; 1872 } 1873 1874 if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id, 1875 (unsigned int *)&ce_hist->hist_index) != 2) { 1876 qdf_nofl_err("%s: Invalid input value.", __func__); 1877 return -EINVAL; 1878 } 1879 if ((ce_hist->hist_id >= CE_COUNT_MAX) || 1880 (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) { 1881 qdf_print("Invalid values"); 1882 return -EINVAL; 1883 } 1884 1885 return size; 1886 } 1887 1888 #endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ 1889 1890 #ifdef HIF_CE_DEBUG_DATA_BUF 1891 /* 1892 * hif_ce_en_desc_hist() - 1893 * API to enable recording the CE desc history 1894 * 1895 * @scn: HIF context 1896 * @buf: input data from the user 1897 * @size: size of the input
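 * For example, an input of "5 1" enables data logging for copy engine 5 and "5 0" disables it.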

#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

#ifdef HIF_CE_DEBUG_DATA_BUF
/*
 * hif_ce_en_desc_hist() -
 * API to enable recording the CE desc history
 *
 * @scn: HIF context
 * @buf: input from the user: "<ce_id> <1|0>"
 * @size: number of bytes in @buf
 *
 * Starts (or stops) recording the CE desc data history for the given CE
 *
 * Return: @size on success, negative value on error
 */
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
{
	struct ce_desc_hist *ce_hist = NULL;
	uint32_t cfg = 0;
	uint32_t ce_id = 0;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (!size) {
		qdf_nofl_err("%s: Invalid input buffer.", __func__);
		return -EINVAL;
	}

	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
		   (unsigned int *)&cfg) != 2) {
		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
			     __func__);
		return -EINVAL;
	}
	if (ce_id >= CE_COUNT_MAX) {
		qdf_print("Invalid value CE Id");
		return -EINVAL;
	}

	if (cfg > 1) {
		qdf_print("Invalid values: enter 0 or 1");
		return -EINVAL;
	}

	if (!ce_hist->hist_ev[ce_id])
		return -EINVAL;

	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
	if (cfg == 1) {
		if (ce_hist->data_enable[ce_id] == 1) {
			qdf_debug("Already Enabled");
		} else {
			if (alloc_mem_ce_debug_hist_data(scn, ce_id) ==
			    QDF_STATUS_E_NOMEM) {
				ce_hist->data_enable[ce_id] = 0;
				qdf_err("%s:Memory Alloc failed", __func__);
			} else
				ce_hist->data_enable[ce_id] = 1;
		}
	} else if (cfg == 0) {
		if (ce_hist->data_enable[ce_id] == 0) {
			qdf_debug("Already Disabled");
		} else {
			ce_hist->data_enable[ce_id] = 0;
			free_mem_ce_debug_hist_data(scn, ce_id);
		}
	}
	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);

	return size;
}
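
/*
 * Example (illustrative sketch only, not part of the upstream driver):
 * toggling payload capture for one CE from inside the driver.
 * hif_ce_en_desc_hist() parses a "<ce_id> <1|0>" string, so "3 1" enables
 * data recording for CE 3 and "3 0" disables it again.  The helper name
 * below is hypothetical.
 */
#if 0	/* illustrative example, not compiled */
static void hif_ce_data_hist_toggle_example(struct hif_softc *scn,
					    uint32_t ce_id, bool enable)
{
	char cmd[16];
	int n = snprintf(cmd, sizeof(cmd), "%u %u", ce_id, enable ? 1U : 0U);

	if (n > 0)
		hif_ce_en_desc_hist(scn, cmd, n);
}
#endif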

/*
 * hif_disp_ce_enable_desc_data_hist() -
 * API to display value of data_enable
 *
 * @scn: HIF context
 * @buf: output buffer to copy the per-CE data_enable values into
 *
 * Return: number of bytes copied to @buf, negative value on error
 */
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
{
	ssize_t len = 0;
	uint32_t ce_id = 0;
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
				ce_id, ce_hist->data_enable[ce_id]);
	}

	return len;
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef OL_ATH_SMART_LOGGING
#define GUARD_SPACE 10
#define LOG_ID_SZ 4
/*
 * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
 * @src_ring: SRC ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE SRC ring descriptors and the buffers pointed to by them
 * into the given buf, skb_sz is the max buffer size to be copied
 *
 * Return: Current pointer in ring buffer
 */
static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
				    uint8_t *buf_cur, uint8_t *buf_init,
				    uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_src_desc *src_ring_base;
	uint32_t len, entry;
	struct CE_src_desc *src_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE)) {
		buf_cur = buf_init;
	}

	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < src_ring->nentries; entry++) {
		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
		nbuf = src_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_src_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, src_desc,
				     sizeof(struct CE_src_desc));
			buf_cur += sizeof(struct CE_src_desc);

			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);

			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, src_desc,
				     sizeof(struct CE_src_desc));
			buf_cur += sizeof(struct CE_src_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}

	return buf_cur;
}
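
/*
 * The SRC dump above and the DEST dump below repeat the same guard: compute
 * the space left in the destination buffer and wrap back to buf_init when
 * fewer than (len + GUARD_SPACE) bytes remain.  The helper sketched here
 * (hypothetical, not part of the upstream driver) captures that pattern in
 * one place.
 */
#if 0	/* illustrative example, not compiled */
static uint8_t *hif_log_wrap_if_needed(uint8_t *buf_cur, uint8_t *buf_init,
				       uint32_t buf_sz, uint32_t len)
{
	uint32_t available_buf = buf_sz - (buf_cur - buf_init);

	return (available_buf < (len + GUARD_SPACE)) ? buf_init : buf_cur;
}
#endif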

/*
 * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
 * @dest_ring: DEST ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE DEST ring descriptors and the buffers pointed to by them
 * into the given buf, skb_sz is the max buffer size to be copied
 *
 * Return: Current pointer in ring buffer
 */
static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
				     uint8_t *buf_cur, uint8_t *buf_init,
				     uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_dest_desc *dest_ring_base;
	uint32_t len, entry;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE)) {
		buf_cur = buf_init;
	}

	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < dest_ring->nentries; entry++) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);

		nbuf = dest_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_dest_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);

			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}

			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);
			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}
	return buf_cur;
}

/**
 * hif_log_dump_ce() - Copy the SRC or DEST ring of one CE to buf
 * @scn: HIF context
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy engine id whose rings are to be dumped
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and the buffers pointed to by them into the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz)
{
	struct CE_state *ce_state;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;

	ce_state = scn->ce_id_to_state[ce];
	src_ring = ce_state->src_ring;
	dest_ring = ce_state->dest_ring;

	if (src_ring) {
		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
					      buf_init, buf_sz, skb_sz);
	} else if (dest_ring) {
		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
					       buf_init, buf_sz, skb_sz);
	}

	return buf_cur;
}

qdf_export_symbol(hif_log_dump_ce);
#endif /* OL_ATH_SMART_LOGGING */
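
/*
 * Example (illustrative sketch only, not part of the upstream driver): a
 * smart-logging consumer could snapshot every copy engine into one flat
 * buffer by walking the CE ids and chaining hif_log_dump_ce() calls.  The
 * function name and the 64-byte per-packet cap are hypothetical, and such a
 * caller would also need to live under OL_ATH_SMART_LOGGING.
 */
#if 0	/* illustrative example, not compiled */
static void hif_log_dump_all_ce_example(struct hif_softc *scn,
					uint8_t *buf, uint32_t buf_sz)
{
	uint8_t *cur = buf;
	uint32_t ce_id;

	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		/* skip copy engines that were never attached */
		if (!scn->ce_id_to_state[ce_id])
			continue;
		cur = hif_log_dump_ce(scn, cur, buf, buf_sz, ce_id, 64);
	}
}
#endif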