1 /* 2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hif.h" 21 #include "hif_io32.h" 22 #include "ce_api.h" 23 #include "ce_main.h" 24 #include "ce_internal.h" 25 #include "ce_reg.h" 26 #include "qdf_lock.h" 27 #include "regtable.h" 28 #include "hif_main.h" 29 #include "hif_debug.h" 30 #include "hif_napi.h" 31 #include "qdf_module.h" 32 #include <qdf_tracepoint.h> 33 34 #ifdef IPA_OFFLOAD 35 #ifdef QCA_WIFI_3_0 36 #define CE_IPA_RING_INIT(ce_desc) \ 37 do { \ 38 ce_desc->gather = 0; \ 39 ce_desc->enable_11h = 0; \ 40 ce_desc->meta_data_low = 0; \ 41 ce_desc->packet_result_offset = 64; \ 42 ce_desc->toeplitz_hash_enable = 0; \ 43 ce_desc->addr_y_search_disable = 0; \ 44 ce_desc->addr_x_search_disable = 0; \ 45 ce_desc->misc_int_disable = 0; \ 46 ce_desc->target_int_disable = 0; \ 47 ce_desc->host_int_disable = 0; \ 48 ce_desc->dest_byte_swap = 0; \ 49 ce_desc->byte_swap = 0; \ 50 ce_desc->type = 2; \ 51 ce_desc->tx_classify = 1; \ 52 ce_desc->buffer_addr_hi = 0; \ 53 ce_desc->meta_data = 0; \ 54 ce_desc->nbytes = 128; \ 55 } while (0) 56 #else 57 #define CE_IPA_RING_INIT(ce_desc) \ 58 do { \ 59 
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

/* non-zero when it is safe for the io32 write workaround to sleep */
static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

/**
 * hif_ce_war_disable() - disable ce war globally
 */
void hif_ce_war_disable(void)
{
	hif_ce_war1 = 0;
}

/**
 * hif_ce_war_enable() - enable ce war globally
 */
void hif_ce_war_enable(void)
{
	hif_ce_war1 = 1;
}

/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

/* 3 chars per byte ("xx ") minus the trailing space of the last byte */
#define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
#define CE_DEBUG_DATA_PER_ROW 16

static const char *ce_event_type_to_str(enum hif_ce_event_type type);

/**
 * get_next_record_index() - atomically reserve the next slot in a
 *	circular history table
 * @table_index: shared atomic index into the history array
 * @array_size: number of entries in the history array
 *
 * Increments @table_index and folds the returned value back into
 * [0, @array_size).  Only the caller that observes the counter exactly
 * at @array_size rewinds the shared counter, so concurrent callers do
 * not all subtract at once.
 *
 * Return: record index in the range [0, @array_size)
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;

	return record_index;
}

qdf_export_symbol(get_next_record_index);

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_ce_desc_data_record() - snapshot packet payload into a history event
 * @event: history event whose data buffer is filled
 * @len: number of payload bytes associated with the descriptor
 *
 * Copies up to CE_DEBUG_MAX_DATA_BUF_SIZE bytes of the nbuf payload
 * referenced by @event->memory into @event->data.  Note that
 * @event->actual_data_len records the full @len even when the copy
 * itself was truncated to CE_DEBUG_MAX_DATA_BUF_SIZE.
 */
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
	uint8_t *data = NULL;

	if (!event->data) {
		hif_err_rl("No ce debug memory allocated");
		return;
	}

	if (event->memory && len > 0)
		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);

	/* always reset the buffer so stale bytes never leak between events */
	event->actual_data_len = 0;
	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);

	if (data && len > 0) {
		qdf_mem_copy(event->data, data,
			     ((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
			      len : CE_DEBUG_MAX_DATA_BUF_SIZE));
		event->actual_data_len = len;
	}
}

qdf_export_symbol(hif_ce_desc_data_record);

/**
 * hif_clear_ce_desc_debug_data() - reset a history event record
 * @event: history event to clear
 *
 * Zeroes every field up to (but excluding) the payload snapshot members,
 * which are managed separately by hif_ce_desc_data_record().
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
	qdf_mem_zero(event,
		     offsetof(struct hif_ce_desc_event, data));
}

qdf_export_symbol(hif_clear_ce_desc_debug_data);
#else
/**
 * hif_clear_ce_desc_debug_data() - reset a history event record
 * @event: history event to clear
 *
 * No payload buffer exists in this configuration, so the whole
 * structure is zeroed.
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
}

qdf_export_symbol(hif_clear_ce_desc_debug_data);
#endif /* HIF_CE_DEBUG_DATA_BUF */

#if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record address views of an RX buffer
 * @scn: HIF context
 * @event: history event to fill
 * @memory: nbuf attached to the RX descriptor (may be NULL)
 *
 * Captures the DMA address plus two independent translations of it
 * (dma-to-phys and virt-to-phys) so mismatches can be spotted when
 * debugging RX buffer corruption.
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t memory)
{
	if (memory) {
		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
					scn->qdf_dev,
					event->dma_addr);

		event->virt_to_phy =
			virt_to_phys(qdf_nbuf_data(memory));
	}
}
#endif /* HIF_RECORD_PADDR */

/**
 * hif_display_latest_desc_hist() - dump the latest-event history
 * @hif_ctx: opaque HIF context
 *
 * Prints the most recent irq/tasklet/work timestamps and ring indices
 * for each CE that has latest-event tracking enabled.  Only CE ids in
 * [HIF_CE_MAX_LATEST_HIST, 2 * HIF_CE_MAX_LATEST_HIST) are tracked —
 * presumably CE2/CE3, matching hif_record_latest_evt() below (verify
 * against the HIF_CE_MAX_LATEST_HIST definition).
 */
void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct ce_desc_hist *ce_hist;
	struct latest_evt_history *evt;
	int i;

	if (!scn)
		return;

	ce_hist = &scn->hif_ce_desc_hist;

	for (i = 0; i < HIF_CE_MAX_LATEST_HIST; i++) {
		if (!ce_hist->enable[i + HIF_CE_MAX_LATEST_HIST])
			continue;

		evt = &ce_hist->latest_evt[i];
		hif_info_high("CE_id:%d cpu_id:%d irq_entry:0x%llx tasklet_entry:0x%llx tasklet_resched:0x%llx tasklet_exit:0x%llx ce_work:0x%llx hp:%x tp:%x",
			      (i + HIF_CE_MAX_LATEST_HIST), evt->cpu_id,
			      evt->irq_entry_ts, evt->bh_entry_ts,
			      evt->bh_resched_ts, evt->bh_exit_ts,
			      evt->bh_work_ts, evt->ring_hp, evt->ring_tp);
	}
}

/**
 * hif_record_latest_evt() - update the latest-event timestamps for a CE
 * @ce_hist: per-SoC CE history state
 * @type: hif_ce_event_type of the event being recorded
 * @ce_id: copy engine id (only CE2 and CE3 are tracked)
 * @time: event timestamp
 * @hp: ring head pointer (recorded for completion/reap events only)
 * @tp: ring tail pointer (recorded for completion/reap events only)
 */
void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
			   uint8_t type,
			   int ce_id, uint64_t time,
			   uint32_t hp, uint32_t tp)
{
	struct latest_evt_history *latest_evt;

	if (ce_id != 2 && ce_id != 3)
		return;

	/* slot index assumes HIF_CE_MAX_LATEST_HIST maps CE2->0, CE3->1 */
	latest_evt = &ce_hist->latest_evt[ce_id - HIF_CE_MAX_LATEST_HIST];

	switch (type) {
	case HIF_IRQ_EVENT:
		latest_evt->irq_entry_ts = time;
		latest_evt->cpu_id = qdf_get_cpu();
		break;
	case HIF_CE_TASKLET_ENTRY:
		latest_evt->bh_entry_ts = time;
		break;
	case HIF_CE_TASKLET_RESCHEDULE:
		latest_evt->bh_resched_ts = time;
		break;
	case HIF_CE_TASKLET_EXIT:
		latest_evt->bh_exit_ts = time;
		break;
	case HIF_TX_DESC_COMPLETION:
	case HIF_CE_DEST_STATUS_RING_REAP:
		latest_evt->bh_work_ts = time;
		latest_evt->ring_hp = hp;
		latest_evt->ring_tp = tp;
		break;
	default:
		break;
	}
}

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
241 * @len: 242 */ 243 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id, 244 enum hif_ce_event_type type, 245 union ce_desc *descriptor, 246 void *memory, int index, 247 int len) 248 { 249 int record_index; 250 struct hif_ce_desc_event *event; 251 252 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 253 struct hif_ce_desc_event *hist_ev = NULL; 254 255 if (ce_id < CE_COUNT_MAX) 256 hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id]; 257 else 258 return; 259 260 if (ce_id >= CE_COUNT_MAX) 261 return; 262 263 if (!ce_hist->enable[ce_id]) 264 return; 265 266 if (!hist_ev) 267 return; 268 269 record_index = get_next_record_index( 270 &ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX); 271 272 event = &hist_ev[record_index]; 273 274 hif_clear_ce_desc_debug_data(event); 275 276 event->type = type; 277 event->time = qdf_get_log_timestamp(); 278 event->cpu_id = qdf_get_cpu(); 279 280 if (descriptor) 281 qdf_mem_copy(&event->descriptor, descriptor, 282 sizeof(union ce_desc)); 283 284 event->memory = memory; 285 event->index = index; 286 287 if (event->type == HIF_RX_DESC_POST || 288 event->type == HIF_RX_DESC_COMPLETION) 289 hif_ce_desc_record_rx_paddr(scn, event, memory); 290 291 if (ce_hist->data_enable[ce_id]) 292 hif_ce_desc_data_record(event, len); 293 294 hif_record_latest_evt(ce_hist, type, ce_id, event->time, 0, 0); 295 } 296 qdf_export_symbol(hif_record_ce_desc_event); 297 298 /** 299 * ce_init_ce_desc_event_log() - initialize the ce event log 300 * @scn: HIF context 301 * @ce_id: copy engine id for which we are initializing the log 302 * @size: size of array to dedicate 303 * 304 * Currently the passed size is ignored in favor of a precompiled value. 
 */
void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	qdf_atomic_init(&ce_hist->history_index[ce_id]);
	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
}

/**
 * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: HIF context
 * @ce_id: copy engine id for which we are deinitializing the log
 *
 */
inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
}

#else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
/* History disabled: all recording entry points become no-ops */
void hif_record_ce_desc_event(struct hif_softc *scn,
			      int ce_id, enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len)
{
}
qdf_export_symbol(hif_record_ce_desc_event);

inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
				      int size)
{
}

void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
}
#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

#ifdef NAPI_YIELD_BUDGET_BASED
/**
 * hif_ce_service_should_yield() - return true if the service is hogging
 *	the cpu (budget-based variant: yields purely on receive count)
 * @scn: hif context
 * @ce_state: context of the copy engine being serviced
 *
 * Return: true if the service should yield
 */
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count);

	/* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
	 * beyond MAX_NUM_OF_RECEIVES for the NAPI budget calculation. This
	 * can happen in fast path handling as processing is happening in
	 * batches.
	 */
	if (yield)
		ce_state->receive_count = MAX_NUM_OF_RECEIVES;

	return yield;
}
#else
/**
 * hif_ce_service_should_yield() - return true if the service is hogging the cpu
 * @scn: hif context
 * @ce_state: context of the copy engine being serviced
 *
 * Yields either on elapsed service time or on the receive-count
 * threshold; the threshold is only evaluated while within the time
 * limit.
 *
 * Return: true if the service should yield
 */
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;

	time_limit_reached = qdf_time_sched_clock() >
					ce_state->ce_service_yield_time ? 1 : 0;

	if (!time_limit_reached)
		rxpkt_thresh_reached = hif_max_num_receives_reached
					(scn, ce_state->receive_count);

	/* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
	 * beyond MAX_NUM_OF_RECEIVES for the NAPI budget calculation. This
	 * can happen in fast path handling as processing is happening in
	 * batches.
	 */
	if (rxpkt_thresh_reached)
		ce_state->receive_count = MAX_NUM_OF_RECEIVES;

	yield = time_limit_reached || rxpkt_thresh_reached;

	if (yield &&
	    ce_state->htt_rx_data &&
	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
		hif_napi_update_yield_stats(ce_state,
					    time_limit_reached,
					    rxpkt_thresh_reached);
	}

	return yield;
}
qdf_export_symbol(hif_ce_service_should_yield);
#endif

/**
 * ce_flush_tx_ring_write_idx() - flush a pending TX write index to hardware
 * @ce_tx_hdl: CE handle of the source ring
 * @force_flush: when true, flush even if no flush event was pending
 *
 * Writes the cached source-ring write index to the hardware register
 * under ce_index_lock if a CE_RING_FLUSH_EVENT is (or is forced)
 * pending, then records the flush timestamp.
 */
void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *scn = ce_state->scn;

	if (force_flush)
		ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);

	if (ce_ring_get_clear_event(src_ring, CE_RING_FLUSH_EVENT)) {
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		src_ring->last_flush_ts = qdf_get_log_timestamp();
		hif_debug("flushed");
	}
}

/* Make sure this wrapper is called under ce_index_lock */
/**
 * ce_tx_ring_write_idx_update_wrapper() - update or defer the TX write index
 * @ce_tx_hdl: CE handle of the source ring
 * @flush: true to write the index to hardware now; false to mark a
 *	flush event for a later ce_flush_tx_ring_write_idx() call
 */
void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
					 bool flush)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *scn = ce_state->scn;

	if (flush)
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  src_ring->write_index);
	else
		ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);
}

/**
 * war_ce_src_ring_write_idx_set() - write the source-ring write index,
 *	applying the io32 write workaround when enabled
 * @scn: HIF context
 * @ctrl_addr: CE control register base for this copy engine
 * @write_index: new source ring write index
 *
 * The caller takes responsibility for any needed locking.  When
 * hif_ce_war1 is set the index write is bracketed with writes to an
 * indicator register (and read-backs) to work around a PCIe write
 * issue; otherwise the index is written directly.
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(scn, indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;

			local_irq_save(irq_flags);
			hif_write32_mb(scn, indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
 */
			(void)hif_read32_mb(scn, indicator_addr);
			/* conservative */
			(void)hif_read32_mb(scn, indicator_addr);

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(scn, indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else {
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	}
}

qdf_export_symbol(war_ce_src_ring_write_idx_set);

/**
 * ce_send() - locked wrapper around the service-layer send
 *
 * Takes ce_index_lock and delegates to the registered
 * ce_send_nolock service implementation.
 *
 * Return: status from the underlying send
 */
QDF_STATUS
ce_send(struct CE_handle *copyeng,
	void *per_transfer_context,
	qdf_dma_addr_t buffer,
	uint32_t nbytes,
	uint32_t transfer_id,
	uint32_t flags,
	uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	QDF_STATUS status;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = hif_state->ce_services->ce_send_nolock(copyeng,
			per_transfer_context, buffer, nbytes,
			transfer_id, flags, user_flag);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
qdf_export_symbol(ce_send);

/* size callers must allocate for an opaque struct ce_sendlist */
unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

/* reset a sendlist to contain no items */
void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;

	sl->num_items = 0;
}

/**
 * ce_sendlist_buf_add() - append a simple buffer to a sendlist
 *
 * Return: QDF_STATUS_E_RESOURCES when the list already holds
 * CE_SENDLIST_ITEMS_MAX items, QDF_STATUS_SUCCESS otherwise.
 */
QDF_STATUS
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t flags,
		    uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return QDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return QDF_STATUS_SUCCESS;
}

/* dispatch a sendlist through the registered service implementation */
QDF_STATUS
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_sendlist_send(copyeng,
			per_transfer_context, sendlist, transfer_id);
}

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif

/**
 * ce_batch_send() - sends bunch of msdus at once
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : list of msdus to be sent
 * @transfer_id : transfer id
 * @len : Downloaded length
 * @sendhead : sendhead
 *
 * Assumption : Called with an array of MSDU's
 * Function:
 * For each msdu in the array
 * 1. Send each msdu
 * 2. Increment write index accordingly.
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
			 uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	/* A_target_id_t targid = TARGID(scn);*/

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	int deltacount = 0;
	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);

	while (msdu) {
		tempnext = qdf_nbuf_next(msdu);

		if (deltacount < 2) {
			/* fewer than 2 free descriptors: flush what we have
			 * queued so far, then re-read the indices to see if
			 * hardware freed up room
			 */
			if (sendhead)
				return msdu;
			hif_err("Out of descriptors");
			src_ring->write_index = write_index;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);

			sw_index = src_ring->sw_index;
			write_index = src_ring->write_index;

			deltacount = CE_RING_DELTA(nentries_mask, write_index,
						   sw_index-1);
			/* still no room: move this msdu to the not-sent list */
			if (!freelist) {
				freelist = msdu;
				hfreelist = msdu;
			} else {
				qdf_nbuf_set_next(freelist, msdu);
				freelist = msdu;
			}
			qdf_nbuf_set_next(msdu, NULL);
			msdu = tempnext;
			continue;
		}

		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
							   write_index);

		src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

		lsrc_desc.meta_data = transfer_id;
		if (len > msdu->len)
			len = msdu->len;
		lsrc_desc.nbytes = len;
		/* Data packet is a byte stream, so disable byte swap */
		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
		lsrc_desc.gather = 0; /*For the last one, gather is not set*/

		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];


		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		if (sendhead)
			break;
		qdf_nbuf_set_next(msdu, NULL);
		msdu = tempnext;

	}


	src_ring->write_index = write_index;
	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return hfreelist;
}

/**
 * ce_update_tx_ring() - Advance sw index.
 * @ce_tx_hdl : pointer to CE handle
 * @num_htt_cmpls : htt completions received.
 *
 * Function:
 * Increment the value of sw index of src ring
 * according to number of htt completions
 * received.
 *
 * Return: void
 */
#ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t nentries_mask = src_ring->nentries_mask;
	/*
	 * Advance the s/w index:
	 * This effectively simulates completing the CE ring descriptors
	 */
	src_ring->sw_index =
		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
				num_htt_cmpls);
}
#else
/* sw index is updated inline elsewhere in this configuration */
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{}
#endif

/**
 * ce_send_single() - sends
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : msdu to be sent
 * @transfer_id : transfer id
 * @len : Downloaded length
 *
 * Function:
 * 1. Send one msdu
 * 2. Increment write index of src ring accordingly.
 *
 * Return: QDF_STATUS: CE sent status
 */
QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	/*A_target_id_t targid = TARGID(scn);*/

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	enum hif_ce_event_type event_type;

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* need at least one free descriptor slot */
	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
				       sw_index-1) < 1)) {
		hif_err("ce send fail %d %d %d", nentries_mask,
			write_index, sw_index);
		return QDF_STATUS_E_RESOURCES;
	}

	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);

	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

	lsrc_desc.meta_data = transfer_id;
	lsrc_desc.nbytes = len;
	/* Data packet is a byte stream, so disable byte swap */
	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
	lsrc_desc.gather = 0; /* For the last one, gather is not set */

	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];


	src_ring->per_transfer_context[write_index] = msdu;

	if (((struct CE_src_desc *)src_desc)->gather)
		event_type = HIF_TX_GATHER_DESC_POST;
	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
		event_type = HIF_TX_DESC_SOFTWARE_POST;
	else
		event_type = HIF_TX_DESC_POST;

	hif_record_ce_desc_event(scn, ce_state->id, event_type,
				 (union ce_desc *)src_desc, msdu,
				 write_index, len);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	src_ring->write_index = write_index;

	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
QDF_STATUS
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
			per_recv_context, buffer);
}
qdf_export_symbol(ce_recv_buf_enqueue);

/* program low/high watermark interrupts for the source (send) ring */
void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

/* program low/high watermark interrupts for the destination (recv) ring */
void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				 low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				  high_alert_nentries);
}

/* number of free source-ring slots (one slot is always kept unused) */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/* number of free destination-ring slots (one slot is always kept unused) */
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
QDF_STATUS
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	QDF_STATUS status;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services;

	ce_services = hif_state->ce_services;
	/* locked wrapper around the nolock service implementation */
	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_services->ce_completed_recv_next_nolock(CE_state,
				per_CE_contextp, per_transfer_contextp, bufferp,
				nbytesp, transfer_idp, flagsp);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/* reclaim an un-completed recv buffer (e.g. on shutdown) */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
			per_CE_contextp, per_transfer_contextp, bufferp);
}

/* reclaim an un-completed send buffer (e.g. on shutdown) */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_cancel_send_next
		(copyeng, per_CE_contextp, per_transfer_contextp,
		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
}
qdf_export_symbol(ce_cancel_send_next);

/* locked wrapper around the nolock send-completion service */
QDF_STATUS
ce_completed_send_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp,
		       unsigned int *sw_idx,
		       unsigned int *hw_idx,
		       unsigned int *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services;
	QDF_STATUS status;

	ce_services = hif_state->ce_services;
	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_services->ce_completed_send_next_nolock(CE_state,
				per_CE_contextp, per_transfer_contextp,
				bufferp, nbytesp, transfer_idp, sw_idx,
				hw_idx, toeplitz_hash_result);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service, the only difference is that
 * ce_per_engine_service does receive and reaping of completed descriptors,
 * while this function only handles reaping of Tx complete descriptors.
 * The function is called from the threshold reap poll routine
 * hif_send_complete_check, so it should not contain receive functionality
 * within it.
 */

void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, 0, 0);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spin lock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make the minimum change, i.e., only address the issue that
	 * occurred in this function. The possible negative effect of this
	 * minimum change is that, in the future, if some other function is
	 * also opened up to user context, those cases need to be addressed
	 * by changing spin_lock to spin_lock_bh as well.
	 */

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
			struct ce_ops *ce_services = hif_state->ce_services;
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_services->ce_completed_send_next_nolock
				 (CE_state, &CE_context,
				  &transfer_context, &buf,
				  &nbytes, &id, &sw_idx, &hw_idx,
				  &toeplitz_hash_result) ==
				  QDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					/* drop the index lock around the
					 * callback to avoid lock recursion
					 */
					qdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					qdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					qdf_spin_lock_bh(&pipe_info->
						 completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					qdf_spin_unlock_bh(&pipe_info->
						   completion_freeq_lock);
				}
			}
		}
	}

	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, 0, 0);
	Q_TARGET_ACCESS_END(scn);
}

#endif /*ATH_11AC_TXCOMPACT */

#ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
{
	// QDF_IS_EPPING_ENABLED is pre lithium feature
	//
CE4 completion is enabled only lithium and later
	// so no need to check for EPPING
	return true;
}

#else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/*
 * check_ce_id_and_epping_enabled() - decide whether a send completion
 * should be delivered to the registered send callback.
 *
 * Legacy behavior: deliver for every CE except CE_HTT_H2T_MSG, unless
 * EPPING mode is enabled (EPPING also consumes the HTT CE completions).
 */
static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
{
	if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode))
		return true;
	else
		return false;
}

#endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/*
 * ce_engine_service_reg:
 * @scn: HIF context
 * @CE_id: which copy engine to service
 *
 * Called from ce_per_engine_service and goes through the regular interrupt
 * handling that does not involve the WLAN fast path feature.
 *
 * NOTE(review): caller is expected to hold CE_state->ce_index_lock (see
 * ce_per_engine_service); the lock is dropped around every callback
 * invocation below and re-acquired afterwards.
 *
 * Returns void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	unsigned int more_comp_cnt = 0;
	unsigned int more_snd_comp_cnt = 0;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	uint32_t mode = hif_get_conparam(scn);

more_completions:
	if (CE_state->recv_cb) {

		/* Pop completed recv buffers and call
		 * the registered recv callback for each
		 */
		while (hif_state->ce_services->ce_completed_recv_next_nolock
				(CE_state, &CE_context, &transfer_context,
				&buf, &nbytes, &id, &flags) ==
				QDF_STATUS_SUCCESS) {
			/* drop the index lock across the callback */
			qdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->recv_cb((struct CE_handle *)CE_state,
					CE_context, transfer_context, buf,
					nbytes, id, flags);

			qdf_spin_lock(&CE_state->ce_index_lock);
			/*
			 * EV #112693 -
			 * [Peregrine][ES1][WB342][Win8x86][Performance]
			 * BSoD_0x133 occurred in VHT80 UDP_DL
			 * Break out DPC by force if number of loops in
			 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
			 * to avoid spending too long time in
			 * DPC for each interrupt handling. Schedule another
			 * DPC to avoid data loss if we had taken
			 * force-break action before apply to Windows OS
			 * only currently, Linux/MAC os can expand to their
			 * platform if necessary
			 */

			/* Break the receive processes by
			 * force if force_break set up
			 */
			if (qdf_unlikely(CE_state->force_break)) {
				/* leave rx_pending set so a re-schedule
				 * knows there is still work to do
				 */
				qdf_atomic_set(&CE_state->rx_pending, 1);
				return;
			}
		}
	}

	/*
	 * Attention: We may experience potential infinite loop for below
	 * While Loop during Sending Stress test.
	 * Resolve the same way as Receive Case (Refer to EV #112693)
	 */

	if (CE_state->send_cb) {
		/* Pop completed send buffers and call
		 * the registered send callback for each
		 */

#ifdef ATH_11AC_TXCOMPACT
		while (hif_state->ce_services->ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			 &transfer_context, &buf, &nbytes,
			 &id, &sw_idx, &hw_idx,
			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {

			if (check_ce_id_and_epping_enabled(CE_id, mode)) {
				qdf_spin_unlock(&CE_state->ce_index_lock);
				CE_state->send_cb((struct CE_handle *)CE_state,
						  CE_context, transfer_context,
						  buf, nbytes, id, sw_idx,
						  hw_idx, toeplitz_hash_result);
				qdf_spin_lock(&CE_state->ce_index_lock);
			} else {
				/* completion not delivered: just return the
				 * send slot to the pipe's free quota
				 */
				struct HIF_CE_pipe_info *pipe_info =
					(struct HIF_CE_pipe_info *)CE_context;

				qdf_spin_lock_bh(&pipe_info->
					 completion_freeq_lock);
				pipe_info->num_sends_allowed++;
				qdf_spin_unlock_bh(&pipe_info->
					   completion_freeq_lock);
			}
		}
#else                           /*ATH_11AC_TXCOMPACT */
		while (hif_state->ce_services->ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			 &transfer_context, &buf, &nbytes,
			 &id, &sw_idx, &hw_idx,
			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->send_cb((struct CE_handle *)CE_state,
				  CE_context, transfer_context, buf,
				  nbytes, id, sw_idx, hw_idx,
				  toeplitz_hash_result);
			qdf_spin_lock(&CE_state->ce_index_lock);
		}
#endif /*ATH_11AC_TXCOMPACT */
	}

more_watermarks:
	if (CE_state->misc_cbs) {
		if (CE_state->watermark_cb &&
				hif_state->ce_services->watermark_int(CE_state,
					&flags)) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			/* Convert HW IS bits to software flags */
			CE_state->watermark_cb((struct CE_handle *)CE_state,
					CE_state->wm_context, flags);
			qdf_spin_lock(&CE_state->ce_index_lock);
		}
	}

	/*
	 * Clear the misc interrupts (watermark) that were handled above,
	 * and that will be checked again below.
	 * Clear and check for copy-complete interrupts again, just in case
	 * more copy completions happened while the misc interrupts were being
	 * handled.
	 */
	if (!ce_srng_based(scn)) {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					   CE_WATERMARK_MASK |
					   HOST_IS_COPY_COMPLETE_MASK);
		} else {
			qdf_atomic_set(&CE_state->rx_pending, 0);
			hif_err_rl("%s: target access is not allowed",
				   __func__);
			return;
		}
	}

	/*
	 * Now that per-engine interrupts are cleared, verify that
	 * no recv interrupts arrive while processing send interrupts,
	 * and no recv or send interrupts happened while processing
	 * misc interrupts.Go back and check again.Keep checking until
	 * we find no more events to process.
	 */
	if (CE_state->recv_cb &&
		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
				CE_state)) {
		if (QDF_IS_EPPING_ENABLED(mode) ||
		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			if (!ce_srng_based(scn)) {
				hif_err_rl(
					"Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
					CE_state->id,
					CE_state->dest_ring->nentries_mask,
					CE_state->dest_ring->sw_index,
					CE_DEST_RING_READ_IDX_GET(scn,
							  CE_state->ctrl_addr));
			}
		}
	}

	if (CE_state->send_cb &&
		hif_state->ce_services->ce_send_entries_done_nolock(scn,
				CE_state)) {
		if (QDF_IS_EPPING_ENABLED(mode) ||
		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			if (!ce_srng_based(scn)) {
				hif_err_rl(
					"Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x",
					CE_state->id,
					CE_state->src_ring->nentries_mask,
					CE_state->src_ring->sw_index,
					CE_state->src_ring->hw_index,
					CE_state->src_ring->write_index,
					CE_SRC_RING_READ_IDX_GET(scn,
							 CE_state->ctrl_addr));
			}
		}
	}

	if (CE_state->misc_cbs && CE_state->watermark_cb) {
		if (hif_state->ce_services->watermark_int(CE_state, &flags))
			goto more_watermarks;
	}

	qdf_atomic_set(&CE_state->rx_pending, 0);
}

#ifdef WLAN_TRACEPOINTS
/**
 * ce_trace_tasklet_sched_latency() - Trace ce tasklet scheduling
 *  latency
 * @ce_state: CE context
 *
 * Return: None
 */
static inline
void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
{
	qdf_trace_dp_ce_tasklet_sched_latency(ce_state->id,
					      ce_state->ce_service_start_time -
					      ce_state->ce_tasklet_sched_time);
}
#else
/* Tracepoints disabled: no-op stub with the same signature */
static inline
void
ce_trace_tasklet_sched_latency(struct CE_state *ce_state) 1274 { 1275 } 1276 #endif 1277 1278 /* 1279 * Guts of interrupt handler for per-engine interrupts on a particular CE. 1280 * 1281 * Invokes registered callbacks for recv_complete, 1282 * send_complete, and watermarks. 1283 * 1284 * Returns: number of messages processed 1285 */ 1286 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id) 1287 { 1288 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1289 1290 if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data)) 1291 return CE_state->receive_count; 1292 1293 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { 1294 hif_err("[premature rc=0]"); 1295 return 0; /* no work done */ 1296 } 1297 1298 /* Clear force_break flag and re-initialize receive_count to 0 */ 1299 CE_state->receive_count = 0; 1300 CE_state->force_break = 0; 1301 CE_state->ce_service_start_time = qdf_time_sched_clock(); 1302 CE_state->ce_service_yield_time = 1303 CE_state->ce_service_start_time + 1304 hif_get_ce_service_max_yield_time( 1305 (struct hif_opaque_softc *)scn); 1306 1307 ce_trace_tasklet_sched_latency(CE_state); 1308 1309 qdf_spin_lock(&CE_state->ce_index_lock); 1310 1311 CE_state->service(scn, CE_id); 1312 1313 qdf_spin_unlock(&CE_state->ce_index_lock); 1314 1315 if (Q_TARGET_ACCESS_END(scn) < 0) 1316 hif_err("<--[premature rc=%d]", CE_state->receive_count); 1317 return CE_state->receive_count; 1318 } 1319 qdf_export_symbol(ce_per_engine_service); 1320 1321 /* 1322 * Handler for per-engine interrupts on ALL active CEs. 
1323 * This is used in cases where the system is sharing a 1324 * single interrupt for all CEs 1325 */ 1326 1327 void ce_per_engine_service_any(int irq, struct hif_softc *scn) 1328 { 1329 int CE_id; 1330 uint32_t intr_summary; 1331 1332 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1333 return; 1334 1335 if (!qdf_atomic_read(&scn->tasklet_from_intr)) { 1336 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { 1337 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1338 1339 if (qdf_atomic_read(&CE_state->rx_pending)) { 1340 qdf_atomic_set(&CE_state->rx_pending, 0); 1341 ce_per_engine_service(scn, CE_id); 1342 } 1343 } 1344 1345 Q_TARGET_ACCESS_END(scn); 1346 return; 1347 } 1348 1349 intr_summary = CE_INTERRUPT_SUMMARY(scn); 1350 1351 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) { 1352 if (intr_summary & (1 << CE_id)) 1353 intr_summary &= ~(1 << CE_id); 1354 else 1355 continue; /* no intr pending on this CE */ 1356 1357 ce_per_engine_service(scn, CE_id); 1358 } 1359 1360 Q_TARGET_ACCESS_END(scn); 1361 } 1362 1363 /*Iterate the CE_state list and disable the compl interrupt 1364 * if it has been registered already. 
1365 */ 1366 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn) 1367 { 1368 int CE_id; 1369 1370 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1371 return; 1372 1373 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { 1374 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1375 uint32_t ctrl_addr = CE_state->ctrl_addr; 1376 1377 /* if the interrupt is currently enabled, disable it */ 1378 if (!CE_state->disable_copy_compl_intr 1379 && (CE_state->send_cb || CE_state->recv_cb)) 1380 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); 1381 1382 if (CE_state->watermark_cb) 1383 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr); 1384 } 1385 Q_TARGET_ACCESS_END(scn); 1386 } 1387 1388 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn) 1389 { 1390 int CE_id; 1391 1392 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1393 return; 1394 1395 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { 1396 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1397 uint32_t ctrl_addr = CE_state->ctrl_addr; 1398 1399 /* 1400 * If the CE is supposed to have copy complete interrupts 1401 * enabled (i.e. there a callback registered, and the 1402 * "disable" flag is not set), then re-enable the interrupt. 1403 */ 1404 if (!CE_state->disable_copy_compl_intr 1405 && (CE_state->send_cb || CE_state->recv_cb)) 1406 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr); 1407 1408 if (CE_state->watermark_cb) 1409 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr); 1410 } 1411 Q_TARGET_ACCESS_END(scn); 1412 } 1413 1414 /** 1415 * ce_send_cb_register(): register completion handler 1416 * @copyeng: CE_state representing the ce we are adding the behavior to 1417 * @fn_ptr: callback that the ce should use when processing tx completions 1418 * @ce_send_context: context to pass back in the callback 1419 * @disable_interrupts: if the interrupts should be enabled or not. 1420 * 1421 * Caller should guarantee that no transactions are in progress before 1422 * switching the callback function. 
1423 * 1424 * Registers the send context before the fn pointer so that if the cb is valid 1425 * the context should be valid. 1426 * 1427 * Beware that currently this function will enable completion interrupts. 1428 */ 1429 void 1430 ce_send_cb_register(struct CE_handle *copyeng, 1431 ce_send_cb fn_ptr, 1432 void *ce_send_context, int disable_interrupts) 1433 { 1434 struct CE_state *CE_state = (struct CE_state *)copyeng; 1435 struct hif_softc *scn; 1436 struct HIF_CE_state *hif_state; 1437 1438 if (!CE_state) { 1439 hif_err("Error CE state = NULL"); 1440 return; 1441 } 1442 scn = CE_state->scn; 1443 hif_state = HIF_GET_CE_STATE(scn); 1444 if (!hif_state) { 1445 hif_err("Error HIF state = NULL"); 1446 return; 1447 } 1448 CE_state->send_context = ce_send_context; 1449 CE_state->send_cb = fn_ptr; 1450 hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, 1451 disable_interrupts); 1452 } 1453 qdf_export_symbol(ce_send_cb_register); 1454 1455 /** 1456 * ce_recv_cb_register(): register completion handler 1457 * @copyeng: CE_state representing the ce we are adding the behavior to 1458 * @fn_ptr: callback that the ce should use when processing rx completions 1459 * @CE_recv_context: context to pass back in the callback 1460 * @disable_interrupts: if the interrupts should be enabled or not. 1461 * 1462 * Registers the send context before the fn pointer so that if the cb is valid 1463 * the context should be valid. 1464 * 1465 * Caller should guarantee that no transactions are in progress before 1466 * switching the callback function. 
1467 */ 1468 void 1469 ce_recv_cb_register(struct CE_handle *copyeng, 1470 CE_recv_cb fn_ptr, 1471 void *CE_recv_context, int disable_interrupts) 1472 { 1473 struct CE_state *CE_state = (struct CE_state *)copyeng; 1474 struct hif_softc *scn; 1475 struct HIF_CE_state *hif_state; 1476 1477 if (!CE_state) { 1478 hif_err("ERROR CE state = NULL"); 1479 return; 1480 } 1481 scn = CE_state->scn; 1482 hif_state = HIF_GET_CE_STATE(scn); 1483 if (!hif_state) { 1484 hif_err("Error HIF state = NULL"); 1485 return; 1486 } 1487 CE_state->recv_context = CE_recv_context; 1488 CE_state->recv_cb = fn_ptr; 1489 hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, 1490 disable_interrupts); 1491 } 1492 qdf_export_symbol(ce_recv_cb_register); 1493 1494 /** 1495 * ce_watermark_cb_register(): register completion handler 1496 * @copyeng: CE_state representing the ce we are adding the behavior to 1497 * @fn_ptr: callback that the ce should use when processing watermark events 1498 * @CE_wm_context: context to pass back in the callback 1499 * 1500 * Caller should guarantee that no watermark events are being processed before 1501 * switching the callback function. 
1502 */ 1503 void 1504 ce_watermark_cb_register(struct CE_handle *copyeng, 1505 CE_watermark_cb fn_ptr, void *CE_wm_context) 1506 { 1507 struct CE_state *CE_state = (struct CE_state *)copyeng; 1508 struct hif_softc *scn = CE_state->scn; 1509 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1510 1511 CE_state->watermark_cb = fn_ptr; 1512 CE_state->wm_context = CE_wm_context; 1513 hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, 1514 0); 1515 if (fn_ptr) 1516 CE_state->misc_cbs = 1; 1517 } 1518 1519 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT 1520 void 1521 ce_register_custom_cb(struct CE_handle *copyeng, void (*custom_cb)(void *), 1522 void *custom_cb_context) 1523 { 1524 struct CE_state *CE_state = (struct CE_state *)copyeng; 1525 1526 CE_state->custom_cb = custom_cb; 1527 CE_state->custom_cb_context = custom_cb_context; 1528 qdf_atomic_init(&CE_state->custom_cb_pending); 1529 } 1530 1531 void 1532 ce_unregister_custom_cb(struct CE_handle *copyeng) 1533 { 1534 struct CE_state *CE_state = (struct CE_state *)copyeng; 1535 1536 qdf_assert_always(!qdf_atomic_read(&CE_state->custom_cb_pending)); 1537 CE_state->custom_cb = NULL; 1538 CE_state->custom_cb_context = NULL; 1539 } 1540 1541 void 1542 ce_enable_custom_cb(struct CE_handle *copyeng) 1543 { 1544 struct CE_state *CE_state = (struct CE_state *)copyeng; 1545 int32_t custom_cb_pending; 1546 1547 qdf_assert_always(CE_state->custom_cb); 1548 qdf_assert_always(CE_state->custom_cb_context); 1549 1550 custom_cb_pending = qdf_atomic_inc_return(&CE_state->custom_cb_pending); 1551 qdf_assert_always(custom_cb_pending >= 1); 1552 } 1553 1554 void 1555 ce_disable_custom_cb(struct CE_handle *copyeng) 1556 { 1557 struct CE_state *CE_state = (struct CE_state *)copyeng; 1558 1559 qdf_assert_always(CE_state->custom_cb); 1560 qdf_assert_always(CE_state->custom_cb_context); 1561 1562 qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending); 1563 } 1564 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */ 1565 1566 bool 
ce_get_rx_pending(struct hif_softc *scn) 1567 { 1568 int CE_id; 1569 1570 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { 1571 struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; 1572 1573 if (qdf_atomic_read(&CE_state->rx_pending)) 1574 return true; 1575 } 1576 1577 return false; 1578 } 1579 1580 /** 1581 * ce_check_rx_pending() - ce_check_rx_pending 1582 * @CE_state: context of the copy engine to check 1583 * 1584 * Return: true if there per_engine_service 1585 * didn't process all the rx descriptors. 1586 */ 1587 bool ce_check_rx_pending(struct CE_state *CE_state) 1588 { 1589 if (qdf_atomic_read(&CE_state->rx_pending)) 1590 return true; 1591 else 1592 return false; 1593 } 1594 qdf_export_symbol(ce_check_rx_pending); 1595 1596 #ifdef IPA_OFFLOAD 1597 #ifdef QCN7605_SUPPORT 1598 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state) 1599 { 1600 u_int32_t ctrl_addr = CE_state->ctrl_addr; 1601 struct hif_softc *scn = CE_state->scn; 1602 qdf_dma_addr_t wr_index_addr; 1603 1604 wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr); 1605 return wr_index_addr; 1606 } 1607 #else 1608 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state) 1609 { 1610 struct hif_softc *scn = CE_state->scn; 1611 qdf_dma_addr_t wr_index_addr; 1612 1613 wr_index_addr = CE_BASE_ADDRESS(CE_state->id) + 1614 SR_WR_INDEX_ADDRESS; 1615 return wr_index_addr; 1616 } 1617 #endif 1618 1619 /** 1620 * ce_ipa_get_resource() - get uc resource on copyengine 1621 * @ce: copyengine context 1622 * @ce_sr: copyengine source ring resource info 1623 * @ce_sr_ring_size: copyengine source ring size 1624 * @ce_reg_paddr: copyengine register physical address 1625 * 1626 * Copy engine should release resource to micro controller 1627 * Micro controller needs 1628 * - Copy engine source descriptor base address 1629 * - Copy engine source descriptor size 1630 * - PCI BAR address to access copy engine register 1631 * 1632 * Return: None 1633 */ 1634 void 
ce_ipa_get_resource(struct CE_handle *ce,
		    qdf_shared_mem_t **ce_sr,
		    uint32_t *ce_sr_ring_size,
		    qdf_dma_addr_t *ce_reg_paddr)
{
	struct CE_state *CE_state = (struct CE_state *)ce;
	uint32_t ring_loop;
	struct CE_src_desc *ce_desc;
	qdf_dma_addr_t phy_mem_base;
	struct hif_softc *scn = CE_state->scn;

	/* CE not in use: report a zero ring so the uC gets nothing */
	if (CE_UNUSED == CE_state->state) {
		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
		*ce_sr_ring_size = 0;
		return;
	}

	/* Update default value for descriptor */
	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
	     ring_loop++) {
		ce_desc = (struct CE_src_desc *)
			  ((char *)CE_state->src_ring->base_addr_owner_space +
			   ring_loop * (sizeof(struct CE_src_desc)));
		CE_IPA_RING_INIT(ce_desc);
	}

	/* Get BAR address */
	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);

	*ce_sr = CE_state->scn->ipa_ce_ring;
	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
		sizeof(struct CE_src_desc));
	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);

}

#endif /* IPA_OFFLOAD */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_dump_desc_data_buf() - record ce descriptor events
 * @buf: buffer to copy to
 * @pos: Current position till which the buf is filled
 * @data: Data to be copied
 * @data_len: Length of the data to be copied
 *
 * NOTE(review): hex_dump_to_buffer() is always given
 * CE_DEBUG_DATA_PER_ROW (16) bytes per row, even when the remaining
 * data is shorter — presumably the event data buffer is always
 * CE_DEBUG_MAX_DATA_BUF_SIZE bytes, so the over-read stays inside the
 * allocation; confirm against hif_ce_desc_data_record().
 */
static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
				       uint8_t *data, uint32_t data_len)
{
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
			CE_DEBUG_MAX_DATA_BUF_SIZE);

	if ((data_len > 0) && data) {
		if (data_len < 16) {
			hex_dump_to_buffer(data,
					   CE_DEBUG_DATA_PER_ROW,
					   16, 1, buf + pos,
					   (ssize_t)PAGE_SIZE - pos,
					   false);
			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
		} else {
			/* one extra row covers any trailing partial row */
			uint32_t rows = (data_len / 16) + 1;
			uint32_t row = 0;

			for (row = 0; row < rows; row++) {
				hex_dump_to_buffer(data + (row * 16),
						   CE_DEBUG_DATA_PER_ROW,
						   16, 1, buf + pos,
						   (ssize_t)PAGE_SIZE
						   - pos, false);
				pos +=
				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
				pos += snprintf(buf + pos, PAGE_SIZE - pos,
						"\n");
			}
		}
	}

	return pos;
}
#endif

/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/* Map a CE debug event enum value to its printable name */
static const char *ce_event_type_to_str(enum hif_ce_event_type type)
{
	switch (type) {
	case HIF_RX_DESC_POST:
		return "HIF_RX_DESC_POST";
	case HIF_RX_DESC_COMPLETION:
		return "HIF_RX_DESC_COMPLETION";
	case HIF_TX_GATHER_DESC_POST:
		return "HIF_TX_GATHER_DESC_POST";
	case HIF_TX_DESC_POST:
		return "HIF_TX_DESC_POST";
	case HIF_TX_DESC_SOFTWARE_POST:
		return "HIF_TX_DESC_SOFTWARE_POST";
	case HIF_TX_DESC_COMPLETION:
		return "HIF_TX_DESC_COMPLETION";
	case FAST_RX_WRITE_INDEX_UPDATE:
		return "FAST_RX_WRITE_INDEX_UPDATE";
	case FAST_RX_SOFTWARE_INDEX_UPDATE:
		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_UPDATE:
		return "FAST_TX_WRITE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
	case FAST_TX_SOFTWARE_INDEX_UPDATE:
		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
	case RESUME_WRITE_INDEX_UPDATE:
		return "RESUME_WRITE_INDEX_UPDATE";
	case HIF_IRQ_EVENT:
		return "HIF_IRQ_EVENT";
	case HIF_CE_TASKLET_ENTRY:
		return "HIF_CE_TASKLET_ENTRY";
	case HIF_CE_TASKLET_RESCHEDULE:
		return "HIF_CE_TASKLET_RESCHEDULE";
	case HIF_CE_TASKLET_EXIT:
		return "HIF_CE_TASKLET_EXIT";
	case HIF_CE_REAP_ENTRY:
		return "HIF_CE_REAP_ENTRY";
	case HIF_CE_REAP_EXIT:
		return "HIF_CE_REAP_EXIT";
	case NAPI_SCHEDULE:
		return "NAPI_SCHEDULE";
	case NAPI_POLL_ENTER:
		return "NAPI_POLL_ENTER";
	case NAPI_COMPLETE:
		return "NAPI_COMPLETE";
	case NAPI_POLL_EXIT:
		return "NAPI_POLL_EXIT";
	case HIF_RX_NBUF_ALLOC_FAILURE:
		return "HIF_RX_NBUF_ALLOC_FAILURE";
	case HIF_RX_NBUF_MAP_FAILURE:
		return "HIF_RX_NBUF_MAP_FAILURE";
	case HIF_RX_NBUF_ENQUEUE_FAILURE:
		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
	default:
		return "invalid";
	}
}

/**
 * hif_dump_desc_event() - record ce descriptor events
 * @scn: HIF context
 * @buf: Buffer to which to be copied
 *
 * Formats the single history entry selected by ce_hist->hist_id /
 * hist_index (set via hif_input_desc_trace_buf_index) into @buf.
 */
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
{
	struct hif_ce_desc_event *event;
	uint64_t secs, usecs;
	ssize_t len = 0;
	struct ce_desc_hist *ce_hist = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (ce_hist->hist_id >= CE_COUNT_MAX ||
	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	hist_ev =
		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];

	if (!hist_ev) {
		qdf_print("Low Memory");
		return -EINVAL;
	}

	event = &hist_ev[ce_hist->hist_index];

	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);

	len += snprintf(buf, PAGE_SIZE - len,
			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
			secs, usecs, ce_hist->hist_id,
			ce_event_type_to_str(event->type),
			event->index, event->memory);
#ifdef HIF_CE_DEBUG_DATA_BUF
	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
			event->actual_data_len);
#endif

	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");

	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
			   16, 1, buf + len,
			   (ssize_t)PAGE_SIZE - len, false);
	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

#ifdef HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[ce_hist->hist_id])
		len = hif_dump_desc_data_buf(buf, len, event->data,
					     (event->actual_data_len <
					      CE_DEBUG_MAX_DATA_BUF_SIZE) ?
					     event->actual_data_len :
					     CE_DEBUG_MAX_DATA_BUF_SIZE);
#endif /*HIF_CE_DEBUG_DATA_BUF*/

	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");

	return len;
}

/*
 * hif_input_desc_trace_buf_index() -
 * API to get the CE id and CE debug storage buffer index
 *
 * @dev: network device
 * @attr: sysfs attribute
 * @buf: data got from the user
 *
 * Return total length
 */
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size)
{
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (!size) {
		qdf_nofl_err("%s: Invalid input buffer.", __func__);
		return -EINVAL;
	}

	/* Expected input: "<ce_id> <history_index>" */
	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
		   (unsigned int *)&ce_hist->hist_index) != 2) {
		qdf_nofl_err("%s: Invalid input value.", __func__);
		return -EINVAL;
	}
	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
	    (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	return size;
}

#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

#ifdef HIF_CE_DEBUG_DATA_BUF
/*
 * hif_ce_en_desc_hist() -
 * API to enable recording the CE desc history
 *
 * @dev: network device
 * @attr: sysfs attribute
 * @buf: buffer to copy the data.
1898 * 1899 * Starts recording the ce desc history 1900 * 1901 * Return total length copied 1902 */ 1903 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size) 1904 { 1905 struct ce_desc_hist *ce_hist = NULL; 1906 uint32_t cfg = 0; 1907 uint32_t ce_id = 0; 1908 1909 if (!scn) 1910 return -EINVAL; 1911 1912 ce_hist = &scn->hif_ce_desc_hist; 1913 1914 if (!size) { 1915 qdf_nofl_err("%s: Invalid input buffer.", __func__); 1916 return -EINVAL; 1917 } 1918 1919 if (sscanf(buf, "%u %u", (unsigned int *)&ce_id, 1920 (unsigned int *)&cfg) != 2) { 1921 qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.", 1922 __func__); 1923 return -EINVAL; 1924 } 1925 if (ce_id >= CE_COUNT_MAX) { 1926 qdf_print("Invalid value CE Id"); 1927 return -EINVAL; 1928 } 1929 1930 if ((cfg > 1 || cfg < 0)) { 1931 qdf_print("Invalid values: enter 0 or 1"); 1932 return -EINVAL; 1933 } 1934 1935 if (!ce_hist->hist_ev[ce_id]) 1936 return -EINVAL; 1937 1938 qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]); 1939 if (cfg == 1) { 1940 if (ce_hist->data_enable[ce_id] == 1) { 1941 qdf_debug("Already Enabled"); 1942 } else { 1943 if (alloc_mem_ce_debug_hist_data(scn, ce_id) 1944 == QDF_STATUS_E_NOMEM){ 1945 ce_hist->data_enable[ce_id] = 0; 1946 qdf_err("%s:Memory Alloc failed", __func__); 1947 } else 1948 ce_hist->data_enable[ce_id] = 1; 1949 } 1950 } else if (cfg == 0) { 1951 if (ce_hist->data_enable[ce_id] == 0) { 1952 qdf_debug("Already Disabled"); 1953 } else { 1954 ce_hist->data_enable[ce_id] = 0; 1955 free_mem_ce_debug_hist_data(scn, ce_id); 1956 } 1957 } 1958 qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]); 1959 1960 return size; 1961 } 1962 1963 /* 1964 * hif_disp_ce_enable_desc_data_hist() - 1965 * API to display value of data_enable 1966 * 1967 * @dev: network device 1968 * @attr: sysfs attribute 1969 * @buf: buffer to copy the data. 
 *
 * Return total length copied
 */
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
{
	ssize_t len = 0;
	uint32_t ce_id = 0;
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	/* One "CE<n>: <0|1>" line per copy engine */
	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
				ce_id, ce_hist->data_enable[ce_id]);
	}

	return len;
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef OL_ATH_SMART_LOGGING
#define GUARD_SPACE 10
#define LOG_ID_SZ 4
/*
 * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
 * @src_ring: SRC ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init:Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
 * the given buf, skb_sz is the max buffer size to be copied
 *
 * NOTE(review): when the remaining space is too small, the write cursor
 * wraps back to buf_init (circular overwrite), so earlier content may be
 * overwritten rather than truncated.
 *
 * Return: Current pointer in ring buffer
 */
static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
				    uint8_t *buf_cur, uint8_t *buf_init,
				    uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_src_desc *src_ring_base;
	uint32_t len, entry;
	struct CE_src_desc *src_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE)) {
		buf_cur = buf_init;
	}

	/* ring header first, then one record per descriptor */
	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < src_ring->nentries; entry++) {
		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
		nbuf = src_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len  = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_src_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, src_desc,
				     sizeof(struct CE_src_desc));
			buf_cur += sizeof(struct CE_src_desc);

			available_buf = buf_sz - (buf_cur - buf_init);
			/* "SKB<len>" tag precedes the copied skb bytes */
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);

			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, src_desc,
				     sizeof(struct CE_src_desc));
			buf_cur += sizeof(struct CE_src_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			/* no skb attached to this slot */
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}

	return buf_cur;
}

/*
 * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
 * @dest_ring: SRC ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init:Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
 * the given buf, skb_sz is the max buffer size to be copied
 *
 * Return: Current pointer in ring buffer
 */
static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
				     uint8_t *buf_cur, uint8_t *buf_init,
				     uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_dest_desc *dest_ring_base;
	uint32_t len, entry;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE)) {
		buf_cur = buf_init;
	}

	/* ring header first, then one record per descriptor */
	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < dest_ring->nentries; entry++) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);

		nbuf = dest_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len  = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_dest_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);

			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}

			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);
			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}
	return buf_cur;
}

/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF context
 * @buf_cur: current write position in the destination buffer
 * @buf_init: start of the destination buffer
 * @buf_sz: size of the destination buffer
 * @ce: copy engine id to dump
 * @skb_sz: max bytes to copy from each attached skb
 *
2159 * Calls the respective function to dump all the CE SRC/DEST ring descriptors 2160 * and buffers pointed by them in to the given buf 2161 */ 2162 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur, 2163 uint8_t *buf_init, uint32_t buf_sz, 2164 uint32_t ce, uint32_t skb_sz) 2165 { 2166 struct CE_state *ce_state; 2167 struct CE_ring_state *src_ring; 2168 struct CE_ring_state *dest_ring; 2169 2170 ce_state = scn->ce_id_to_state[ce]; 2171 src_ring = ce_state->src_ring; 2172 dest_ring = ce_state->dest_ring; 2173 2174 if (src_ring) { 2175 buf_cur = hif_log_src_ce_dump(src_ring, buf_cur, 2176 buf_init, buf_sz, skb_sz); 2177 } else if (dest_ring) { 2178 buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur, 2179 buf_init, buf_sz, skb_sz); 2180 } 2181 2182 return buf_cur; 2183 } 2184 2185 qdf_export_symbol(hif_log_dump_ce); 2186 #endif /* OL_ATH_SMART_LOGGING */ 2187 2188