/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hif_napi.h"
#include "qdf_module.h"

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

/**
 * hif_ce_war_disable() - disable ce war globally
 */
void hif_ce_war_disable(void)
{
	hif_ce_war1 = 0;
}

/**
 * hif_ce_war_enable() - enable ce war globally
 */
void hif_ce_war_enable(void)
{
	hif_ce_war1 = 1;
}

/*
 * Note: For MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) check also
 * needs to hold for this block to be compiled in.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
#define CE_DEBUG_DATA_PER_ROW 16

static const char *ce_event_type_to_str(enum hif_ce_event_type type);

int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;

	return record_index;
}

qdf_export_symbol(get_next_record_index);
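
/*
 * Example (illustrative only): with array_size = 4 and the counter
 * currently at 2, successive calls return record indices 3, 0, 1, ...
 * The atomic counter is rebased by array_size exactly when it reaches
 * array_size, so it never grows without bound, and any out-of-range
 * value observed under concurrent increments is folded back into range
 * by the while loop. A hypothetical caller-side history table:
 *
 *	int slot = get_next_record_index(&idx, HIF_CE_HISTORY_MAX);
 *	history[slot] = new_event;
 */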

#ifdef HIF_CE_DEBUG_DATA_BUF
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
	uint8_t *data = NULL;

	if (!event->data) {
		hif_err_rl("No ce debug memory allocated");
		return;
	}

	if (event->memory && len > 0)
		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);

	event->actual_data_len = 0;
	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);

	if (data && len > 0) {
		qdf_mem_copy(event->data, data,
			     ((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
			      len : CE_DEBUG_MAX_DATA_BUF_SIZE));
		event->actual_data_len = len;
	}
}

qdf_export_symbol(hif_ce_desc_data_record);

void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
	qdf_mem_zero(event,
		     offsetof(struct hif_ce_desc_event, data));
}

qdf_export_symbol(hif_clear_ce_desc_debug_data);
#else
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
}

qdf_export_symbol(hif_clear_ce_desc_debug_data);
#endif /* HIF_CE_DEBUG_DATA_BUF */

#if defined(HIF_RECORD_PADDR)
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t memory)
{
	if (memory) {
		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
					scn->qdf_dev,
					event->dma_addr);

		event->virt_to_phy =
			virt_to_phys(qdf_nbuf_data(memory));
	}
}
#endif /* HIF_RECORD_PADDR */

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 * @len: length of the data attached to the descriptor, if any
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor,
			      void *memory, int index,
			      int len)
{
	int record_index;
	struct hif_ce_desc_event *event;

	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id >= CE_COUNT_MAX)
		return;

	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	hif_clear_ce_desc_debug_data(event);

	event->type = type;
	event->time = qdf_get_log_timestamp();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     sizeof(union ce_desc));

	event->memory = memory;
	event->index = index;

	if (event->type == HIF_RX_DESC_POST ||
	    event->type == HIF_RX_DESC_COMPLETION)
		hif_ce_desc_record_rx_paddr(scn, event, memory);

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);
}
qdf_export_symbol(hif_record_ce_desc_event);
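
/*
 * Example (illustrative sketch): a CE fast path that just posted a TX
 * descriptor would record the event as below; ce_state, src_desc, msdu,
 * write_index and len stand for the caller's local state (this mirrors
 * the call made in ce_send_single() later in this file):
 *
 *	hif_record_ce_desc_event(scn, ce_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc, msdu,
 *				 write_index, len);
 *
 * The call is a no-op unless the history for that CE id is enabled.
 */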

/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @scn: hif_softc
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	qdf_atomic_init(&ce_hist->history_index[ce_id]);
	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
}

/**
 * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: hif_softc
 * @ce_id: copy engine id for which we are deinitializing the log
 */
inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
}

#else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
void hif_record_ce_desc_event(struct hif_softc *scn,
			      int ce_id, enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len)
{
}
qdf_export_symbol(hif_record_ce_desc_event);

inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
				      int size)
{
}

void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
}
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

#ifdef NAPI_YIELD_BUDGET_BASED
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count);

	/* Set receive_count to MAX_NUM_OF_RECEIVES when this count goes
	 * beyond MAX_NUM_OF_RECEIVES, for the sake of the NAPI budget
	 * calculation. This can happen in fast path handling, as
	 * processing is done in batches.
	 */
	if (yield)
		ce_state->receive_count = MAX_NUM_OF_RECEIVES;

	return yield;
}
#else
/**
 * hif_ce_service_should_yield() - return true if the service is hogging the cpu
 * @scn: hif context
 * @ce_state: context of the copy engine being serviced
 *
 * Return: true if the service should yield
 */
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;

	time_limit_reached =
		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;

	if (!time_limit_reached)
		rxpkt_thresh_reached = hif_max_num_receives_reached
					(scn, ce_state->receive_count);

	/* Set receive_count to MAX_NUM_OF_RECEIVES when this count goes
	 * beyond MAX_NUM_OF_RECEIVES, for the sake of the NAPI budget
	 * calculation. This can happen in fast path handling, as
	 * processing is done in batches.
	 */
	if (rxpkt_thresh_reached)
		ce_state->receive_count = MAX_NUM_OF_RECEIVES;

	yield = time_limit_reached || rxpkt_thresh_reached;

	if (yield &&
	    ce_state->htt_rx_data &&
	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
		hif_napi_update_yield_stats(ce_state,
					    time_limit_reached,
					    rxpkt_thresh_reached);
	}

	return yield;
}
qdf_export_symbol(hif_ce_service_should_yield);
#endif
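
/*
 * Example (illustrative sketch): a per-CE service loop is expected to
 * poll hif_ce_service_should_yield() between completions and bail out
 * once the time budget (ce_service_yield_time, armed in
 * ce_per_engine_service() below) or the receive threshold is hit;
 * more_work() and process_one_completion() are hypothetical:
 *
 *	while (more_work(ce_state)) {
 *		process_one_completion(ce_state);
 *		if (hif_ce_service_should_yield(scn, ce_state))
 *			break;	// reschedule and continue later
 *	}
 */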

/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */

void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(scn, indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;

			local_irq_save(irq_flags);
			hif_write32_mb(scn, indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(scn, indicator_addr);
			/* conservative */
			(void)hif_read32_mb(scn, indicator_addr);

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(scn, indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else {
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	}
}

qdf_export_symbol(war_ce_src_ring_write_idx_set);

int
ce_send(struct CE_handle *copyeng,
	void *per_transfer_context,
	qdf_dma_addr_t buffer,
	uint32_t nbytes,
	uint32_t transfer_id,
	uint32_t flags,
	uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = hif_state->ce_services->ce_send_nolock(copyeng,
			per_transfer_context, buffer, nbytes,
			transfer_id, flags, user_flag);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
qdf_export_symbol(ce_send);

unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;

	sl->num_items = 0;
}

int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t flags,
		    uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return QDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return QDF_STATUS_SUCCESS;
}

int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_sendlist_send(copyeng,
			per_transfer_context, sendlist, transfer_id);
}

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
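
/*
 * Example (illustrative sketch): a caller builds a sendlist on the
 * stack, adds DMA-mapped fragments, and hands it off in one call; the
 * fragment addresses/lengths and contexts here are stand-ins:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0, 0);
 *	ce_sendlist_send(copyeng, per_transfer_ctx, &sendlist,
 *			 transfer_id);
 *
 * ce_sendlist_buf_add() fails with QDF_STATUS_E_RESOURCES once
 * CE_SENDLIST_ITEMS_MAX items have been added.
 */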

/**
 * ce_batch_send() - send a batch of msdus at once
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : list of msdus to be sent
 * @transfer_id : transfer id
 * @len : Downloaded length
 * @sendhead : sendhead
 *
 * Assumption: called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Send each msdu
 * 2. Increment the write index accordingly.
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
			 uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	/* A_target_id_t targid = TARGID(scn); */

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	int deltacount = 0;
	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

	while (msdu) {
		tempnext = qdf_nbuf_next(msdu);

		if (deltacount < 2) {
			if (sendhead)
				return msdu;
			HIF_ERROR("%s: Out of descriptors", __func__);
			src_ring->write_index = write_index;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);

			sw_index = src_ring->sw_index;
			write_index = src_ring->write_index;

			deltacount = CE_RING_DELTA(nentries_mask, write_index,
						   sw_index - 1);
			if (!freelist) {
				freelist = msdu;
				hfreelist = msdu;
			} else {
				qdf_nbuf_set_next(freelist, msdu);
				freelist = msdu;
			}
			qdf_nbuf_set_next(msdu, NULL);
			msdu = tempnext;
			continue;
		}

		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
							   write_index);

		src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

		lsrc_desc.meta_data = transfer_id;
		if (len > msdu->len)
			len = msdu->len;
		lsrc_desc.nbytes = len;
		/* Data packet is a byte stream, so disable byte swap */
		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
		lsrc_desc.gather = 0; /* For the last one, gather is not set */

		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];

		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		if (sendhead)
			break;
		qdf_nbuf_set_next(msdu, NULL);
		msdu = tempnext;
	}

	src_ring->write_index = write_index;
	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return hfreelist;
}

/**
 * ce_update_tx_ring() - advance the sw index
 * @ce_tx_hdl : pointer to CE handle
 * @num_htt_cmpls : htt completions received.
 *
 * Function:
 * Increment the value of the sw index of the src ring
 * according to the number of htt completions
 * received.
 *
 * Return: void
 */
#ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t nentries_mask = src_ring->nentries_mask;

	/*
	 * Advance the s/w index:
	 * This effectively simulates completing the CE ring descriptors
	 */
	src_ring->sw_index =
		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
				num_htt_cmpls);
}
#else
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{}
#endif

/**
 * ce_send_single() - send a single msdu
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : msdu to be sent
 * @transfer_id : transfer id
 * @len : Downloaded length
 *
 * Function:
 * 1. Send one msdu
 * 2. Increment the write index of the src ring accordingly.
 *
 * Return: QDF_STATUS: CE sent status
 */
QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	/* A_target_id_t targid = TARGID(scn); */

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	enum hif_ce_event_type event_type;

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
				       sw_index - 1) < 1)) {
		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
			  write_index, sw_index);
		return QDF_STATUS_E_RESOURCES;
	}

	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);

	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

	lsrc_desc.meta_data = transfer_id;
	lsrc_desc.nbytes = len;
	/* Data packet is a byte stream, so disable byte swap */
	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
	lsrc_desc.gather = 0; /* For the last one, gather is not set */

	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];

	src_ring->per_transfer_context[write_index] = msdu;

	if (((struct CE_src_desc *)src_desc)->gather)
		event_type = HIF_TX_GATHER_DESC_POST;
	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
		event_type = HIF_TX_DESC_SOFTWARE_POST;
	else
		event_type = HIF_TX_DESC_POST;

	hif_record_ce_desc_event(scn, ce_state->id, event_type,
				 (union ce_desc *)src_desc, msdu,
				 write_index, len);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	src_ring->write_index = write_index;

	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
			per_recv_context, buffer);
}
qdf_export_symbol(ce_recv_buf_enqueue);

void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				 low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				  high_alert_nentries);
}

unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
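
/*
 * Worked example (illustrative): as used throughout this file,
 * CE_RING_DELTA(mask, from, to) effectively evaluates to
 * (to - from) & mask, i.e. how many entries can still be written
 * before the write pointer catches the read pointer. For a 512-entry
 * ring (nentries_mask = 0x1ff) with write_index = 510 and
 * sw_index = 5, ce_send_entries_avail() returns
 * (5 - 1 - 510) & 0x1ff = 6. One slot is deliberately sacrificed
 * (sw_index - 1) so a completely full ring remains distinguishable
 * from an empty one.
 */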

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services;

	ce_services = hif_state->ce_services;
	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_services->ce_completed_recv_next_nolock(CE_state,
				per_CE_contextp, per_transfer_contextp, bufferp,
				nbytesp, transfer_idp, flagsp);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
			per_CE_contextp, per_transfer_contextp, bufferp);
}

QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_cancel_send_next
		(copyeng, per_CE_contextp, per_transfer_contextp,
		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
}
qdf_export_symbol(ce_cancel_send_next);

int
ce_completed_send_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp,
		       unsigned int *sw_idx,
		       unsigned int *hw_idx,
		       unsigned int *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services;
	int status;

	ce_services = hif_state->ce_services;
	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_services->ce_completed_send_next_nolock(CE_state,
				per_CE_contextp, per_transfer_contextp,
				bufferp, nbytesp, transfer_idp, sw_idx,
				hw_idx, toeplitz_hash_result);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
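
/*
 * Example (illustrative sketch): a caller polling for TX completions
 * with ce_completed_send_next(); the out-parameters echo what was
 * supplied when the send was posted, and complete_tx() is a
 * hypothetical caller hook:
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t buf;
 *	unsigned int nbytes, id, sw_idx, hw_idx, hash;
 *
 *	while (ce_completed_send_next(copyeng, &ce_ctx, &xfer_ctx, &buf,
 *				      &nbytes, &id, &sw_idx, &hw_idx,
 *				      &hash) == QDF_STATUS_SUCCESS)
 *		complete_tx(xfer_ctx);
 */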

#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service; the only difference is that
 * ce_per_engine_service does both receive processing and reaping of
 * completed descriptors, while this function only handles reaping of
 * Tx complete descriptors. It is called from the threshold reap poll
 * routine hif_send_complete_check, so it must not contain any receive
 * functionality.
 */

void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, 0, 0);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spin lock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make the minimum change, i.e., only address the issue seen
	 * in this function. The possible negative effect of this minimum
	 * change is that, in the future, if some other function is also
	 * opened up to user context, those cases will need to be
	 * addressed by changing spin_lock to spin_lock_bh as well.
	 */

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
			struct ce_ops *ce_services = hif_state->ce_services;
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_services->ce_completed_send_next_nolock
				 (CE_state, &CE_context,
				  &transfer_context, &buf,
				  &nbytes, &id, &sw_idx, &hw_idx,
				  &toeplitz_hash_result) ==
				 QDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					qdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					qdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					qdf_spin_lock_bh(&pipe_info->
						completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					qdf_spin_unlock_bh(&pipe_info->
						completion_freeq_lock);
				}
			}
		}
	}

	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, 0, 0);
	Q_TARGET_ACCESS_END(scn);
}

#endif /* ATH_11AC_TXCOMPACT */

/*
 * ce_engine_service_reg:
 *
 * Called from ce_per_engine_service and goes through the regular interrupt
 * handling that does not involve the WLAN fast path feature.
 *
 * Returns void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	unsigned int more_comp_cnt = 0;
	unsigned int more_snd_comp_cnt = 0;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	uint32_t mode = hif_get_conparam(scn);

more_completions:
	if (CE_state->recv_cb) {

		/* Pop completed recv buffers and call
		 * the registered recv callback for each
		 */
		while (hif_state->ce_services->ce_completed_recv_next_nolock
				(CE_state, &CE_context, &transfer_context,
				 &buf, &nbytes, &id, &flags) ==
				QDF_STATUS_SUCCESS) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->recv_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, flags);

			qdf_spin_lock(&CE_state->ce_index_lock);
			/*
			 * EV #112693 -
			 * [Peregrine][ES1][WB342][Win8x86][Performance]
			 * BSoD_0x133 occurred in VHT80 UDP_DL
			 * Break out of the DPC by force if the number of
			 * loops in hif_pci_ce_recv_data reaches
			 * MAX_NUM_OF_RECEIVES, to avoid spending too long
			 * in the DPC for each interrupt, and schedule
			 * another DPC to avoid data loss if the force-break
			 * action was taken. This currently applies to
			 * Windows OS only; Linux/MAC OS can extend it to
			 * their platforms if necessary.
			 */

			/* Break the receive processing by
			 * force if force_break is set
			 */
			if (qdf_unlikely(CE_state->force_break)) {
				qdf_atomic_set(&CE_state->rx_pending, 1);
				return;
			}
		}
	}

	/*
	 * Attention: the while loop below could in principle spin
	 * indefinitely under send stress; resolve it the same way as the
	 * receive case (refer to EV #112693).
	 */

	if (CE_state->send_cb) {
		/* Pop completed send buffers and call
		 * the registered send callback for each
		 */

#ifdef ATH_11AC_TXCOMPACT
		while (hif_state->ce_services->ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			  &transfer_context, &buf, &nbytes,
			  &id, &sw_idx, &hw_idx,
			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {

			if (CE_id != CE_HTT_H2T_MSG ||
			    QDF_IS_EPPING_ENABLED(mode)) {
				qdf_spin_unlock(&CE_state->ce_index_lock);
				CE_state->send_cb((struct CE_handle *)CE_state,
						  CE_context, transfer_context,
						  buf, nbytes, id, sw_idx,
						  hw_idx, toeplitz_hash_result);
				qdf_spin_lock(&CE_state->ce_index_lock);
			} else {
				struct HIF_CE_pipe_info *pipe_info =
					(struct HIF_CE_pipe_info *)CE_context;

				qdf_spin_lock_bh(&pipe_info->
					completion_freeq_lock);
				pipe_info->num_sends_allowed++;
				qdf_spin_unlock_bh(&pipe_info->
					completion_freeq_lock);
			}
		}
#else /* ATH_11AC_TXCOMPACT */
		while (hif_state->ce_services->ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			  &transfer_context, &buf, &nbytes,
			  &id, &sw_idx, &hw_idx,
			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->send_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, sw_idx, hw_idx,
					  toeplitz_hash_result);
			qdf_spin_lock(&CE_state->ce_index_lock);
		}
#endif /* ATH_11AC_TXCOMPACT */
	}

more_watermarks:
	if (CE_state->misc_cbs) {
		if (CE_state->watermark_cb &&
		    hif_state->ce_services->watermark_int(CE_state,
							  &flags)) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			/* Convert HW IS bits to software flags */
			CE_state->watermark_cb((struct CE_handle *)CE_state,
					       CE_state->wm_context, flags);
			qdf_spin_lock(&CE_state->ce_index_lock);
		}
	}

	/*
	 * Clear the misc interrupts (watermark) that were handled above,
	 * and that will be checked again below.
	 * Clear and check for copy-complete interrupts again, just in case
	 * more copy completions happened while the misc interrupts were being
	 * handled.
	 */
	if (!ce_srng_based(scn)) {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					CE_WATERMARK_MASK |
					HOST_IS_COPY_COMPLETE_MASK);
		} else {
			qdf_atomic_set(&CE_state->rx_pending, 0);
			hif_err_rl("%s: target access is not allowed",
				   __func__);
			return;
		}
	}

	/*
	 * Now that per-engine interrupts are cleared, verify that
	 * no recv interrupts arrive while processing send interrupts,
	 * and no recv or send interrupts happened while processing
	 * misc interrupts. Go back and check again. Keep checking until
	 * we find no more events to process.
	 */
	if (CE_state->recv_cb &&
	    hif_state->ce_services->ce_recv_entries_done_nolock(scn,
			CE_state)) {
		if (QDF_IS_EPPING_ENABLED(mode) ||
		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			if (!ce_srng_based(scn)) {
				HIF_ERROR(
					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
					__func__,
					CE_state->dest_ring->nentries_mask,
					CE_state->dest_ring->sw_index,
					CE_DEST_RING_READ_IDX_GET(scn,
							CE_state->ctrl_addr));
			}
		}
	}

	if (CE_state->send_cb &&
	    hif_state->ce_services->ce_send_entries_done_nolock(scn,
			CE_state)) {
		if (QDF_IS_EPPING_ENABLED(mode) ||
		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			if (!ce_srng_based(scn)) {
				HIF_ERROR(
					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
					__func__,
					CE_state->src_ring->nentries_mask,
					CE_state->src_ring->sw_index,
					CE_SRC_RING_READ_IDX_GET(scn,
							CE_state->ctrl_addr));
			}
		}
	}

	if (CE_state->misc_cbs && CE_state->watermark_cb) {
		if (hif_state->ce_services->watermark_int(CE_state, &flags))
			goto more_watermarks;
	}

	qdf_atomic_set(&CE_state->rx_pending, 0);
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 *
 * Returns: number of messages processed
 */
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
		return CE_state->receive_count;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		HIF_ERROR("[premature rc=0]");
		return 0; /* no work done */
	}

	/* Clear force_break flag and re-initialize receive_count to 0 */
	CE_state->receive_count = 0;
	CE_state->force_break = 0;
	CE_state->ce_service_start_time = sched_clock();
	CE_state->ce_service_yield_time =
		CE_state->ce_service_start_time +
		hif_get_ce_service_max_yield_time(
			(struct hif_opaque_softc *)scn);

	qdf_spin_lock(&CE_state->ce_index_lock);

	CE_state->service(scn, CE_id);

	qdf_spin_unlock(&CE_state->ce_index_lock);

	if (Q_TARGET_ACCESS_END(scn) < 0)
		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
	return CE_state->receive_count;
}
qdf_export_symbol(ce_per_engine_service);
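
/*
 * Example (illustrative sketch): a per-CE tasklet or IRQ bottom half is
 * expected to hand the engine id to ce_per_engine_service() and may use
 * the returned message count for NAPI-style accounting; the entry point
 * below is a stand-in, with scn and CE_id recovered from the tasklet's
 * context:
 *
 *	static void ce_tasklet_fn(unsigned long data)
 *	{
 *		struct my_tasklet_ctx *ctx = (struct my_tasklet_ctx *)data;
 *		int processed;
 *
 *		processed = ce_per_engine_service(ctx->scn, ctx->ce_id);
 *	}
 */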

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */

void ce_per_engine_service_any(int irq, struct hif_softc *scn)
{
	int CE_id;
	uint32_t intr_summary;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

			if (qdf_atomic_read(&CE_state->rx_pending)) {
				qdf_atomic_set(&CE_state->rx_pending, 0);
				ce_per_engine_service(scn, CE_id);
			}
		}

		Q_TARGET_ACCESS_END(scn);
		return;
	}

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
		if (intr_summary & (1 << CE_id))
			intr_summary &= ~(1 << CE_id);
		else
			continue;	/* no intr pending on this CE */

		ce_per_engine_service(scn, CE_id);
	}

	Q_TARGET_ACCESS_END(scn);
}

/*
 * Iterate the CE_state list and disable the compl interrupt
 * if it has been registered already.
 */
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
{
	int CE_id;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
		uint32_t ctrl_addr = CE_state->ctrl_addr;

		/* if the interrupt is currently enabled, disable it */
		if (!CE_state->disable_copy_compl_intr
		    && (CE_state->send_cb || CE_state->recv_cb))
			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);

		if (CE_state->watermark_cb)
			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
	}
	Q_TARGET_ACCESS_END(scn);
}

void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
{
	int CE_id;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
		uint32_t ctrl_addr = CE_state->ctrl_addr;

		/*
		 * If the CE is supposed to have copy complete interrupts
		 * enabled (i.e. there is a callback registered, and the
		 * "disable" flag is not set), then re-enable the interrupt.
		 */
		if (!CE_state->disable_copy_compl_intr
		    && (CE_state->send_cb || CE_state->recv_cb))
			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);

		if (CE_state->watermark_cb)
			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
	}
	Q_TARGET_ACCESS_END(scn);
}
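
/*
 * Example (illustrative sketch): the two helpers above are intended to
 * bracket a polled section so completion interrupts stay quiet while
 * the rings are drained by hand; poll_all_ce_rings() is a hypothetical
 * polling pass:
 *
 *	ce_disable_any_copy_compl_intr_nolock(scn);
 *	poll_all_ce_rings(scn);
 *	ce_enable_any_copy_compl_intr_nolock(scn);
 *
 * Both helpers skip CEs whose disable_copy_compl_intr flag is set or
 * that have no send/recv callback registered.
 */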

/**
 * ce_send_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing tx completions
 * @ce_send_context: context to pass into the callback
 * @disable_interrupts: whether interrupts should be disabled or not
 *
 * Caller should guarantee that no transactions are in progress before
 * switching the callback function.
 *
 * Registers the send context before the fn pointer so that if the cb is valid
 * the context should be valid.
 *
 * Beware that currently this function will enable completion interrupts.
 */
void
ce_send_cb_register(struct CE_handle *copyeng,
		    ce_send_cb fn_ptr,
		    void *ce_send_context, int disable_interrupts)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn;
	struct HIF_CE_state *hif_state;

	if (!CE_state) {
		HIF_ERROR("%s: Error CE state = NULL", __func__);
		return;
	}
	scn = CE_state->scn;
	hif_state = HIF_GET_CE_STATE(scn);
	if (!hif_state) {
		HIF_ERROR("%s: Error HIF state = NULL", __func__);
		return;
	}
	CE_state->send_context = ce_send_context;
	CE_state->send_cb = fn_ptr;
	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
							disable_interrupts);
}
qdf_export_symbol(ce_send_cb_register);

/**
 * ce_recv_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing rx completions
 * @CE_recv_context: context to pass into the callback
 * @disable_interrupts: whether interrupts should be disabled or not
 *
 * Registers the recv context before the fn pointer so that if the cb is valid
 * the context should be valid.
 *
 * Caller should guarantee that no transactions are in progress before
 * switching the callback function.
 */
void
ce_recv_cb_register(struct CE_handle *copyeng,
		    CE_recv_cb fn_ptr,
		    void *CE_recv_context, int disable_interrupts)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn;
	struct HIF_CE_state *hif_state;

	if (!CE_state) {
		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
		return;
	}
	scn = CE_state->scn;
	hif_state = HIF_GET_CE_STATE(scn);
	if (!hif_state) {
		HIF_ERROR("%s: Error HIF state = NULL", __func__);
		return;
	}
	CE_state->recv_context = CE_recv_context;
	CE_state->recv_cb = fn_ptr;
	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
							disable_interrupts);
}
qdf_export_symbol(ce_recv_cb_register);

/**
 * ce_watermark_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing watermark events
 * @CE_wm_context: context to pass into the callback
 *
 * Caller should guarantee that no watermark events are being processed before
 * switching the callback function.
 */
void
ce_watermark_cb_register(struct CE_handle *copyeng,
			 CE_watermark_cb fn_ptr, void *CE_wm_context)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	CE_state->watermark_cb = fn_ptr;
	CE_state->wm_context = CE_wm_context;
	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
							     0);
	if (fn_ptr)
		CE_state->misc_cbs = 1;
}

bool ce_get_rx_pending(struct hif_softc *scn)
{
	int CE_id;

	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

		if (qdf_atomic_read(&CE_state->rx_pending))
			return true;
	}

	return false;
}
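
/*
 * Example (illustrative sketch): a pipe setup path would typically
 * register its completion handlers right after the CE is created; the
 * handler names and pipe_info context are stand-ins:
 *
 *	ce_send_cb_register(copyeng, my_tx_done_cb, pipe_info, 0);
 *	ce_recv_cb_register(copyeng, my_rx_done_cb, pipe_info, 0);
 *	ce_watermark_cb_register(copyeng, my_wm_cb, pipe_info);
 *
 * Per the notes above, callbacks must only be swapped while no
 * transactions are in flight, and registering a send callback enables
 * completion interrupts as a side effect.
 */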

/**
 * ce_check_rx_pending() - ce_check_rx_pending
 * @CE_state: context of the copy engine to check
 *
 * Return: true if the per-engine service
 * didn't process all the rx descriptors.
 */
bool ce_check_rx_pending(struct CE_state *CE_state)
{
	if (qdf_atomic_read(&CE_state->rx_pending))
		return true;
	else
		return false;
}
qdf_export_symbol(ce_check_rx_pending);

#ifdef IPA_OFFLOAD
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine should release its resources to the microcontroller.
 * The microcontroller needs
 * - the copy engine source descriptor base address
 * - the copy engine source descriptor size
 * - the PCI BAR address to access the copy engine register
 *
 * Return: None
 */
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
	struct CE_state *CE_state = (struct CE_state *)ce;
	uint32_t ring_loop;
	struct CE_src_desc *ce_desc;
	qdf_dma_addr_t phy_mem_base;
	struct hif_softc *scn = CE_state->scn;

	if (CE_UNUSED == CE_state->state) {
		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
		*ce_sr_ring_size = 0;
		return;
	}

	/* Update default value for descriptor */
	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
	     ring_loop++) {
		ce_desc = (struct CE_src_desc *)
			  ((char *)CE_state->src_ring->base_addr_owner_space +
			   ring_loop * (sizeof(struct CE_src_desc)));
		CE_IPA_RING_INIT(ce_desc);
	}

	/* Get BAR address */
	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);

	*ce_sr = CE_state->scn->ipa_ce_ring;
	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
		sizeof(struct CE_src_desc));
	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
			SR_WR_INDEX_ADDRESS;
}
#endif /* IPA_OFFLOAD */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
 * @buf: buffer to copy to
 * @pos: current position up to which the buf is filled
 * @data: data to be copied
 * @data_len: length of the data to be copied
 *
 * Return: updated position in the output buffer
 */
static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
				       uint8_t *data, uint32_t data_len)
{
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
			CE_DEBUG_MAX_DATA_BUF_SIZE);

	if ((data_len > 0) && data) {
		if (data_len < 16) {
			hex_dump_to_buffer(data,
					   CE_DEBUG_DATA_PER_ROW,
					   16, 1, buf + pos,
					   (ssize_t)PAGE_SIZE - pos,
					   false);
			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
		} else {
			uint32_t rows = (data_len / 16) + 1;
			uint32_t row = 0;

			for (row = 0; row < rows; row++) {
				hex_dump_to_buffer(data + (row * 16),
						   CE_DEBUG_DATA_PER_ROW,
						   16, 1, buf + pos,
						   (ssize_t)PAGE_SIZE
						   - pos, false);
				pos +=
				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
				pos += snprintf(buf + pos, PAGE_SIZE - pos,
						"\n");
			}
		}
	}

	return pos;
}
#endif
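
/*
 * Worked example (illustrative): hex_dump_to_buffer() with rowsize
 * CE_DEBUG_DATA_PER_ROW (16) and groupsize 1 emits "xx xx ... xx",
 * i.e. two hex digits plus a separating space per byte, minus the
 * trailing space. That is exactly CE_DEBUG_PRINT_BUF_SIZE(x) = 3x - 1
 * characters: a full 16-byte row occupies 3 * 16 - 1 = 47 characters
 * before the newline appended above.
 */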

/*
 * Note: For MCL, the #if defined(HIF_CONFIG_SLUB_DEBUG_ON) check also
 * needs to hold for this block to be compiled in.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
static const char *ce_event_type_to_str(enum hif_ce_event_type type)
{
	switch (type) {
	case HIF_RX_DESC_POST:
		return "HIF_RX_DESC_POST";
	case HIF_RX_DESC_COMPLETION:
		return "HIF_RX_DESC_COMPLETION";
	case HIF_TX_GATHER_DESC_POST:
		return "HIF_TX_GATHER_DESC_POST";
	case HIF_TX_DESC_POST:
		return "HIF_TX_DESC_POST";
	case HIF_TX_DESC_SOFTWARE_POST:
		return "HIF_TX_DESC_SOFTWARE_POST";
	case HIF_TX_DESC_COMPLETION:
		return "HIF_TX_DESC_COMPLETION";
	case FAST_RX_WRITE_INDEX_UPDATE:
		return "FAST_RX_WRITE_INDEX_UPDATE";
	case FAST_RX_SOFTWARE_INDEX_UPDATE:
		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_UPDATE:
		return "FAST_TX_WRITE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
	case FAST_TX_SOFTWARE_INDEX_UPDATE:
		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
	case RESUME_WRITE_INDEX_UPDATE:
		return "RESUME_WRITE_INDEX_UPDATE";
	case HIF_IRQ_EVENT:
		return "HIF_IRQ_EVENT";
	case HIF_CE_TASKLET_ENTRY:
		return "HIF_CE_TASKLET_ENTRY";
	case HIF_CE_TASKLET_RESCHEDULE:
		return "HIF_CE_TASKLET_RESCHEDULE";
	case HIF_CE_TASKLET_EXIT:
		return "HIF_CE_TASKLET_EXIT";
	case HIF_CE_REAP_ENTRY:
		return "HIF_CE_REAP_ENTRY";
	case HIF_CE_REAP_EXIT:
		return "HIF_CE_REAP_EXIT";
	case NAPI_SCHEDULE:
		return "NAPI_SCHEDULE";
	case NAPI_POLL_ENTER:
		return "NAPI_POLL_ENTER";
	case NAPI_COMPLETE:
		return "NAPI_COMPLETE";
	case NAPI_POLL_EXIT:
		return "NAPI_POLL_EXIT";
	case HIF_RX_NBUF_ALLOC_FAILURE:
		return "HIF_RX_NBUF_ALLOC_FAILURE";
	case HIF_RX_NBUF_MAP_FAILURE:
		return "HIF_RX_NBUF_MAP_FAILURE";
	case HIF_RX_NBUF_ENQUEUE_FAILURE:
		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
	default:
		return "invalid";
	}
}

/**
 * hif_dump_desc_event() - dump the selected ce descriptor event to a buffer
 * @scn: hif_softc
 * @buf: buffer to which the event is to be copied
 *
 * The CE id and event index are taken from the hist_id and hist_index
 * previously stored in the descriptor history.
 *
 * Return: number of bytes written, or a negative errno
 */
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
{
	struct hif_ce_desc_event *event;
	uint64_t secs, usecs;
	ssize_t len = 0;
	struct ce_desc_hist *ce_hist = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (ce_hist->hist_id >= CE_COUNT_MAX ||
	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	hist_ev =
		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];

	if (!hist_ev) {
		qdf_print("Low Memory");
		return -EINVAL;
	}

	event = &hist_ev[ce_hist->hist_index];

	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);

	len += snprintf(buf, PAGE_SIZE - len,
			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
			secs, usecs, ce_hist->hist_id,
			ce_event_type_to_str(event->type),
			event->index, event->memory);
#ifdef HIF_CE_DEBUG_DATA_BUF
	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%lu",
			event->actual_data_len);
#endif

	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");

	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
			   16, 1, buf + len,
			   (ssize_t)PAGE_SIZE - len, false);
	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

#ifdef HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[ce_hist->hist_id])
		len = hif_dump_desc_data_buf(buf, len, event->data,
					     (event->actual_data_len <
					      CE_DEBUG_MAX_DATA_BUF_SIZE) ?
					     event->actual_data_len :
					     CE_DEBUG_MAX_DATA_BUF_SIZE);
#endif /* HIF_CE_DEBUG_DATA_BUF */

	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");

	return len;
}

/*
 * hif_input_desc_trace_buf_index() -
 * API to set the CE id and CE debug storage buffer index
 *
 * @scn: hif_softc
 * @buf: data got from the user
 * @size: size of the input buffer
 *
 * Return: total length consumed, or a negative errno
 */
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size)
{
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (!size) {
		qdf_nofl_err("%s: Invalid input buffer.", __func__);
		return -EINVAL;
	}

	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
		   (unsigned int *)&ce_hist->hist_index) != 2) {
		qdf_nofl_err("%s: Invalid input value.", __func__);
		return -EINVAL;
	}
	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
	    (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	return size;
}

#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
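
/*
 * Example (illustrative; the exact sysfs node name depends on the
 * platform glue that wires this handler up): selecting event 12 of
 * CE 5 for the next hif_dump_desc_event() read could look like
 *
 *	echo "5 12" > /sys/.../ce_desc_debug_index
 *
 * The two unsigned integers are parsed as "<ce_id> <history_index>"
 * and rejected unless ce_id < CE_COUNT_MAX and
 * history_index < HIF_CE_HISTORY_MAX.
 */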

#ifdef HIF_CE_DEBUG_DATA_BUF
/*
 * hif_ce_en_desc_hist() -
 * API to enable/disable recording the CE desc data history
 *
 * @scn: hif_softc
 * @buf: data got from the user
 * @size: size of the input buffer
 *
 * Starts/stops recording the ce desc data history
 *
 * Return: total length consumed, or a negative errno
 */
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
{
	struct ce_desc_hist *ce_hist = NULL;
	uint32_t cfg = 0;
	uint32_t ce_id = 0;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (!size) {
		qdf_nofl_err("%s: Invalid input buffer.", __func__);
		return -EINVAL;
	}

	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
		   (unsigned int *)&cfg) != 2) {
		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
			     __func__);
		return -EINVAL;
	}
	if (ce_id >= CE_COUNT_MAX) {
		qdf_print("Invalid value CE Id");
		return -EINVAL;
	}

	/* cfg is unsigned, so only the upper bound needs checking */
	if (cfg > 1) {
		qdf_print("Invalid values: enter 0 or 1");
		return -EINVAL;
	}

	if (!ce_hist->hist_ev[ce_id])
		return -EINVAL;

	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
	if (cfg == 1) {
		if (ce_hist->data_enable[ce_id] == 1) {
			qdf_debug("Already Enabled");
		} else {
			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
			    == QDF_STATUS_E_NOMEM) {
				ce_hist->data_enable[ce_id] = 0;
				qdf_err("%s:Memory Alloc failed", __func__);
			} else
				ce_hist->data_enable[ce_id] = 1;
		}
	} else if (cfg == 0) {
		if (ce_hist->data_enable[ce_id] == 0) {
			qdf_debug("Already Disabled");
		} else {
			ce_hist->data_enable[ce_id] = 0;
			free_mem_ce_debug_hist_data(scn, ce_id);
		}
	}
	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);

	return size;
}
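
/*
 * Example (illustrative; the exact sysfs node name depends on the
 * platform glue): enabling data capture for CE 1 and disabling it
 * again could look like
 *
 *	echo "1 1" > /sys/.../ce_desc_data_enable
 *	echo "1 0" > /sys/.../ce_desc_data_enable
 *
 * Enabling allocates the per-event data buffers for that CE (logging
 * an error if the allocation fails); disabling frees them again, all
 * under ce_dbg_datamem_lock.
 */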

/*
 * hif_disp_ce_enable_desc_data_hist() -
 * API to display the value of data_enable for each CE
 *
 * @scn: hif_softc
 * @buf: buffer to copy the data into
 *
 * Return: total length copied
 */
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
{
	ssize_t len = 0;
	uint32_t ce_id = 0;
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
				ce_id, ce_hist->data_enable[ce_id]);
	}

	return len;
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef OL_ATH_SMART_LOGGING
#define GUARD_SPACE 10
#define LOG_ID_SZ 4
/*
 * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
 * @src_ring: SRC ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE SRC ring descriptors and the buffers pointed to by them
 * into the given buf; skb_sz is the max buffer size to be copied
 *
 * Return: Current pointer in ring buffer
 */
static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
				    uint8_t *buf_cur, uint8_t *buf_init,
				    uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_src_desc *src_ring_base;
	uint32_t len, entry;
	struct CE_src_desc *src_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE))
		buf_cur = buf_init;

	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < src_ring->nentries; entry++) {
		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
		nbuf = src_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_src_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;

			qdf_mem_copy(buf_cur, src_desc,
				     sizeof(struct CE_src_desc));
			buf_cur += sizeof(struct CE_src_desc);

			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);

			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;

			qdf_mem_copy(buf_cur, src_desc,
				     sizeof(struct CE_src_desc));
			buf_cur += sizeof(struct CE_src_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}

	return buf_cur;
}
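
/*
 * Layout note (derived from the code above): each dump pass first
 * emits a struct CE_ring_state snapshot, then, per ring entry, either
 *
 *	[CE_src_desc]["SKB<len>"][<len> bytes of skb data]
 *
 * for an occupied slot, or [CE_src_desc]["NUL"] for an empty one. The
 * write position wraps back to buf_init whenever fewer than
 * len + GUARD_SPACE bytes remain, so the consumer must treat the
 * buffer as circular.
 */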

/*
 * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
 * @dest_ring: DEST ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE DEST ring descriptors and the buffers pointed to by them
 * into the given buf; skb_sz is the max buffer size to be copied
 *
 * Return: Current pointer in ring buffer
 */
static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
				     uint8_t *buf_cur, uint8_t *buf_init,
				     uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_dest_desc *dest_ring_base;
	uint32_t len, entry;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE))
		buf_cur = buf_init;

	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < dest_ring->nentries; entry++) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);

		nbuf = dest_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_dest_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);

			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;

			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);
			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;

			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}
	return buf_cur;
}

/**
 * hif_log_dump_ce() - Copy the requested CE SRC/DEST ring to buf
 * @scn: hif_softc
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: CE id of the copy engine to dump
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and the buffers pointed to by them into the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz)
{
	struct CE_state *ce_state;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;

	ce_state = scn->ce_id_to_state[ce];
	src_ring = ce_state->src_ring;
	dest_ring = ce_state->dest_ring;

	if (src_ring) {
		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
					      buf_init, buf_sz, skb_sz);
	} else if (dest_ring) {
		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
					       buf_init, buf_sz, skb_sz);
	}

	return buf_cur;
}

qdf_export_symbol(hif_log_dump_ce);
#endif /* OL_ATH_SMART_LOGGING */