/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hif_napi.h"
#include "qdf_module.h"

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

/**
 * hif_ce_war_disable() - disable ce war globally
 */
void hif_ce_war_disable(void)
{
	hif_ce_war1 = 0;
}

/**
 * hif_ce_war_enable() - enable ce war globally
 */
void hif_ce_war_enable(void)
{
	hif_ce_war1 = 1;
}

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * before it is used here.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
#define CE_DEBUG_DATA_PER_ROW 16

static const char *ce_event_type_to_str(enum hif_ce_event_type type);

int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;

	return record_index;
}

qdf_export_symbol(get_next_record_index);
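/*
 * Example (illustrative only, not part of the driver): the helper above
 * returns a slot in a circular history table. A caller that owns a
 * qdf_atomic_t index and a fixed-size array would use it as follows;
 * "my_hist" and MY_HIST_MAX are hypothetical names.
 *
 *	struct hif_ce_desc_event my_hist[MY_HIST_MAX];
 *	qdf_atomic_t my_hist_index;
 *
 *	int idx = get_next_record_index(&my_hist_index, MY_HIST_MAX);
 *	my_hist[idx].time = qdf_get_log_timestamp();
 *
 * The atomic increment makes the slot reservation safe against concurrent
 * callers; the folding loop maps the returned value back into
 * [0, MY_HIST_MAX).
 */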
#ifdef HIF_CE_DEBUG_DATA_BUF
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
	uint8_t *data = NULL;

	if (!event->data) {
		hif_err_rl("No ce debug memory allocated");
		return;
	}

	if (event->memory && len > 0)
		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);

	event->actual_data_len = 0;
	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);

	if (data && len > 0) {
		qdf_mem_copy(event->data, data,
			     ((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
			      len : CE_DEBUG_MAX_DATA_BUF_SIZE));
		event->actual_data_len = len;
	}
}

qdf_export_symbol(hif_ce_desc_data_record);

void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
	qdf_mem_zero(event,
		     offsetof(struct hif_ce_desc_event, data));
}

qdf_export_symbol(hif_clear_ce_desc_debug_data);
#else
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
}

qdf_export_symbol(hif_clear_ce_desc_debug_data);
#endif /* HIF_CE_DEBUG_DATA_BUF */

#if defined(HIF_RECORD_PADDR)
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t memory)
{
	if (memory) {
		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
					scn->qdf_dev,
					event->dma_addr);

		event->virt_to_phy =
			virt_to_phys(qdf_nbuf_data(memory));
	}
}
#endif /* HIF_RECORD_PADDR */

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 * @len: length of the data associated with the descriptor
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor,
			      void *memory, int index,
			      int len)
{
	int record_index;
	struct hif_ce_desc_event *event;

	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id >= CE_COUNT_MAX)
		return;

	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	hif_clear_ce_desc_debug_data(event);

	event->type = type;
	event->time = qdf_get_log_timestamp();
	event->cpu_id = qdf_get_cpu();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     sizeof(union ce_desc));

	event->memory = memory;
	event->index = index;

	if (event->type == HIF_RX_DESC_POST ||
	    event->type == HIF_RX_DESC_COMPLETION)
		hif_ce_desc_record_rx_paddr(scn, event, memory);

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);
}
qdf_export_symbol(hif_record_ce_desc_event);
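/*
 * Example (illustrative only): a TX path that has just written a source
 * descriptor could record the post before updating the write index.
 * "scn", "ce_id", "desc", "nbuf", "write_index" and "len" stand for the
 * caller's own state here.
 *
 *	hif_record_ce_desc_event(scn, ce_id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)desc, nbuf,
 *				 write_index, len);
 *
 * When recording is disabled for the CE (ce_hist->enable[ce_id] == 0) the
 * call returns early, so it is cheap enough to leave in place.
 */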
/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @scn: hif context
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	qdf_atomic_init(&ce_hist->history_index[ce_id]);
	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
}

/**
 * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: hif context
 * @ce_id: copy engine id for which we are deinitializing the log
 */
inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
}

#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
void hif_record_ce_desc_event(struct hif_softc *scn,
			      int ce_id, enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len)
{
}
qdf_export_symbol(hif_record_ce_desc_event);

inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
				      int size)
{
}

void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
}
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

#ifdef NAPI_YIELD_BUDGET_BASED
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count);

	/* Set receive_count to MAX_NUM_OF_RECEIVES when the count goes
	 * beyond MAX_NUM_OF_RECEIVES, to keep the NAPI budget calculation
	 * sane. This can happen in the fast path because processing is
	 * done in batches.
	 */
	if (yield)
		ce_state->receive_count = MAX_NUM_OF_RECEIVES;

	return yield;
}
#else
/**
 * hif_ce_service_should_yield() - return true if the service is hogging the cpu
 * @scn: hif context
 * @ce_state: context of the copy engine being serviced
 *
 * Return: true if the service should yield
 */
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;

	time_limit_reached =
		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;

	if (!time_limit_reached)
		rxpkt_thresh_reached = hif_max_num_receives_reached
					(scn, ce_state->receive_count);

	/* Set receive_count to MAX_NUM_OF_RECEIVES when the count goes
	 * beyond MAX_NUM_OF_RECEIVES, to keep the NAPI budget calculation
	 * sane. This can happen in the fast path because processing is
	 * done in batches.
	 */
	if (rxpkt_thresh_reached)
		ce_state->receive_count = MAX_NUM_OF_RECEIVES;

	yield = time_limit_reached || rxpkt_thresh_reached;

	if (yield &&
	    ce_state->htt_rx_data &&
	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
		hif_napi_update_yield_stats(ce_state,
					    time_limit_reached,
					    rxpkt_thresh_reached);
	}

	return yield;
}
qdf_export_symbol(hif_ce_service_should_yield);
#endif
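/*
 * Example (illustrative only): a per-CE service loop would consult
 * hif_ce_service_should_yield() between completions so that one engine
 * cannot monopolize the CPU; "more_work" and "process_one_completion"
 * are hypothetical stand-ins for the real completion handling.
 *
 *	while (more_work(ce_state)) {
 *		process_one_completion(scn, ce_state);
 *		ce_state->receive_count++;
 *		if (hif_ce_service_should_yield(scn, ce_state))
 *			break;	// reschedule and resume later
 *	}
 *
 * ce_per_engine_service() below arms ce_service_yield_time before
 * entering its loop, which is what the time-limit branch checks.
 */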
/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */

void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(scn, indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;

			local_irq_save(irq_flags);
			hif_write32_mb(scn, indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(scn, indicator_addr);
			/* conservative */
			(void)hif_read32_mb(scn, indicator_addr);

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(scn, indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else {
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	}
}

qdf_export_symbol(war_ce_src_ring_write_idx_set);

QDF_STATUS
ce_send(struct CE_handle *copyeng,
	void *per_transfer_context,
	qdf_dma_addr_t buffer,
	uint32_t nbytes,
	uint32_t transfer_id,
	uint32_t flags,
	uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	QDF_STATUS status;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = hif_state->ce_services->ce_send_nolock(copyeng,
			per_transfer_context, buffer, nbytes,
			transfer_id, flags, user_flag);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
qdf_export_symbol(ce_send);

unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;

	sl->num_items = 0;
}

QDF_STATUS
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t flags,
		    uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return QDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_sendlist_send(copyeng,
			per_transfer_context, sendlist, transfer_id);
}

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
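/*
 * Example (illustrative only): building and sending a two-fragment
 * transfer with the sendlist API above. "ce_hdl", "ctx", the fragment
 * addresses and lengths are hypothetical caller state, and the gather
 * flag name is assumed from ce_api.h.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len,
 *			    CE_SEND_FLAG_GATHER, 0);
 *	ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0, 0);
 *	ce_sendlist_send(ce_hdl, ctx, &sendlist, transfer_id);
 *
 * ce_sendlist_buf_add() only stages items in the caller-owned list;
 * nothing touches the ring until ce_sendlist_send().
 */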
/**
 * ce_batch_send() - send a batch of msdus at once
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : list of msdus to be sent
 * @transfer_id : transfer id
 * @len : downloaded length
 * @sendhead : sendhead
 *
 * Assumption: called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Send each msdu
 * 2. Increment the write index accordingly.
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
			 uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	/* A_target_id_t targid = TARGID(scn); */

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	int deltacount = 0;
	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

	while (msdu) {
		tempnext = qdf_nbuf_next(msdu);

		if (deltacount < 2) {
			if (sendhead)
				return msdu;
			hif_err("Out of descriptors");
			src_ring->write_index = write_index;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);

			sw_index = src_ring->sw_index;
			write_index = src_ring->write_index;

			deltacount = CE_RING_DELTA(nentries_mask, write_index,
						   sw_index - 1);
			if (!freelist) {
				freelist = msdu;
				hfreelist = msdu;
			} else {
				qdf_nbuf_set_next(freelist, msdu);
				freelist = msdu;
			}
			qdf_nbuf_set_next(msdu, NULL);
			msdu = tempnext;
			continue;
		}

		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
							   write_index);

		src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

		lsrc_desc.meta_data = transfer_id;
		if (len > msdu->len)
			len = msdu->len;
		lsrc_desc.nbytes = len;
		/* Data packet is a byte stream, so disable byte swap */
		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
		lsrc_desc.gather = 0; /* For the last one, gather is not set */

		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];

		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		if (sendhead)
			break;
		qdf_nbuf_set_next(msdu, NULL);
		msdu = tempnext;
	}

	src_ring->write_index = write_index;
	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return hfreelist;
}
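/*
 * Example (illustrative only): the ring-space arithmetic used above.
 * With nentries_mask = nentries - 1 (nentries a power of two),
 * CE_RING_DELTA(mask, write_index, sw_index - 1) yields the number of
 * free slots, keeping one slot unused so a full ring can be told apart
 * from an empty one. For an 8-entry ring (mask 0x7):
 *
 *	write_index = 5, sw_index = 3
 *	CE_RING_DELTA(0x7, 5, 3 - 1) = (2 - 5) & 0x7 = 5 free slots
 *
 * which matches the "deltacount < 2" guard: one slot for the descriptor
 * plus the reserved empty slot.
 */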
/**
 * ce_update_tx_ring() - advance the sw index
 * @ce_tx_hdl : pointer to CE handle
 * @num_htt_cmpls : number of htt completions received
 *
 * Function:
 * Increment the sw index of the src ring
 * according to the number of htt completions
 * received.
 *
 * Return: void
 */
#ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t nentries_mask = src_ring->nentries_mask;

	/*
	 * Advance the s/w index:
	 * This effectively simulates completing the CE ring descriptors
	 */
	src_ring->sw_index =
		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
				num_htt_cmpls);
}
#else
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{}
#endif

/**
 * ce_send_single() - send a single msdu
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : msdu to be sent
 * @transfer_id : transfer id
 * @len : downloaded length
 *
 * Function:
 * 1. Send one msdu
 * 2. Increment the write index of the src ring accordingly.
 *
 * Return: QDF_STATUS: CE sent status
 */
QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	/* A_target_id_t targid = TARGID(scn); */

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	enum hif_ce_event_type event_type;

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
				       sw_index - 1) < 1)) {
		hif_err("ce send fail %d %d %d", nentries_mask,
			write_index, sw_index);
		return QDF_STATUS_E_RESOURCES;
	}

	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);

	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

	lsrc_desc.meta_data = transfer_id;
	lsrc_desc.nbytes = len;
	/* Data packet is a byte stream, so disable byte swap */
	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
	lsrc_desc.gather = 0; /* For the last one, gather is not set */

	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];

	src_ring->per_transfer_context[write_index] = msdu;

	if (((struct CE_src_desc *)src_desc)->gather)
		event_type = HIF_TX_GATHER_DESC_POST;
	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
		event_type = HIF_TX_DESC_SOFTWARE_POST;
	else
		event_type = HIF_TX_DESC_POST;

	hif_record_ce_desc_event(scn, ce_state->id, event_type,
				 (union ce_desc *)src_desc, msdu,
				 write_index, len);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	src_ring->write_index = write_index;

	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return QDF_STATUS_SUCCESS;
}
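/*
 * Example (illustrative only): ce_send_single() above follows the post
 * order that matters on weakly ordered buses: fill the descriptor,
 * publish the per-transfer context, record the event, and only then
 * move the write index so the target sees a fully formed descriptor.
 * A caller would use it roughly as:
 *
 *	status = ce_send_single(ce_tx_hdl, msdu, transfer_id,
 *				qdf_nbuf_len(msdu));
 *	if (QDF_IS_STATUS_ERROR(status))
 *		;	// ring full: QDF_STATUS_E_RESOURCES, retry later
 */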
/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
QDF_STATUS
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
			per_recv_context, buffer);
}
qdf_export_symbol(ce_recv_buf_enqueue);

void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				 low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				  high_alert_nentries);
}

unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
QDF_STATUS
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	QDF_STATUS status;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services;

	ce_services = hif_state->ce_services;
	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_services->ce_completed_recv_next_nolock(CE_state,
				per_CE_contextp, per_transfer_contextp, bufferp,
				nbytesp, transfer_idp, flagsp);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
			per_CE_contextp, per_transfer_contextp, bufferp);
}

QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_cancel_send_next
		(copyeng, per_CE_contextp, per_transfer_contextp,
		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
}
qdf_export_symbol(ce_cancel_send_next);

QDF_STATUS
ce_completed_send_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp,
		       unsigned int *sw_idx,
		       unsigned int *hw_idx,
		       unsigned int *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services;
	QDF_STATUS status;

	ce_services = hif_state->ce_services;
	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_services->ce_completed_send_next_nolock(CE_state,
				per_CE_contextp, per_transfer_contextp,
				bufferp, nbytesp, transfer_idp, sw_idx,
				hw_idx, toeplitz_hash_result);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
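/*
 * Example (illustrative only): draining send completions with the API
 * above; the out-parameters receive the posted buffer, its length and
 * the transfer id that were supplied at send time.
 * "complete_one_tx" is a hypothetical per-buffer handler.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t buf;
 *	unsigned int nbytes, id, sw_idx, hw_idx, hash;
 *
 *	while (ce_completed_send_next(copyeng, &ce_ctx, &xfer_ctx,
 *				      &buf, &nbytes, &id, &sw_idx,
 *				      &hw_idx, &hash) ==
 *	       QDF_STATUS_SUCCESS)
 *		complete_one_tx(xfer_ctx, nbytes);
 */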
#ifdef ATH_11AC_TXCOMPACT
/*
 * CE engine descriptor reap.
 * Similar to ce_per_engine_service; the only difference is that
 * ce_per_engine_service does both receive processing and reaping of
 * completed descriptors, while this function only reaps Tx completion
 * descriptors. It is called from the threshold reap poll routine
 * hif_send_complete_check, so it must not contain any receive
 * functionality.
 */

void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, 0, 0);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spinlock changes are needed. Due to data path
	 * performance concerns, after internal discussion we decided
	 * to make the minimum change, i.e., only address the issue seen
	 * in this function. The possible negative effect of this minimum
	 * change is that, if some other function is later also opened up
	 * to user context, those cases will need to be addressed by
	 * changing spin_lock to spin_lock_bh as well.
	 */

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
			struct ce_ops *ce_services = hif_state->ce_services;
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_services->ce_completed_send_next_nolock
				 (CE_state, &CE_context,
				  &transfer_context, &buf,
				  &nbytes, &id, &sw_idx, &hw_idx,
				  &toeplitz_hash_result) ==
				 QDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					qdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					qdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					qdf_spin_lock_bh(&pipe_info->
						 completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					qdf_spin_unlock_bh(&pipe_info->
						   completion_freeq_lock);
				}
			}
		}
	}

	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, 0, 0);
	Q_TARGET_ACCESS_END(scn);
}

#endif /* ATH_11AC_TXCOMPACT */
/*
 * ce_engine_service_reg:
 *
 * Called from ce_per_engine_service and goes through the regular interrupt
 * handling that does not involve the WLAN fast path feature.
 *
 * Returns void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	unsigned int more_comp_cnt = 0;
	unsigned int more_snd_comp_cnt = 0;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	uint32_t mode = hif_get_conparam(scn);

more_completions:
	if (CE_state->recv_cb) {

		/* Pop completed recv buffers and call
		 * the registered recv callback for each
		 */
		while (hif_state->ce_services->ce_completed_recv_next_nolock
				(CE_state, &CE_context, &transfer_context,
				 &buf, &nbytes, &id, &flags) ==
				QDF_STATUS_SUCCESS) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->recv_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, flags);

			qdf_spin_lock(&CE_state->ce_index_lock);
			/*
			 * EV #112693 -
			 * [Peregrine][ES1][WB342][Win8x86][Performance]
			 * BSoD_0x133 occurred in VHT80 UDP_DL.
			 * Break out of the DPC by force if the number of
			 * loops in hif_pci_ce_recv_data reaches
			 * MAX_NUM_OF_RECEIVES, to avoid spending too much
			 * time in the DPC for each interrupt, and schedule
			 * another DPC to avoid data loss if we had taken a
			 * force-break action before. This applies to
			 * Windows OS only currently; Linux/macOS can adopt
			 * it for their platforms if necessary.
			 */

			/* Break the receive processing by
			 * force if force_break is set
			 */
			if (qdf_unlikely(CE_state->force_break)) {
				qdf_atomic_set(&CE_state->rx_pending, 1);
				return;
			}
		}
	}
	/*
	 * Attention: the while loop below may spin indefinitely under a
	 * send stress test. Resolve it the same way as the receive case
	 * (refer to EV #112693).
	 */

	if (CE_state->send_cb) {
		/* Pop completed send buffers and call
		 * the registered send callback for each
		 */

#ifdef ATH_11AC_TXCOMPACT
		while (hif_state->ce_services->ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			  &transfer_context, &buf, &nbytes,
			  &id, &sw_idx, &hw_idx,
			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {

			if (CE_id != CE_HTT_H2T_MSG ||
			    QDF_IS_EPPING_ENABLED(mode)) {
				qdf_spin_unlock(&CE_state->ce_index_lock);
				CE_state->send_cb((struct CE_handle *)CE_state,
						  CE_context, transfer_context,
						  buf, nbytes, id, sw_idx,
						  hw_idx, toeplitz_hash_result);
				qdf_spin_lock(&CE_state->ce_index_lock);
			} else {
				struct HIF_CE_pipe_info *pipe_info =
					(struct HIF_CE_pipe_info *)CE_context;

				qdf_spin_lock_bh(&pipe_info->
						 completion_freeq_lock);
				pipe_info->num_sends_allowed++;
				qdf_spin_unlock_bh(&pipe_info->
						   completion_freeq_lock);
			}
		}
#else /* ATH_11AC_TXCOMPACT */
		while (hif_state->ce_services->ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			  &transfer_context, &buf, &nbytes,
			  &id, &sw_idx, &hw_idx,
			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->send_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, sw_idx, hw_idx,
					  toeplitz_hash_result);
			qdf_spin_lock(&CE_state->ce_index_lock);
		}
#endif /* ATH_11AC_TXCOMPACT */
	}

more_watermarks:
	if (CE_state->misc_cbs) {
		if (CE_state->watermark_cb &&
		    hif_state->ce_services->watermark_int(CE_state,
							  &flags)) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			/* Convert HW IS bits to software flags */
			CE_state->watermark_cb((struct CE_handle *)CE_state,
					       CE_state->wm_context, flags);
			qdf_spin_lock(&CE_state->ce_index_lock);
		}
	}

	/*
	 * Clear the misc interrupts (watermark) that were handled above,
	 * and that will be checked again below.
	 * Clear and check for copy-complete interrupts again, just in case
	 * more copy completions happened while the misc interrupts were being
	 * handled.
	 */
	if (!ce_srng_based(scn)) {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
						   CE_WATERMARK_MASK |
						   HOST_IS_COPY_COMPLETE_MASK);
		} else {
			qdf_atomic_set(&CE_state->rx_pending, 0);
			hif_err_rl("%s: target access is not allowed",
				   __func__);
			return;
		}
	}
	/*
	 * Now that the per-engine interrupts are cleared, verify that
	 * no recv interrupts arrived while processing send interrupts,
	 * and that no recv or send interrupts happened while processing
	 * misc interrupts. Go back and check again. Keep checking until
	 * we find no more events to process.
	 */
	if (CE_state->recv_cb &&
	    hif_state->ce_services->ce_recv_entries_done_nolock(scn,
								CE_state)) {
		if (QDF_IS_EPPING_ENABLED(mode) ||
		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			if (!ce_srng_based(scn)) {
				hif_err(
					"Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
					CE_state->dest_ring->nentries_mask,
					CE_state->dest_ring->sw_index,
					CE_DEST_RING_READ_IDX_GET(scn,
							CE_state->ctrl_addr));
			}
		}
	}

	if (CE_state->send_cb &&
	    hif_state->ce_services->ce_send_entries_done_nolock(scn,
								CE_state)) {
		if (QDF_IS_EPPING_ENABLED(mode) ||
		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			if (!ce_srng_based(scn)) {
				hif_err(
					"Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
					CE_state->src_ring->nentries_mask,
					CE_state->src_ring->sw_index,
					CE_SRC_RING_READ_IDX_GET(scn,
							CE_state->ctrl_addr));
			}
		}
	}

	if (CE_state->misc_cbs && CE_state->watermark_cb) {
		if (hif_state->ce_services->watermark_int(CE_state, &flags))
			goto more_watermarks;
	}

	qdf_atomic_set(&CE_state->rx_pending, 0);
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 *
 * Returns: number of messages processed
 */
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
		return CE_state->receive_count;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		hif_err("[premature rc=0]");
		return 0; /* no work done */
	}

	/* Clear force_break flag and re-initialize receive_count to 0 */
	CE_state->receive_count = 0;
	CE_state->force_break = 0;
	CE_state->ce_service_start_time = sched_clock();
	CE_state->ce_service_yield_time =
		CE_state->ce_service_start_time +
		hif_get_ce_service_max_yield_time(
			(struct hif_opaque_softc *)scn);

	qdf_spin_lock(&CE_state->ce_index_lock);

	CE_state->service(scn, CE_id);

	qdf_spin_unlock(&CE_state->ce_index_lock);

	if (Q_TARGET_ACCESS_END(scn) < 0)
		hif_err("<--[premature rc=%d]", CE_state->receive_count);
	return CE_state->receive_count;
}
qdf_export_symbol(ce_per_engine_service);
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */

void ce_per_engine_service_any(int irq, struct hif_softc *scn)
{
	int CE_id;
	uint32_t intr_summary;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

			if (qdf_atomic_read(&CE_state->rx_pending)) {
				qdf_atomic_set(&CE_state->rx_pending, 0);
				ce_per_engine_service(scn, CE_id);
			}
		}

		Q_TARGET_ACCESS_END(scn);
		return;
	}

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
		if (intr_summary & (1 << CE_id))
			intr_summary &= ~(1 << CE_id);
		else
			continue; /* no intr pending on this CE */

		ce_per_engine_service(scn, CE_id);
	}

	Q_TARGET_ACCESS_END(scn);
}

/* Iterate the CE_state list and disable the compl interrupt
 * if it has been registered already.
 */
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
{
	int CE_id;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
		uint32_t ctrl_addr = CE_state->ctrl_addr;

		/* if the interrupt is currently enabled, disable it */
		if (!CE_state->disable_copy_compl_intr
		    && (CE_state->send_cb || CE_state->recv_cb))
			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);

		if (CE_state->watermark_cb)
			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
	}
	Q_TARGET_ACCESS_END(scn);
}

void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
{
	int CE_id;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
		uint32_t ctrl_addr = CE_state->ctrl_addr;

		/*
		 * If the CE is supposed to have copy complete interrupts
		 * enabled (i.e. there is a callback registered, and the
		 * "disable" flag is not set), then re-enable the interrupt.
		 */
		if (!CE_state->disable_copy_compl_intr
		    && (CE_state->send_cb || CE_state->recv_cb))
			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);

		if (CE_state->watermark_cb)
			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
	}
	Q_TARGET_ACCESS_END(scn);
}
/**
 * ce_send_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing tx completions
 * @ce_send_context: context to pass to the tx completion callback
 * @disable_interrupts: whether completion interrupts should be disabled
 *
 * Caller should guarantee that no transactions are in progress before
 * switching the callback function.
 *
 * Registers the send context before the fn pointer so that if the cb is valid
 * the context should be valid.
 *
 * Beware that currently this function will enable completion interrupts.
 */
void
ce_send_cb_register(struct CE_handle *copyeng,
		    ce_send_cb fn_ptr,
		    void *ce_send_context, int disable_interrupts)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn;
	struct HIF_CE_state *hif_state;

	if (!CE_state) {
		hif_err("Error CE state = NULL");
		return;
	}
	scn = CE_state->scn;
	hif_state = HIF_GET_CE_STATE(scn);
	if (!hif_state) {
		hif_err("Error HIF state = NULL");
		return;
	}
	CE_state->send_context = ce_send_context;
	CE_state->send_cb = fn_ptr;
	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
							disable_interrupts);
}
qdf_export_symbol(ce_send_cb_register);

/**
 * ce_recv_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing rx completions
 * @CE_recv_context: context to pass to the rx completion callback
 * @disable_interrupts: whether completion interrupts should be disabled
 *
 * Registers the recv context before the fn pointer so that if the cb is valid
 * the context should be valid.
 *
 * Caller should guarantee that no transactions are in progress before
 * switching the callback function.
 */
void
ce_recv_cb_register(struct CE_handle *copyeng,
		    CE_recv_cb fn_ptr,
		    void *CE_recv_context, int disable_interrupts)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn;
	struct HIF_CE_state *hif_state;

	if (!CE_state) {
		hif_err("ERROR CE state = NULL");
		return;
	}
	scn = CE_state->scn;
	hif_state = HIF_GET_CE_STATE(scn);
	if (!hif_state) {
		hif_err("Error HIF state = NULL");
		return;
	}
	CE_state->recv_context = CE_recv_context;
	CE_state->recv_cb = fn_ptr;
	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
							disable_interrupts);
}
qdf_export_symbol(ce_recv_cb_register);

/**
 * ce_watermark_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing watermark events
 * @CE_wm_context: context to pass to the watermark callback
 *
 * Caller should guarantee that no watermark events are being processed before
 * switching the callback function.
 */
void
ce_watermark_cb_register(struct CE_handle *copyeng,
			 CE_watermark_cb fn_ptr, void *CE_wm_context)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	CE_state->watermark_cb = fn_ptr;
	CE_state->wm_context = CE_wm_context;
	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
							     0);
	if (fn_ptr)
		CE_state->misc_cbs = 1;
}

bool ce_get_rx_pending(struct hif_softc *scn)
{
	int CE_id;

	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

		if (qdf_atomic_read(&CE_state->rx_pending))
			return true;
	}

	return false;
}
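/*
 * Example (illustrative only): a HIF pipe setup path would register its
 * completion handlers with the API above; "hif_pci_ce_send_done" and
 * "hif_pci_ce_recv_data" stand in for the caller's own handlers, and
 * "pipe_info" is its private context.
 *
 *	ce_send_cb_register(pipe_info->ce_hdl, hif_pci_ce_send_done,
 *			    pipe_info, disable_interrupts);
 *	ce_recv_cb_register(pipe_info->ce_hdl, hif_pci_ce_recv_data,
 *			    pipe_info, disable_interrupts);
 *
 * Both calls funnel into ce_per_engine_handler_adjust(), which is why
 * the context is stored before the function pointer.
 */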
/**
 * ce_check_rx_pending() - ce_check_rx_pending
 * @CE_state: context of the copy engine to check
 *
 * Return: true if the per-engine service didn't process all the rx
 *	   descriptors.
 */
bool ce_check_rx_pending(struct CE_state *CE_state)
{
	if (qdf_atomic_read(&CE_state->rx_pending))
		return true;
	else
		return false;
}
qdf_export_symbol(ce_check_rx_pending);

#ifdef IPA_OFFLOAD
#ifdef QCN7605_SUPPORT
static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
{
	u_int32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;
	qdf_dma_addr_t wr_index_addr;

	wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
	return wr_index_addr;
}
#else
static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
{
	struct hif_softc *scn = CE_state->scn;
	qdf_dma_addr_t wr_index_addr;

	wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
			SR_WR_INDEX_ADDRESS;
	return wr_index_addr;
}
#endif

/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine releases resources to the micro controller.
 * The micro controller needs
 *   - the copy engine source descriptor base address
 *   - the copy engine source descriptor size
 *   - the PCI BAR address to access the copy engine register
 *
 * Return: None
 */
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
	struct CE_state *CE_state = (struct CE_state *)ce;
	uint32_t ring_loop;
	struct CE_src_desc *ce_desc;
	qdf_dma_addr_t phy_mem_base;
	struct hif_softc *scn = CE_state->scn;

	if (CE_UNUSED == CE_state->state) {
		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
				&CE_state->scn->ipa_ce_ring->mem_info) = 0;
		*ce_sr_ring_size = 0;
		return;
	}

	/* Update default value for descriptor */
	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
	     ring_loop++) {
		ce_desc = (struct CE_src_desc *)
			  ((char *)CE_state->src_ring->base_addr_owner_space +
			   ring_loop * (sizeof(struct CE_src_desc)));
		CE_IPA_RING_INIT(ce_desc);
	}

	/* Get BAR address */
	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);

	*ce_sr = CE_state->scn->ipa_ce_ring;
	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
		sizeof(struct CE_src_desc));
	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);
}

#endif /* IPA_OFFLOAD */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
 * @buf: buffer to copy to
 * @pos: current position up to which the buf is filled
 * @data: data to be copied
 * @data_len: length of the data to be copied
 */
static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
				       uint8_t *data, uint32_t data_len)
{
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
			CE_DEBUG_MAX_DATA_BUF_SIZE);

	if ((data_len > 0) && data) {
		if (data_len < 16) {
			hex_dump_to_buffer(data,
					   CE_DEBUG_DATA_PER_ROW,
					   16, 1, buf + pos,
					   (ssize_t)PAGE_SIZE - pos,
					   false);
			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
		} else {
			/* Round up so a trailing partial row is printed,
			 * without emitting an empty extra row when
			 * data_len is an exact multiple of the row size.
			 */
			uint32_t rows = (data_len + 15) / 16;
			uint32_t row = 0;

			for (row = 0; row < rows; row++) {
				hex_dump_to_buffer(data + (row * 16),
						   CE_DEBUG_DATA_PER_ROW,
						   16, 1, buf + pos,
						   (ssize_t)PAGE_SIZE
						   - pos, false);
				pos +=
				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
				pos += snprintf(buf + pos, PAGE_SIZE - pos,
						"\n");
			}
		}
	}

	return pos;
}
#endif

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * before it is used here.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
static const char *ce_event_type_to_str(enum hif_ce_event_type type)
{
	switch (type) {
	case HIF_RX_DESC_POST:
		return "HIF_RX_DESC_POST";
	case HIF_RX_DESC_COMPLETION:
		return "HIF_RX_DESC_COMPLETION";
	case HIF_TX_GATHER_DESC_POST:
		return "HIF_TX_GATHER_DESC_POST";
	case HIF_TX_DESC_POST:
		return "HIF_TX_DESC_POST";
	case HIF_TX_DESC_SOFTWARE_POST:
		return "HIF_TX_DESC_SOFTWARE_POST";
	case HIF_TX_DESC_COMPLETION:
		return "HIF_TX_DESC_COMPLETION";
	case FAST_RX_WRITE_INDEX_UPDATE:
		return "FAST_RX_WRITE_INDEX_UPDATE";
	case FAST_RX_SOFTWARE_INDEX_UPDATE:
		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_UPDATE:
		return "FAST_TX_WRITE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
	case FAST_TX_SOFTWARE_INDEX_UPDATE:
		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
	case RESUME_WRITE_INDEX_UPDATE:
		return "RESUME_WRITE_INDEX_UPDATE";
	case HIF_IRQ_EVENT:
		return "HIF_IRQ_EVENT";
	case HIF_CE_TASKLET_ENTRY:
		return "HIF_CE_TASKLET_ENTRY";
	case HIF_CE_TASKLET_RESCHEDULE:
		return "HIF_CE_TASKLET_RESCHEDULE";
	case HIF_CE_TASKLET_EXIT:
		return "HIF_CE_TASKLET_EXIT";
	case HIF_CE_REAP_ENTRY:
		return "HIF_CE_REAP_ENTRY";
	case HIF_CE_REAP_EXIT:
		return "HIF_CE_REAP_EXIT";
	case NAPI_SCHEDULE:
		return "NAPI_SCHEDULE";
	case NAPI_POLL_ENTER:
		return "NAPI_POLL_ENTER";
	case NAPI_COMPLETE:
		return "NAPI_COMPLETE";
	case NAPI_POLL_EXIT:
		return "NAPI_POLL_EXIT";
	case HIF_RX_NBUF_ALLOC_FAILURE:
		return "HIF_RX_NBUF_ALLOC_FAILURE";
	case HIF_RX_NBUF_MAP_FAILURE:
		return "HIF_RX_NBUF_MAP_FAILURE";
	case HIF_RX_NBUF_ENQUEUE_FAILURE:
		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
	default:
		return "invalid";
	}
}
/**
 * hif_dump_desc_event() - dump the selected ce descriptor event
 * @scn: hif context
 * @buf: buffer to which the event is copied
 *
 * Return: number of bytes written to @buf, or a negative errno
 */
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
{
	struct hif_ce_desc_event *event;
	uint64_t secs, usecs;
	ssize_t len = 0;
	struct ce_desc_hist *ce_hist = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (ce_hist->hist_id >= CE_COUNT_MAX ||
	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	hist_ev =
		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];

	if (!hist_ev) {
		qdf_print("Low Memory");
		return -EINVAL;
	}

	event = &hist_ev[ce_hist->hist_index];

	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);

	len += snprintf(buf, PAGE_SIZE - len,
			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
			secs, usecs, ce_hist->hist_id,
			ce_event_type_to_str(event->type),
			event->index, event->memory);
#ifdef HIF_CE_DEBUG_DATA_BUF
	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
			event->actual_data_len);
#endif

	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");

	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
			   16, 1, buf + len,
			   (ssize_t)PAGE_SIZE - len, false);
	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

#ifdef HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[ce_hist->hist_id])
		len = hif_dump_desc_data_buf(buf, len, event->data,
					     (event->actual_data_len <
					      CE_DEBUG_MAX_DATA_BUF_SIZE) ?
					     event->actual_data_len :
					     CE_DEBUG_MAX_DATA_BUF_SIZE);
#endif /* HIF_CE_DEBUG_DATA_BUF */

	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");

	return len;
}

/*
 * hif_input_desc_trace_buf_index() -
 * API to set the CE id and CE debug storage buffer index
 *
 * @scn: hif context
 * @buf: data got from the user
 * @size: size of the input buffer
 *
 * Return: size consumed on success, or a negative errno
 */
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size)
{
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (!size) {
		qdf_nofl_err("%s: Invalid input buffer.", __func__);
		return -EINVAL;
	}

	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
		   (unsigned int *)&ce_hist->hist_index) != 2) {
		qdf_nofl_err("%s: Invalid input value.", __func__);
		return -EINVAL;
	}
	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
	    (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	return size;
}

#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

#ifdef HIF_CE_DEBUG_DATA_BUF
/*
 * hif_ce_en_desc_hist() -
 * API to enable recording the CE desc history
 *
 * @scn: hif context
 * @buf: buffer holding "<CE id> <1/0>" from the user
 * @size: size of the input buffer
1697 * 1698 * Starts recording the ce desc history 1699 * 1700 * Return total length copied 1701 */ 1702 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size) 1703 { 1704 struct ce_desc_hist *ce_hist = NULL; 1705 uint32_t cfg = 0; 1706 uint32_t ce_id = 0; 1707 1708 if (!scn) 1709 return -EINVAL; 1710 1711 ce_hist = &scn->hif_ce_desc_hist; 1712 1713 if (!size) { 1714 qdf_nofl_err("%s: Invalid input buffer.", __func__); 1715 return -EINVAL; 1716 } 1717 1718 if (sscanf(buf, "%u %u", (unsigned int *)&ce_id, 1719 (unsigned int *)&cfg) != 2) { 1720 qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.", 1721 __func__); 1722 return -EINVAL; 1723 } 1724 if (ce_id >= CE_COUNT_MAX) { 1725 qdf_print("Invalid value CE Id"); 1726 return -EINVAL; 1727 } 1728 1729 if ((cfg > 1 || cfg < 0)) { 1730 qdf_print("Invalid values: enter 0 or 1"); 1731 return -EINVAL; 1732 } 1733 1734 if (!ce_hist->hist_ev[ce_id]) 1735 return -EINVAL; 1736 1737 qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]); 1738 if (cfg == 1) { 1739 if (ce_hist->data_enable[ce_id] == 1) { 1740 qdf_debug("Already Enabled"); 1741 } else { 1742 if (alloc_mem_ce_debug_hist_data(scn, ce_id) 1743 == QDF_STATUS_E_NOMEM){ 1744 ce_hist->data_enable[ce_id] = 0; 1745 qdf_err("%s:Memory Alloc failed", __func__); 1746 } else 1747 ce_hist->data_enable[ce_id] = 1; 1748 } 1749 } else if (cfg == 0) { 1750 if (ce_hist->data_enable[ce_id] == 0) { 1751 qdf_debug("Already Disabled"); 1752 } else { 1753 ce_hist->data_enable[ce_id] = 0; 1754 free_mem_ce_debug_hist_data(scn, ce_id); 1755 } 1756 } 1757 qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]); 1758 1759 return size; 1760 } 1761 1762 /* 1763 * hif_disp_ce_enable_desc_data_hist() - 1764 * API to display value of data_enable 1765 * 1766 * @dev: network device 1767 * @attr: sysfs attribute 1768 * @buf: buffer to copy the data. 
1769 * 1770 * Return total length copied 1771 */ 1772 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf) 1773 { 1774 ssize_t len = 0; 1775 uint32_t ce_id = 0; 1776 struct ce_desc_hist *ce_hist = NULL; 1777 1778 if (!scn) 1779 return -EINVAL; 1780 1781 ce_hist = &scn->hif_ce_desc_hist; 1782 1783 for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) { 1784 len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n", 1785 ce_id, ce_hist->data_enable[ce_id]); 1786 } 1787 1788 return len; 1789 } 1790 #endif /* HIF_CE_DEBUG_DATA_BUF */ 1791 1792 #ifdef OL_ATH_SMART_LOGGING 1793 #define GUARD_SPACE 10 1794 #define LOG_ID_SZ 4 1795 /* 1796 * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf 1797 * @src_ring: SRC ring state 1798 * @buf_cur: Current pointer in ring buffer 1799 * @buf_init:Start of the ring buffer 1800 * @buf_sz: Size of the ring buffer 1801 * @skb_sz: Max size of the SKB buffer to be copied 1802 * 1803 * Dumps all the CE SRC ring descriptors and buffers pointed by them in to 1804 * the given buf, skb_sz is the max buffer size to be copied 1805 * 1806 * Return: Current pointer in ring buffer 1807 */ 1808 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring, 1809 uint8_t *buf_cur, uint8_t *buf_init, 1810 uint32_t buf_sz, uint32_t skb_sz) 1811 { 1812 struct CE_src_desc *src_ring_base; 1813 uint32_t len, entry; 1814 struct CE_src_desc *src_desc; 1815 qdf_nbuf_t nbuf; 1816 uint32_t available_buf; 1817 1818 src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space; 1819 len = sizeof(struct CE_ring_state); 1820 available_buf = buf_sz - (buf_cur - buf_init); 1821 if (available_buf < (len + GUARD_SPACE)) { 1822 buf_cur = buf_init; 1823 } 1824 1825 qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state)); 1826 buf_cur += sizeof(struct CE_ring_state); 1827 1828 for (entry = 0; entry < src_ring->nentries; entry++) { 1829 src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry); 1830 nbuf = src_ring->per_transfer_context[entry]; 1831 if (nbuf) { 1832 uint32_t skb_len = qdf_nbuf_len(nbuf); 1833 uint32_t skb_cp_len = qdf_min(skb_len, skb_sz); 1834 1835 len = sizeof(struct CE_src_desc) + skb_cp_len 1836 + LOG_ID_SZ + sizeof(skb_cp_len); 1837 available_buf = buf_sz - (buf_cur - buf_init); 1838 if (available_buf < (len + GUARD_SPACE)) { 1839 buf_cur = buf_init; 1840 } 1841 qdf_mem_copy(buf_cur, src_desc, 1842 sizeof(struct CE_src_desc)); 1843 buf_cur += sizeof(struct CE_src_desc); 1844 1845 available_buf = buf_sz - (buf_cur - buf_init); 1846 buf_cur += snprintf(buf_cur, available_buf, "SKB%d", 1847 skb_cp_len); 1848 1849 if (skb_cp_len) { 1850 qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf), 1851 skb_cp_len); 1852 buf_cur += skb_cp_len; 1853 } 1854 } else { 1855 len = sizeof(struct CE_src_desc) + LOG_ID_SZ; 1856 available_buf = buf_sz - (buf_cur - buf_init); 1857 if (available_buf < (len + GUARD_SPACE)) { 1858 buf_cur = buf_init; 1859 } 1860 qdf_mem_copy(buf_cur, src_desc, 1861 sizeof(struct CE_src_desc)); 1862 buf_cur += sizeof(struct CE_src_desc); 1863 available_buf = buf_sz - (buf_cur - buf_init); 1864 buf_cur += snprintf(buf_cur, available_buf, "NUL"); 1865 } 1866 } 1867 1868 return buf_cur; 1869 } 1870 1871 /* 1872 * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf 1873 * @dest_ring: SRC ring state 1874 * @buf_cur: Current pointer in ring buffer 1875 * @buf_init:Start of the ring buffer 1876 * @buf_sz: Size of the ring buffer 1877 * @skb_sz: Max size of the SKB buffer to be copied 1878 * 1879 * Dumps all the CE SRC ring descriptors and buffers 
 * pointed to by them into the given buf; skb_sz is the max buffer size
 * to be copied.
 *
 * Return: current pointer in the ring buffer
 */
static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
				     uint8_t *buf_cur, uint8_t *buf_init,
				     uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_dest_desc *dest_ring_base;
	uint32_t len, entry;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);
	if (available_buf < (len + GUARD_SPACE))
		buf_cur = buf_init;

	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < dest_ring->nentries; entry++) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);

		nbuf = dest_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_dest_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);

			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;

			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);
			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE))
				buf_cur = buf_init;

			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);
			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}
	return buf_cur;
}

/**
 * hif_log_dump_ce() - copy the given CE's SRC or DEST ring to buf
 *
 * Calls the respective function to dump all the CE SRC/DEST ring
 * descriptors and the buffers pointed to by them into the given buf.
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz)
{
	struct CE_state *ce_state;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;

	ce_state = scn->ce_id_to_state[ce];
	src_ring = ce_state->src_ring;
	dest_ring = ce_state->dest_ring;

	if (src_ring) {
		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
					      buf_init, buf_sz, skb_sz);
	} else if (dest_ring) {
		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
					       buf_init, buf_sz, skb_sz);
	}

	return buf_cur;
}

qdf_export_symbol(hif_log_dump_ce);
#endif /* OL_ATH_SMART_LOGGING */