Lines Matching full:io
294 * the WQ to allow for 2 completions per IO. This allows us to in efct_hw_setup()
327 efct_hw_init_free_io(struct efct_hw_io *io) in efct_hw_init_free_io() argument
330 * Set io->done to NULL, to avoid any callbacks, should in efct_hw_init_free_io()
333 io->done = NULL; in efct_hw_init_free_io()
334 io->abort_done = NULL; in efct_hw_init_free_io()
335 io->status_saved = false; in efct_hw_init_free_io()
336 io->abort_in_progress = false; in efct_hw_init_free_io()
337 io->type = 0xFFFF; in efct_hw_init_free_io()
338 io->wq = NULL; in efct_hw_init_free_io()
353 efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_restore_sgl() argument
356 io->sgl = &io->def_sgl; in efct_hw_io_restore_sgl()
357 io->sgl_count = io->def_sgl_count; in efct_hw_io_restore_sgl()
363 struct efct_hw_io *io = arg; in efct_hw_wq_process_io() local
364 struct efct_hw *hw = io->hw; in efct_hw_wq_process_io()
370 if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0) in efct_hw_wq_process_io()
371 io->xbusy = false; in efct_hw_wq_process_io()
374 switch (io->type) { in efct_hw_wq_process_io()
398 /* efct_hw_io_free(hw, io); */ in efct_hw_wq_process_io()
401 efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n", in efct_hw_wq_process_io()
402 io->type, io->indicator); in efct_hw_wq_process_io()
408		 * If this is an originator IO and XB is set, then issue	  in efct_hw_wq_process_io()
409		 * an abort for the IO from within the HW	  in efct_hw_wq_process_io()
411 if (efct_hw_iotype_is_originator(io->type) && in efct_hw_wq_process_io()
416 io->indicator, io->reqtag); in efct_hw_wq_process_io()
419 * Because targets may send a response when the IO in efct_hw_wq_process_io()
421 * XRI_ABORTED CQE to issue the IO callback in efct_hw_wq_process_io()
423 rc = efct_hw_io_abort(hw, io, false, NULL, NULL); in efct_hw_wq_process_io()
429 io->status_saved = true; in efct_hw_wq_process_io()
430 io->saved_status = status; in efct_hw_wq_process_io()
431 io->saved_ext = ext; in efct_hw_wq_process_io()
432 io->saved_len = len; in efct_hw_wq_process_io()
442 io->indicator, io->reqtag); in efct_hw_wq_process_io()
450 io->indicator, io->reqtag, rc); in efct_hw_wq_process_io()
455 if (io->done) { in efct_hw_wq_process_io()
456 efct_hw_done_t done = io->done; in efct_hw_wq_process_io()
458 io->done = NULL; in efct_hw_wq_process_io()
460 if (io->status_saved) { in efct_hw_wq_process_io()
462 status = io->saved_status; in efct_hw_wq_process_io()
463 len = io->saved_len; in efct_hw_wq_process_io()
464 ext = io->saved_ext; in efct_hw_wq_process_io()
465 io->status_saved = false; in efct_hw_wq_process_io()
469 efct_hw_io_restore_sgl(hw, io); in efct_hw_wq_process_io()
470 done(io, len, status, ext, io->arg); in efct_hw_wq_process_io()
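The save/replay flow above is the heart of the XB handling: when a WQE completes with the exchange still busy, the WCQE result is parked on the IO and the done callback is deferred until the XRI_ABORTED CQE arrives. A minimal sketch of that pattern using only the efct_hw_io fields shown above (the helper names are illustrative, not the driver's API):

	/* Park the WCQE result while the XRI is still busy (XB set). */
	static void hw_io_save_status(struct efct_hw_io *io, int status,
				      u32 ext, u32 len)
	{
		io->status_saved = true;
		io->saved_status = status;
		io->saved_ext = ext;
		io->saved_len = len;
	}

	/* Replay the parked result once the exchange is finally released. */
	static void hw_io_replay_status(struct efct_hw_io *io)
	{
		efct_hw_done_t done = io->done;

		if (!done || !io->status_saved)
			return;

		io->done = NULL;
		io->status_saved = false;
		done(io, io->saved_len, io->saved_status, io->saved_ext,
		     io->arg);
	}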
481 struct efct_hw_io *io = NULL; in efct_hw_setup_io() local
489 if (!hw->io) { in efct_hw_setup_io()
490 hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL); in efct_hw_setup_io()
491 if (!hw->io) in efct_hw_setup_io()
494 memset(hw->io, 0, hw->config.n_io * sizeof(io)); in efct_hw_setup_io()
497 hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL); in efct_hw_setup_io()
498 if (!hw->io[i]) in efct_hw_setup_io()
502 /* Create WQE buffs for IO */ in efct_hw_setup_io()
506 kfree(hw->io); in efct_hw_setup_io()
526 /* Initialize the pool of HW IO objects */ in efct_hw_setup_io()
530 io = hw->io[i]; in efct_hw_setup_io()
532 /* initialize IO fields */ in efct_hw_setup_io()
533 io->hw = hw; in efct_hw_setup_io()
536 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size]; in efct_hw_setup_io()
538 /* Allocate the request tag for this IO */ in efct_hw_setup_io()
539 wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io); in efct_hw_setup_io()
544 io->reqtag = wqcb->instance_index; in efct_hw_setup_io()
547 efct_hw_init_free_io(io); in efct_hw_setup_io()
549 /* The XB flag isn't cleared on IO free, so init to zero */ in efct_hw_setup_io()
550 io->xbusy = 0; in efct_hw_setup_io()
553 &io->indicator, &index)) { in efct_hw_setup_io()
560 dma = &io->def_sgl; in efct_hw_setup_io()
568 memset(&io->def_sgl, 0, in efct_hw_setup_io()
573 io->def_sgl_count = hw->config.n_sgl; in efct_hw_setup_io()
574 io->sgl = &io->def_sgl; in efct_hw_setup_io()
575 io->sgl_count = io->def_sgl_count; in efct_hw_setup_io()
578 io->xfer_rdy.virt = (void *)xfer_virt; in efct_hw_setup_io()
579 io->xfer_rdy.phys = xfer_phys; in efct_hw_setup_io()
580 io->xfer_rdy.size = sizeof(struct fcp_txrdy); in efct_hw_setup_io()
589 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) { in efct_hw_setup_io()
590 kfree(hw->io[i]); in efct_hw_setup_io()
591 hw->io[i] = NULL; in efct_hw_setup_io()
594 kfree(hw->io); in efct_hw_setup_io()
595 hw->io = NULL; in efct_hw_setup_io()
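The allocation above uses kmalloc_array() followed by memset(); kcalloc() expresses the same thing in one call. A sketch of the pointer-array setup and its unwind path under that substitution (the error handling mirrors the cleanup at lines 589-595):

	static int hw_alloc_io_array(struct efct_hw *hw)
	{
		u32 i;

		/* Zeroed array of struct efct_hw_io pointers. */
		hw->io = kcalloc(hw->config.n_io, sizeof(*hw->io), GFP_KERNEL);
		if (!hw->io)
			return -ENOMEM;

		for (i = 0; i < hw->config.n_io; i++) {
			hw->io[i] = kzalloc(sizeof(*hw->io[i]), GFP_KERNEL);
			if (!hw->io[i])
				goto error;
		}
		return 0;

	error:
		while (i--) {
			kfree(hw->io[i]);
			hw->io[i] = NULL;
		}
		kfree(hw->io);
		hw->io = NULL;
		return -ENOMEM;
	}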
604 struct efct_hw_io *io = NULL; in efct_hw_init_prereg_io() local
636 if (hw->io[idx + n]->indicator != in efct_hw_init_prereg_io()
637 hw->io[idx + n - 1]->indicator + 1) in efct_hw_init_prereg_io()
641 sgls[n] = hw->io[idx + n]->sgl; in efct_hw_init_prereg_io()
645 hw->io[idx]->indicator, n, sgls, NULL, &req)) { in efct_hw_init_prereg_io()
658 io = hw->io[idx]; in efct_hw_init_prereg_io()
659 io->state = EFCT_HW_IO_STATE_FREE; in efct_hw_init_prereg_io()
660 INIT_LIST_HEAD(&io->list_entry); in efct_hw_init_prereg_io()
661 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_init_prereg_io()
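Pre-registration only works on runs of consecutive XRIs, which is what the indicator check at lines 636-637 enforces. A sketch of that chunking loop, with prereg_sgls() as a hypothetical stand-in for the SLI post-SGL-pages command built at line 645:

	/* Hypothetical stub: the real driver builds and posts an SGL-pages
	 * mailbox command covering XRIs [first_xri, first_xri + n) here.
	 */
	static int prereg_sgls(struct efct_hw *hw, u32 first_xri,
			       struct efc_dma **sgls, u32 n)
	{
		return 0;
	}

	static int prereg_in_chunks(struct efct_hw *hw, struct efc_dma **sgls,
				    u32 sgls_per_request)
	{
		u32 idx = 0, n;

		while (idx < hw->config.n_io) {
			sgls[0] = hw->io[idx]->sgl;

			/* Extend the chunk while the XRIs stay consecutive. */
			for (n = 1; n < sgls_per_request &&
				    idx + n < hw->config.n_io; n++) {
				if (hw->io[idx + n]->indicator !=
				    hw->io[idx + n - 1]->indicator + 1)
					break;
				sgls[n] = hw->io[idx + n]->sgl;
			}

			if (prereg_sgls(hw, hw->io[idx]->indicator, sgls, n))
				return -EIO;
			idx += n;
		}
		return 0;
	}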
677 struct efct_hw_io *io = NULL; in efct_hw_init_io() local
686 io = hw->io[idx]; in efct_hw_init_io()
687 io->state = EFCT_HW_IO_STATE_FREE; in efct_hw_init_io()
688 INIT_LIST_HEAD(&io->list_entry); in efct_hw_init_io()
689 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_init_io()
915 * The IO queues must be initialized here for the reset case. The in efct_hw_init()
1053 efc_log_err(hw->os, "IO allocation failure\n"); in efct_hw_init()
1059 efc_log_err(hw->os, "IO initialization failure\n"); in efct_hw_init()
1579 struct efct_hw_io *io = NULL; in _efct_hw_io_alloc() local
1582 io = list_first_entry(&hw->io_free, struct efct_hw_io, in _efct_hw_io_alloc()
1584 list_del(&io->list_entry); in _efct_hw_io_alloc()
1586 if (io) { in _efct_hw_io_alloc()
1587 INIT_LIST_HEAD(&io->list_entry); in _efct_hw_io_alloc()
1588 list_add_tail(&io->list_entry, &hw->io_inuse); in _efct_hw_io_alloc()
1589 io->state = EFCT_HW_IO_STATE_INUSE; in _efct_hw_io_alloc()
1590 io->abort_reqtag = U32_MAX; in _efct_hw_io_alloc()
1591 io->wq = hw->wq_cpu_array[raw_smp_processor_id()]; in _efct_hw_io_alloc()
1592 if (!io->wq) { in _efct_hw_io_alloc()
1595 io->wq = hw->hw_wq[0]; in _efct_hw_io_alloc()
1597 kref_init(&io->ref); in _efct_hw_io_alloc()
1598 io->release = efct_hw_io_free_internal; in _efct_hw_io_alloc()
1603 return io; in _efct_hw_io_alloc()
1609 struct efct_hw_io *io = NULL; in efct_hw_io_alloc() local
1613 io = _efct_hw_io_alloc(hw); in efct_hw_io_alloc()
1616 return io; in efct_hw_io_alloc()
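Lines 1591-1596 bind the freshly allocated IO to a work queue keyed by the current CPU, falling back to the first hardware WQ when no per-CPU mapping exists. A minimal sketch of that selection, assuming the wq_cpu_array and hw_wq fields referenced above:

	static struct hw_wq *pick_wq(struct efct_hw *hw)
	{
		/* Prefer the WQ mapped to the CPU issuing the IO. */
		struct hw_wq *wq = hw->wq_cpu_array[raw_smp_processor_id()];

		return wq ? wq : hw->hw_wq[0];
	}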
1621 struct efct_hw_io *io) in efct_hw_io_free_move_correct_list() argument
1624 * When an IO is freed, depending on the exchange busy flag, in efct_hw_io_free_move_correct_list()
1627 if (io->xbusy) { in efct_hw_io_free_move_correct_list()
1632 INIT_LIST_HEAD(&io->list_entry); in efct_hw_io_free_move_correct_list()
1633 list_add_tail(&io->list_entry, &hw->io_wait_free); in efct_hw_io_free_move_correct_list()
1634 io->state = EFCT_HW_IO_STATE_WAIT_FREE; in efct_hw_io_free_move_correct_list()
1636 /* IO not busy, add to free list */ in efct_hw_io_free_move_correct_list()
1637 INIT_LIST_HEAD(&io->list_entry); in efct_hw_io_free_move_correct_list()
1638 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_io_free_move_correct_list()
1639 io->state = EFCT_HW_IO_STATE_FREE; in efct_hw_io_free_move_correct_list()
1644 efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_free_common() argument
1646 /* initialize IO fields */ in efct_hw_io_free_common()
1647 efct_hw_init_free_io(io); in efct_hw_io_free_common()
1650 efct_hw_io_restore_sgl(hw, io); in efct_hw_io_free_common()
1657 struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref); in efct_hw_io_free_internal() local
1658 struct efct_hw *hw = io->hw; in efct_hw_io_free_internal()
1661 efct_hw_io_free_common(hw, io); in efct_hw_io_free_internal()
1665 if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) { in efct_hw_io_free_internal()
1666 list_del_init(&io->list_entry); in efct_hw_io_free_internal()
1667 efct_hw_io_free_move_correct_list(hw, io); in efct_hw_io_free_internal()
1673 efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_free() argument
1675 return kref_put(&io->ref, io->release); in efct_hw_io_free()
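kref_init() at allocation (line 1597) sets the count to one, and efct_hw_io_free() is just the matching kref_put(); anything that must outlive the caller, such as an in-flight abort, takes its own reference. A lifecycle sketch under that reading:

	static void io_lifecycle_example(struct efct_hw *hw)
	{
		struct efct_hw_io *io = efct_hw_io_alloc(hw);

		if (!io)
			return;

		kref_get(&io->ref);	/* extra ref, e.g. held across an abort */

		/* submit WQEs, handle completions, then drop both refs */
		kref_put(&io->ref, io->release);	/* abort-side reference */
		efct_hw_io_free(hw, io);		/* allocation reference */
	}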
1684 return hw->io[ioindex]; in efct_hw_io_lookup()
1688 efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io, in efct_hw_io_init_sges() argument
1696 if (!io) { in efct_hw_io_init_sges()
1697 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io); in efct_hw_io_init_sges()
1702 io->sgl = &io->def_sgl; in efct_hw_io_init_sges()
1703 io->sgl_count = io->def_sgl_count; in efct_hw_io_init_sges()
1704 io->first_data_sge = 0; in efct_hw_io_init_sges()
1706 memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge)); in efct_hw_io_init_sges()
1707 io->n_sge = 0; in efct_hw_io_init_sges()
1708 io->sge_offset = 0; in efct_hw_io_init_sges()
1710 io->type = type; in efct_hw_io_init_sges()
1712 data = io->sgl->virt; in efct_hw_io_init_sges()
1715 * Some IO types have underlying hardware requirements on the order in efct_hw_io_init_sges()
1726 cpu_to_le32(upper_32_bits(io->xfer_rdy.phys)); in efct_hw_io_init_sges()
1728 cpu_to_le32(lower_32_bits(io->xfer_rdy.phys)); in efct_hw_io_init_sges()
1729 data->buffer_length = cpu_to_le32(io->xfer_rdy.size); in efct_hw_io_init_sges()
1735 io->n_sge = 1; in efct_hw_io_init_sges()
1749 efc_log_err(hw->os, "unsupported IO type %#x\n", type); in efct_hw_io_init_sges()
1764 io->n_sge += skips; in efct_hw_io_init_sges()
1777 efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io, in efct_hw_io_add_sge() argument
1783 if (!io || !addr || !length) { in efct_hw_io_add_sge()
1785 "bad parameter hw=%p io=%p addr=%lx length=%u\n", in efct_hw_io_add_sge()
1786 hw, io, addr, length); in efct_hw_io_add_sge()
1797 data = io->sgl->virt; in efct_hw_io_add_sge()
1798 data += io->n_sge; in efct_hw_io_add_sge()
1804 sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset; in efct_hw_io_add_sge()
1818 if (io->n_sge) { in efct_hw_io_add_sge()
1825 if (io->first_data_sge == 0) in efct_hw_io_add_sge()
1826 io->first_data_sge = io->n_sge; in efct_hw_io_add_sge()
1828 io->sge_offset += length; in efct_hw_io_add_sge()
1829 io->n_sge++; in efct_hw_io_add_sge()
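Callers build a scatter/gather list by initializing the SGL for the IO type and then appending one data SGE per buffer; efct_hw_io_add_sge() clears the last-SGE mark on the previous entry (line 1818) so only the newest entry carries it. A caller-side sketch, assuming buffers described by the driver's struct efc_dma:

	static int build_sgl(struct efct_hw *hw, struct efct_hw_io *io,
			     struct efc_dma *bufs, u32 nbufs)
	{
		u32 i;
		int rc;

		rc = efct_hw_io_init_sges(hw, io, EFCT_HW_IO_TARGET_WRITE);
		if (rc)
			return rc;

		for (i = 0; i < nbufs; i++) {
			rc = efct_hw_io_add_sge(hw, io, bufs[i].phys,
						bufs[i].size);
			if (rc)
				return rc;
		}
		return 0;
	}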
1849 struct efct_hw_io *io = arg; in efct_hw_wq_process_abort() local
1850 struct efct_hw *hw = io->hw; in efct_hw_wq_process_abort()
1863 ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) { in efct_hw_wq_process_abort()
1864 efct_hw_done_t done = io->done; in efct_hw_wq_process_abort()
1866 io->done = NULL; in efct_hw_wq_process_abort()
1874 status = io->saved_status; in efct_hw_wq_process_abort()
1875 len = io->saved_len; in efct_hw_wq_process_abort()
1876 ext = io->saved_ext; in efct_hw_wq_process_abort()
1877 io->status_saved = false; in efct_hw_wq_process_abort()
1878 done(io, len, status, ext, io->arg); in efct_hw_wq_process_abort()
1881 if (io->abort_done) { in efct_hw_wq_process_abort()
1882 efct_hw_done_t done = io->abort_done; in efct_hw_wq_process_abort()
1884 io->abort_done = NULL; in efct_hw_wq_process_abort()
1885 done(io, len, status, ext, io->abort_arg); in efct_hw_wq_process_abort()
1889 io->abort_in_progress = false; in efct_hw_wq_process_abort()
1892 if (io->abort_reqtag == U32_MAX) { in efct_hw_wq_process_abort()
1893 efc_log_err(hw->os, "HW IO already freed\n"); in efct_hw_wq_process_abort()
1897 wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag); in efct_hw_wq_process_abort()
1904 (void)efct_hw_io_free(hw, io); in efct_hw_wq_process_abort()
1937 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", in efct_hw_io_abort()
1943 efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n", in efct_hw_io_abort()
1948 /* take a reference on IO being aborted */ in efct_hw_io_abort()
1952 "io not active xri=0x%x tag=0x%x\n", in efct_hw_io_abort()
1974 "io already being aborted xri=0x%x tag=0x%x\n", in efct_hw_io_abort()
1996 /* Allocate a request tag for the abort portion of this IO */ in efct_hw_io_abort()
2010 * aborted when the IO's wqe is removed from the list. in efct_hw_io_abort()
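Putting the guards above together (the HW must be active, the IO must still be in flight, and only one abort may be outstanding per IO), issuing an abort reduces to one call; the done/abort_done callbacks then fire from efct_hw_wq_process_abort(). A usage sketch, assuming the (hw, io, send_abts, cb, arg) signature used at line 423:

	static void abort_example(struct efct_hw *hw, struct efct_hw_io *io,
				  efct_hw_done_t abort_cb, void *cb_arg)
	{
		int rc;

		/* send_abts=false: abort the WQE/XRI without an ABTS on the wire */
		rc = efct_hw_io_abort(hw, io, false, abort_cb, cb_arg);
		if (rc)
			efc_log_err(hw->os, "abort failed xri=0x%x rc=%d\n",
				    io->indicator, rc);
	}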
2443 struct efct_hw_io *io = NULL; in efct_hw_xabt_process() local
2446 io = efct_hw_io_lookup(hw, rid); in efct_hw_xabt_process()
2447 if (!io) { in efct_hw_xabt_process()
2448 /* IO lookup failure should never happen */ in efct_hw_xabt_process()
2449 efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid); in efct_hw_xabt_process()
2453 if (!io->xbusy) in efct_hw_xabt_process()
2454 efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid); in efct_hw_xabt_process()
2456 /* mark IO as no longer busy */ in efct_hw_xabt_process()
2457 io->xbusy = false; in efct_hw_xabt_process()
2463 if (io->done) { in efct_hw_xabt_process()
2464 efct_hw_done_t done = io->done; in efct_hw_xabt_process()
2465 void *arg = io->arg; in efct_hw_xabt_process()
2471 int status = io->saved_status; in efct_hw_xabt_process()
2472 u32 len = io->saved_len; in efct_hw_xabt_process()
2473 u32 ext = io->saved_ext; in efct_hw_xabt_process()
2475 io->done = NULL; in efct_hw_xabt_process()
2476 io->status_saved = false; in efct_hw_xabt_process()
2478 done(io, len, status, ext, arg); in efct_hw_xabt_process()
2482 if (io->state == EFCT_HW_IO_STATE_INUSE || in efct_hw_xabt_process()
2483 io->state == EFCT_HW_IO_STATE_WAIT_FREE) { in efct_hw_xabt_process()
2484 /* if on wait_free list, caller has already freed IO; in efct_hw_xabt_process()
2489 if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) { in efct_hw_xabt_process()
2490 io->state = EFCT_HW_IO_STATE_FREE; in efct_hw_xabt_process()
2491 list_del_init(&io->list_entry); in efct_hw_xabt_process()
2492 efct_hw_io_free_move_correct_list(hw, io); in efct_hw_xabt_process()
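Taken together with efct_hw_io_free_move_correct_list(), the XRI-abort handler completes a small per-IO state machine. A summary of the transitions implied by the code above, in comment form:

	/*
	 * FREE      --efct_hw_io_alloc()-------------------------> INUSE
	 * INUSE     --last kref_put(), !xbusy--------------------> FREE
	 * INUSE     --last kref_put(), xbusy (XB set)------------> WAIT_FREE
	 * WAIT_FREE --XRI_ABORTED CQE (efct_hw_xabt_process())---> FREE
	 */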
2616 * Add IO to active io wqe list before submitting, in case the in efct_hw_bls_send()
2638 struct efc_disc_io *io = arg; in efct_els_ssrs_send_cb() local
2640 efc_disc_io_complete(io, length, status, ext_status); in efct_els_ssrs_send_cb()
2645 efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params) in efct_fill_els_params() argument
2647 u8 *cmd = io->req.virt; in efct_fill_els_params()
2650 params->s_id = io->s_id; in efct_fill_els_params()
2651 params->d_id = io->d_id; in efct_fill_els_params()
2652 params->ox_id = io->iparam.els.ox_id; in efct_fill_els_params()
2653 params->rpi = io->rpi; in efct_fill_els_params()
2654 params->vpi = io->vpi; in efct_fill_els_params()
2655 params->rpi_registered = io->rpi_registered; in efct_fill_els_params()
2656 params->xmit_len = io->xmit_len; in efct_fill_els_params()
2657 params->rsp_len = io->rsp_len; in efct_fill_els_params()
2658 params->timeout = io->iparam.els.timeout; in efct_fill_els_params()
2662 efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params) in efct_fill_ct_params() argument
2664 params->r_ctl = io->iparam.ct.r_ctl; in efct_fill_ct_params()
2665 params->type = io->iparam.ct.type; in efct_fill_ct_params()
2666 params->df_ctl = io->iparam.ct.df_ctl; in efct_fill_ct_params()
2667 params->d_id = io->d_id; in efct_fill_ct_params()
2668 params->ox_id = io->iparam.ct.ox_id; in efct_fill_ct_params()
2669 params->rpi = io->rpi; in efct_fill_ct_params()
2670 params->vpi = io->vpi; in efct_fill_ct_params()
2671 params->rpi_registered = io->rpi_registered; in efct_fill_ct_params()
2672 params->xmit_len = io->xmit_len; in efct_fill_ct_params()
2673 params->rsp_len = io->rsp_len; in efct_fill_ct_params()
2674 params->timeout = io->iparam.ct.timeout; in efct_fill_ct_params()
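efct_els_hw_srrs_send() picks between these two marshaling helpers based on the discovery IO type. A sketch of that dispatch, assuming the EFC_DISC_IO_* enum values referenced at lines 2740-2741 plus their response-side counterparts:

	static void fill_params_by_type(struct efc_disc_io *io,
					struct sli_els_params *els,
					struct sli_ct_params *ct)
	{
		switch (io->io_type) {
		case EFC_DISC_IO_ELS_REQ:
		case EFC_DISC_IO_ELS_RESP:
			efct_fill_els_params(io, els);
			break;
		case EFC_DISC_IO_CT_REQ:
		case EFC_DISC_IO_CT_RESP:
			efct_fill_ct_params(io, ct);
			break;
		default:
			break;
		}
	}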
2680 * @io: Discovery IO used to hold els and ct cmd context.
2694 efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io) in efct_els_hw_srrs_send() argument
2699 struct efc_dma *send = &io->req; in efct_els_hw_srrs_send()
2700 struct efc_dma *receive = &io->rsp; in efct_els_hw_srrs_send()
2703 u32 len = io->xmit_len; in efct_els_hw_srrs_send()
2720 hio->arg = io; in efct_els_hw_srrs_send()
2740 if (io->io_type == EFC_DISC_IO_ELS_REQ || in efct_els_hw_srrs_send()
2741 io->io_type == EFC_DISC_IO_CT_REQ) { in efct_els_hw_srrs_send()
2758 switch (io->io_type) { in efct_els_hw_srrs_send()
2763 efct_fill_els_params(io, &els_params); in efct_els_hw_srrs_send()
2778 efct_fill_els_params(io, &els_params); in efct_els_hw_srrs_send()
2792 efct_fill_ct_params(io, &ct_params); in efct_els_hw_srrs_send()
2806 efct_fill_ct_params(io, &ct_params); in efct_els_hw_srrs_send()
2817 efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type); in efct_els_hw_srrs_send()
2825 * Add IO to active io wqe list before submitting, in case the in efct_els_hw_srrs_send()
2846 struct efct_hw_io *io, union efct_hw_io_param_u *iparam, in efct_hw_io_send() argument
2852 if (!io) { in efct_hw_io_send()
2853 pr_err("bad parm hw=%p io=%p\n", hw, io); in efct_hw_io_send()
2858 efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state); in efct_hw_io_send()
2865 io->type = type; in efct_hw_io_send()
2866 io->done = cb; in efct_hw_io_send()
2867 io->arg = arg; in efct_hw_io_send()
2870 * Format the work queue entry used to send the IO in efct_hw_io_send()
2875 struct fcp_txrdy *xfer = io->xfer_rdy.virt; in efct_hw_io_send()
2883 if (io->xbusy) in efct_hw_io_send()
2887 iparam->fcp_tgt.xri = io->indicator; in efct_hw_io_send()
2888 iparam->fcp_tgt.tag = io->reqtag; in efct_hw_io_send()
2890 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2891 &io->def_sgl, io->first_data_sge, in efct_hw_io_send()
2902 if (io->xbusy) in efct_hw_io_send()
2907 iparam->fcp_tgt.xri = io->indicator; in efct_hw_io_send()
2908 iparam->fcp_tgt.tag = io->reqtag; in efct_hw_io_send()
2910 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2911 &io->def_sgl, io->first_data_sge, in efct_hw_io_send()
2922 if (io->xbusy) in efct_hw_io_send()
2927 iparam->fcp_tgt.xri = io->indicator; in efct_hw_io_send()
2928 iparam->fcp_tgt.tag = io->reqtag; in efct_hw_io_send()
2930 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2931 &io->def_sgl, SLI4_CQ_DEFAULT, in efct_hw_io_send()
2940 efc_log_err(hw->os, "unsupported IO type %#x\n", type); in efct_hw_io_send()
2945 io->xbusy = true; in efct_hw_io_send()
2948 * Add IO to active io wqe list before submitting, in case the in efct_hw_io_send()
2951 hw->tcmd_wq_submit[io->wq->instance]++; in efct_hw_io_send()
2952 io->wq->use_count++; in efct_hw_io_send()
2953 rc = efct_hw_wq_write(io->wq, &io->wqe); in efct_hw_io_send()
2961 io->xbusy = false; in efct_hw_io_send()
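A send therefore boils down to: set type/done/arg on the IO, fill the type-specific parameter block (the XRI and request tag are stamped in by efct_hw_io_send() itself, lines 2887-2888), and let the WQE be queued. A minimal caller sketch for a target response; the fcp_tgt xmit_len field is an assumption about the parameter layout, not confirmed by the lines above:

	static int send_target_rsp(struct efct_hw *hw, struct efct_hw_io *io,
				   u32 rsp_len, efct_hw_done_t cb, void *arg)
	{
		union efct_hw_io_param_u param = {};

		/* xri and reqtag are filled in by efct_hw_io_send() */
		param.fcp_tgt.xmit_len = rsp_len;

		return efct_hw_io_send(hw, EFCT_HW_IO_TARGET_RSP, io, &param,
				       cb, arg);
	}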
3429 if (hw->io) { in efct_hw_teardown()
3431 if (hw->io[i] && hw->io[i]->sgl && in efct_hw_teardown()
3432 hw->io[i]->sgl->virt) { in efct_hw_teardown()
3434 hw->io[i]->sgl->size, in efct_hw_teardown()
3435 hw->io[i]->sgl->virt, in efct_hw_teardown()
3436 hw->io[i]->sgl->phys); in efct_hw_teardown()
3438 kfree(hw->io[i]); in efct_hw_teardown()
3439 hw->io[i] = NULL; in efct_hw_teardown()
3441 kfree(hw->io); in efct_hw_teardown()
3442 hw->io = NULL; in efct_hw_teardown()