1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
4 * Copyright (c) 2014- QLogic Corporation.
8 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
31 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
33 #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
36 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
38 #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
55 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
57 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
58 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
59 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
61 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
63 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
65 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
67 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
69 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
71 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
73 ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
75 ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
77 ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
79 ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
82 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
83 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
162 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
164 #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
167 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
171 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
173 #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
278 * Reset entry actions -- initialize state machine
283 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); in bfa_ioc_sm_reset_entry()
316 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); in bfa_ioc_sm_enabling_entry()
336 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_enabling()
339 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); in bfa_ioc_sm_enabling()
343 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_enabling()
353 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_enabling()
391 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_getattr()
394 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); in bfa_ioc_sm_getattr()
413 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_sm_op_entry()
415 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); in bfa_ioc_sm_op_entry()
441 if (ioc->iocpf.auto_recover) in bfa_ioc_sm_op()
449 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); in bfa_ioc_sm_op()
461 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_sm_disabling_entry()
462 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); in bfa_ioc_sm_disabling_entry()
486 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); in bfa_ioc_sm_disabling()
519 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_sm_disabled()
524 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_disabled()
557 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail_retry()
560 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); in bfa_ioc_sm_fail_retry()
564 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail_retry()
577 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_fail_retry()
603 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail()
612 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_fail()
639 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_hwfail()
643 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_sm_hwfail()
651 /* Ignore - already in hwfail state */ in bfa_ioc_sm_hwfail()
664 * Reset entry actions -- initialize state machine
669 iocpf->fw_mismatch_notified = BFA_FALSE; in bfa_iocpf_sm_reset_entry()
670 iocpf->auto_recover = bfa_auto_recover; in bfa_iocpf_sm_reset_entry()
679 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_reset()
709 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
712 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
716 fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc); in bfa_iocpf_sm_fwcheck_entry()
718 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
722 bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); in bfa_iocpf_sm_fwcheck_entry()
725 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
732 pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff); in bfa_iocpf_sm_fwcheck_entry()
733 writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn); in bfa_iocpf_sm_fwcheck_entry()
736 bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0); in bfa_iocpf_sm_fwcheck_entry()
740 bfa_trc(iocpf->ioc, fwstate); in bfa_iocpf_sm_fwcheck_entry()
741 bfa_trc(iocpf->ioc, swab32(fwhdr.exec)); in bfa_iocpf_sm_fwcheck_entry()
742 bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); in bfa_iocpf_sm_fwcheck_entry()
743 bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); in bfa_iocpf_sm_fwcheck_entry()
748 bfa_ioc_ownership_reset(iocpf->ioc); in bfa_iocpf_sm_fwcheck_entry()
753 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
756 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_fwcheck_entry()
765 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fwcheck()
777 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fwcheck()
781 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fwcheck()
816 if (iocpf->fw_mismatch_notified == BFA_FALSE) in bfa_iocpf_sm_mismatch_entry()
817 bfa_ioc_pf_fwmismatch(iocpf->ioc); in bfa_iocpf_sm_mismatch_entry()
819 iocpf->fw_mismatch_notified = BFA_TRUE; in bfa_iocpf_sm_mismatch_entry()
820 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_mismatch_entry()
829 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_mismatch()
860 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_semwait_entry()
869 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_semwait()
879 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_semwait()
902 iocpf->poll_time = 0; in bfa_iocpf_sm_hwinit_entry()
903 bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE); in bfa_iocpf_sm_hwinit_entry()
913 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_hwinit()
923 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_hwinit()
931 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_hwinit()
943 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_enabling_entry()
947 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); in bfa_iocpf_sm_enabling_entry()
948 bfa_ioc_send_enable(iocpf->ioc); in bfa_iocpf_sm_enabling_entry()
958 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_enabling()
965 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
974 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
982 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
994 bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED); in bfa_iocpf_sm_ready_entry()
1000 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_ready()
1025 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_disabling_entry()
1026 bfa_ioc_send_disable(iocpf->ioc); in bfa_iocpf_sm_disabling_entry()
1035 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabling()
1065 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_disabling_sync_entry()
1074 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabling_sync()
1081 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_disabling_sync()
1104 bfa_ioc_mbox_flush(iocpf->ioc); in bfa_iocpf_sm_disabled_entry()
1105 bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED); in bfa_iocpf_sm_disabled_entry()
1111 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabled()
1133 bfa_ioc_debug_save_ftrc(iocpf->ioc); in bfa_iocpf_sm_initfail_sync_entry()
1134 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_initfail_sync_entry()
1143 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_initfail_sync()
1152 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_initfail_sync()
1183 bfa_trc(iocpf->ioc, 0); in bfa_iocpf_sm_initfail_entry()
1192 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_initfail()
1217 bfa_ioc_lpu_stop(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1222 bfa_ioc_mbox_flush(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1224 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1230 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fail_sync()
1238 if (!iocpf->auto_recover) { in bfa_iocpf_sm_fail_sync()
1241 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fail_sync()
1247 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fail_sync()
1274 bfa_trc(iocpf->ioc, 0); in bfa_iocpf_sm_fail_entry()
1283 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fail()
1310 list_for_each(qe, &ioc->notify_q) { in bfa_ioc_event_notify()
1312 notify->cbfn(notify->cbarg, event); in bfa_ioc_event_notify()
1319 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_disable_comp()
1353 r32 = readl(ioc->ioc_regs.ioc_sem_reg); in bfa_ioc_hw_sem_get()
1356 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); in bfa_ioc_hw_sem_get()
1360 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); in bfa_ioc_hw_sem_get()
1377 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1385 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1392 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1404 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1415 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_start()
1418 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_start()
1429 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_stop()
1432 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_stop()
1446 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_ioc_fwver_get()
1447 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_fwver_get()
1452 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_ioc_fwver_get()
1523 if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) in bfa_ioc_fwver_md5_check()
1537 if (drv_fwhdr->signature != fwhdr_to_cmp->signature) in bfa_ioc_fw_ver_compatible()
1540 if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) in bfa_ioc_fw_ver_compatible()
1543 if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) in bfa_ioc_fw_ver_compatible()
1546 if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) in bfa_ioc_fw_ver_compatible()
1549 if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && in bfa_ioc_fw_ver_compatible()
1550 drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && in bfa_ioc_fw_ver_compatible()
1551 drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) { in bfa_ioc_fw_ver_compatible()
1561 if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) in bfa_ioc_flash_fwver_valid()
1569 if (fwhdr->fwver.phase == 0 && in fwhdr_is_ga()
1570 fwhdr->fwver.build == 0) in fwhdr_is_ga()
1586 if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) in bfa_ioc_fw_ver_patch_cmp()
1589 else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) in bfa_ioc_fw_ver_patch_cmp()
1607 if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) in bfa_ioc_fw_ver_patch_cmp()
1609 else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) in bfa_ioc_fw_ver_patch_cmp()
1612 if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) in bfa_ioc_fw_ver_patch_cmp()
1614 else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) in bfa_ioc_fw_ver_patch_cmp()
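The patch-level comparison above orders firmware images by patch, then phase, then build (a GA special case via fwhdr_is_ga() is omitted here). A minimal, self-contained sketch of that three-level ordering, using local types rather than the driver's image header structure:

enum ver_cmp { VER_OLDER = -1, VER_SAME = 0, VER_NEWER = 1 };

struct ver { unsigned int patch, phase, build; };

/* Compare patch first, then phase, then build; all fields equal means same. */
static enum ver_cmp ver_cmp3(const struct ver *a, const struct ver *b)
{
    if (a->patch != b->patch)
        return a->patch > b->patch ? VER_NEWER : VER_OLDER;
    if (a->phase != b->phase)
        return a->phase > b->phase ? VER_NEWER : VER_OLDER;
    if (a->build != b->build)
        return a->build > b->build ? VER_NEWER : VER_OLDER;
    return VER_SAME;
}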
1630 return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, in bfa_ioc_flash_img_get_chnk()
1670 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_ioc_fwsig_invalidate()
1671 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_fwsig_invalidate()
1672 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN); in bfa_ioc_fwsig_invalidate()
1685 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgflush()
1687 writel(1, ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgflush()
1731 * just re-enable IOC. in bfa_ioc_hwinit()
1740 * When using MSI-X any pending firmware ready event should in bfa_ioc_hwinit()
1741 * be flushed. Otherwise MSI-X interrupts are not delivered. in bfa_ioc_hwinit()
1744 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); in bfa_ioc_hwinit()
1780 ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); in bfa_ioc_mbox_send()
1783 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); in bfa_ioc_mbox_send()
1788 writel(1, ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_send()
1789 (void) readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_send()
1799 enable_req.clscode = cpu_to_be16(ioc->clscode); in bfa_ioc_send_enable()
1800 /* unsigned 32-bit time_t overflow in y2106 */ in bfa_ioc_send_enable()
1812 disable_req.clscode = cpu_to_be16(ioc->clscode); in bfa_ioc_send_disable()
1813 /* unsigned 32-bit time_t overflow in y2106 */ in bfa_ioc_send_disable()
1825 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); in bfa_ioc_send_getattr()
1835 hb_count = readl(ioc->ioc_regs.heartbeat); in bfa_ioc_hb_check()
1836 if (ioc->hb_count == hb_count) { in bfa_ioc_hb_check()
1840 ioc->hb_count = hb_count; in bfa_ioc_hb_check()
1850 ioc->hb_count = readl(ioc->ioc_regs.heartbeat); in bfa_ioc_hb_monitor()
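bfa_ioc_hb_check() declares the firmware hung when the heartbeat counter read from ioc_regs.heartbeat has not advanced since the previous poll, and otherwise stores the new count and keeps monitoring. A hypothetical stand-alone version of that check:

struct hb_watch { unsigned int last; };

/* Returns 1 if the counter did not advance since the previous poll. */
static int hb_stalled(struct hb_watch *w, unsigned int current_count)
{
    if (w->last == current_count)
        return 1;                /* no progress: report failure */
    w->last = current_count;     /* progress: remember and keep monitoring */
    return 0;
}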
1890 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_ioc_download_fw()
1891 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1917 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, in bfa_ioc_download_fw()
1928 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1932 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_download_fw()
1933 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1942 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, in bfa_ioc_download_fw()
1943 ioc->port0_mode, ioc->port1_mode); in bfa_ioc_download_fw()
1944 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF, in bfa_ioc_download_fw()
1946 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF, in bfa_ioc_download_fw()
1948 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF, in bfa_ioc_download_fw()
1960 struct bfi_ioc_attr_s *attr = ioc->attr; in bfa_ioc_getattr_reply()
1962 attr->adapter_prop = be32_to_cpu(attr->adapter_prop); in bfa_ioc_getattr_reply()
1963 attr->card_type = be32_to_cpu(attr->card_type); in bfa_ioc_getattr_reply()
1964 attr->maxfrsize = be16_to_cpu(attr->maxfrsize); in bfa_ioc_getattr_reply()
1965 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); in bfa_ioc_getattr_reply()
1966 attr->mfg_year = be16_to_cpu(attr->mfg_year); in bfa_ioc_getattr_reply()
1977 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_attach()
1980 INIT_LIST_HEAD(&mod->cmd_q); in bfa_ioc_mbox_attach()
1982 mod->mbhdlr[mc].cbfn = NULL; in bfa_ioc_mbox_attach()
1983 mod->mbhdlr[mc].cbarg = ioc->bfa; in bfa_ioc_mbox_attach()
1988 * Mbox poll timer -- restarts any pending mailbox requests.
1993 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_poll()
2000 if (list_empty(&mod->cmd_q)) in bfa_ioc_mbox_poll()
2006 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_poll()
2013 bfa_q_deq(&mod->cmd_q, &cmd); in bfa_ioc_mbox_poll()
2014 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); in bfa_ioc_mbox_poll()
2023 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_flush()
2026 while (!list_empty(&mod->cmd_q)) in bfa_ioc_mbox_flush()
2027 bfa_q_deq(&mod->cmd_q, &cmd); in bfa_ioc_mbox_flush()
2046 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); in bfa_ioc_smem_read()
2055 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { in bfa_ioc_smem_read()
2060 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
2065 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_ioc_smem_read()
2075 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
2078 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_smem_read()
2079 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
2083 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_read()
2084 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_read()
2103 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); in bfa_ioc_smem_clr()
2112 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { in bfa_ioc_smem_clr()
2117 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
2122 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); in bfa_ioc_smem_clr()
2131 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
2134 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_smem_clr()
2135 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
2140 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_clr()
2141 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_clr()
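bfa_ioc_smem_read() and bfa_ioc_smem_clr() both access shared memory through a page window: take the init semaphore, program host_page_num_fn for the current offset, touch one 32-bit word at a time, re-program the page register whenever the offset wraps past the window, then restore page 0 and release the semaphore. A sketch of the windowing arithmetic only; the 32 KB window size is an assumption for illustration, not taken from the driver headers:

#define DEMO_PG_SHIFT 15                         /* assumed 32 KB window */
#define DEMO_PG_MASK  ((1u << DEMO_PG_SHIFT) - 1)

/* Map an absolute smem offset to (page number, offset inside the page). */
static void demo_smem_map(unsigned int pg0, unsigned int off,
                          unsigned int *pgnum, unsigned int *pgoff)
{
    *pgnum = pg0 + (off >> DEMO_PG_SHIFT);
    *pgoff = off & DEMO_PG_MASK;
}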
2149 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_fail_notify()
2154 ioc->cbfn->hbfail_cbfn(ioc->bfa); in bfa_ioc_fail_notify()
2168 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_pf_fwmismatch()
2172 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_pf_fwmismatch()
2186 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
2190 ioc->pllinit = BFA_TRUE; in bfa_ioc_pll_init()
2200 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
2201 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
2291 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2300 r32 = readl(ioc->ioc_regs.lpu_mbox + in bfa_ioc_msgget()
2308 writel(1, ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2309 readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2315 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) in bfa_ioc_isr() argument
2318 struct bfa_iocpf_s *iocpf = &ioc->iocpf; in bfa_ioc_isr()
2320 msg = (union bfi_ioc_i2h_msg_u *) m; in bfa_ioc_isr()
2324 switch (msg->mh.msg_id) { in bfa_ioc_isr()
2329 ioc->port_mode = ioc->port_mode_cfg = in bfa_ioc_isr()
2330 (enum bfa_mode_s)msg->fw_event.port_mode; in bfa_ioc_isr()
2331 ioc->ad_cap_bm = msg->fw_event.cap_bm; in bfa_ioc_isr()
2344 bfa_trc(ioc, msg->mh.msg_id); in bfa_ioc_isr()
2359 ioc->bfa = bfa; in bfa_ioc_attach()
2360 ioc->cbfn = cbfn; in bfa_ioc_attach()
2361 ioc->timer_mod = timer_mod; in bfa_ioc_attach()
2362 ioc->fcmode = BFA_FALSE; in bfa_ioc_attach()
2363 ioc->pllinit = BFA_FALSE; in bfa_ioc_attach()
2364 ioc->dbg_fwsave_once = BFA_TRUE; in bfa_ioc_attach()
2365 ioc->iocpf.ioc = ioc; in bfa_ioc_attach()
2368 INIT_LIST_HEAD(&ioc->notify_q); in bfa_ioc_attach()
2381 INIT_LIST_HEAD(&ioc->notify_q); in bfa_ioc_detach()
2393 ioc->clscode = clscode; in bfa_ioc_pci_init()
2394 ioc->pcidev = *pcidev; in bfa_ioc_pci_init()
2399 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; in bfa_ioc_pci_init()
2400 ioc->asic_mode = BFI_ASIC_MODE_FC; in bfa_ioc_pci_init()
2402 switch (pcidev->device_id) { in bfa_ioc_pci_init()
2405 ioc->asic_gen = BFI_ASIC_GEN_CB; in bfa_ioc_pci_init()
2406 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2407 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2408 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2412 ioc->asic_gen = BFI_ASIC_GEN_CT; in bfa_ioc_pci_init()
2413 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; in bfa_ioc_pci_init()
2414 ioc->asic_mode = BFI_ASIC_MODE_ETH; in bfa_ioc_pci_init()
2415 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; in bfa_ioc_pci_init()
2416 ioc->ad_cap_bm = BFA_CM_CNA; in bfa_ioc_pci_init()
2420 ioc->asic_gen = BFI_ASIC_GEN_CT; in bfa_ioc_pci_init()
2421 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2422 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2423 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2428 ioc->asic_gen = BFI_ASIC_GEN_CT2; in bfa_ioc_pci_init()
2430 pcidev->ssid == BFA_PCI_CT2_SSID_FC) { in bfa_ioc_pci_init()
2431 ioc->asic_mode = BFI_ASIC_MODE_FC16; in bfa_ioc_pci_init()
2432 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2433 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2434 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2436 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; in bfa_ioc_pci_init()
2437 ioc->asic_mode = BFI_ASIC_MODE_ETH; in bfa_ioc_pci_init()
2438 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { in bfa_ioc_pci_init()
2439 ioc->port_mode = in bfa_ioc_pci_init()
2440 ioc->port_mode_cfg = BFA_MODE_CNA; in bfa_ioc_pci_init()
2441 ioc->ad_cap_bm = BFA_CM_CNA; in bfa_ioc_pci_init()
2443 ioc->port_mode = in bfa_ioc_pci_init()
2444 ioc->port_mode_cfg = BFA_MODE_NIC; in bfa_ioc_pci_init()
2445 ioc->ad_cap_bm = BFA_CM_NIC; in bfa_ioc_pci_init()
2457 if (ioc->asic_gen == BFI_ASIC_GEN_CB) in bfa_ioc_pci_init()
2459 else if (ioc->asic_gen == BFI_ASIC_GEN_CT) in bfa_ioc_pci_init()
2462 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); in bfa_ioc_pci_init()
2483 ioc->attr_dma.kva = dm_kva; in bfa_ioc_mem_claim()
2484 ioc->attr_dma.pa = dm_pa; in bfa_ioc_mem_claim()
2485 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; in bfa_ioc_mem_claim()
2492 ioc->dbg_fwsave_once = BFA_TRUE; in bfa_ioc_enable()
2507 ioc->dbg_fwsave_once = BFA_TRUE; in bfa_ioc_suspend()
2518 ioc->dbg_fwsave = dbg_fwsave; in bfa_ioc_debug_memclaim()
2519 ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN; in bfa_ioc_debug_memclaim()
2531 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_register()
2535 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; in bfa_ioc_mbox_register()
2545 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_regisr()
2547 mod->mbhdlr[mc].cbfn = cbfn; in bfa_ioc_mbox_regisr()
2548 mod->mbhdlr[mc].cbarg = cbarg; in bfa_ioc_mbox_regisr()
2561 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_queue()
2567 if (!list_empty(&mod->cmd_q)) { in bfa_ioc_mbox_queue()
2568 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_ioc_mbox_queue()
2575 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_queue()
2577 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_ioc_mbox_queue()
2582 * mailbox is free -- queue command to firmware in bfa_ioc_mbox_queue()
2584 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); in bfa_ioc_mbox_queue()
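bfa_ioc_mbox_queue() writes a command to the hardware mailbox only when the software queue is empty and the hfn_mbox_cmd status register reports the mailbox free; otherwise the command is parked on cmd_q and bfa_ioc_mbox_poll() resends it later. A condensed, hypothetical model of that decision:

#include <stdbool.h>

struct demo_mbox {
    bool hw_busy;    /* models the hfn_mbox_cmd status read */
    int  queued;     /* models the number of commands parked on cmd_q */
};

/* Returns true if the command went to hardware, false if it was queued. */
static bool demo_mbox_post(struct demo_mbox *mb)
{
    if (mb->queued || mb->hw_busy) {
        mb->queued++;            /* defer; the poll timer will resend later */
        return false;
    }
    mb->hw_busy = true;          /* mailbox free: send to firmware now */
    return true;
}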
2593 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_isr()
2594 struct bfi_mbmsg_s m; in bfa_ioc_mbox_isr() local
2597 if (bfa_ioc_msgget(ioc, &m)) { in bfa_ioc_mbox_isr()
2601 mc = m.mh.msg_class; in bfa_ioc_mbox_isr()
2603 bfa_ioc_isr(ioc, &m); in bfa_ioc_mbox_isr()
2607 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) in bfa_ioc_mbox_isr()
2610 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); in bfa_ioc_mbox_isr()
2625 ioc->stats.hb_count = ioc->hb_count; in bfa_ioc_error_isr()
2646 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) || in bfa_ioc_fw_mismatch()
2647 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch); in bfa_ioc_fw_mismatch()
2651 * Check if adapter is disabled -- both IOCs should be in a disabled
2666 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { in bfa_ioc_adapter_is_disabled()
2692 ioc_attr = ioc->attr; in bfa_ioc_get_adapter_attr()
2694 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); in bfa_ioc_get_adapter_attr()
2695 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); in bfa_ioc_get_adapter_attr()
2696 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); in bfa_ioc_get_adapter_attr()
2697 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); in bfa_ioc_get_adapter_attr()
2698 memcpy(&ad_attr->vpd, &ioc_attr->vpd, in bfa_ioc_get_adapter_attr()
2701 ad_attr->nports = bfa_ioc_get_nports(ioc); in bfa_ioc_get_adapter_attr()
2702 ad_attr->max_speed = bfa_ioc_speed_sup(ioc); in bfa_ioc_get_adapter_attr()
2704 bfa_ioc_get_adapter_model(ioc, ad_attr->model); in bfa_ioc_get_adapter_attr()
2706 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); in bfa_ioc_get_adapter_attr()
2708 ad_attr->card_type = ioc_attr->card_type; in bfa_ioc_get_adapter_attr()
2709 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); in bfa_ioc_get_adapter_attr()
2711 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) in bfa_ioc_get_adapter_attr()
2712 ad_attr->prototype = 1; in bfa_ioc_get_adapter_attr()
2714 ad_attr->prototype = 0; in bfa_ioc_get_adapter_attr()
2716 ad_attr->pwwn = ioc->attr->pwwn; in bfa_ioc_get_adapter_attr()
2717 ad_attr->mac = bfa_ioc_get_mac(ioc); in bfa_ioc_get_adapter_attr()
2719 ad_attr->pcie_gen = ioc_attr->pcie_gen; in bfa_ioc_get_adapter_attr()
2720 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; in bfa_ioc_get_adapter_attr()
2721 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; in bfa_ioc_get_adapter_attr()
2722 ad_attr->asic_rev = ioc_attr->asic_rev; in bfa_ioc_get_adapter_attr()
2724 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); in bfa_ioc_get_adapter_attr()
2726 ad_attr->cna_capable = bfa_ioc_is_cna(ioc); in bfa_ioc_get_adapter_attr()
2727 ad_attr->trunk_capable = (ad_attr->nports > 1) && in bfa_ioc_get_adapter_attr()
2728 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; in bfa_ioc_get_adapter_attr()
2729 ad_attr->mfg_day = ioc_attr->mfg_day; in bfa_ioc_get_adapter_attr()
2730 ad_attr->mfg_month = ioc_attr->mfg_month; in bfa_ioc_get_adapter_attr()
2731 ad_attr->mfg_year = ioc_attr->mfg_year; in bfa_ioc_get_adapter_attr()
2732 memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN); in bfa_ioc_get_adapter_attr()
2738 if (ioc->clscode == BFI_PCIFN_CLASS_ETH) in bfa_ioc_get_type()
2741 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC); in bfa_ioc_get_type()
2743 return (ioc->attr->port_mode == BFI_PORT_MODE_FC) in bfa_ioc_get_type()
2752 (void *)ioc->attr->brcd_serialnum, in bfa_ioc_get_adapter_serial_num()
2760 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); in bfa_ioc_get_adapter_fw_ver()
2773 chip_rev[3] = '-'; in bfa_ioc_get_pci_chip_rev()
2774 chip_rev[4] = ioc->attr->asic_rev; in bfa_ioc_get_pci_chip_rev()
2782 memcpy(optrom_ver, ioc->attr->optrom_version, in bfa_ioc_get_adapter_optrom_ver()
2802 ioc_attr = ioc->attr; in bfa_ioc_get_adapter_model()
2804 if (bfa_asic_id_ct2(ioc->pcidev.device_id) && in bfa_ioc_get_adapter_model()
2805 (!bfa_mfg_is_mezz(ioc_attr->card_type))) in bfa_ioc_get_adapter_model()
2806 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s", in bfa_ioc_get_adapter_model()
2807 BFA_MFG_NAME, ioc_attr->card_type, nports, "p"); in bfa_ioc_get_adapter_model()
2809 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", in bfa_ioc_get_adapter_model()
2810 BFA_MFG_NAME, ioc_attr->card_type); in bfa_ioc_get_adapter_model()
2817 enum bfa_ioc_state ioc_st = bfa_ioc_sm_to_state(ioc_sm_table, ioc->fsm); in bfa_ioc_get_state()
2822 iocpf_st = bfa_iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); in bfa_ioc_get_state()
2858 ioc_attr->state = bfa_ioc_get_state(ioc); in bfa_ioc_get_attr()
2859 ioc_attr->port_id = bfa_ioc_portid(ioc); in bfa_ioc_get_attr()
2860 ioc_attr->port_mode = ioc->port_mode; in bfa_ioc_get_attr()
2861 ioc_attr->port_mode_cfg = ioc->port_mode_cfg; in bfa_ioc_get_attr()
2862 ioc_attr->cap_bm = ioc->ad_cap_bm; in bfa_ioc_get_attr()
2864 ioc_attr->ioc_type = bfa_ioc_get_type(ioc); in bfa_ioc_get_attr()
2866 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); in bfa_ioc_get_attr()
2868 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); in bfa_ioc_get_attr()
2869 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); in bfa_ioc_get_attr()
2870 ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc)); in bfa_ioc_get_attr()
2871 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); in bfa_ioc_get_attr()
2881 return ioc->attr->fcoe_mac; in bfa_ioc_get_mac()
2883 return ioc->attr->mac; in bfa_ioc_get_mac()
2889 mac_t m; in bfa_ioc_get_mfg_mac() local
2891 m = ioc->attr->mfg_mac; in bfa_ioc_get_mfg_mac()
2892 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) in bfa_ioc_get_mfg_mac()
2893 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); in bfa_ioc_get_mfg_mac()
2895 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), in bfa_ioc_get_mfg_mac()
2898 return m; in bfa_ioc_get_mfg_mac()
2907 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_aen_post()
2918 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; in bfa_ioc_aen_post()
2921 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; in bfa_ioc_aen_post()
2922 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); in bfa_ioc_aen_post()
2925 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); in bfa_ioc_aen_post()
2933 aen_entry->aen_data.ioc.ioc_type = ioc_type; in bfa_ioc_aen_post()
2934 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, in bfa_ioc_aen_post()
2946 if (ioc->dbg_fwsave_len == 0) in bfa_ioc_debug_fwsave()
2950 if (tlen > ioc->dbg_fwsave_len) in bfa_ioc_debug_fwsave()
2951 tlen = ioc->dbg_fwsave_len; in bfa_ioc_debug_fwsave()
2953 memcpy(trcdata, ioc->dbg_fwsave, tlen); in bfa_ioc_debug_fwsave()
2986 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, in bfa_ioc_send_fwsync()
2988 req->clscode = cpu_to_be16(ioc->clscode); in bfa_ioc_send_fwsync()
3011 fwsync_iter--; in bfa_ioc_fwsync()
3042 dlen = smem_len - loff; in bfa_ioc_debug_fwcore()
3072 if (ioc->stats_busy) { in bfa_ioc_fw_stats_get()
3073 bfa_trc(ioc, ioc->stats_busy); in bfa_ioc_fw_stats_get()
3076 ioc->stats_busy = BFA_TRUE; in bfa_ioc_fw_stats_get()
3081 ioc->stats_busy = BFA_FALSE; in bfa_ioc_fw_stats_get()
3093 if (ioc->stats_busy) { in bfa_ioc_fw_stats_clear()
3094 bfa_trc(ioc, ioc->stats_busy); in bfa_ioc_fw_stats_clear()
3097 ioc->stats_busy = BFA_TRUE; in bfa_ioc_fw_stats_clear()
3102 ioc->stats_busy = BFA_FALSE; in bfa_ioc_fw_stats_clear()
3114 if (ioc->dbg_fwsave_once) { in bfa_ioc_debug_save_ftrc()
3115 ioc->dbg_fwsave_once = BFA_FALSE; in bfa_ioc_debug_save_ftrc()
3116 if (ioc->dbg_fwsave_len) { in bfa_ioc_debug_save_ftrc()
3117 tlen = ioc->dbg_fwsave_len; in bfa_ioc_debug_save_ftrc()
3118 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); in bfa_ioc_debug_save_ftrc()
3130 ioc->stats.hb_count = ioc->hb_count; in bfa_ioc_recover()
3143 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); in bfa_iocpf_timeout()
3162 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); in bfa_ioc_poll_fwinit()
3166 if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) in bfa_ioc_poll_fwinit()
3169 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; in bfa_ioc_poll_fwinit()
3188 struct list_head *qh = &mod->timer_q; in bfa_timer_beat()
3201 if (elem->timeout <= BFA_TIMER_FREQ) { in bfa_timer_beat()
3202 elem->timeout = 0; in bfa_timer_beat()
3203 list_del(&elem->qe); in bfa_timer_beat()
3204 list_add_tail(&elem->qe, &timedout_q); in bfa_timer_beat()
3206 elem->timeout -= BFA_TIMER_FREQ; in bfa_timer_beat()
3217 elem->timercb(elem->arg); in bfa_timer_beat()
3230 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer)); in bfa_timer_begin()
3232 timer->timeout = timeout; in bfa_timer_begin()
3233 timer->timercb = timercb; in bfa_timer_begin()
3234 timer->arg = arg; in bfa_timer_begin()
3236 list_add_tail(&timer->qe, &mod->timer_q); in bfa_timer_begin()
3245 WARN_ON(list_empty(&timer->qe)); in bfa_timer_stop()
3247 list_del(&timer->qe); in bfa_timer_stop()
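bfa_timer_beat(), bfa_timer_begin() and bfa_timer_stop() implement a simple software timer list driven by a fixed beat: each tick subtracts BFA_TIMER_FREQ from every armed timer, expired entries are collected (on timedout_q in the driver, so callbacks may safely re-arm timers) and their callbacks are run. A runnable user-space sketch of the same idea; the names and the 500 ms period are illustrative, and expired callbacks are run inline rather than via a second list:

#include <stdio.h>

#define DEMO_BEAT_MS 500

struct demo_timer {
    int armed;
    unsigned int timeout_ms;
    void (*timercb)(void *arg);
    void *arg;
};

static void demo_timer_beat(struct demo_timer *t, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        if (!t[i].armed)
            continue;
        if (t[i].timeout_ms <= DEMO_BEAT_MS) {
            t[i].armed = 0;              /* expired: disarm first */
            t[i].timercb(t[i].arg);      /* then run the callback */
        } else {
            t[i].timeout_ms -= DEMO_BEAT_MS;
        }
    }
}

static void demo_expire(void *arg)
{
    printf("%s timer expired\n", (const char *)arg);
}

int main(void)
{
    struct demo_timer timers[] = {
        { 1, 1000, demo_expire, "heartbeat" },
        { 1,  250, demo_expire, "semaphore" },
    };
    int beat;

    for (beat = 0; beat < 3; beat++)
        demo_timer_beat(timers, 2);
    return 0;
}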
3261 cfg_inst = &cfg->inst[i]; in bfa_ablk_config_swap()
3263 be16 = cfg_inst->pf_cfg[j].pers; in bfa_ablk_config_swap()
3264 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16); in bfa_ablk_config_swap()
3265 be16 = cfg_inst->pf_cfg[j].num_qpairs; in bfa_ablk_config_swap()
3266 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); in bfa_ablk_config_swap()
3267 be16 = cfg_inst->pf_cfg[j].num_vectors; in bfa_ablk_config_swap()
3268 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); in bfa_ablk_config_swap()
3269 be16 = cfg_inst->pf_cfg[j].bw_min; in bfa_ablk_config_swap()
3270 cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16); in bfa_ablk_config_swap()
3271 be16 = cfg_inst->pf_cfg[j].bw_max; in bfa_ablk_config_swap()
3272 cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16); in bfa_ablk_config_swap()
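bfa_ablk_config_swap() converts the 16-bit per-PF fields delivered by firmware from big-endian wire order to host order in place. A stand-alone sketch of the same conversion; the field list mirrors the fragment above but the struct layout is otherwise hypothetical:

#include <stdint.h>
#include <arpa/inet.h>   /* ntohs(): big-endian (network) to host order */

struct demo_pf_cfg {
    uint16_t pers;
    uint16_t num_qpairs;
    uint16_t num_vectors;
    uint16_t bw_min;
    uint16_t bw_max;
};

static void demo_pf_cfg_to_host(struct demo_pf_cfg *c)
{
    c->pers        = ntohs(c->pers);
    c->num_qpairs  = ntohs(c->num_qpairs);
    c->num_vectors = ntohs(c->num_vectors);
    c->bw_min      = ntohs(c->bw_min);
    c->bw_max      = ntohs(c->bw_max);
}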
3284 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK); in bfa_ablk_isr()
3285 bfa_trc(ablk->ioc, msg->mh.msg_id); in bfa_ablk_isr()
3287 switch (msg->mh.msg_id) { in bfa_ablk_isr()
3289 if (rsp->status == BFA_STATUS_OK) { in bfa_ablk_isr()
3290 memcpy(ablk->cfg, ablk->dma_addr.kva, in bfa_ablk_isr()
3292 bfa_ablk_config_swap(ablk->cfg); in bfa_ablk_isr()
3293 ablk->cfg = NULL; in bfa_ablk_isr()
3300 ablk->ioc->port_mode_cfg = rsp->port_mode; in bfa_ablk_isr()
3307 /* No-op */ in bfa_ablk_isr()
3311 *(ablk->pcifn) = rsp->pcifn; in bfa_ablk_isr()
3312 ablk->pcifn = NULL; in bfa_ablk_isr()
3319 ablk->busy = BFA_FALSE; in bfa_ablk_isr()
3320 if (ablk->cbfn) { in bfa_ablk_isr()
3321 cbfn = ablk->cbfn; in bfa_ablk_isr()
3322 ablk->cbfn = NULL; in bfa_ablk_isr()
3323 cbfn(ablk->cbarg, rsp->status); in bfa_ablk_isr()
3332 bfa_trc(ablk->ioc, event); in bfa_ablk_notify()
3336 WARN_ON(ablk->busy != BFA_FALSE); in bfa_ablk_notify()
3342 ablk->pcifn = NULL; in bfa_ablk_notify()
3343 if (ablk->busy) { in bfa_ablk_notify()
3344 if (ablk->cbfn) in bfa_ablk_notify()
3345 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED); in bfa_ablk_notify()
3346 ablk->cbfn = NULL; in bfa_ablk_notify()
3347 ablk->busy = BFA_FALSE; in bfa_ablk_notify()
3366 ablk->dma_addr.kva = dma_kva; in bfa_ablk_memclaim()
3367 ablk->dma_addr.pa = dma_pa; in bfa_ablk_memclaim()
3373 ablk->ioc = ioc; in bfa_ablk_attach()
3375 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk); in bfa_ablk_attach()
3376 bfa_q_qe_init(&ablk->ioc_notify); in bfa_ablk_attach()
3377 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk); in bfa_ablk_attach()
3378 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q); in bfa_ablk_attach()
3385 struct bfi_ablk_h2i_query_s *m; in bfa_ablk_query() local
3389 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_query()
3390 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_query()
3394 if (ablk->busy) { in bfa_ablk_query()
3395 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_query()
3399 ablk->cfg = ablk_cfg; in bfa_ablk_query()
3400 ablk->cbfn = cbfn; in bfa_ablk_query()
3401 ablk->cbarg = cbarg; in bfa_ablk_query()
3402 ablk->busy = BFA_TRUE; in bfa_ablk_query()
3404 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg; in bfa_ablk_query()
3405 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY, in bfa_ablk_query()
3406 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_query()
3407 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa); in bfa_ablk_query()
3408 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_query()
3419 struct bfi_ablk_h2i_pf_req_s *m; in bfa_ablk_pf_create() local
3421 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_create()
3422 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_create()
3426 if (ablk->busy) { in bfa_ablk_pf_create()
3427 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_create()
3431 ablk->pcifn = pcifn; in bfa_ablk_pf_create()
3432 ablk->cbfn = cbfn; in bfa_ablk_pf_create()
3433 ablk->cbarg = cbarg; in bfa_ablk_pf_create()
3434 ablk->busy = BFA_TRUE; in bfa_ablk_pf_create()
3436 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_create()
3437 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, in bfa_ablk_pf_create()
3438 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_create()
3439 m->pers = cpu_to_be16((u16)personality); in bfa_ablk_pf_create()
3440 m->bw_min = cpu_to_be16(bw_min); in bfa_ablk_pf_create()
3441 m->bw_max = cpu_to_be16(bw_max); in bfa_ablk_pf_create()
3442 m->port = port; in bfa_ablk_pf_create()
3443 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_create()
3452 struct bfi_ablk_h2i_pf_req_s *m; in bfa_ablk_pf_delete() local
3454 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_delete()
3455 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_delete()
3459 if (ablk->busy) { in bfa_ablk_pf_delete()
3460 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_delete()
3464 ablk->cbfn = cbfn; in bfa_ablk_pf_delete()
3465 ablk->cbarg = cbarg; in bfa_ablk_pf_delete()
3466 ablk->busy = BFA_TRUE; in bfa_ablk_pf_delete()
3468 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_delete()
3469 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE, in bfa_ablk_pf_delete()
3470 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_delete()
3471 m->pcifn = (u8)pcifn; in bfa_ablk_pf_delete()
3472 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_delete()
3481 struct bfi_ablk_h2i_cfg_req_s *m; in bfa_ablk_adapter_config() local
3483 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_adapter_config()
3484 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_adapter_config()
3488 if (ablk->busy) { in bfa_ablk_adapter_config()
3489 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_adapter_config()
3493 ablk->cbfn = cbfn; in bfa_ablk_adapter_config()
3494 ablk->cbarg = cbarg; in bfa_ablk_adapter_config()
3495 ablk->busy = BFA_TRUE; in bfa_ablk_adapter_config()
3497 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; in bfa_ablk_adapter_config()
3498 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG, in bfa_ablk_adapter_config()
3499 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_adapter_config()
3500 m->mode = (u8)mode; in bfa_ablk_adapter_config()
3501 m->max_pf = (u8)max_pf; in bfa_ablk_adapter_config()
3502 m->max_vf = (u8)max_vf; in bfa_ablk_adapter_config()
3503 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_adapter_config()
3512 struct bfi_ablk_h2i_cfg_req_s *m; in bfa_ablk_port_config() local
3514 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_port_config()
3515 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_port_config()
3519 if (ablk->busy) { in bfa_ablk_port_config()
3520 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_port_config()
3524 ablk->cbfn = cbfn; in bfa_ablk_port_config()
3525 ablk->cbarg = cbarg; in bfa_ablk_port_config()
3526 ablk->busy = BFA_TRUE; in bfa_ablk_port_config()
3528 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; in bfa_ablk_port_config()
3529 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG, in bfa_ablk_port_config()
3530 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_port_config()
3531 m->port = (u8)port; in bfa_ablk_port_config()
3532 m->mode = (u8)mode; in bfa_ablk_port_config()
3533 m->max_pf = (u8)max_pf; in bfa_ablk_port_config()
3534 m->max_vf = (u8)max_vf; in bfa_ablk_port_config()
3535 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_port_config()
3544 struct bfi_ablk_h2i_pf_req_s *m; in bfa_ablk_pf_update() local
3546 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_update()
3547 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_update()
3551 if (ablk->busy) { in bfa_ablk_pf_update()
3552 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_update()
3556 ablk->cbfn = cbfn; in bfa_ablk_pf_update()
3557 ablk->cbarg = cbarg; in bfa_ablk_pf_update()
3558 ablk->busy = BFA_TRUE; in bfa_ablk_pf_update()
3560 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_update()
3561 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, in bfa_ablk_pf_update()
3562 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_update()
3563 m->pcifn = (u8)pcifn; in bfa_ablk_pf_update()
3564 m->bw_min = cpu_to_be16(bw_min); in bfa_ablk_pf_update()
3565 m->bw_max = cpu_to_be16(bw_max); in bfa_ablk_pf_update()
3566 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_update()
3574 struct bfi_ablk_h2i_optrom_s *m; in bfa_ablk_optrom_en() local
3576 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_optrom_en()
3577 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_optrom_en()
3581 if (ablk->busy) { in bfa_ablk_optrom_en()
3582 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_optrom_en()
3586 ablk->cbfn = cbfn; in bfa_ablk_optrom_en()
3587 ablk->cbarg = cbarg; in bfa_ablk_optrom_en()
3588 ablk->busy = BFA_TRUE; in bfa_ablk_optrom_en()
3590 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; in bfa_ablk_optrom_en()
3591 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE, in bfa_ablk_optrom_en()
3592 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_optrom_en()
3593 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_optrom_en()
3601 struct bfi_ablk_h2i_optrom_s *m; in bfa_ablk_optrom_dis() local
3603 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_optrom_dis()
3604 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_optrom_dis()
3608 if (ablk->busy) { in bfa_ablk_optrom_dis()
3609 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_optrom_dis()
3613 ablk->cbfn = cbfn; in bfa_ablk_optrom_dis()
3614 ablk->cbarg = cbarg; in bfa_ablk_optrom_dis()
3615 ablk->busy = BFA_TRUE; in bfa_ablk_optrom_dis()
3617 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; in bfa_ablk_optrom_dis()
3618 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE, in bfa_ablk_optrom_dis()
3619 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_optrom_dis()
3620 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_optrom_dis()
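Every bfa_ablk_* request above follows the same gate sequence: reject the call if the IOC is not operational, reject it if a request is already outstanding, record the caller's callback, mark the block busy, then build one mailbox message and queue it. A condensed, hypothetical outline of that sequence (status names are local to the sketch):

enum demo_status { DEMO_OK, DEMO_IOC_FAILURE, DEMO_DEVBUSY };

struct demo_ablk {
    int  ioc_operational;
    int  busy;
    void (*cbfn)(void *cbarg, enum demo_status status);
    void *cbarg;
};

static enum demo_status demo_ablk_request(struct demo_ablk *ablk,
                                          void (*cbfn)(void *, enum demo_status),
                                          void *cbarg)
{
    if (!ablk->ioc_operational)
        return DEMO_IOC_FAILURE;     /* IOC down: reject immediately */
    if (ablk->busy)
        return DEMO_DEVBUSY;         /* one outstanding request at a time */

    ablk->cbfn  = cbfn;              /* completion reported via callback */
    ablk->cbarg = cbarg;
    ablk->busy  = 1;
    /* ... build the request message and queue it to the IOC mailbox ... */
    return DEMO_OK;
}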
3638 bfa_trc(sfp, sfp->lock); in bfa_cb_sfp_show()
3639 if (sfp->cbfn) in bfa_cb_sfp_show()
3640 sfp->cbfn(sfp->cbarg, sfp->status); in bfa_cb_sfp_show()
3641 sfp->lock = 0; in bfa_cb_sfp_show()
3642 sfp->cbfn = NULL; in bfa_cb_sfp_show()
3648 bfa_trc(sfp, sfp->portspeed); in bfa_cb_sfp_state_query()
3649 if (sfp->media) { in bfa_cb_sfp_state_query()
3651 if (sfp->state_query_cbfn) in bfa_cb_sfp_state_query()
3652 sfp->state_query_cbfn(sfp->state_query_cbarg, in bfa_cb_sfp_state_query()
3653 sfp->status); in bfa_cb_sfp_state_query()
3654 sfp->media = NULL; in bfa_cb_sfp_state_query()
3657 if (sfp->portspeed) { in bfa_cb_sfp_state_query()
3658 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); in bfa_cb_sfp_state_query()
3659 if (sfp->state_query_cbfn) in bfa_cb_sfp_state_query()
3660 sfp->state_query_cbfn(sfp->state_query_cbarg, in bfa_cb_sfp_state_query()
3661 sfp->status); in bfa_cb_sfp_state_query()
3662 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; in bfa_cb_sfp_state_query()
3665 sfp->state_query_lock = 0; in bfa_cb_sfp_state_query()
3666 sfp->state_query_cbfn = NULL; in bfa_cb_sfp_state_query()
3678 bfa_trc(sfp, sfp->lock); in bfa_sfp_notify()
3679 bfa_trc(sfp, sfp->state_query_lock); in bfa_sfp_notify()
3684 if (sfp->lock) { in bfa_sfp_notify()
3685 sfp->status = BFA_STATUS_IOC_FAILURE; in bfa_sfp_notify()
3689 if (sfp->state_query_lock) { in bfa_sfp_notify()
3690 sfp->status = BFA_STATUS_IOC_FAILURE; in bfa_sfp_notify()
3706 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad; in bfa_sfp_scn_aen_post()
3710 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) | in bfa_sfp_scn_aen_post()
3711 ((u64)rsp->event)); in bfa_sfp_scn_aen_post()
3717 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc); in bfa_sfp_scn_aen_post()
3718 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn; in bfa_sfp_scn_aen_post()
3719 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc); in bfa_sfp_scn_aen_post()
3721 switch (rsp->event) { in bfa_sfp_scn_aen_post()
3736 aen_entry->aen_data.port.level = rsp->pomlvl; in bfa_sfp_scn_aen_post()
3739 bfa_trc(sfp, rsp->event); in bfa_sfp_scn_aen_post()
3744 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq, in bfa_sfp_scn_aen_post()
3754 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_getdata_send()
3756 bfa_trc(sfp, req->memtype); in bfa_sfp_getdata_send()
3759 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW, in bfa_sfp_getdata_send()
3760 bfa_ioc_portid(sfp->ioc)); in bfa_sfp_getdata_send()
3763 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd); in bfa_sfp_getdata_send()
3772 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_getdata()
3774 WARN_ON(sfp->lock != 0); in bfa_sfp_getdata()
3775 bfa_trc(sfp, sfp->state); in bfa_sfp_getdata()
3777 sfp->lock = 1; in bfa_sfp_getdata()
3778 sfp->memtype = memtype; in bfa_sfp_getdata()
3779 req->memtype = memtype; in bfa_sfp_getdata()
3782 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa); in bfa_sfp_getdata()
3795 switch (rsp->event) { in bfa_sfp_scn()
3797 sfp->state = BFA_SFP_STATE_INSERTED; in bfa_sfp_scn()
3798 sfp->data_valid = 0; in bfa_sfp_scn()
3802 sfp->state = BFA_SFP_STATE_REMOVED; in bfa_sfp_scn()
3803 sfp->data_valid = 0; in bfa_sfp_scn()
3807 sfp->state = BFA_SFP_STATE_FAILED; in bfa_sfp_scn()
3808 sfp->data_valid = 0; in bfa_sfp_scn()
3812 sfp->state = BFA_SFP_STATE_UNSUPPORT; in bfa_sfp_scn()
3814 if (!sfp->lock) in bfa_sfp_scn()
3821 sfp->state = BFA_SFP_STATE_VALID; in bfa_sfp_scn()
3822 if (!sfp->lock) in bfa_sfp_scn()
3826 bfa_trc(sfp, rsp->event); in bfa_sfp_scn()
3839 if (!sfp->lock) { in bfa_sfp_show_comp()
3843 bfa_trc(sfp, sfp->lock); in bfa_sfp_show_comp()
3847 bfa_trc(sfp, rsp->status); in bfa_sfp_show_comp()
3848 if (rsp->status == BFA_STATUS_OK) { in bfa_sfp_show_comp()
3849 sfp->data_valid = 1; in bfa_sfp_show_comp()
3850 if (sfp->state == BFA_SFP_STATE_VALID) in bfa_sfp_show_comp()
3851 sfp->status = BFA_STATUS_OK; in bfa_sfp_show_comp()
3852 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT) in bfa_sfp_show_comp()
3853 sfp->status = BFA_STATUS_SFP_UNSUPP; in bfa_sfp_show_comp()
3855 bfa_trc(sfp, sfp->state); in bfa_sfp_show_comp()
3857 sfp->data_valid = 0; in bfa_sfp_show_comp()
3858 sfp->status = rsp->status; in bfa_sfp_show_comp()
3862 bfa_trc(sfp, sfp->memtype); in bfa_sfp_show_comp()
3863 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) { in bfa_sfp_show_comp()
3864 bfa_trc(sfp, sfp->data_valid); in bfa_sfp_show_comp()
3865 if (sfp->data_valid) { in bfa_sfp_show_comp()
3867 u8 *des = (u8 *)(sfp->sfpmem); in bfa_sfp_show_comp()
3868 memcpy(des, sfp->dbuf_kva, size); in bfa_sfp_show_comp()
3875 sfp->lock = 0; in bfa_sfp_show_comp()
3877 bfa_trc(sfp, sfp->state_query_lock); in bfa_sfp_show_comp()
3878 if (sfp->state_query_lock) { in bfa_sfp_show_comp()
3879 sfp->state = rsp->state; in bfa_sfp_show_comp()
3891 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_state_query()
3894 WARN_ON(sfp->state != BFA_SFP_STATE_INIT); in bfa_sfp_state_query()
3895 WARN_ON(sfp->state_query_lock != 0); in bfa_sfp_state_query()
3896 bfa_trc(sfp, sfp->state); in bfa_sfp_state_query()
3898 sfp->state_query_lock = 1; in bfa_sfp_state_query()
3899 req->memtype = 0; in bfa_sfp_state_query()
3901 if (!sfp->lock) in bfa_sfp_state_query()
3908 enum bfa_defs_sfp_media_e *media = sfp->media; in bfa_sfp_media_get()
3912 if (sfp->state == BFA_SFP_STATE_UNSUPPORT) in bfa_sfp_media_get()
3914 else if (sfp->state == BFA_SFP_STATE_VALID) { in bfa_sfp_media_get()
3916 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; in bfa_sfp_media_get()
3917 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 | in bfa_sfp_media_get()
3918 (sfpmem->srlid_base.xcvr[5] >> 1); in bfa_sfp_media_get()
3920 e10g.b = sfpmem->srlid_base.xcvr[0]; in bfa_sfp_media_get()
3948 bfa_trc(sfp, sfp->state); in bfa_sfp_media_get()
3954 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; in bfa_sfp_speed_valid()
3955 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr; in bfa_sfp_speed_valid()
3956 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3; in bfa_sfp_speed_valid()
3957 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g; in bfa_sfp_speed_valid()
3989 switch (msg->mh.msg_id) { in bfa_sfp_intr()
3999 bfa_trc(sfp, msg->mh.msg_id); in bfa_sfp_intr()
4020 sfp->dev = dev; in bfa_sfp_attach()
4021 sfp->ioc = ioc; in bfa_sfp_attach()
4022 sfp->trcmod = trcmod; in bfa_sfp_attach()
4024 sfp->cbfn = NULL; in bfa_sfp_attach()
4025 sfp->cbarg = NULL; in bfa_sfp_attach()
4026 sfp->sfpmem = NULL; in bfa_sfp_attach()
4027 sfp->lock = 0; in bfa_sfp_attach()
4028 sfp->data_valid = 0; in bfa_sfp_attach()
4029 sfp->state = BFA_SFP_STATE_INIT; in bfa_sfp_attach()
4030 sfp->state_query_lock = 0; in bfa_sfp_attach()
4031 sfp->state_query_cbfn = NULL; in bfa_sfp_attach()
4032 sfp->state_query_cbarg = NULL; in bfa_sfp_attach()
4033 sfp->media = NULL; in bfa_sfp_attach()
4034 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; in bfa_sfp_attach()
4035 sfp->is_elb = BFA_FALSE; in bfa_sfp_attach()
4037 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp); in bfa_sfp_attach()
4038 bfa_q_qe_init(&sfp->ioc_notify); in bfa_sfp_attach()
4039 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp); in bfa_sfp_attach()
4040 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q); in bfa_sfp_attach()
4049 sfp->dbuf_kva = dm_kva; in bfa_sfp_memclaim()
4050 sfp->dbuf_pa = dm_pa; in bfa_sfp_memclaim()
4051 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s)); in bfa_sfp_memclaim()
4060 * @param[in] sfp - bfa sfp module
4062 * @param[out] sfpmem - sfp eeprom data
4070 if (!bfa_ioc_is_operational(sfp->ioc)) { in bfa_sfp_show()
4075 if (sfp->lock) { in bfa_sfp_show()
4080 sfp->cbfn = cbfn; in bfa_sfp_show()
4081 sfp->cbarg = cbarg; in bfa_sfp_show()
4082 sfp->sfpmem = sfpmem; in bfa_sfp_show()
4091 * @param[in] sfp - bfa sfp module
4093 * @param[out] media - media type detected for the inserted sfp in bfa_sfp_media()
4093 * @param[out] media - media type detected for the inserted sfp in bfa_sfp_media()
4100 if (!bfa_ioc_is_operational(sfp->ioc)) { in bfa_sfp_media()
4105 sfp->media = media; in bfa_sfp_media()
4106 if (sfp->state == BFA_SFP_STATE_INIT) { in bfa_sfp_media()
4107 if (sfp->state_query_lock) { in bfa_sfp_media()
4111 sfp->state_query_cbfn = cbfn; in bfa_sfp_media()
4112 sfp->state_query_cbarg = cbarg; in bfa_sfp_media()
4125 * @param[in] sfp - bfa sfp module
4126 * @param[in] portspeed - port speed from user
4135 if (!bfa_ioc_is_operational(sfp->ioc)) in bfa_sfp_speed()
4139 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type)) in bfa_sfp_speed()
4143 sfp->portspeed = portspeed; in bfa_sfp_speed()
4144 if (sfp->state == BFA_SFP_STATE_INIT) { in bfa_sfp_speed()
4145 if (sfp->state_query_lock) { in bfa_sfp_speed()
4149 sfp->state_query_cbfn = cbfn; in bfa_sfp_speed()
4150 sfp->state_query_cbarg = cbarg; in bfa_sfp_speed()
4156 if (sfp->state == BFA_SFP_STATE_REMOVED || in bfa_sfp_speed()
4157 sfp->state == BFA_SFP_STATE_FAILED) { in bfa_sfp_speed()
4158 bfa_trc(sfp, sfp->state); in bfa_sfp_speed()
4162 if (sfp->state == BFA_SFP_STATE_INSERTED) { in bfa_sfp_speed()
4163 bfa_trc(sfp, sfp->state); in bfa_sfp_speed()
4168 if (sfp->is_elb) in bfa_sfp_speed()
4191 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_flash_aen_audit_post()
4198 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn; in bfa_flash_aen_audit_post()
4199 aen_entry->aen_data.audit.partition_inst = inst; in bfa_flash_aen_audit_post()
4200 aen_entry->aen_data.audit.partition_type = type; in bfa_flash_aen_audit_post()
4203 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, in bfa_flash_aen_audit_post()
4210 flash->op_busy = 0; in bfa_flash_cb()
4211 if (flash->cbfn) in bfa_flash_cb()
4212 flash->cbfn(flash->cbarg, flash->status); in bfa_flash_cb()
4224 if (flash->op_busy) { in bfa_flash_notify()
4225 flash->status = BFA_STATUS_IOC_FAILURE; in bfa_flash_notify()
4226 flash->cbfn(flash->cbarg, flash->status); in bfa_flash_notify()
4227 flash->op_busy = 0; in bfa_flash_notify()
4239 * @param[in] cbarg - callback argument
4246 (struct bfi_flash_query_req_s *) flash->mb.msg; in bfa_flash_query_send()
4248 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, in bfa_flash_query_send()
4249 bfa_ioc_portid(flash->ioc)); in bfa_flash_query_send()
4250 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s), in bfa_flash_query_send()
4251 flash->dbuf_pa); in bfa_flash_query_send()
4252 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_query_send()
4258 * @param[in] cbarg - callback argument
4264 (struct bfi_flash_write_req_s *) flash->mb.msg; in bfa_flash_write_send()
4267 msg->type = be32_to_cpu(flash->type); in bfa_flash_write_send()
4268 msg->instance = flash->instance; in bfa_flash_write_send()
4269 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); in bfa_flash_write_send()
4270 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? in bfa_flash_write_send()
4271 flash->residue : BFA_FLASH_DMA_BUF_SZ; in bfa_flash_write_send()
4272 msg->length = be32_to_cpu(len); in bfa_flash_write_send()
4275 msg->last = (len == flash->residue) ? 1 : 0; in bfa_flash_write_send()
4277 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, in bfa_flash_write_send()
4278 bfa_ioc_portid(flash->ioc)); in bfa_flash_write_send()
4279 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); in bfa_flash_write_send()
4280 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); in bfa_flash_write_send()
4281 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_write_send()
4283 flash->residue -= len; in bfa_flash_write_send()
4284 flash->offset += len; in bfa_flash_write_send()
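bfa_flash_write_send() pushes the update buffer to firmware in DMA-buffer-sized pieces, copying each piece into dbuf_kva, marking the final piece via the 'last' flag, and advancing offset while shrinking residue. A minimal stand-alone sketch of that chunking loop; the 2048-byte chunk size is an arbitrary stand-in, not the driver's BFA_FLASH_DMA_BUF_SZ:

#include <stdint.h>
#include <string.h>

#define DEMO_CHUNK 2048              /* arbitrary stand-in for the DMA buffer size */

static uint8_t demo_dma_buf[DEMO_CHUNK];

/* Walk 'total' bytes of 'buf', posting one request per chunk. */
static void demo_flash_write(const uint8_t *buf, uint32_t total)
{
    uint32_t offset = 0, residue = total;

    while (residue) {
        uint32_t len = residue < DEMO_CHUNK ? residue : DEMO_CHUNK;
        int last = (len == residue);          /* final chunk flag */

        memcpy(demo_dma_buf, buf + offset, len);
        /* ... post one write request of 'len' bytes, marked with 'last' ... */
        (void)last;
        residue -= len;
        offset  += len;
    }
}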
4290 * @param[in] cbarg - callback argument
4297 (struct bfi_flash_read_req_s *) flash->mb.msg; in bfa_flash_read_send()
4300 msg->type = be32_to_cpu(flash->type); in bfa_flash_read_send()
4301 msg->instance = flash->instance; in bfa_flash_read_send()
4302 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); in bfa_flash_read_send()
4303 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? in bfa_flash_read_send()
4304 flash->residue : BFA_FLASH_DMA_BUF_SZ; in bfa_flash_read_send()
4305 msg->length = be32_to_cpu(len); in bfa_flash_read_send()
4306 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, in bfa_flash_read_send()
4307 bfa_ioc_portid(flash->ioc)); in bfa_flash_read_send()
4308 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); in bfa_flash_read_send()
4309 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_read_send()
4315 * @param[in] cbarg - callback argument
4322 (struct bfi_flash_erase_req_s *) flash->mb.msg; in bfa_flash_erase_send()
4324 msg->type = be32_to_cpu(flash->type); in bfa_flash_erase_send()
4325 msg->instance = flash->instance; in bfa_flash_erase_send()
4326 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ, in bfa_flash_erase_send()
4327 bfa_ioc_portid(flash->ioc)); in bfa_flash_erase_send()
4328 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_erase_send()
4334 * @param[in] flasharg - flash structure
4335 * @param[in] msg - message structure
4350 } m; in bfa_flash_intr() local
4352 m.msg = msg; in bfa_flash_intr()
4353 bfa_trc(flash, msg->mh.msg_id); in bfa_flash_intr()
4355 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) { in bfa_flash_intr()
4361 switch (msg->mh.msg_id) { in bfa_flash_intr()
4363 status = be32_to_cpu(m.query->status); in bfa_flash_intr()
4369 attr = (struct bfa_flash_attr_s *) flash->ubuf; in bfa_flash_intr()
4370 f = (struct bfa_flash_attr_s *) flash->dbuf_kva; in bfa_flash_intr()
4371 attr->status = be32_to_cpu(f->status); in bfa_flash_intr()
4372 attr->npart = be32_to_cpu(f->npart); in bfa_flash_intr()
4373 bfa_trc(flash, attr->status); in bfa_flash_intr()
4374 bfa_trc(flash, attr->npart); in bfa_flash_intr()
4375 for (i = 0; i < attr->npart; i++) { in bfa_flash_intr()
4376 attr->part[i].part_type = in bfa_flash_intr()
4377 be32_to_cpu(f->part[i].part_type); in bfa_flash_intr()
4378 attr->part[i].part_instance = in bfa_flash_intr()
4379 be32_to_cpu(f->part[i].part_instance); in bfa_flash_intr()
4380 attr->part[i].part_off = in bfa_flash_intr()
4381 be32_to_cpu(f->part[i].part_off); in bfa_flash_intr()
4382 attr->part[i].part_size = in bfa_flash_intr()
4383 be32_to_cpu(f->part[i].part_size); in bfa_flash_intr()
4384 attr->part[i].part_len = in bfa_flash_intr()
4385 be32_to_cpu(f->part[i].part_len); in bfa_flash_intr()
4386 attr->part[i].part_status = in bfa_flash_intr()
4387 be32_to_cpu(f->part[i].part_status); in bfa_flash_intr()
4390 flash->status = status; in bfa_flash_intr()
4394 status = be32_to_cpu(m.erase->status); in bfa_flash_intr()
4396 flash->status = status; in bfa_flash_intr()
4400 status = be32_to_cpu(m.write->status); in bfa_flash_intr()
4402 if (status != BFA_STATUS_OK || flash->residue == 0) { in bfa_flash_intr()
4403 flash->status = status; in bfa_flash_intr()
4406 bfa_trc(flash, flash->offset); in bfa_flash_intr()
4411 status = be32_to_cpu(m.read->status); in bfa_flash_intr()
4414 flash->status = status; in bfa_flash_intr()
4417 u32 len = be32_to_cpu(m.read->length); in bfa_flash_intr()
4418 bfa_trc(flash, flash->offset); in bfa_flash_intr()
4420 memcpy(flash->ubuf + flash->offset, in bfa_flash_intr()
4421 flash->dbuf_kva, len); in bfa_flash_intr()
4422 flash->residue -= len; in bfa_flash_intr()
4423 flash->offset += len; in bfa_flash_intr()
4424 if (flash->residue == 0) { in bfa_flash_intr()
4425 flash->status = status; in bfa_flash_intr()
4434 status = be32_to_cpu(m.event->status); in bfa_flash_intr()
4437 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR); in bfa_flash_intr()
4440 param = be32_to_cpu(m.event->param); in bfa_flash_intr()
4442 bfa_ioc_aen_post(flash->ioc, in bfa_flash_intr()
4455 * @param[in] mincfg - minimal cfg variable
4469 * @param[in] flash - flash structure
4470 * @param[in] ioc - ioc structure
4471 * @param[in] dev - device structure
4472 * @param[in] trcmod - trace module
4473 * @param[in] logmod - log module
4479 flash->ioc = ioc; in bfa_flash_attach()
4480 flash->trcmod = trcmod; in bfa_flash_attach()
4481 flash->cbfn = NULL; in bfa_flash_attach()
4482 flash->cbarg = NULL; in bfa_flash_attach()
4483 flash->op_busy = 0; in bfa_flash_attach()
4485 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); in bfa_flash_attach()
4486 bfa_q_qe_init(&flash->ioc_notify); in bfa_flash_attach()
4487 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); in bfa_flash_attach()
4488 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); in bfa_flash_attach()
4492 flash->dbuf_kva = NULL; in bfa_flash_attach()
4493 flash->dbuf_pa = 0; in bfa_flash_attach()
4500 * @param[in] flash - flash structure
4501 * @param[in] dm_kva - pointer to virtual memory address
4502 * @param[in] dm_pa - physical memory address
4503 * @param[in] mincfg - minimal cfg variable
4512 flash->dbuf_kva = dm_kva; in bfa_flash_memclaim()
4513 flash->dbuf_pa = dm_pa; in bfa_flash_memclaim()
4514 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); in bfa_flash_memclaim()
4522 * @param[in] flash - flash structure
4523 * @param[in] attr - flash attribute structure
4524 * @param[in] cbfn - callback function
4525 * @param[in] cbarg - callback argument
4535 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_get_attr()
4538 if (flash->op_busy) { in bfa_flash_get_attr()
4539 bfa_trc(flash, flash->op_busy); in bfa_flash_get_attr()
4543 flash->op_busy = 1; in bfa_flash_get_attr()
4544 flash->cbfn = cbfn; in bfa_flash_get_attr()
4545 flash->cbarg = cbarg; in bfa_flash_get_attr()
4546 flash->ubuf = (u8 *) attr; in bfa_flash_get_attr()
4555 * @param[in] flash - flash structure
4556 * @param[in] type - flash partition type
4557 * @param[in] instance - flash partition instance
4558 * @param[in] cbfn - callback function
4559 * @param[in] cbarg - callback argument
4571 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_erase_part()
4574 if (flash->op_busy) { in bfa_flash_erase_part()
4575 bfa_trc(flash, flash->op_busy); in bfa_flash_erase_part()
4579 flash->op_busy = 1; in bfa_flash_erase_part()
4580 flash->cbfn = cbfn; in bfa_flash_erase_part()
4581 flash->cbarg = cbarg; in bfa_flash_erase_part()
4582 flash->type = type; in bfa_flash_erase_part()
4583 flash->instance = instance; in bfa_flash_erase_part()
4586 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE, in bfa_flash_erase_part()
4594 * @param[in] flash - flash structure
4595 * @param[in] type - flash partition type
4596 * @param[in] instance - flash partition instance
4597 * @param[in] buf - update data buffer
4598 * @param[in] len - data buffer length
4599 * @param[in] offset - offset relative to the partition starting address
4600 * @param[in] cbfn - callback function
4601 * @param[in] cbarg - callback argument
4616 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_update_part()
4620	 * 'len' must be on a word (4-byte) boundary			      in bfa_flash_update_part()
4629 if (flash->op_busy) { in bfa_flash_update_part()
4630 bfa_trc(flash, flash->op_busy); in bfa_flash_update_part()
4634 flash->op_busy = 1; in bfa_flash_update_part()
4635 flash->cbfn = cbfn; in bfa_flash_update_part()
4636 flash->cbarg = cbarg; in bfa_flash_update_part()
4637 flash->type = type; in bfa_flash_update_part()
4638 flash->instance = instance; in bfa_flash_update_part()
4639 flash->residue = len; in bfa_flash_update_part()
4640 flash->offset = 0; in bfa_flash_update_part()
4641 flash->addr_off = offset; in bfa_flash_update_part()
4642 flash->ubuf = buf; in bfa_flash_update_part()
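
A hedged usage sketch, not part of this file: the caller names (drv_flash_write_cb, drv_update_drv_part) are illustrative, and the bfa_cb_flash_t callback prototype is an assumption based on the cbfn/cbarg parameters documented above. bfa_flash_update_part() only queues the first write chunk; completion is reported through the callback once bfa_flash_intr() handles the final write response.

/*
 * Illustrative only -- assumes bfa_cb_flash_t is
 * void (*)(void *cbarg, bfa_status_t status).
 */
static void
drv_flash_write_cb(void *cbarg, bfa_status_t status)
{
	/* on BFA_STATUS_OK the whole image passed below has been written */
}

static bfa_status_t
drv_update_drv_part(struct bfa_flash_s *flash, void *img, u32 len)
{
	/* 'len' must be a multiple of 4 bytes or the request is rejected */
	return bfa_flash_update_part(flash, BFA_FLASH_PART_DRV,
				     0 /* instance */, img, len,
				     0 /* offset */, drv_flash_write_cb, NULL);
}
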
4651 * @param[in] flash - flash structure
4652 * @param[in] type - flash partition type
4653 * @param[in] instance - flash partition instance
4654 * @param[in] buf - read data buffer
4655 * @param[in] len - data buffer length
4656 * @param[in] offset - offset relative to the partition starting address
4657 * @param[in] cbfn - callback function
4658 * @param[in] cbarg - callback argument
4673 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_read_part()
4677	 * 'len' must be on a word (4-byte) boundary			      in bfa_flash_read_part()
4683 if (flash->op_busy) { in bfa_flash_read_part()
4684 bfa_trc(flash, flash->op_busy); in bfa_flash_read_part()
4688 flash->op_busy = 1; in bfa_flash_read_part()
4689 flash->cbfn = cbfn; in bfa_flash_read_part()
4690 flash->cbarg = cbarg; in bfa_flash_read_part()
4691 flash->type = type; in bfa_flash_read_part()
4692 flash->instance = instance; in bfa_flash_read_part()
4693 flash->residue = len; in bfa_flash_read_part()
4694 flash->offset = 0; in bfa_flash_read_part()
4695 flash->addr_off = offset; in bfa_flash_read_part()
4696 flash->ubuf = buf; in bfa_flash_read_part()
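
The read path mirrors the write path above. A hedged sketch with the same assumed bfa_cb_flash_t prototype and illustrative names: the call returns as soon as the first read request is queued, and the destination buffer is only valid after the callback reports BFA_STATUS_OK.

static void
drv_flash_read_cb(void *cbarg, bfa_status_t status)
{
	/* on BFA_STATUS_OK the buffer passed below holds the partition data */
}

static bfa_status_t
drv_read_drv_part(struct bfa_flash_s *flash, void *buf, u32 len)
{
	/* 'len' and 'offset' must be 4-byte aligned */
	return bfa_flash_read_part(flash, BFA_FLASH_PART_DRV, 0 /* instance */,
				   buf, len, 0 /* offset */,
				   drv_flash_read_cb, NULL);
}
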
4716 bfa_trc(diag, diag->block); in bfa_diag_notify()
4717 bfa_trc(diag, diag->fwping.lock); in bfa_diag_notify()
4718 bfa_trc(diag, diag->tsensor.lock); in bfa_diag_notify()
4723 if (diag->fwping.lock) { in bfa_diag_notify()
4724 diag->fwping.status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4725 diag->fwping.cbfn(diag->fwping.cbarg, in bfa_diag_notify()
4726 diag->fwping.status); in bfa_diag_notify()
4727 diag->fwping.lock = 0; in bfa_diag_notify()
4730 if (diag->tsensor.lock) { in bfa_diag_notify()
4731 diag->tsensor.status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4732 diag->tsensor.cbfn(diag->tsensor.cbarg, in bfa_diag_notify()
4733 diag->tsensor.status); in bfa_diag_notify()
4734 diag->tsensor.lock = 0; in bfa_diag_notify()
4737 if (diag->block) { in bfa_diag_notify()
4738 if (diag->timer_active) { in bfa_diag_notify()
4739 bfa_timer_stop(&diag->timer); in bfa_diag_notify()
4740 diag->timer_active = 0; in bfa_diag_notify()
4743 diag->status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4744 diag->cbfn(diag->cbarg, diag->status); in bfa_diag_notify()
4745 diag->block = 0; in bfa_diag_notify()
4758 struct bfa_ioc_s *ioc = diag->ioc; in bfa_diag_memtest_done()
4759 struct bfa_diag_memtest_result *res = diag->result; in bfa_diag_memtest_done()
4763 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_diag_memtest_done()
4764 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_diag_memtest_done()
4770 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_diag_memtest_done()
4777 res->status = swab32(res->status); in bfa_diag_memtest_done()
4778 bfa_trc(diag, res->status); in bfa_diag_memtest_done()
4780 if (res->status == BFI_BOOT_MEMTEST_RES_SIG) in bfa_diag_memtest_done()
4781 diag->status = BFA_STATUS_OK; in bfa_diag_memtest_done()
4783 diag->status = BFA_STATUS_MEMTEST_FAILED; in bfa_diag_memtest_done()
4784 res->addr = swab32(res->addr); in bfa_diag_memtest_done()
4785 res->exp = swab32(res->exp); in bfa_diag_memtest_done()
4786 res->act = swab32(res->act); in bfa_diag_memtest_done()
4787 res->err_status = swab32(res->err_status); in bfa_diag_memtest_done()
4788 res->err_status1 = swab32(res->err_status1); in bfa_diag_memtest_done()
4789 res->err_addr = swab32(res->err_addr); in bfa_diag_memtest_done()
4790 bfa_trc(diag, res->addr); in bfa_diag_memtest_done()
4791 bfa_trc(diag, res->exp); in bfa_diag_memtest_done()
4792 bfa_trc(diag, res->act); in bfa_diag_memtest_done()
4793 bfa_trc(diag, res->err_status); in bfa_diag_memtest_done()
4794 bfa_trc(diag, res->err_status1); in bfa_diag_memtest_done()
4795 bfa_trc(diag, res->err_addr); in bfa_diag_memtest_done()
4797 diag->timer_active = 0; in bfa_diag_memtest_done()
4798 diag->cbfn(diag->cbarg, diag->status); in bfa_diag_memtest_done()
4799 diag->block = 0; in bfa_diag_memtest_done()
4815 bfa_trc(diag, diag->fwping.dbuf_pa); in diag_fwping_send()
4819 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data; in diag_fwping_send()
4822 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg; in diag_fwping_send()
4825 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ, in diag_fwping_send()
4826 diag->fwping.dbuf_pa); in diag_fwping_send()
4828 fwping_req->count = cpu_to_be32(diag->fwping.count); in diag_fwping_send()
4830 fwping_req->data = diag->fwping.data; in diag_fwping_send()
4833 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING, in diag_fwping_send()
4834 bfa_ioc_portid(diag->ioc)); in diag_fwping_send()
4837 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd); in diag_fwping_send()
4844 u32 rsp_data = diag_rsp->data; in diag_fwping_comp()
4845 u8 rsp_dma_status = diag_rsp->dma_status; in diag_fwping_comp()
4852 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) : in diag_fwping_comp()
4853 diag->fwping.data; in diag_fwping_comp()
4855 if (diag->fwping.data != rsp_data) { in diag_fwping_comp()
4857 diag->fwping.result->dmastatus = in diag_fwping_comp()
4859 diag->fwping.status = BFA_STATUS_DATACORRUPTED; in diag_fwping_comp()
4860 diag->fwping.cbfn(diag->fwping.cbarg, in diag_fwping_comp()
4861 diag->fwping.status); in diag_fwping_comp()
4862 diag->fwping.lock = 0; in diag_fwping_comp()
4867 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) { in diag_fwping_comp()
4871 *((u32 *)diag->fwping.dbuf_kva + i)); in diag_fwping_comp()
4872 diag->fwping.result->dmastatus = in diag_fwping_comp()
4874 diag->fwping.status = BFA_STATUS_DATACORRUPTED; in diag_fwping_comp()
4875 diag->fwping.cbfn(diag->fwping.cbarg, in diag_fwping_comp()
4876 diag->fwping.status); in diag_fwping_comp()
4877 diag->fwping.lock = 0; in diag_fwping_comp()
4881 diag->fwping.result->dmastatus = BFA_STATUS_OK; in diag_fwping_comp()
4882 diag->fwping.status = BFA_STATUS_OK; in diag_fwping_comp()
4883 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); in diag_fwping_comp()
4884 diag->fwping.lock = 0; in diag_fwping_comp()
4886 diag->fwping.status = BFA_STATUS_HDMA_FAILED; in diag_fwping_comp()
4887 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); in diag_fwping_comp()
4888 diag->fwping.lock = 0; in diag_fwping_comp()
4901 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg; in diag_tempsensor_send()
4902 bfa_trc(diag, msg->temp); in diag_tempsensor_send()
4904 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR, in diag_tempsensor_send()
4905 bfa_ioc_portid(diag->ioc)); in diag_tempsensor_send()
4907 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd); in diag_tempsensor_send()
4913 if (!diag->tsensor.lock) { in diag_tempsensor_comp()
4915 bfa_trc(diag, diag->tsensor.lock); in diag_tempsensor_comp()
4923 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); in diag_tempsensor_comp()
4924 diag->tsensor.temp->ts_junc = rsp->ts_junc; in diag_tempsensor_comp()
4925 diag->tsensor.temp->ts_brd = rsp->ts_brd; in diag_tempsensor_comp()
4927 if (rsp->ts_brd) { in diag_tempsensor_comp()
4928 /* tsensor.temp->status is brd_temp status */ in diag_tempsensor_comp()
4929 diag->tsensor.temp->status = rsp->status; in diag_tempsensor_comp()
4930 if (rsp->status == BFA_STATUS_OK) { in diag_tempsensor_comp()
4931 diag->tsensor.temp->brd_temp = in diag_tempsensor_comp()
4932 be16_to_cpu(rsp->brd_temp); in diag_tempsensor_comp()
4934 diag->tsensor.temp->brd_temp = 0; in diag_tempsensor_comp()
4937 bfa_trc(diag, rsp->status); in diag_tempsensor_comp()
4938 bfa_trc(diag, rsp->ts_junc); in diag_tempsensor_comp()
4939 bfa_trc(diag, rsp->temp); in diag_tempsensor_comp()
4940 bfa_trc(diag, rsp->ts_brd); in diag_tempsensor_comp()
4941 bfa_trc(diag, rsp->brd_temp); in diag_tempsensor_comp()
4944 diag->tsensor.status = BFA_STATUS_OK; in diag_tempsensor_comp()
4945 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); in diag_tempsensor_comp()
4946 diag->tsensor.lock = 0; in diag_tempsensor_comp()
4957 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg; in diag_ledtest_send()
4959 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST, in diag_ledtest_send()
4960 bfa_ioc_portid(diag->ioc)); in diag_ledtest_send()
4966 if (ledtest->freq) in diag_ledtest_send()
4967 ledtest->freq = 500 / ledtest->freq; in diag_ledtest_send()
4969 if (ledtest->freq == 0) in diag_ledtest_send()
4970 ledtest->freq = 1; in diag_ledtest_send()
4972 bfa_trc(diag, ledtest->freq); in diag_ledtest_send()
4973 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */ in diag_ledtest_send()
4974 msg->cmd = (u8) ledtest->cmd; in diag_ledtest_send()
4975 msg->color = (u8) ledtest->color; in diag_ledtest_send()
4976 msg->portid = bfa_ioc_portid(diag->ioc); in diag_ledtest_send()
4977 msg->led = ledtest->led; in diag_ledtest_send()
4978 msg->freq = cpu_to_be16(ledtest->freq); in diag_ledtest_send()
4981 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd); in diag_ledtest_send()
4987 bfa_trc(diag, diag->ledtest.lock); in diag_ledtest_comp()
4988 diag->ledtest.lock = BFA_FALSE; in diag_ledtest_comp()
5000 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg; in diag_portbeacon_send()
5002 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON, in diag_portbeacon_send()
5003 bfa_ioc_portid(diag->ioc)); in diag_portbeacon_send()
5004 msg->beacon = beacon; in diag_portbeacon_send()
5005 msg->period = cpu_to_be32(sec); in diag_portbeacon_send()
5007 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd); in diag_portbeacon_send()
5013 bfa_trc(diag, diag->beacon.state); in diag_portbeacon_comp()
5014 diag->beacon.state = BFA_FALSE; in diag_portbeacon_comp()
5015 if (diag->cbfn_beacon) in diag_portbeacon_comp()
5016 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e); in diag_portbeacon_comp()
5027 switch (msg->mh.msg_id) { in bfa_diag_intr()
5041 bfa_trc(diag, msg->mh.msg_id); in bfa_diag_intr()
5049 * @param[in] *diag - diag data struct
5050 * @param[in] *memtest - mem test params input from upper layer,
5051 * @param[in] pattern - mem test pattern
5052 * @param[in] *result - mem test result
5053 * @param[in] cbfn - mem test callback function
5054 * @param[in] cbarg - callback function arg
5067 if (!bfa_ioc_adapter_is_disabled(diag->ioc)) in bfa_diag_memtest()
5071 if (diag->block) { in bfa_diag_memtest()
5072 bfa_trc(diag, diag->block); in bfa_diag_memtest()
5075 diag->block = 1; in bfa_diag_memtest()
5077 diag->result = result; in bfa_diag_memtest()
5078 diag->cbfn = cbfn; in bfa_diag_memtest()
5079 diag->cbarg = cbarg; in bfa_diag_memtest()
5082 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); in bfa_diag_memtest()
5084 memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ? in bfa_diag_memtest()
5086 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, in bfa_diag_memtest()
5088 diag->timer_active = 1; in bfa_diag_memtest()
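
A hedged sketch of starting the memory test; names are illustrative, the memtest-params type is left opaque, and the bfa_cb_diag_t callback prototype is an assumption based on the cbfn/cbarg parameters above. The adapter must already be disabled, and the result struct is populated by bfa_diag_memtest_done() before the callback runs.

static void
drv_memtest_cb(void *cbarg, bfa_status_t status)
{
	/* the bfa_diag_memtest_result struct passed below is valid here */
}

static bfa_status_t
drv_run_memtest(struct bfa_diag_s *diag, void *memtest_params,
		struct bfa_diag_memtest_result *result)
{
	/* 0xA5A5A5A5 is an arbitrary 32-bit test pattern */
	return bfa_diag_memtest(diag, memtest_params, 0xA5A5A5A5,
				result, drv_memtest_cb, NULL);
}
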
5095 * @param[in] *diag - diag data struct
5096 * @param[in] cnt - DMA loop count for testing PCIe
5097 * @param[in] data - data pattern to pass to fw
5098 * @param[in] *result - pointer to bfa_diag_fwping_result_t data struct
5099 * @param[in] cbfn - callback function
5100 * @param[in] *cbarg - callback function arg
5112 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_fwping()
5115 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) && in bfa_diag_fwping()
5116 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH)) in bfa_diag_fwping()
5120 if (diag->block || diag->fwping.lock) { in bfa_diag_fwping()
5121 bfa_trc(diag, diag->block); in bfa_diag_fwping()
5122 bfa_trc(diag, diag->fwping.lock); in bfa_diag_fwping()
5127 diag->fwping.lock = 1; in bfa_diag_fwping()
5128 diag->fwping.cbfn = cbfn; in bfa_diag_fwping()
5129 diag->fwping.cbarg = cbarg; in bfa_diag_fwping()
5130 diag->fwping.result = result; in bfa_diag_fwping()
5131 diag->fwping.data = data; in bfa_diag_fwping()
5132 diag->fwping.count = cnt; in bfa_diag_fwping()
5135 diag->fwping.result->data = 0; in bfa_diag_fwping()
5136 diag->fwping.result->status = BFA_STATUS_OK; in bfa_diag_fwping()
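
A hedged sketch of issuing a firmware ping; names are illustrative, the fwping result struct is passed through opaquely, and the bfa_cb_diag_t prototype is assumed as above. Each of the cnt iterations DMAs the data pattern to the firmware and back, and diag_fwping_comp() verifies the returned buffer before invoking the callback.

static void
drv_fwping_cb(void *cbarg, bfa_status_t status)
{
	/* BFA_STATUS_DATACORRUPTED or BFA_STATUS_HDMA_FAILED flag DMA trouble */
}

static bfa_status_t
drv_run_fwping(struct bfa_diag_s *diag, void *fwping_result)
{
	return bfa_diag_fwping(diag, 10 /* dma loops */,
			       0x5A5A5A5A /* pattern */, fwping_result,
			       drv_fwping_cb, NULL);
}
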
5146 * @param[in] *diag - diag data struct
5147 * @param[in] *result - pointer to bfa_diag_temp_t data struct
5148 * @param[in] cbfn - callback function
5149 * @param[in] *cbarg - callback function arg
5159 if (diag->block || diag->tsensor.lock) { in bfa_diag_tsensor_query()
5160 bfa_trc(diag, diag->block); in bfa_diag_tsensor_query()
5161 bfa_trc(diag, diag->tsensor.lock); in bfa_diag_tsensor_query()
5165 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_tsensor_query()
5169 diag->tsensor.lock = 1; in bfa_diag_tsensor_query()
5170 diag->tsensor.temp = result; in bfa_diag_tsensor_query()
5171 diag->tsensor.cbfn = cbfn; in bfa_diag_tsensor_query()
5172 diag->tsensor.cbarg = cbarg; in bfa_diag_tsensor_query()
5173 diag->tsensor.status = BFA_STATUS_OK; in bfa_diag_tsensor_query()
5184 * @param[in] *diag - diag data struct
5185 * @param[in] *ledtest - pointer to ledtest data structure
5192 bfa_trc(diag, ledtest->cmd); in bfa_diag_ledtest()
5194 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_ledtest()
5197 if (diag->beacon.state) in bfa_diag_ledtest()
5200 if (diag->ledtest.lock) in bfa_diag_ledtest()
5204 diag->ledtest.lock = BFA_TRUE; in bfa_diag_ledtest()
5213 * @param[in] *diag - diag data struct
5214 * @param[in] beacon - port beaconing 1:ON 0:OFF
5215 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
5216 * @param[in] sec - beaconing duration in seconds
5228 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_beacon_port()
5231 if (diag->ledtest.lock) in bfa_diag_beacon_port()
5234	if (diag->beacon.state && beacon)	/* beacon already on */	      in bfa_diag_beacon_port()
5237 diag->beacon.state = beacon; in bfa_diag_beacon_port()
5238 diag->beacon.link_e2e = link_e2e_beacon; in bfa_diag_beacon_port()
5239 if (diag->cbfn_beacon) in bfa_diag_beacon_port()
5240 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon); in bfa_diag_beacon_port()
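
A hedged sketch of turning the port beacon on for a fixed duration; the argument order follows the parameters documented above and the wrapper name is illustrative.

static bfa_status_t
drv_beacon_on(struct bfa_diag_s *diag, u32 seconds)
{
	/* beacon on, link end-to-end beacon off */
	return bfa_diag_beacon_port(diag, BFA_TRUE, BFA_FALSE, seconds);
}
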
5264 diag->dev = dev; in bfa_diag_attach()
5265 diag->ioc = ioc; in bfa_diag_attach()
5266 diag->trcmod = trcmod; in bfa_diag_attach()
5268 diag->block = 0; in bfa_diag_attach()
5269 diag->cbfn = NULL; in bfa_diag_attach()
5270 diag->cbarg = NULL; in bfa_diag_attach()
5271 diag->result = NULL; in bfa_diag_attach()
5272 diag->cbfn_beacon = cbfn_beacon; in bfa_diag_attach()
5274 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag); in bfa_diag_attach()
5275 bfa_q_qe_init(&diag->ioc_notify); in bfa_diag_attach()
5276 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag); in bfa_diag_attach()
5277 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q); in bfa_diag_attach()
5283 diag->fwping.dbuf_kva = dm_kva; in bfa_diag_memclaim()
5284 diag->fwping.dbuf_pa = dm_pa; in bfa_diag_memclaim()
5285 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ); in bfa_diag_memclaim()
5289 * PHY module specific
5292 #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5297 int i, m = sz >> 2; in bfa_phy_ntoh32() local
5299 for (i = 0; i < m; i++) in bfa_phy_ntoh32()
5304 bfa_phy_present(struct bfa_phy_s *phy) in bfa_phy_present() argument
5306 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING); in bfa_phy_present()
5312 struct bfa_phy_s *phy = cbarg; in bfa_phy_notify() local
5314 bfa_trc(phy, event); in bfa_phy_notify()
5319 if (phy->op_busy) { in bfa_phy_notify()
5320 phy->status = BFA_STATUS_IOC_FAILURE; in bfa_phy_notify()
5321 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_notify()
5322 phy->op_busy = 0; in bfa_phy_notify()
5332 * Send phy attribute query request.
5334 * @param[in] cbarg - callback argument
5339 struct bfa_phy_s *phy = cbarg; in bfa_phy_query_send() local
5341 (struct bfi_phy_query_req_s *) phy->mb.msg; in bfa_phy_query_send()
5343 msg->instance = phy->instance; in bfa_phy_query_send()
5344 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ, in bfa_phy_query_send()
5345 bfa_ioc_portid(phy->ioc)); in bfa_phy_query_send()
5346 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa); in bfa_phy_query_send()
5347 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_query_send()
5351 * Send phy write request.
5353 * @param[in] cbarg - callback argument
5358 struct bfa_phy_s *phy = cbarg; in bfa_phy_write_send() local
5360 (struct bfi_phy_write_req_s *) phy->mb.msg; in bfa_phy_write_send()
5365 msg->instance = phy->instance; in bfa_phy_write_send()
5366 msg->offset = cpu_to_be32(phy->addr_off + phy->offset); in bfa_phy_write_send()
5367 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? in bfa_phy_write_send()
5368 phy->residue : BFA_PHY_DMA_BUF_SZ; in bfa_phy_write_send()
5369 msg->length = cpu_to_be32(len); in bfa_phy_write_send()
5372 msg->last = (len == phy->residue) ? 1 : 0; in bfa_phy_write_send()
5374 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ, in bfa_phy_write_send()
5375 bfa_ioc_portid(phy->ioc)); in bfa_phy_write_send()
5376 bfa_alen_set(&msg->alen, len, phy->dbuf_pa); in bfa_phy_write_send()
5378 buf = (u16 *) (phy->ubuf + phy->offset); in bfa_phy_write_send()
5379 dbuf = (u16 *)phy->dbuf_kva; in bfa_phy_write_send()
5384 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_write_send()
5386 phy->residue -= len; in bfa_phy_write_send()
5387 phy->offset += len; in bfa_phy_write_send()
5391 * Send phy read request.
5393 * @param[in] cbarg - callback argument
5398 struct bfa_phy_s *phy = cbarg; in bfa_phy_read_send() local
5400 (struct bfi_phy_read_req_s *) phy->mb.msg; in bfa_phy_read_send()
5403 msg->instance = phy->instance; in bfa_phy_read_send()
5404 msg->offset = cpu_to_be32(phy->addr_off + phy->offset); in bfa_phy_read_send()
5405 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? in bfa_phy_read_send()
5406 phy->residue : BFA_PHY_DMA_BUF_SZ; in bfa_phy_read_send()
5407 msg->length = cpu_to_be32(len); in bfa_phy_read_send()
5408 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ, in bfa_phy_read_send()
5409 bfa_ioc_portid(phy->ioc)); in bfa_phy_read_send()
5410 bfa_alen_set(&msg->alen, len, phy->dbuf_pa); in bfa_phy_read_send()
5411 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_read_send()
5415 * Send phy stats request.
5417 * @param[in] cbarg - callback argument
5422 struct bfa_phy_s *phy = cbarg; in bfa_phy_stats_send() local
5424 (struct bfi_phy_stats_req_s *) phy->mb.msg; in bfa_phy_stats_send()
5426 msg->instance = phy->instance; in bfa_phy_stats_send()
5427 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ, in bfa_phy_stats_send()
5428 bfa_ioc_portid(phy->ioc)); in bfa_phy_stats_send()
5429 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa); in bfa_phy_stats_send()
5430 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_stats_send()
5436 * @param[in] mincfg - minimal cfg variable
5441 /* min driver doesn't need phy */ in bfa_phy_meminfo()
5451 * @param[in] phy - phy structure
5452 * @param[in] ioc - ioc structure
5453 * @param[in] dev - device structure
5454 * @param[in] trcmod - trace module
5455 * @param[in] logmod - log module
5458 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev, in bfa_phy_attach() argument
5461 phy->ioc = ioc; in bfa_phy_attach()
5462 phy->trcmod = trcmod; in bfa_phy_attach()
5463 phy->cbfn = NULL; in bfa_phy_attach()
5464 phy->cbarg = NULL; in bfa_phy_attach()
5465 phy->op_busy = 0; in bfa_phy_attach()
5467 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy); in bfa_phy_attach()
5468 bfa_q_qe_init(&phy->ioc_notify); in bfa_phy_attach()
5469 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy); in bfa_phy_attach()
5470 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q); in bfa_phy_attach()
5472 /* min driver doesn't need phy */ in bfa_phy_attach()
5474 phy->dbuf_kva = NULL; in bfa_phy_attach()
5475 phy->dbuf_pa = 0; in bfa_phy_attach()
5480 * Claim memory for phy
5482 * @param[in] phy - phy structure
5483 * @param[in] dm_kva - pointer to virtual memory address
5484 * @param[in] dm_pa - physical memory address
5485 * @param[in] mincfg - minimal cfg variable
5488 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa, in bfa_phy_memclaim() argument
5494 phy->dbuf_kva = dm_kva; in bfa_phy_memclaim()
5495 phy->dbuf_pa = dm_pa; in bfa_phy_memclaim()
5496 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ); in bfa_phy_memclaim()
5511 * Get phy attribute.
5513 * @param[in] phy - phy structure
5514 * @param[in] attr - phy attribute structure
5515 * @param[in] cbfn - callback function
5516 * @param[in] cbarg - callback argument
5521 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance, in bfa_phy_get_attr() argument
5524 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ); in bfa_phy_get_attr()
5525 bfa_trc(phy, instance); in bfa_phy_get_attr()
5527 if (!bfa_phy_present(phy)) in bfa_phy_get_attr()
5530 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_get_attr()
5533 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_get_attr()
5534 bfa_trc(phy, phy->op_busy); in bfa_phy_get_attr()
5538 phy->op_busy = 1; in bfa_phy_get_attr()
5539 phy->cbfn = cbfn; in bfa_phy_get_attr()
5540 phy->cbarg = cbarg; in bfa_phy_get_attr()
5541 phy->instance = instance; in bfa_phy_get_attr()
5542 phy->ubuf = (uint8_t *) attr; in bfa_phy_get_attr()
5543 bfa_phy_query_send(phy); in bfa_phy_get_attr()
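
A hedged sketch of querying the PHY attributes; names are illustrative and the bfa_cb_phy_t callback prototype is an assumption based on the cbfn/cbarg parameters above. bfa_phy_intr() byte-swaps the attribute struct into the caller's buffer before the callback fires.

static void
drv_phy_attr_cb(void *cbarg, bfa_status_t status)
{
	/* the struct bfa_phy_attr_s passed below is valid on BFA_STATUS_OK */
}

static bfa_status_t
drv_query_phy(struct bfa_phy_s *phy, struct bfa_phy_attr_s *attr)
{
	return bfa_phy_get_attr(phy, 0 /* instance */, attr,
				drv_phy_attr_cb, NULL);
}
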
5549 * Get phy stats.
5551 * @param[in] phy - phy structure
5552 * @param[in] instance - phy image instance
5553 * @param[in] stats - pointer to phy stats
5554 * @param[in] cbfn - callback function
5555 * @param[in] cbarg - callback argument
5560 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance, in bfa_phy_get_stats() argument
5564 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ); in bfa_phy_get_stats()
5565 bfa_trc(phy, instance); in bfa_phy_get_stats()
5567 if (!bfa_phy_present(phy)) in bfa_phy_get_stats()
5570 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_get_stats()
5573 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_get_stats()
5574 bfa_trc(phy, phy->op_busy); in bfa_phy_get_stats()
5578 phy->op_busy = 1; in bfa_phy_get_stats()
5579 phy->cbfn = cbfn; in bfa_phy_get_stats()
5580 phy->cbarg = cbarg; in bfa_phy_get_stats()
5581 phy->instance = instance; in bfa_phy_get_stats()
5582 phy->ubuf = (u8 *) stats; in bfa_phy_get_stats()
5583 bfa_phy_stats_send(phy); in bfa_phy_get_stats()
5589 * Update phy image.
5591 * @param[in] phy - phy structure
5592 * @param[in] instance - phy image instance
5593 * @param[in] buf - update data buffer
5594 * @param[in] len - data buffer length
5595 * @param[in] offset - offset relative to starting address
5596 * @param[in] cbfn - callback function
5597 * @param[in] cbarg - callback argument
5602 bfa_phy_update(struct bfa_phy_s *phy, u8 instance, in bfa_phy_update() argument
5606 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ); in bfa_phy_update()
5607 bfa_trc(phy, instance); in bfa_phy_update()
5608 bfa_trc(phy, len); in bfa_phy_update()
5609 bfa_trc(phy, offset); in bfa_phy_update()
5611 if (!bfa_phy_present(phy)) in bfa_phy_update()
5614 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_update()
5617	/* 'len' must be on a word (4-byte) boundary */			      in bfa_phy_update()
5621 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_update()
5622 bfa_trc(phy, phy->op_busy); in bfa_phy_update()
5626 phy->op_busy = 1; in bfa_phy_update()
5627 phy->cbfn = cbfn; in bfa_phy_update()
5628 phy->cbarg = cbarg; in bfa_phy_update()
5629 phy->instance = instance; in bfa_phy_update()
5630 phy->residue = len; in bfa_phy_update()
5631 phy->offset = 0; in bfa_phy_update()
5632 phy->addr_off = offset; in bfa_phy_update()
5633 phy->ubuf = buf; in bfa_phy_update()
5635 bfa_phy_write_send(phy); in bfa_phy_update()
5640 * Read phy image.
5642 * @param[in] phy - phy structure
5643 * @param[in] instance - phy image instance
5644 * @param[in] buf - read data buffer
5645 * @param[in] len - data buffer length
5646 * @param[in] offset - offset relative to starting address
5647 * @param[in] cbfn - callback function
5648 * @param[in] cbarg - callback argument
5653 bfa_phy_read(struct bfa_phy_s *phy, u8 instance, in bfa_phy_read() argument
5657 bfa_trc(phy, BFI_PHY_H2I_READ_REQ); in bfa_phy_read()
5658 bfa_trc(phy, instance); in bfa_phy_read()
5659 bfa_trc(phy, len); in bfa_phy_read()
5660 bfa_trc(phy, offset); in bfa_phy_read()
5662 if (!bfa_phy_present(phy)) in bfa_phy_read()
5665 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_read()
5668	/* 'len' must be on a word (4-byte) boundary */			      in bfa_phy_read()
5672 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_read()
5673 bfa_trc(phy, phy->op_busy); in bfa_phy_read()
5677 phy->op_busy = 1; in bfa_phy_read()
5678 phy->cbfn = cbfn; in bfa_phy_read()
5679 phy->cbarg = cbarg; in bfa_phy_read()
5680 phy->instance = instance; in bfa_phy_read()
5681 phy->residue = len; in bfa_phy_read()
5682 phy->offset = 0; in bfa_phy_read()
5683 phy->addr_off = offset; in bfa_phy_read()
5684 phy->ubuf = buf; in bfa_phy_read()
5685 bfa_phy_read_send(phy); in bfa_phy_read()
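
A hedged sketch of reading part of the PHY image; illustrative names, same assumed callback prototype. 'len' must be a multiple of 4 bytes, and bfa_phy_intr() copies each completed DMA chunk into the caller's buffer 16 bits at a time until the residue reaches zero.

static void
drv_phy_read_cb(void *cbarg, bfa_status_t status)
{
	/* the read buffer is complete on BFA_STATUS_OK */
}

static bfa_status_t
drv_read_phy_image(struct bfa_phy_s *phy, void *buf, u32 len, u32 offset)
{
	return bfa_phy_read(phy, 0 /* instance */, buf, len, offset,
			    drv_phy_read_cb, NULL);
}
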
5691 * Process phy response messages upon receiving interrupts.
5693 * @param[in] phyarg - phy structure
5694 * @param[in] msg - message structure
5699 struct bfa_phy_s *phy = phyarg; in bfa_phy_intr() local
5708 } m; in bfa_phy_intr() local
5710 m.msg = msg; in bfa_phy_intr()
5711 bfa_trc(phy, msg->mh.msg_id); in bfa_phy_intr()
5713 if (!phy->op_busy) { in bfa_phy_intr()
5715 bfa_trc(phy, 0x9999); in bfa_phy_intr()
5719 switch (msg->mh.msg_id) { in bfa_phy_intr()
5721 status = be32_to_cpu(m.query->status); in bfa_phy_intr()
5722 bfa_trc(phy, status); in bfa_phy_intr()
5726 (struct bfa_phy_attr_s *) phy->ubuf; in bfa_phy_intr()
5727 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva, in bfa_phy_intr()
5729 bfa_trc(phy, attr->status); in bfa_phy_intr()
5730 bfa_trc(phy, attr->length); in bfa_phy_intr()
5733 phy->status = status; in bfa_phy_intr()
5734 phy->op_busy = 0; in bfa_phy_intr()
5735 if (phy->cbfn) in bfa_phy_intr()
5736 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5739 status = be32_to_cpu(m.stats->status); in bfa_phy_intr()
5740 bfa_trc(phy, status); in bfa_phy_intr()
5744 (struct bfa_phy_stats_s *) phy->ubuf; in bfa_phy_intr()
5745 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva, in bfa_phy_intr()
5747 bfa_trc(phy, stats->status); in bfa_phy_intr()
5750 phy->status = status; in bfa_phy_intr()
5751 phy->op_busy = 0; in bfa_phy_intr()
5752 if (phy->cbfn) in bfa_phy_intr()
5753 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5756 status = be32_to_cpu(m.write->status); in bfa_phy_intr()
5757 bfa_trc(phy, status); in bfa_phy_intr()
5759 if (status != BFA_STATUS_OK || phy->residue == 0) { in bfa_phy_intr()
5760 phy->status = status; in bfa_phy_intr()
5761 phy->op_busy = 0; in bfa_phy_intr()
5762 if (phy->cbfn) in bfa_phy_intr()
5763 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5765 bfa_trc(phy, phy->offset); in bfa_phy_intr()
5766 bfa_phy_write_send(phy); in bfa_phy_intr()
5770 status = be32_to_cpu(m.read->status); in bfa_phy_intr()
5771 bfa_trc(phy, status); in bfa_phy_intr()
5774 phy->status = status; in bfa_phy_intr()
5775 phy->op_busy = 0; in bfa_phy_intr()
5776 if (phy->cbfn) in bfa_phy_intr()
5777 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5779 u32 len = be32_to_cpu(m.read->length); in bfa_phy_intr()
5780 u16 *buf = (u16 *)(phy->ubuf + phy->offset); in bfa_phy_intr()
5781 u16 *dbuf = (u16 *)phy->dbuf_kva; in bfa_phy_intr()
5784 bfa_trc(phy, phy->offset); in bfa_phy_intr()
5785 bfa_trc(phy, len); in bfa_phy_intr()
5790 phy->residue -= len; in bfa_phy_intr()
5791 phy->offset += len; in bfa_phy_intr()
5793 if (phy->residue == 0) { in bfa_phy_intr()
5794 phy->status = status; in bfa_phy_intr()
5795 phy->op_busy = 0; in bfa_phy_intr()
5796 if (phy->cbfn) in bfa_phy_intr()
5797 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5799 bfa_phy_read_send(phy); in bfa_phy_intr()
5835 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_uninit()
5839 if (dconf->min_cfg) { in bfa_dconf_sm_uninit()
5840 bfa_trc(dconf->bfa, dconf->min_cfg); in bfa_dconf_sm_uninit()
5841 bfa_fsm_send_event(&dconf->bfa->iocfc, in bfa_dconf_sm_uninit()
5846 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_uninit()
5848 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), in bfa_dconf_sm_uninit()
5849 BFA_FLASH_PART_DRV, dconf->instance, in bfa_dconf_sm_uninit()
5850 dconf->dconf, in bfa_dconf_sm_uninit()
5852 bfa_dconf_init_cb, dconf->bfa); in bfa_dconf_sm_uninit()
5854 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_uninit()
5855 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); in bfa_dconf_sm_uninit()
5861 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_uninit()
5868 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_uninit()
5879 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_flash_read()
5883 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_flash_read()
5888 bfa_ioc_suspend(&dconf->bfa->ioc); in bfa_dconf_sm_flash_read()
5891 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_flash_read()
5893 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_flash_read()
5896 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_flash_read()
5900 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_flash_read()
5910 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_ready()
5914 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_ready()
5920 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_ready()
5926 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_ready()
5937 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_dirty()
5945 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5946 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_dirty()
5950 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5951 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_dirty()
5959 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5963 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_dirty()
5974 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_final_sync()
5979 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_final_sync()
5983 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_final_sync()
5986 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_final_sync()
5993 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_sync()
6000 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_sync()
6005 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_sync()
6013 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_sync()
6021 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_iocdown_dirty()
6025 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_iocdown_dirty()
6031 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_iocdown_dirty()
6036 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_iocdown_dirty()
6049 if (cfg->drvcfg.min_cfg) in bfa_dconf_meminfo()
6062 dconf->bfad = bfad; in bfa_dconf_attach()
6063 dconf->bfa = bfa; in bfa_dconf_attach()
6064 dconf->instance = bfa->ioc.port_id; in bfa_dconf_attach()
6065 bfa_trc(bfa, dconf->instance); in bfa_dconf_attach()
6067 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf); in bfa_dconf_attach()
6068 if (cfg->drvcfg.min_cfg) { in bfa_dconf_attach()
6070 dconf->min_cfg = BFA_TRUE; in bfa_dconf_attach()
6072 dconf->min_cfg = BFA_FALSE; in bfa_dconf_attach()
6088 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) in bfa_dconf_init_cb()
6089 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE; in bfa_dconf_init_cb()
6090 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) in bfa_dconf_init_cb()
6091 dconf->dconf->hdr.version = BFI_DCONF_VERSION; in bfa_dconf_init_cb()
6094 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_init_cb()
6121 bfa_trc(dconf->bfa, 0); in bfa_dconf_flash_write()
6123 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa), in bfa_dconf_flash_write()
6124 BFA_FLASH_PART_DRV, dconf->instance, in bfa_dconf_flash_write()
6125 dconf->dconf, sizeof(struct bfa_dconf_s), 0, in bfa_dconf_flash_write()
6129 bfa_trc(dconf->bfa, bfa_status); in bfa_dconf_flash_write()
6138 bfa_trc(dconf->bfa, 0); in bfa_dconf_update()
6142 if (dconf->min_cfg) { in bfa_dconf_update()
6143 bfa_trc(dconf->bfa, dconf->min_cfg); in bfa_dconf_update()
6184 if (fru->op_busy) { in bfa_fru_notify()
6185 fru->status = BFA_STATUS_IOC_FAILURE; in bfa_fru_notify()
6186 fru->cbfn(fru->cbarg, fru->status); in bfa_fru_notify()
6187 fru->op_busy = 0; in bfa_fru_notify()
6199 * @param[in] cbarg - callback argument
6206 (struct bfi_fru_write_req_s *) fru->mb.msg; in bfa_fru_write_send()
6209 msg->offset = cpu_to_be32(fru->addr_off + fru->offset); in bfa_fru_write_send()
6210 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? in bfa_fru_write_send()
6211 fru->residue : BFA_FRU_DMA_BUF_SZ; in bfa_fru_write_send()
6212 msg->length = cpu_to_be32(len); in bfa_fru_write_send()
6217 msg->last = (len == fru->residue) ? 1 : 0; in bfa_fru_write_send()
6219 msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0; in bfa_fru_write_send()
6220 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); in bfa_fru_write_send()
6221 bfa_alen_set(&msg->alen, len, fru->dbuf_pa); in bfa_fru_write_send()
6223 memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len); in bfa_fru_write_send()
6224 bfa_ioc_mbox_queue(fru->ioc, &fru->mb); in bfa_fru_write_send()
6226 fru->residue -= len; in bfa_fru_write_send()
6227 fru->offset += len; in bfa_fru_write_send()
6233 * @param[in] cbarg - callback argument
6240 (struct bfi_fru_read_req_s *) fru->mb.msg; in bfa_fru_read_send()
6243 msg->offset = cpu_to_be32(fru->addr_off + fru->offset); in bfa_fru_read_send()
6244 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? in bfa_fru_read_send()
6245 fru->residue : BFA_FRU_DMA_BUF_SZ; in bfa_fru_read_send()
6246 msg->length = cpu_to_be32(len); in bfa_fru_read_send()
6247 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); in bfa_fru_read_send()
6248 bfa_alen_set(&msg->alen, len, fru->dbuf_pa); in bfa_fru_read_send()
6249 bfa_ioc_mbox_queue(fru->ioc, &fru->mb); in bfa_fru_read_send()
6255 * @param[in] mincfg - minimal cfg variable
6270 * @param[in] fru - fru structure
6271 * @param[in] ioc - ioc structure
6272 * @param[in] dev - device structure
6273 * @param[in] trcmod - trace module
6274 * @param[in] logmod - log module
6280 fru->ioc = ioc; in bfa_fru_attach()
6281 fru->trcmod = trcmod; in bfa_fru_attach()
6282 fru->cbfn = NULL; in bfa_fru_attach()
6283 fru->cbarg = NULL; in bfa_fru_attach()
6284 fru->op_busy = 0; in bfa_fru_attach()
6286 bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru); in bfa_fru_attach()
6287 bfa_q_qe_init(&fru->ioc_notify); in bfa_fru_attach()
6288 bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru); in bfa_fru_attach()
6289 list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q); in bfa_fru_attach()
6293 fru->dbuf_kva = NULL; in bfa_fru_attach()
6294 fru->dbuf_pa = 0; in bfa_fru_attach()
6301 * @param[in] fru - fru structure
6302 * @param[in] dm_kva - pointer to virtual memory address
6303 * @param[in] dm_pa - physical memory address
6304 * @param[in] mincfg - minimal cfg variable
6313 fru->dbuf_kva = dm_kva; in bfa_fru_memclaim()
6314 fru->dbuf_pa = dm_pa; in bfa_fru_memclaim()
6315 memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ); in bfa_fru_memclaim()
6323 * @param[in] fru - fru structure
6324 * @param[in] buf - update data buffer
6325 * @param[in] len - data buffer length
6326 * @param[in] offset - offset relative to starting address
6327 * @param[in] cbfn - callback function
6328 * @param[in] cbarg - callback argument
6340 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 && in bfa_fruvpd_update()
6341 fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) in bfa_fruvpd_update()
6344 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) in bfa_fruvpd_update()
6347 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_fruvpd_update()
6350 if (fru->op_busy) { in bfa_fruvpd_update()
6351 bfa_trc(fru, fru->op_busy); in bfa_fruvpd_update()
6355 fru->op_busy = 1; in bfa_fruvpd_update()
6357 fru->cbfn = cbfn; in bfa_fruvpd_update()
6358 fru->cbarg = cbarg; in bfa_fruvpd_update()
6359 fru->residue = len; in bfa_fruvpd_update()
6360 fru->offset = 0; in bfa_fruvpd_update()
6361 fru->addr_off = offset; in bfa_fruvpd_update()
6362 fru->ubuf = buf; in bfa_fruvpd_update()
6363 fru->trfr_cmpl = trfr_cmpl; in bfa_fruvpd_update()
6373 * @param[in] fru - fru structure
6374 * @param[in] buf - read data buffer
6375 * @param[in] len - data buffer length
6376 * @param[in] offset - offset relative to starting address
6377 * @param[in] cbfn - callback function
6378 * @param[in] cbarg - callback argument
6390 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) in bfa_fruvpd_read()
6393 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK && in bfa_fruvpd_read()
6394 fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) in bfa_fruvpd_read()
6397 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_fruvpd_read()
6400 if (fru->op_busy) { in bfa_fruvpd_read()
6401 bfa_trc(fru, fru->op_busy); in bfa_fruvpd_read()
6405 fru->op_busy = 1; in bfa_fruvpd_read()
6407 fru->cbfn = cbfn; in bfa_fruvpd_read()
6408 fru->cbarg = cbarg; in bfa_fruvpd_read()
6409 fru->residue = len; in bfa_fruvpd_read()
6410 fru->offset = 0; in bfa_fruvpd_read()
6411 fru->addr_off = offset; in bfa_fruvpd_read()
6412 fru->ubuf = buf; in bfa_fruvpd_read()
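
A hedged sketch of reading the FRU VPD; names are illustrative and the bfa_cb_fru_t callback prototype is an assumption based on the cbfn/cbarg parameters above. Only the card types checked in bfa_fruvpd_read() accept the request, and bfa_fruvpd_get_max_size() can be used first to bound 'len'.

static void
drv_fru_read_cb(void *cbarg, bfa_status_t status)
{
	/* VPD bytes are in the caller's buffer on BFA_STATUS_OK */
}

static bfa_status_t
drv_read_fru_vpd(struct bfa_fru_s *fru, void *buf, u32 len)
{
	return bfa_fruvpd_read(fru, buf, len, 0 /* offset */,
			       drv_fru_read_cb, NULL);
}
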
6421 * @param[in] fru - fru structure
6422 * @param[out] size - maximum size of fru vpd data
6429 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) in bfa_fruvpd_get_max_size()
6432 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_fruvpd_get_max_size()
6435 if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK || in bfa_fruvpd_get_max_size()
6436 fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2) in bfa_fruvpd_get_max_size()
6445 * @param[in] fru - fru structure
6446 * @param[in] buf - update data buffer
6447 * @param[in] len - data buffer length
6448 * @param[in] offset - offset relative to starting address
6449 * @param[in] cbfn - callback function
6450 * @param[in] cbarg - callback argument
6463 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) in bfa_tfru_write()
6466 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_tfru_write()
6469 if (fru->op_busy) { in bfa_tfru_write()
6470 bfa_trc(fru, fru->op_busy); in bfa_tfru_write()
6474 fru->op_busy = 1; in bfa_tfru_write()
6476 fru->cbfn = cbfn; in bfa_tfru_write()
6477 fru->cbarg = cbarg; in bfa_tfru_write()
6478 fru->residue = len; in bfa_tfru_write()
6479 fru->offset = 0; in bfa_tfru_write()
6480 fru->addr_off = offset; in bfa_tfru_write()
6481 fru->ubuf = buf; in bfa_tfru_write()
6491 * @param[in] fru - fru structure
6492 * @param[in] buf - read data buffer
6493 * @param[in] len - data buffer length
6494 * @param[in] offset - offset relative to starting address
6495 * @param[in] cbfn - callback function
6496 * @param[in] cbarg - callback argument
6508 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) in bfa_tfru_read()
6511 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_tfru_read()
6514 if (fru->op_busy) { in bfa_tfru_read()
6515 bfa_trc(fru, fru->op_busy); in bfa_tfru_read()
6519 fru->op_busy = 1; in bfa_tfru_read()
6521 fru->cbfn = cbfn; in bfa_tfru_read()
6522 fru->cbarg = cbarg; in bfa_tfru_read()
6523 fru->residue = len; in bfa_tfru_read()
6524 fru->offset = 0; in bfa_tfru_read()
6525 fru->addr_off = offset; in bfa_tfru_read()
6526 fru->ubuf = buf; in bfa_tfru_read()
6535 * @param[in] fruarg - fru structure
6536 * @param[in] msg - message structure
6545 bfa_trc(fru, msg->mh.msg_id); in bfa_fru_intr()
6547 if (!fru->op_busy) { in bfa_fru_intr()
6555 switch (msg->mh.msg_id) { in bfa_fru_intr()
6558 status = be32_to_cpu(rsp->status); in bfa_fru_intr()
6561 if (status != BFA_STATUS_OK || fru->residue == 0) { in bfa_fru_intr()
6562 fru->status = status; in bfa_fru_intr()
6563 fru->op_busy = 0; in bfa_fru_intr()
6564 if (fru->cbfn) in bfa_fru_intr()
6565 fru->cbfn(fru->cbarg, fru->status); in bfa_fru_intr()
6567 bfa_trc(fru, fru->offset); in bfa_fru_intr()
6568 if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP) in bfa_fru_intr()
6578 status = be32_to_cpu(rsp->status); in bfa_fru_intr()
6582 fru->status = status; in bfa_fru_intr()
6583 fru->op_busy = 0; in bfa_fru_intr()
6584 if (fru->cbfn) in bfa_fru_intr()
6585 fru->cbfn(fru->cbarg, fru->status); in bfa_fru_intr()
6587 u32 len = be32_to_cpu(rsp->length); in bfa_fru_intr()
6589 bfa_trc(fru, fru->offset); in bfa_fru_intr()
6592 memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len); in bfa_fru_intr()
6593 fru->residue -= len; in bfa_fru_intr()
6594 fru->offset += len; in bfa_fru_intr()
6596 if (fru->residue == 0) { in bfa_fru_intr()
6597 fru->status = status; in bfa_fru_intr()
6598 fru->op_busy = 0; in bfa_fru_intr()
6599 if (fru->cbfn) in bfa_fru_intr()
6600 fru->cbfn(fru->cbarg, fru->status); in bfa_fru_intr()
6602 if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP) in bfa_fru_intr()
6638 BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
6639 BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
6640 BFA_FLASH_BAD = -3, /*!< flash bad */
6641 BFA_FLASH_BUSY = -4, /*!< flash busy */
6642 BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
6643 BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
6644 BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
6645 BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
6646 BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
6760 * @param[in] pci_bar - pci bar address
6761 * @param[in] dev_status - device status
6799 * @param[in] pci_bar - pci bar address
6844 * @param[in] pci_bar - pci bar address
6845 * @param[in] offset - flash address offset
6846 * @param[in] len - read data length
6847 * @param[in] buf - read data buffer
6874 * check if write-in-progress bit is cleared in bfa_flash_read_start()
6890 * @param[in] pci_bar - pci bar address
6907 * @param[in] pci_bar - pci bar address
6908 * @param[in] len - read data length
6909 * @param[in] buf - read data buffer
6933 * @param[in] pci_bar - pci bar address
6934 * @param[in] offset - flash partition address offset
6935 * @param[in] buf - read data buffer
6936 * @param[in] len - read data length
6961 if (--n <= 0) in bfa_flash_sem_get()
6992 l = (n + 1) * fifo_sz - s; in bfa_flash_raw_read()
7005 if (--n <= 0) { in bfa_flash_raw_read()
7013 residue -= l; in bfa_flash_raw_read()
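
A hedged sketch of the raw, synchronous read path; the signature is taken from the doc comment above and the wrapper name is illustrative. Unlike bfa_flash_read_part(), this variant polls the flash semaphore and FIFO directly over the PCI BAR, so it needs no mailbox, DMA buffer, or completion callback.

static bfa_status_t
drv_raw_read(void __iomem *pci_bar, u32 part_off, char *buf, u32 len)
{
	/* 'part_off' is relative to the flash partition start; 'len' in bytes */
	return bfa_flash_raw_read(pci_bar, part_off, buf, len);
}
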