Lines Matching refs:bfad

38 struct bfad_s *bfad = drv; in bfa_cb_ioim_done() local
45 bfa_trc(bfad, scsi_status); in bfa_cb_ioim_done()
49 bfa_trc(bfad, sns_len); in bfa_cb_ioim_done()
56 bfa_trc(bfad, residue); in bfa_cb_ioim_done()
61 bfa_trc(bfad, 0); in bfa_cb_ioim_done()
84 bfa_trc(bfad, cmnd->result); in bfa_cb_ioim_done()
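
The bfa_cb_ioim_done() lines above are from the I/O completion callback of the Brocade BFA initiator-mode driver (bfad_im.c): the opaque 'drv' cookie is cast back to the driver instance and each status field is traced with bfa_trc() before cmnd->result is reported. A minimal sketch of that pattern follows, assuming struct bfad_s and the bfa_trc() macro from bfad_drv.h; the helper name and its reduced argument list are illustrative, not the driver's actual callback signature.

static void
ioim_done_trace_sketch(void *drv, struct scsi_cmnd *cmnd,
		       u8 scsi_status, int sns_len, s32 residue)
{
	/* The opaque cookie handed back by the HAL is the driver instance. */
	struct bfad_s *bfad = drv;

	bfa_trc(bfad, scsi_status);	/* SCSI status byte reported by firmware */
	bfa_trc(bfad, sns_len);		/* length of valid sense data */
	bfa_trc(bfad, residue);		/* data under/over-run residue */
	bfa_trc(bfad, cmnd->result);	/* result code handed to the midlayer */
}
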
134 struct bfad_s *bfad = drv; in bfa_cb_ioim_abort() local
142 bfa_trc(bfad, cmnd->result); in bfa_cb_ioim_abort()
147 bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, in bfa_cb_tskim_done() argument
174 struct bfad_s *bfad = im_port->bfad; in bfad_im_info() local
179 bfad->pci_name, BFAD_DRIVER_VERSION); in bfad_im_info()
195 struct bfad_s *bfad = im_port->bfad; in bfad_im_abort_handler() local
201 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_abort_handler()
213 bfa_trc(bfad, hal_io->iotag); in bfad_im_abort_handler()
214 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfad_im_abort_handler()
218 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_abort_handler()
230 bfa_trc(bfad, hal_io->iotag); in bfad_im_abort_handler()
231 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfad_im_abort_handler()
236 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_abort_handler()
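
bfad_im_abort_handler() illustrates the locking discipline used throughout this file: per-instance state is only touched between spin_lock_irqsave(&bfad->bfad_lock, ...) and the matching restore, with bfa_trc()/BFA_LOG() emitted while the lock is held. A hedged sketch of that bracket; the helper name and the log text are placeholders.

static void
abort_trace_sketch(struct bfad_s *bfad, struct bfa_ioim_s *hal_io)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: abort cmnd, io_tag %x\n",	/* placeholder text */
		bfad->inst_no, hal_io->iotag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
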
241 bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, in bfad_im_target_reset_send() argument
249 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); in bfad_im_target_reset_send()
251 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_target_reset_send()
271 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_target_reset_send()
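
bfad_im_target_reset_send() shows the task-management allocation pattern: the struct scsi_cmnd pointer is reused as the driver-side task cookie passed to bfa_tskim_alloc(), and an allocation failure is reported through BFA_LOG. A minimal sketch under those assumptions; the error text and helper name are illustrative.

static bfa_status_t
tskim_alloc_sketch(struct bfad_s *bfad, struct scsi_cmnd *cmnd)
{
	struct bfa_tskim_s *tskim;

	/* The SCSI command doubles as the driver task cookie. */
	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"target reset, fail to allocate tskim\n");
		return BFA_STATUS_FAILED;
	}
	return BFA_STATUS_OK;
}
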
297 struct bfad_s *bfad = im_port->bfad; in bfad_im_reset_lun_handler() local
307 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
310 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
315 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); in bfad_im_reset_lun_handler()
317 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_reset_lun_handler()
319 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
339 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_reset_lun_handler()
341 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
348 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
354 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_reset_lun_handler()
373 struct bfad_s *bfad = im_port->bfad; in bfad_im_reset_target_handler() local
380 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_reset_target_handler()
384 rc = bfad_im_target_reset_send(bfad, cmnd, itnim); in bfad_im_reset_target_handler()
387 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_target_handler()
390 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_reset_target_handler()
394 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_reset_target_handler()
401 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_target_handler()
425 bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, in bfa_fcb_itnim_alloc() argument
432 (*itnim_drv)->im = bfad->im; in bfa_fcb_itnim_alloc()
440 bfad->bfad_flags |= BFAD_RPORT_ONLINE; in bfa_fcb_itnim_alloc()
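
bfa_fcb_itnim_alloc() is the FCS callback that creates the driver-side itnim: it links the new object to the initiator-mode module through bfad->im and marks the instance with BFAD_RPORT_ONLINE. A sketch of just those two steps, assuming a caller-provided, already-allocated itnim_drv; the surrounding allocation and work-item setup are elided.

static void
itnim_link_sketch(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
{
	/* Tie the new itnim to the IM (initiator mode) module ... */
	itnim_drv->im = bfad->im;
	/* ... and note that at least one remote port is now online. */
	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
}
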
449 bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) in bfa_fcb_itnim_free() argument
472 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfa_fcb_itnim_free()
511 struct bfad_s *bfad; in bfa_fcb_itnim_offline() local
515 bfad = port->bfad; in bfa_fcb_itnim_offline()
516 if ((bfad->pport.flags & BFAD_PORT_DELETE) || in bfa_fcb_itnim_offline()
534 bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, in bfad_im_scsi_host_alloc() argument
550 im_port->shost = bfad_scsi_host_alloc(im_port, bfad); in bfad_im_scsi_host_alloc()
563 im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth; in bfad_im_scsi_host_alloc()
570 error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev); in bfad_im_scsi_host_alloc()
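
bfad_im_scsi_host_alloc() wires the SCSI host to the driver instance: the host is allocated through bfad_scsi_host_alloc(), its queue depth is taken from bfad->cfg_data.ioc_queue_depth, and it is registered against the PCI device with scsi_add_host_with_dma(). A condensed sketch of that sequence with error handling reduced to bare returns; the helper name is illustrative.

static int
scsi_host_setup_sketch(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
		       struct device *dev)
{
	int error;

	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
	if (!im_port->shost)
		return -ENOMEM;

	/* Queue depth comes from the per-IOC configuration data. */
	im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;

	/* Register the host; DMA mapping is done against the PCI device. */
	error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
	if (error)
		scsi_host_put(im_port->shost);
	return error;
}
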
590 bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) in bfad_im_scsi_host_free() argument
592 bfa_trc(bfad, bfad->inst_no); in bfad_im_scsi_host_free()
593 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n", in bfad_im_scsi_host_free()
619 bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port) in bfad_im_port_new() argument
631 im_port->bfad = bfad; in bfad_im_port_new()
642 bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port) in bfad_im_port_delete() argument
646 queue_work(bfad->im->drv_workq, in bfad_im_port_delete()
655 struct bfad_s *bfad = im_port->bfad; in bfad_im_port_clean() local
657 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_port_clean()
667 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_port_clean()
675 struct bfad_s *bfad = im->bfad; in bfad_aen_im_notify_handler() local
676 struct Scsi_Host *shost = bfad->pport.im_port->shost; in bfad_aen_im_notify_handler()
680 while (!list_empty(&bfad->active_aen_q)) { in bfad_aen_im_notify_handler()
681 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); in bfad_aen_im_notify_handler()
682 bfa_q_deq(&bfad->active_aen_q, &aen_entry); in bfad_aen_im_notify_handler()
683 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); in bfad_aen_im_notify_handler()
689 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); in bfad_aen_im_notify_handler()
690 list_add_tail(&aen_entry->qe, &bfad->free_aen_q); in bfad_aen_im_notify_handler()
691 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); in bfad_aen_im_notify_handler()
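
bfad_aen_im_notify_handler() drains the active AEN queue: each entry is dequeued from bfad->active_aen_q under bfad_aen_spinlock, processed outside the lock, and then returned to bfad->free_aen_q under the same lock. A sketch of that drain loop, with the actual event delivery elided.

static void
aen_drain_sketch(struct bfad_s *bfad)
{
	struct bfa_aen_entry_s *aen_entry;
	unsigned long flags;

	while (!list_empty(&bfad->active_aen_q)) {
		/* Dequeue one entry under the AEN spinlock ... */
		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		bfa_q_deq(&bfad->active_aen_q, &aen_entry);
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);

		/* ... deliver the event to listeners here (elided) ... */

		/* ... then recycle the entry onto the free list. */
		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
	}
}
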
696 bfad_im_probe(struct bfad_s *bfad) in bfad_im_probe() argument
704 bfad->im = im; in bfad_im_probe()
705 im->bfad = bfad; in bfad_im_probe()
707 if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { in bfad_im_probe()
717 bfad_im_probe_undo(struct bfad_s *bfad) in bfad_im_probe_undo() argument
719 if (bfad->im) { in bfad_im_probe_undo()
720 bfad_destroy_workq(bfad->im); in bfad_im_probe_undo()
721 kfree(bfad->im); in bfad_im_probe_undo()
722 bfad->im = NULL; in bfad_im_probe_undo()
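
bfad_im_probe() and bfad_im_probe_undo() form an allocate/teardown pair: probe allocates struct bfad_im_s, cross-links it with the bfad instance, and starts the per-instance work queue via bfad_thread_workq(); undo destroys the work queue, frees the structure, and clears bfad->im. A hedged sketch of the pair; the kzalloc call and the probe error path are assumptions inferred from the listing, not copied from the driver.

static bfa_status_t
im_probe_sketch(struct bfad_s *bfad)
{
	struct bfad_im_s *im;

	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
	if (!im)
		return BFA_STATUS_ENOMEM;

	/* Cross-link the IM module and the driver instance. */
	bfad->im = im;
	im->bfad = bfad;

	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
		kfree(im);
		bfad->im = NULL;
		return BFA_STATUS_FAILED;
	}
	return BFA_STATUS_OK;
}

static void
im_probe_undo_sketch(struct bfad_s *bfad)
{
	if (bfad->im) {
		bfad_destroy_workq(bfad->im);
		kfree(bfad->im);
		bfad->im = NULL;
	}
}
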
727 bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) in bfad_scsi_host_alloc() argument
739 sht->sg_tablesize = bfad->cfg_data.io_max_sge; in bfad_scsi_host_alloc()
745 bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) in bfad_scsi_host_free() argument
748 flush_workqueue(bfad->im->drv_workq); in bfad_scsi_host_free()
749 bfad_im_scsi_host_free(im_port->bfad, im_port); in bfad_scsi_host_free()
764 bfad_thread_workq(struct bfad_s *bfad) in bfad_thread_workq() argument
766 struct bfad_im_s *im = bfad->im; in bfad_thread_workq()
768 bfa_trc(bfad, 0); in bfad_thread_workq()
770 bfad->inst_no); in bfad_thread_workq()
1027 struct bfad_s *bfad = im_port->bfad; in bfad_fc_host_init() local
1030 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); in bfad_fc_host_init()
1036 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); in bfad_fc_host_init()
1048 strscpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, in bfad_fc_host_init()
1052 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); in bfad_fc_host_init()
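
bfad_fc_host_init() publishes adapter attributes through the fc_host_* accessors of the FC transport class: the NPIV vport limit comes from bfa_lps_get_max_vport(), the supported speed mask from bfad_im_supported_speeds(), and the symbolic name is copied with strscpy() from the fabric base-port configuration. A sketch of those assignments, assuming 'host' is the Scsi_Host backing im_port and that the final copy into fc_host_symbolic_name() is how the buffer is consumed.

static void
fc_host_attr_sketch(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
	struct Scsi_Host *host = im_port->shost;
	char symname[BFA_SYMNAME_MAXLEN];

	/* NPIV limit and speed mask are queried from the BFA layer. */
	fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
	fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);

	/* Symbolic name is copied out of the fabric base-port config. */
	strscpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
		BFA_SYMNAME_MAXLEN);
	strscpy(fc_host_symbolic_name(host), symname, FC_SYMBOLIC_NAME_SIZE);
}
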
1108 struct bfad_s *bfad = im->bfad; in bfad_im_itnim_work_handler() local
1116 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1118 bfa_trc(bfad, itnim->state); in bfad_im_itnim_work_handler()
1122 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1124 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1131 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfad_im_itnim_work_handler()
1153 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1155 bfa_fcpim_path_tov_get(&bfad->bfa) + 1; in bfad_im_itnim_work_handler()
1157 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1164 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfad_im_itnim_work_handler()
1179 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1181 bfa_fcpim_path_tov_get(&bfad->bfa) + 1; in bfad_im_itnim_work_handler()
1183 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1195 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1206 struct bfad_s *bfad = im_port->bfad; in bfad_im_queuecommand_lck() local
1222 if (bfad->bfad_flags & BFAD_EEH_BUSY) { in bfad_im_queuecommand_lck()
1223 if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE) in bfad_im_queuecommand_lck()
1235 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_queuecommand_lck()
1236 if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) { in bfad_im_queuecommand_lck()
1239 bfad->inst_no, cmnd, cmnd->cmnd[0]); in bfad_im_queuecommand_lck()
1251 hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd, in bfad_im_queuecommand_lck()
1255 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_queuecommand_lck()
1262 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_queuecommand_lck()
1267 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_queuecommand_lck()
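
bfad_im_queuecommand_lck() gates every command on driver state before handing it to the HAL: commands are bounced while EEH recovery is in progress (BFAD_EEH_BUSY / BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE), rejected under bfad_lock until BFAD_HAL_START_DONE is set, and only then wrapped into an I/O request with bfa_ioim_alloc(). A reduced sketch of those checks, assuming the target's hal_itnim has already been looked up; the start of the I/O and the trace/log calls are elided.

static int
queuecommand_gate_sketch(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
			 struct bfa_itnim_s *hal_itnim)
{
	struct bfa_ioim_s *hal_io;
	unsigned long flags;

	/* While EEH recovery is in progress, do not submit new I/O. */
	if (bfad->bfad_flags & BFAD_EEH_BUSY) {
		if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE) {
			/* Channel permanently failed: fail the command. */
			cmnd->result = DID_NO_CONNECT << 16;
			scsi_done(cmnd);
			return 0;
		}
		/* Recovery still running: ask the midlayer to retry later. */
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Refuse I/O until HAL start has completed. */
	if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* The SCSI command is used directly as the driver I/O cookie. */
	hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
				hal_itnim, scsi_sg_count(cmnd));
	if (!hal_io) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
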
1278 bfad_rport_online_wait(struct bfad_s *bfad) in bfad_rport_online_wait() argument
1283 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) in bfad_rport_online_wait()
1289 if (bfad->bfad_flags & BFAD_PORT_ONLINE) { in bfad_rport_online_wait()
1292 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) in bfad_rport_online_wait()
1298 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) { in bfad_rport_online_wait()
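
bfad_rport_online_wait() is a bounded polling loop: it first waits for BFAD_PORT_ONLINE in bfad->bfad_flags, then, once the local port is up, waits for BFAD_RPORT_ONLINE before allowing an optional extra remote-port settle delay. A sketch of that two-stage wait, using illustrative iteration and delay constants rather than the driver's own values.

static void
rport_online_wait_sketch(struct bfad_s *bfad)
{
	int i;
	int wait_max = 30;	/* illustrative bound, not the driver's */
	int rport_delay = 10;	/* illustrative settle delay, in seconds */

	/* Stage 1: wait for the local port to come online. */
	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) && i < wait_max; i++)
		schedule_timeout_uninterruptible(HZ);

	if (!(bfad->bfad_flags & BFAD_PORT_ONLINE))
		return;

	/* Stage 2: wait for at least one remote port to come online. */
	for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) && i < wait_max; i++)
		schedule_timeout_uninterruptible(HZ);

	/* Give discovered remote ports a little extra time to settle. */
	if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE))
		schedule_timeout_uninterruptible(rport_delay * HZ);
}
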
1306 bfad_get_linkup_delay(struct bfad_s *bfad) in bfad_get_linkup_delay() argument
1319 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns); in bfad_get_linkup_delay()