/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <linux/blk-cgroup.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

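/* Textual names for the SCSI protection operations, indexed by the
 * SCSI_PROT_* values returned from scsi_get_prot_op().
 */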
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;       /* Checksum */
	__be16 app_tag;         /* Opaque storage */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};

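/**
 * lpfc_rport_data_from_scsi_device - Look up the rport data for a scsi device
 * @sdev: Pointer to the scsi device of interest.
 *
 * When cfg_fof is set, sdev->hostdata points to an lpfc_device_data
 * structure whose rport_data member carries the remote port data;
 * otherwise sdev->hostdata is the lpfc_rport_data itself.
 **/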
static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event per second, and wakes up the worker thread of @phba to process the
 * event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth of every scsi device on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (num_rsrc_err >= sdev->queue_depth)
					new_queue_depth = 1;
				else
					new_queue_depth = sdev->queue_depth -
						num_rsrc_err;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.io_buf = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	u16 xri = 0;
	u16 rxid = 0;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
	struct scsi_cmnd *cmd;
	int offline = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
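	/* If the PCI channel is offline, the WCQE contents cannot be
	 * trusted; instead of matching a single xri, every buffer on the
	 * abort list is flushed below.
	 */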
	offline = pci_channel_offline(phba->pcidev);
	if (!offline) {
		xri = bf_get(lpfc_wcqe_xa_xri, axri);
		rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	}
	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&qp->lpfc_abts_io_buf_list, list) {
		if (offline)
			xri = psb->cur_iocbq.sli4_xritag;
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				if (!offline) {
					lpfc_sli4_nvme_xri_aborted(phba, axri,
								   psb);
					return;
				}
				lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
				spin_lock_irqsave(&phba->hbalock, iflag);
				spin_lock(&qp->abts_io_buf_list_lock);
				continue;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;
			spin_unlock_irqrestore(&phba->hbalock, iflag);

			spin_lock_irqsave(&phba->rrq_list_lock, iflag);
			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->rrq_list_lock, iflag);
			if (ndlp && !offline) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}

			if (phba->cfg_fcp_wait_abts_rsp || offline) {
				spin_lock_irqsave(&psb->buf_lock, iflag);
				cmd = psb->pCmd;
				psb->pCmd = NULL;
				spin_unlock_irqrestore(&psb->buf_lock, iflag);

				/* The sdev is not guaranteed to be valid post
				 * scsi_done upcall.
				 */
				if (cmd)
					scsi_done(cmd);

				/*
				 * If an abort thread is waiting for the
				 * command to complete, wake it up.
				 */
				spin_lock_irqsave(&psb->buf_lock, iflag);
				psb->cur_iocbq.cmd_flag &=
					~LPFC_DRIVER_ABORTED;
				if (psb->waitq)
					wake_up(psb->waitq);
				spin_unlock_irqrestore(&psb->buf_lock, iflag);
			}

			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			if (!offline)
				return;
			spin_lock_irqsave(&phba->hbalock, iflag);
			spin_lock(&qp->abts_io_buf_list_lock);
			continue;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
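	/* The xri was not found on the abort list; scan the active iotag
	 * table for an outstanding FCP I/O on this exchange and clear its
	 * XBUSY flag.
	 */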
	if (!offline) {
		for (i = 1; i <= phba->sli.last_iotag; i++) {
			iocbq = phba->sli.iocbq_lookup[i];

			if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
			    (iocbq->cmd_flag & LPFC_IO_LIBDFC))
				continue;
			if (iocbq->sli4_xritag != xri)
				continue;
			psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

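	/* Buffers are consumed from the get list and returned to the put
	 * list; when the get list is empty, splice the put list onto it
	 * under both locks and retry.
	 */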
	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge_le *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

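	/* Select a hardware queue: use the block layer's hw queue for this
	 * request when scheduling by hdwq, otherwise the queue mapped to
	 * the current CPU.
	 */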
	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes. Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	bf_set_le32(lpfc_sli4_sge_last, sgl, 0);
	if (cmnd && cmnd->cmd_len > LPFC_FCP_CDB_LEN)
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd32));
	else
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));

	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd32);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	bf_set_le32(lpfc_sli4_sge_last, sgl, 1);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @hdwq io_buf_list. For SLI4, XRIs are tied to the scsi buffer and cannot
 * be reused for at least RA_TOV amount of time if the I/O was aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		if (!phba->cfg_fcp_wait_abts_rsp)
			psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec.
 * It scans through the sg elements and formats the bdes, and also
 * initializes all IOCB fields which are dependent on the scsi command
 * request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	u32 lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_prot_ref_tag(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = scsi_prot_interval(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if (phba->lpfc_injerr_lba < (u64)lba ||
		    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - (u64)lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid  &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0, the error will be
					 * inserted in the middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
					LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0, the error will be
					 * inserted in the middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{

	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return 0;
}
#endif
1501  
1502  /**
1503   * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1504   * @phba: The Hba for which this call is being executed.
1505   * @sc: pointer to scsi command we're working on
1506   * @bpl: pointer to buffer list for protection groups
1507   * @datasegcnt: number of segments of data that have been dma mapped
1508   *
1509   * This function sets up BPL buffer list for protection groups of
1510   * type LPFC_PG_TYPE_NO_DIF
1511   *
1512   * This is usually used when the HBA is instructed to generate
1513   * DIFs and insert them into data stream (or strip DIF from
1514   * incoming data stream)
1515   *
1516   * The buffer list consists of just one protection group described
1517   * below:
1518   *                                +-------------------------+
1519   *   start of prot group  -->     |          PDE_5          |
1520   *                                +-------------------------+
1521   *                                |          PDE_6          |
1522   *                                +-------------------------+
1523   *                                |         Data BDE        |
1524   *                                +-------------------------+
1525   *                                |more Data BDE's ... (opt)|
1526   *                                +-------------------------+
1527   *
1528   *
1529   * Note: Data s/g buffers have been dma mapped
1530   *
1531   * Returns the number of BDEs added to the BPL.
1532   **/
1533  static int
lpfc_bg_setup_bpl(struct lpfc_hba * phba,struct scsi_cmnd * sc,struct ulp_bde64 * bpl,int datasegcnt)1534  lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1535  		struct ulp_bde64 *bpl, int datasegcnt)
1536  {
1537  	struct scatterlist *sgde = NULL; /* s/g data entry */
1538  	struct lpfc_pde5 *pde5 = NULL;
1539  	struct lpfc_pde6 *pde6 = NULL;
1540  	dma_addr_t physaddr;
1541  	int i = 0, num_bde = 0, status;
1542  	int datadir = sc->sc_data_direction;
1543  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1544  	uint32_t rc;
1545  #endif
1546  	uint32_t checking = 1;
1547  	uint32_t reftag;
1548  	uint8_t txop, rxop;
1549  
1550  	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1551  	if (status)
1552  		goto out;
1553  
1554  	/* extract some info from the scsi command for pde*/
1555  	reftag = scsi_prot_ref_tag(sc);
1556  
1557  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1558  	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1559  	if (rc) {
1560  		if (rc & BG_ERR_SWAP)
1561  			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1562  		if (rc & BG_ERR_CHECK)
1563  			checking = 0;
1564  	}
1565  #endif
1566  
1567  	/* setup PDE5 with what we have */
1568  	pde5 = (struct lpfc_pde5 *) bpl;
1569  	memset(pde5, 0, sizeof(struct lpfc_pde5));
1570  	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1571  
1572  	/* Endianness conversion if necessary for PDE5 */
1573  	pde5->word0 = cpu_to_le32(pde5->word0);
1574  	pde5->reftag = cpu_to_le32(reftag);
1575  
1576  	/* advance bpl and increment bde count */
1577  	num_bde++;
1578  	bpl++;
1579  	pde6 = (struct lpfc_pde6 *) bpl;
1580  
1581  	/* setup PDE6 with the rest of the info */
1582  	memset(pde6, 0, sizeof(struct lpfc_pde6));
1583  	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1584  	bf_set(pde6_optx, pde6, txop);
1585  	bf_set(pde6_oprx, pde6, rxop);
1586  
1587  	/*
1588  	 * We only need to check the data on READs; for WRITEs,
1589  	 * protection data is automatically generated, not checked.
1590  	 */
1591  	if (datadir == DMA_FROM_DEVICE) {
1592  		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1593  			bf_set(pde6_ce, pde6, checking);
1594  		else
1595  			bf_set(pde6_ce, pde6, 0);
1596  
1597  		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1598  			bf_set(pde6_re, pde6, checking);
1599  		else
1600  			bf_set(pde6_re, pde6, 0);
1601  	}
1602  	bf_set(pde6_ai, pde6, 1);
1603  	bf_set(pde6_ae, pde6, 0);
1604  	bf_set(pde6_apptagval, pde6, 0);
1605  
1606  	/* Endianness conversion if necessary for PDE6 */
1607  	pde6->word0 = cpu_to_le32(pde6->word0);
1608  	pde6->word1 = cpu_to_le32(pde6->word1);
1609  	pde6->word2 = cpu_to_le32(pde6->word2);
1610  
1611  	/* advance bpl and increment bde count */
1612  	num_bde++;
1613  	bpl++;
1614  
1615  	/* assumption: caller has already run dma_map_sg on command data */
1616  	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1617  		physaddr = sg_dma_address(sgde);
1618  		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1619  		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1620  		bpl->tus.f.bdeSize = sg_dma_len(sgde);
1621  		if (datadir == DMA_TO_DEVICE)
1622  			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1623  		else
1624  			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1625  		bpl->tus.w = le32_to_cpu(bpl->tus.w);
1626  		bpl++;
1627  		num_bde++;
1628  	}
1629  
1630  out:
1631  	return num_bde;
1632  }
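/*
 * Illustrative sketch (not driver code): for a WRITE_INSERT command whose
 * data dma_map_sg() returned two segments, the BPL built above holds
 * PDE5 + PDE6 + two data BDEs, so the call returns num_bde = 4:
 *
 *	num_bde = lpfc_bg_setup_bpl(phba, sc, bpl, 2);	// returns 4
 *	iocb->un.fcpi64.bdl.bdeSize = (2 + num_bde) *
 *					sizeof(struct ulp_bde64);
 *
 * mirroring how lpfc_bg_scsi_prep_dma_buf_s3() folds the count into the
 * IOCB bdeSize further below.
 */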
1633  
1634  /**
1635   * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1636   * @phba: The Hba for which this call is being executed.
1637   * @sc: pointer to scsi command we're working on
1638   * @bpl: pointer to buffer list for protection groups
1639   * @datacnt: number of segments of data that have been dma mapped
1640   * @protcnt: number of segments of protection data that have been dma mapped
1641   *
1642   * This function sets up BPL buffer list for protection groups of
1643   * type LPFC_PG_TYPE_DIF
1644   *
1645   * This is usually used when DIFs are in their own buffers,
1646   * separate from the data. The HBA can then be instructed
1647   * to place the DIFs in the outgoing stream.  For read operations,
1648   * the HBA can extract the DIFs and place them in the DIF buffers.
1649   *
1650   * The buffer list for this type consists of one or more of the
1651   * protection groups described below:
1652   *                                    +-------------------------+
1653   *   start of first prot group  -->   |          PDE_5          |
1654   *                                    +-------------------------+
1655   *                                    |          PDE_6          |
1656   *                                    +-------------------------+
1657   *                                    |      PDE_7 (Prot BDE)   |
1658   *                                    +-------------------------+
1659   *                                    |        Data BDE         |
1660   *                                    +-------------------------+
1661   *                                    |more Data BDE's ... (opt)|
1662   *                                    +-------------------------+
1663   *   start of new  prot group  -->    |          PDE_5          |
1664   *                                    +-------------------------+
1665   *                                    |          ...            |
1666   *                                    +-------------------------+
1667   *
1668   * Note: It is assumed that both data and protection s/g buffers have been
1669   *       mapped for DMA
1670   *
1671   * Returns the number of BDEs added to the BPL.
1672   **/
1673  static int
1674  lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1675  		struct ulp_bde64 *bpl, int datacnt, int protcnt)
1676  {
1677  	struct scatterlist *sgde = NULL; /* s/g data entry */
1678  	struct scatterlist *sgpe = NULL; /* s/g prot entry */
1679  	struct lpfc_pde5 *pde5 = NULL;
1680  	struct lpfc_pde6 *pde6 = NULL;
1681  	struct lpfc_pde7 *pde7 = NULL;
1682  	dma_addr_t dataphysaddr, protphysaddr;
1683  	unsigned short curr_prot = 0;
1684  	unsigned int split_offset;
1685  	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1686  	unsigned int protgrp_blks, protgrp_bytes;
1687  	unsigned int remainder, subtotal;
1688  	int status;
1689  	int datadir = sc->sc_data_direction;
1690  	unsigned char pgdone = 0, alldone = 0;
1691  	unsigned blksize;
1692  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1693  	uint32_t rc;
1694  #endif
1695  	uint32_t checking = 1;
1696  	uint32_t reftag;
1697  	uint8_t txop, rxop;
1698  	int num_bde = 0;
1699  
1700  	sgpe = scsi_prot_sglist(sc);
1701  	sgde = scsi_sglist(sc);
1702  
1703  	if (!sgpe || !sgde) {
1704  		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1705  				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
1706  				sgpe, sgde);
1707  		return 0;
1708  	}
1709  
1710  	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1711  	if (status)
1712  		goto out;
1713  
1714  	/* extract some info from the scsi command */
1715  	blksize = scsi_prot_interval(sc);
1716  	reftag = scsi_prot_ref_tag(sc);
1717  
1718  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1719  	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1720  	if (rc) {
1721  		if (rc & BG_ERR_SWAP)
1722  			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1723  		if (rc & BG_ERR_CHECK)
1724  			checking = 0;
1725  	}
1726  #endif
1727  
1728  	split_offset = 0;
1729  	do {
1730  		/* Check to see if we ran out of space */
1731  		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1732  			return num_bde + 3;
1733  
1734  		/* setup PDE5 with what we have */
1735  		pde5 = (struct lpfc_pde5 *) bpl;
1736  		memset(pde5, 0, sizeof(struct lpfc_pde5));
1737  		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1738  
1739  		/* Endianness conversion if necessary for PDE5 */
1740  		pde5->word0 = cpu_to_le32(pde5->word0);
1741  		pde5->reftag = cpu_to_le32(reftag);
1742  
1743  		/* advance bpl and increment bde count */
1744  		num_bde++;
1745  		bpl++;
1746  		pde6 = (struct lpfc_pde6 *) bpl;
1747  
1748  		/* setup PDE6 with the rest of the info */
1749  		memset(pde6, 0, sizeof(struct lpfc_pde6));
1750  		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1751  		bf_set(pde6_optx, pde6, txop);
1752  		bf_set(pde6_oprx, pde6, rxop);
1753  
1754  		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1755  			bf_set(pde6_ce, pde6, checking);
1756  		else
1757  			bf_set(pde6_ce, pde6, 0);
1758  
1759  		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1760  			bf_set(pde6_re, pde6, checking);
1761  		else
1762  			bf_set(pde6_re, pde6, 0);
1763  
1764  		bf_set(pde6_ai, pde6, 1);
1765  		bf_set(pde6_ae, pde6, 0);
1766  		bf_set(pde6_apptagval, pde6, 0);
1767  
1768  		/* Endianness conversion if necessary for PDE6 */
1769  		pde6->word0 = cpu_to_le32(pde6->word0);
1770  		pde6->word1 = cpu_to_le32(pde6->word1);
1771  		pde6->word2 = cpu_to_le32(pde6->word2);
1772  
1773  		/* advance bpl and increment bde count */
1774  		num_bde++;
1775  		bpl++;
1776  
1777  		/* setup the first BDE that points to protection buffer */
1778  		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1779  		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1780  
1781  		/* must be integer multiple of the DIF block length */
1782  		BUG_ON(protgroup_len % 8);
1783  
1784  		pde7 = (struct lpfc_pde7 *) bpl;
1785  		memset(pde7, 0, sizeof(struct lpfc_pde7));
1786  		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1787  
1788  		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1789  		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1790  
1791  		protgrp_blks = protgroup_len / 8;
1792  		protgrp_bytes = protgrp_blks * blksize;
1793  
1794  		/* check if this pde is crossing the 4K boundary; if so split */
1795  		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1796  			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1797  			protgroup_offset += protgroup_remainder;
1798  			protgrp_blks = protgroup_remainder / 8;
1799  			protgrp_bytes = protgrp_blks * blksize;
1800  		} else {
1801  			protgroup_offset = 0;
1802  			curr_prot++;
1803  		}
1804  
1805  		num_bde++;
1806  
1807  		/* setup BDE's for data blocks associated with DIF data */
1808  		pgdone = 0;
1809  		subtotal = 0; /* total bytes processed for current prot grp */
1810  		while (!pgdone) {
1811  			/* Check to see if we ran out of space */
1812  			if (num_bde >= phba->cfg_total_seg_cnt)
1813  				return num_bde + 1;
1814  
1815  			if (!sgde) {
1816  				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1817  					"9065 BLKGRD:%s Invalid data segment\n",
1818  						__func__);
1819  				return 0;
1820  			}
1821  			bpl++;
1822  			dataphysaddr = sg_dma_address(sgde) + split_offset;
1823  			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1824  			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1825  
1826  			remainder = sg_dma_len(sgde) - split_offset;
1827  
1828  			if ((subtotal + remainder) <= protgrp_bytes) {
1829  				/* we can use this whole buffer */
1830  				bpl->tus.f.bdeSize = remainder;
1831  				split_offset = 0;
1832  
1833  				if ((subtotal + remainder) == protgrp_bytes)
1834  					pgdone = 1;
1835  			} else {
1836  				/* must split this buffer with next prot grp */
1837  				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1838  				split_offset += bpl->tus.f.bdeSize;
1839  			}
1840  
1841  			subtotal += bpl->tus.f.bdeSize;
1842  
1843  			if (datadir == DMA_TO_DEVICE)
1844  				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1845  			else
1846  				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1847  			bpl->tus.w = le32_to_cpu(bpl->tus.w);
1848  
1849  			num_bde++;
1850  
1851  			if (split_offset)
1852  				break;
1853  
1854  			/* Move to the next s/g segment if possible */
1855  			sgde = sg_next(sgde);
1856  
1857  		}
1858  
1859  		if (protgroup_offset) {
1860  			/* update the reference tag */
1861  			reftag += protgrp_blks;
1862  			bpl++;
1863  			continue;
1864  		}
1865  
1866  		/* are we done ? */
1867  		if (curr_prot == protcnt) {
1868  			alldone = 1;
1869  		} else if (curr_prot < protcnt) {
1870  			/* advance to next prot buffer */
1871  			sgpe = sg_next(sgpe);
1872  			bpl++;
1873  
1874  			/* update the reference tag */
1875  			reftag += protgrp_blks;
1876  		} else {
1877  			/* if we're here, we have a bug */
1878  			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1879  					"9054 BLKGRD: bug in %s\n", __func__);
1880  		}
1881  
1882  	} while (!alldone);
1883  out:
1884  
1885  	return num_bde;
1886  }
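/*
 * Worked example of the protection-group math above (illustrative only,
 * assuming a 512-byte interval): a protection segment of sg_dma_len() = 64
 * bytes yields
 *
 *	protgrp_blks  = 64 / 8	 =  8 blocks
 *	protgrp_bytes =  8 * 512 = 4096 data bytes
 *
 * If that PDE7 were to start at addrLow = 0xff8, only 0x1000 - 0xff8 = 8
 * bytes fit before the 4K boundary, so the group is trimmed to one block
 * (512 data bytes), protgroup_offset becomes 8, and the next loop pass
 * emits a fresh PDE5/PDE6/PDE7 set for the remainder with reftag advanced
 * by protgrp_blks.
 */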
1887  
1888  /**
1889   * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1890   * @phba: The Hba for which this call is being executed.
1891   * @sc: pointer to scsi command we're working on
1892   * @sgl: pointer to buffer list for protection groups
1893   * @datasegcnt: number of segments of data that have been dma mapped
1894   * @lpfc_cmd: lpfc scsi command object pointer.
1895   *
1896   * This function sets up SGL buffer list for protection groups of
1897   * type LPFC_PG_TYPE_NO_DIF
1898   *
1899   * This is usually used when the HBA is instructed to generate
1900   * DIFs and insert them into the data stream (or strip DIFs from
1901   * the incoming data stream).
1902   *
1903   * The buffer list consists of just one protection group described
1904   * below:
1905   *                                +-------------------------+
1906   *   start of prot group  -->     |         DI_SEED         |
1907   *                                +-------------------------+
1908   *                                |         Data SGE        |
1909   *                                +-------------------------+
1910   *                                |more Data SGE's ... (opt)|
1911   *                                +-------------------------+
1912   *
1913   *
1914   * Note: Data s/g buffers have been dma mapped
1915   *
1916   * Returns the number of SGEs added to the SGL.
1917   **/
1918  static uint32_t
1919  lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1920  		struct sli4_sge *sgl, int datasegcnt,
1921  		struct lpfc_io_buf *lpfc_cmd)
1922  {
1923  	struct scatterlist *sgde = NULL; /* s/g data entry */
1924  	struct sli4_sge_diseed *diseed = NULL;
1925  	dma_addr_t physaddr;
1926  	int i = 0, status;
1927  	uint32_t reftag, num_sge = 0;
1928  	uint8_t txop, rxop;
1929  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1930  	uint32_t rc;
1931  #endif
1932  	uint32_t checking = 1;
1933  	uint32_t dma_len;
1934  	uint32_t dma_offset = 0;
1935  	struct sli4_hybrid_sgl *sgl_xtra = NULL;
1936  	int j;
1937  	bool lsp_just_set = false;
1938  
1939  	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1940  	if (status)
1941  		goto out;
1942  
1943  	/* extract some info from the scsi command for pde*/
1944  	reftag = scsi_prot_ref_tag(sc);
1945  
1946  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1947  	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1948  	if (rc) {
1949  		if (rc & BG_ERR_SWAP)
1950  			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1951  		if (rc & BG_ERR_CHECK)
1952  			checking = 0;
1953  	}
1954  #endif
1955  
1956  	/* setup DISEED with what we have */
1957  	diseed = (struct sli4_sge_diseed *) sgl;
1958  	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
1959  	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
1960  
1961  	/* Endianness conversion if necessary */
1962  	diseed->ref_tag = cpu_to_le32(reftag);
1963  	diseed->ref_tag_tran = diseed->ref_tag;
1964  
1965  	/*
1966  	 * We only need to check the data on READs; for WRITEs,
1967  	 * protection data is automatically generated, not checked.
1968  	 */
1969  	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1970  		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1971  			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
1972  		else
1973  			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
1974  
1975  		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1976  			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
1977  		else
1978  			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
1979  	}
1980  
1981  	/* setup DISEED with the rest of the info */
1982  	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
1983  	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
1984  
1985  	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
1986  	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
1987  
1988  	/* Endianness conversion if necessary for DISEED */
1989  	diseed->word2 = cpu_to_le32(diseed->word2);
1990  	diseed->word3 = cpu_to_le32(diseed->word3);
1991  
1992  	/* advance bpl and increment sge count */
1993  	num_sge++;
1994  	sgl++;
1995  
1996  	/* assumption: caller has already run dma_map_sg on command data */
1997  	sgde = scsi_sglist(sc);
1998  	j = 3;
1999  	for (i = 0; i < datasegcnt; i++) {
2000  		/* clear it */
2001  		sgl->word2 = 0;
2002  
2003  		/* do we need to expand the segment */
2004  		if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2005  		    ((datasegcnt - 1) != i)) {
2006  			/* set LSP type */
2007  			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2008  
2009  			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2010  
2011  			if (unlikely(!sgl_xtra)) {
2012  				lpfc_cmd->seg_cnt = 0;
2013  				return 0;
2014  			}
2015  			sgl->addr_lo = cpu_to_le32(putPaddrLow(
2016  						sgl_xtra->dma_phys_sgl));
2017  			sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2018  						sgl_xtra->dma_phys_sgl));
2019  
2020  		} else {
2021  			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2022  		}
2023  
2024  		if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2025  			if ((datasegcnt - 1) == i)
2026  				bf_set(lpfc_sli4_sge_last, sgl, 1);
2027  			physaddr = sg_dma_address(sgde);
2028  			dma_len = sg_dma_len(sgde);
2029  			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2030  			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2031  
2032  			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2033  			sgl->word2 = cpu_to_le32(sgl->word2);
2034  			sgl->sge_len = cpu_to_le32(dma_len);
2035  
2036  			dma_offset += dma_len;
2037  			sgde = sg_next(sgde);
2038  
2039  			sgl++;
2040  			num_sge++;
2041  			lsp_just_set = false;
2042  
2043  		} else {
2044  			sgl->word2 = cpu_to_le32(sgl->word2);
2045  			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2046  
2047  			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2048  			i = i - 1;
2049  
2050  			lsp_just_set = true;
2051  		}
2052  
2053  		j++;
2054  
2055  	}
2056  
2057  out:
2058  	return num_sge;
2059  }
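/*
 * Illustrative note on the LSP handling above (border_sge_num == 64 is an
 * assumed value): j starts at 3 to account for the FCP_CMND, FCP_RSP and
 * DISEED entries already consumed in the SGL page.  When the next data SGE
 * would occupy the border slot of the page (!((j + 1) % border_sge_num))
 * and more segments remain, that slot is written as an LSP chaining to a
 * fresh page from lpfc_get_sgl_per_hdwq(), and i is rewound (i = i - 1) so
 * the same data segment is emitted at the top of the new page.
 */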
2060  
2061  /**
2062   * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2063   * @phba: The Hba for which this call is being executed.
2064   * @sc: pointer to scsi command we're working on
2065   * @sgl: pointer to buffer list for protection groups
2066   * @datacnt: number of segments of data that have been dma mapped
2067   * @protcnt: number of segments of protection data that have been dma mapped
2068   * @lpfc_cmd: lpfc scsi command object pointer.
2069   *
2070   * This function sets up SGL buffer list for protection groups of
2071   * type LPFC_PG_TYPE_DIF
2072   *
2073   * This is usually used when DIFs are in their own buffers,
2074   * separate from the data. The HBA can then be instructed
2075   * to place the DIFs in the outgoing stream.  For read operations,
2076   * the HBA can extract the DIFs and place them in the DIF buffers.
2077   *
2078   * The buffer list for this type consists of one or more of the
2079   * protection groups described below:
2080   *                                    +-------------------------+
2081   *   start of first prot group  -->   |         DISEED          |
2082   *                                    +-------------------------+
2083   *                                    |      DIF (Prot SGE)     |
2084   *                                    +-------------------------+
2085   *                                    |        Data SGE         |
2086   *                                    +-------------------------+
2087   *                                    |more Data SGE's ... (opt)|
2088   *                                    +-------------------------+
2089   *   start of new  prot group  -->    |         DISEED          |
2090   *                                    +-------------------------+
2091   *                                    |          ...            |
2092   *                                    +-------------------------+
2093   *
2094   * Note: It is assumed that both data and protection s/g buffers have been
2095   *       mapped for DMA
2096   *
2097   * Returns the number of SGEs added to the SGL.
2098   **/
2099  static uint32_t
2100  lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2101  		struct sli4_sge *sgl, int datacnt, int protcnt,
2102  		struct lpfc_io_buf *lpfc_cmd)
2103  {
2104  	struct scatterlist *sgde = NULL; /* s/g data entry */
2105  	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2106  	struct sli4_sge_diseed *diseed = NULL;
2107  	dma_addr_t dataphysaddr, protphysaddr;
2108  	unsigned short curr_prot = 0;
2109  	unsigned int split_offset;
2110  	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2111  	unsigned int protgrp_blks, protgrp_bytes;
2112  	unsigned int remainder, subtotal;
2113  	int status;
2114  	unsigned char pgdone = 0, alldone = 0;
2115  	unsigned blksize;
2116  	uint32_t reftag;
2117  	uint8_t txop, rxop;
2118  	uint32_t dma_len;
2119  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2120  	uint32_t rc;
2121  #endif
2122  	uint32_t checking = 1;
2123  	uint32_t dma_offset = 0, num_sge = 0;
2124  	int j = 2;
2125  	struct sli4_hybrid_sgl *sgl_xtra = NULL;
2126  
2127  	sgpe = scsi_prot_sglist(sc);
2128  	sgde = scsi_sglist(sc);
2129  
2130  	if (!sgpe || !sgde) {
2131  		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2132  				"9082 Invalid s/g entry: data=x%px prot=x%px\n",
2133  				sgpe, sgde);
2134  		return 0;
2135  	}
2136  
2137  	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2138  	if (status)
2139  		goto out;
2140  
2141  	/* extract some info from the scsi command */
2142  	blksize = scsi_prot_interval(sc);
2143  	reftag = scsi_prot_ref_tag(sc);
2144  
2145  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2146  	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2147  	if (rc) {
2148  		if (rc & BG_ERR_SWAP)
2149  			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2150  		if (rc & BG_ERR_CHECK)
2151  			checking = 0;
2152  	}
2153  #endif
2154  
2155  	split_offset = 0;
2156  	do {
2157  		/* Check to see if we ran out of space */
2158  		if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2159  		    !(phba->cfg_xpsgl))
2160  			return num_sge + 3;
2161  
2162  		/* DISEED and DIF have to be together */
2163  		if (!((j + 1) % phba->border_sge_num) ||
2164  		    !((j + 2) % phba->border_sge_num) ||
2165  		    !((j + 3) % phba->border_sge_num)) {
2166  			sgl->word2 = 0;
2167  
2168  			/* set LSP type */
2169  			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2170  
2171  			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2172  
2173  			if (unlikely(!sgl_xtra)) {
2174  				goto out;
2175  			} else {
2176  				sgl->addr_lo = cpu_to_le32(putPaddrLow(
2177  						sgl_xtra->dma_phys_sgl));
2178  				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2179  						       sgl_xtra->dma_phys_sgl));
2180  			}
2181  
2182  			sgl->word2 = cpu_to_le32(sgl->word2);
2183  			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2184  
2185  			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2186  			j = 0;
2187  		}
2188  
2189  		/* setup DISEED with what we have */
2190  		diseed = (struct sli4_sge_diseed *) sgl;
2191  		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2192  		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2193  
2194  		/* Endianness conversion if necessary */
2195  		diseed->ref_tag = cpu_to_le32(reftag);
2196  		diseed->ref_tag_tran = diseed->ref_tag;
2197  
2198  		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
2199  			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2200  		} else {
2201  			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2202  			/*
2203  			 * When in this mode, the hardware will replace
2204  			 * the guard tag from the host with a
2205  			 * newly generated good CRC for the wire.
2206  			 * Switch to raw mode here to avoid this
2207  			 * behavior. What the host sends gets put on the wire.
2208  			 */
2209  			if (txop == BG_OP_IN_CRC_OUT_CRC) {
2210  				txop = BG_OP_RAW_MODE;
2211  				rxop = BG_OP_RAW_MODE;
2212  			}
2213  		}
2214  
2215  
2216  		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2217  			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2218  		else
2219  			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2220  
2221  		/* setup DISEED with the rest of the info */
2222  		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2223  		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2224  
2225  		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2226  		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2227  
2228  		/* Endianness conversion if necessary for DISEED */
2229  		diseed->word2 = cpu_to_le32(diseed->word2);
2230  		diseed->word3 = cpu_to_le32(diseed->word3);
2231  
2232  		/* advance sgl and increment bde count */
2233  		num_sge++;
2234  
2235  		sgl++;
2236  		j++;
2237  
2238  		/* setup the first BDE that points to protection buffer */
2239  		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2240  		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2241  
2242  		/* must be integer multiple of the DIF block length */
2243  		BUG_ON(protgroup_len % 8);
2244  
2245  		/* Now setup DIF SGE */
2246  		sgl->word2 = 0;
2247  		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2248  		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2249  		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2250  		sgl->word2 = cpu_to_le32(sgl->word2);
2251  		sgl->sge_len = 0;
2252  
2253  		protgrp_blks = protgroup_len / 8;
2254  		protgrp_bytes = protgrp_blks * blksize;
2255  
2256  		/* check if DIF SGE is crossing the 4K boundary; if so split */
2257  		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2258  			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2259  			protgroup_offset += protgroup_remainder;
2260  			protgrp_blks = protgroup_remainder / 8;
2261  			protgrp_bytes = protgrp_blks * blksize;
2262  		} else {
2263  			protgroup_offset = 0;
2264  			curr_prot++;
2265  		}
2266  
2267  		num_sge++;
2268  
2269  		/* setup SGE's for data blocks associated with DIF data */
2270  		pgdone = 0;
2271  		subtotal = 0; /* total bytes processed for current prot grp */
2272  
2273  		sgl++;
2274  		j++;
2275  
2276  		while (!pgdone) {
2277  			/* Check to see if we ran out of space */
2278  			if ((num_sge >= phba->cfg_total_seg_cnt) &&
2279  			    !phba->cfg_xpsgl)
2280  				return num_sge + 1;
2281  
2282  			if (!sgde) {
2283  				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2284  					"9086 BLKGRD:%s Invalid data segment\n",
2285  						__func__);
2286  				return 0;
2287  			}
2288  
2289  			if (!((j + 1) % phba->border_sge_num)) {
2290  				sgl->word2 = 0;
2291  
2292  				/* set LSP type */
2293  				bf_set(lpfc_sli4_sge_type, sgl,
2294  				       LPFC_SGE_TYPE_LSP);
2295  
2296  				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2297  								 lpfc_cmd);
2298  
2299  				if (unlikely(!sgl_xtra)) {
2300  					goto out;
2301  				} else {
2302  					sgl->addr_lo = cpu_to_le32(
2303  					  putPaddrLow(sgl_xtra->dma_phys_sgl));
2304  					sgl->addr_hi = cpu_to_le32(
2305  					  putPaddrHigh(sgl_xtra->dma_phys_sgl));
2306  				}
2307  
2308  				sgl->word2 = cpu_to_le32(sgl->word2);
2309  				sgl->sge_len = cpu_to_le32(
2310  						     phba->cfg_sg_dma_buf_size);
2311  
2312  				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2313  			} else {
2314  				dataphysaddr = sg_dma_address(sgde) +
2315  								   split_offset;
2316  
2317  				remainder = sg_dma_len(sgde) - split_offset;
2318  
2319  				if ((subtotal + remainder) <= protgrp_bytes) {
2320  					/* we can use this whole buffer */
2321  					dma_len = remainder;
2322  					split_offset = 0;
2323  
2324  					if ((subtotal + remainder) ==
2325  								  protgrp_bytes)
2326  						pgdone = 1;
2327  				} else {
2328  					/* must split this buffer with next
2329  					 * prot grp
2330  					 */
2331  					dma_len = protgrp_bytes - subtotal;
2332  					split_offset += dma_len;
2333  				}
2334  
2335  				subtotal += dma_len;
2336  
2337  				sgl->word2 = 0;
2338  				sgl->addr_lo = cpu_to_le32(putPaddrLow(
2339  								 dataphysaddr));
2340  				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2341  								 dataphysaddr));
2342  				bf_set(lpfc_sli4_sge_last, sgl, 0);
2343  				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2344  				bf_set(lpfc_sli4_sge_type, sgl,
2345  				       LPFC_SGE_TYPE_DATA);
2346  
2347  				sgl->sge_len = cpu_to_le32(dma_len);
2348  				dma_offset += dma_len;
2349  
2350  				num_sge++;
2351  
2352  				if (split_offset) {
2353  					sgl++;
2354  					j++;
2355  					break;
2356  				}
2357  
2358  				/* Move to the next s/g segment if possible */
2359  				sgde = sg_next(sgde);
2360  
2361  				sgl++;
2362  			}
2363  
2364  			j++;
2365  		}
2366  
2367  		if (protgroup_offset) {
2368  			/* update the reference tag */
2369  			reftag += protgrp_blks;
2370  			continue;
2371  		}
2372  
2373  		/* are we done ? */
2374  		if (curr_prot == protcnt) {
2375  			/* mark the last SGL */
2376  			sgl--;
2377  			bf_set(lpfc_sli4_sge_last, sgl, 1);
2378  			alldone = 1;
2379  		} else if (curr_prot < protcnt) {
2380  			/* advance to next prot buffer */
2381  			sgpe = sg_next(sgpe);
2382  
2383  			/* update the reference tag */
2384  			reftag += protgrp_blks;
2385  		} else {
2386  			/* if we're here, we have a bug */
2387  			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2388  					"9085 BLKGRD: bug in %s\n", __func__);
2389  		}
2390  
2391  	} while (!alldone);
2392  
2393  out:
2394  
2395  	return num_sge;
2396  }
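/*
 * Illustrative note on the page-border test at the top of the loop above:
 * the (j + 1), (j + 2) and (j + 3) % border_sge_num checks ask whether the
 * DISEED, its DIF SGE, or the first data SGE of the next protection group
 * would run into the border of the current SGL page.  If so, an LSP chain
 * entry is emitted first, so the DISEED and DIF SGE, which have to be
 * together, both start in the fresh page returned by
 * lpfc_get_sgl_per_hdwq().
 */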
2397  
2398  /**
2399   * lpfc_prot_group_type - Get protection group type of SCSI command
2400   * @phba: The Hba for which this call is being executed.
2401   * @sc: pointer to scsi command we're working on
2402   *
2403   * Given a SCSI command that supports DIF, determine composition of protection
2404   * groups involved in setting up buffer lists
2405   *
2406   * Returns: Protection group type (with or without DIF)
2407   *
2408   **/
2409  static int
2410  lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2411  {
2412  	int ret = LPFC_PG_TYPE_INVALID;
2413  	unsigned char op = scsi_get_prot_op(sc);
2414  
2415  	switch (op) {
2416  	case SCSI_PROT_READ_STRIP:
2417  	case SCSI_PROT_WRITE_INSERT:
2418  		ret = LPFC_PG_TYPE_NO_DIF;
2419  		break;
2420  	case SCSI_PROT_READ_INSERT:
2421  	case SCSI_PROT_WRITE_STRIP:
2422  	case SCSI_PROT_READ_PASS:
2423  	case SCSI_PROT_WRITE_PASS:
2424  		ret = LPFC_PG_TYPE_DIF_BUF;
2425  		break;
2426  	default:
2427  		if (phba)
2428  			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2429  					"9021 Unsupported protection op:%d\n",
2430  					op);
2431  		break;
2432  	}
2433  	return ret;
2434  }
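/*
 * Illustrative reading of the switch above: READ_STRIP/WRITE_INSERT keep
 * protection data on the wire only, so no host protection buffers are
 * mapped and a single PDE5/PDE6 (or DISEED) group covers all the data
 * (LPFC_PG_TYPE_NO_DIF).  The remaining ops hand the driver a separate
 * protection scatterlist, which requires one group per protection segment
 * (LPFC_PG_TYPE_DIF_BUF).
 */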
2435  
2436  /**
2437   * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2438   * @phba: The Hba for which this call is being executed.
2439   * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2440   *
2441   * Adjust the data length to account for how much data
2442   * is actually on the wire.
2443   *
2444   * returns the adjusted data length
2445   **/
2446  static int
2447  lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2448  		       struct lpfc_io_buf *lpfc_cmd)
2449  {
2450  	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2451  	int fcpdl;
2452  
2453  	fcpdl = scsi_bufflen(sc);
2454  
2455  	/* Check if there is protection data on the wire */
2456  	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2457  		/* Read check for protection data */
2458  		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2459  			return fcpdl;
2460  
2461  	} else {
2462  		/* Write check for protection data */
2463  		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2464  			return fcpdl;
2465  	}
2466  
2467  	/*
2468  	 * If we are in DIF Type 1 mode every data block has an 8 byte
2469  	 * DIF (trailer) attached to it. We must adjust the FCP data length
2470  	 * to account for the protection data.
2471  	 */
2472  	fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
2473  
2474  	return fcpdl;
2475  }
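/*
 * Worked example (illustrative): a 32KB READ_PASS of 512-byte blocks
 * carries a DIF tuple per block on the wire, so
 *
 *	fcpdl = 32768 + (32768 / 512) * 8 = 32768 + 512 = 33280
 *
 * whereas READ_INSERT and WRITE_STRIP return scsi_bufflen() unchanged
 * because no protection data crosses the wire in those modes.
 */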
2476  
2477  /**
2478   * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2479   * @phba: The Hba for which this call is being executed.
2480   * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2481   *
2482   * This is the protection/DIF aware version of
2483   * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2484   * two functions eventually, but for now, it's here.
2485   * RETURNS 0 - SUCCESS,
2486   *         1 - Failed DMA map, retry.
2487   *         2 - Invalid scsi cmd or prot-type. Do not retry.
2488   **/
2489  static int
2490  lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2491  		struct lpfc_io_buf *lpfc_cmd)
2492  {
2493  	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2494  	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2495  	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2496  	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2497  	uint32_t num_bde = 0;
2498  	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2499  	int prot_group_type = 0;
2500  	int fcpdl;
2501  	int ret = 1;
2502  	struct lpfc_vport *vport = phba->pport;
2503  
2504  	/*
2505  	 * Start the lpfc command prep by bumping the bpl beyond the
2506  	 * fcp_cmnd and fcp_rsp regions to the first data bde entry.
2507  	 */
2508  	bpl += 2;
2509  	if (scsi_sg_count(scsi_cmnd)) {
2510  		/*
2511  		 * The driver stores the segment count returned from dma_map_sg
2512  		 * because this is a count of dma-mappings used to map the use_sg
2513  		 * pages.  They are not guaranteed to be the same for those
2514  		 * architectures that implement an IOMMU.
2515  		 */
2516  		datasegcnt = dma_map_sg(&phba->pcidev->dev,
2517  					scsi_sglist(scsi_cmnd),
2518  					scsi_sg_count(scsi_cmnd), datadir);
2519  		if (unlikely(!datasegcnt))
2520  			return 1;
2521  
2522  		lpfc_cmd->seg_cnt = datasegcnt;
2523  
2524  		/* First check if data segment count from SCSI Layer is good */
2525  		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2526  			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2527  			ret = 2;
2528  			goto err;
2529  		}
2530  
2531  		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2532  
2533  		switch (prot_group_type) {
2534  		case LPFC_PG_TYPE_NO_DIF:
2535  
2536  			/* Here we need to add a PDE5 and PDE6 to the count */
2537  			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2538  				ret = 2;
2539  				goto err;
2540  			}
2541  
2542  			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2543  					datasegcnt);
2544  			/* we should have 2 or more entries in buffer list */
2545  			if (num_bde < 2) {
2546  				ret = 2;
2547  				goto err;
2548  			}
2549  			break;
2550  
2551  		case LPFC_PG_TYPE_DIF_BUF:
2552  			/*
2553  			 * This type indicates that protection buffers are
2554  			 * passed to the driver, so that needs to be prepared
2555  			 * for DMA
2556  			 */
2557  			protsegcnt = dma_map_sg(&phba->pcidev->dev,
2558  					scsi_prot_sglist(scsi_cmnd),
2559  					scsi_prot_sg_count(scsi_cmnd), datadir);
2560  			if (unlikely(!protsegcnt)) {
2561  				scsi_dma_unmap(scsi_cmnd);
2562  				return 1;
2563  			}
2564  
2565  			lpfc_cmd->prot_seg_cnt = protsegcnt;
2566  
2567  			/*
2568  			 * There is a minimum of 4 BPLs used for every
2569  			 * protection data segment.
2570  			 */
2571  			if ((lpfc_cmd->prot_seg_cnt * 4) >
2572  			    (phba->cfg_total_seg_cnt - 2)) {
2573  				ret = 2;
2574  				goto err;
2575  			}
2576  
2577  			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2578  					datasegcnt, protsegcnt);
2579  			/* we should have 3 or more entries in buffer list */
2580  			if ((num_bde < 3) ||
2581  			    (num_bde > phba->cfg_total_seg_cnt)) {
2582  				ret = 2;
2583  				goto err;
2584  			}
2585  			break;
2586  
2587  		case LPFC_PG_TYPE_INVALID:
2588  		default:
2589  			scsi_dma_unmap(scsi_cmnd);
2590  			lpfc_cmd->seg_cnt = 0;
2591  
2592  			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2593  					"9022 Unexpected protection group %i\n",
2594  					prot_group_type);
2595  			return 2;
2596  		}
2597  	}
2598  
2599  	/*
2600  	 * Finish initializing those IOCB fields that are dependent on the
2601  	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2602  	 * reinitialized since all iocb memory resources are used many times
2603  	 * for transmit, receive, and continuation bpl's.
2604  	 */
2605  	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2606  	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2607  	iocb_cmd->ulpBdeCount = 1;
2608  	iocb_cmd->ulpLe = 1;
2609  
2610  	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2611  	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
2612  
2613  	/*
2614  	 * Due to difference in data length between DIF/non-DIF paths,
2615  	 * we need to set word 4 of IOCB here
2616  	 */
2617  	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2618  
2619  	/*
2620  	 * For First burst, we may need to adjust the initial transfer
2621  	 * length for DIF
2622  	 */
2623  	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2624  	    (fcpdl < vport->cfg_first_burst_size))
2625  		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2626  
2627  	return 0;
2628  err:
2629  	if (lpfc_cmd->seg_cnt)
2630  		scsi_dma_unmap(scsi_cmnd);
2631  	if (lpfc_cmd->prot_seg_cnt)
2632  		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2633  			     scsi_prot_sg_count(scsi_cmnd),
2634  			     scsi_cmnd->sc_data_direction);
2635  
2636  	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2637  			"9023 Cannot setup S/G List for HBA "
2638  			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2639  			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2640  			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2641  			prot_group_type, num_bde);
2642  
2643  	lpfc_cmd->seg_cnt = 0;
2644  	lpfc_cmd->prot_seg_cnt = 0;
2645  	return ret;
2646  }
2647  
2648  /*
2649   * This function calculates the T10 DIF guard tag
2650   * on the specified data using the CRC algorithm
2651   * provided by crc_t10dif().
2652   */
2653  static uint16_t
2654  lpfc_bg_crc(uint8_t *data, int count)
2655  {
2656  	uint16_t crc = 0;
2657  	uint16_t x;
2658  
2659  	crc = crc_t10dif(data, count);
2660  	x = cpu_to_be16(crc);
2661  	return x;
2662  }
2663  
2664  /*
2665   * This function calculates the T10 DIF guard tag
2666   * on the specified data using the IP checksum algorithm
2667   * provided by ip_compute_csum().
2668   */
2669  static uint16_t
2670  lpfc_bg_csum(uint8_t *data, int count)
2671  {
2672  	uint16_t ret;
2673  
2674  	ret = ip_compute_csum(data, count);
2675  	return ret;
2676  }
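/*
 * Illustrative use of the two guard helpers above, mirroring the
 * chk_guard path in lpfc_calc_bg_err() for one 512-byte block:
 *
 *	if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
 *		sum = lpfc_bg_csum(data_src, 512);
 *	else
 *		sum = lpfc_bg_crc(data_src, 512);
 *	if (sum != src->guard_tag)
 *		err_type = BGS_GUARD_ERR_MASK;
 */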
2677  
2678  /*
2679   * This function examines the protection data to try to determine
2680   * what type of T10-DIF error occurred.
2681   */
2682  static void
2683  lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2684  {
2685  	struct scatterlist *sgpe; /* s/g prot entry */
2686  	struct scatterlist *sgde; /* s/g data entry */
2687  	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2688  	struct scsi_dif_tuple *src = NULL;
2689  	uint8_t *data_src = NULL;
2690  	uint16_t guard_tag;
2691  	uint16_t start_app_tag, app_tag;
2692  	uint32_t start_ref_tag, ref_tag;
2693  	int prot, protsegcnt;
2694  	int err_type, len, data_len;
2695  	int chk_ref, chk_app, chk_guard;
2696  	uint16_t sum;
2697  	unsigned blksize;
2698  
2699  	err_type = BGS_GUARD_ERR_MASK;
2700  	sum = 0;
2701  	guard_tag = 0;
2702  
2703  	/* First check to see if there is protection data to examine */
2704  	prot = scsi_get_prot_op(cmd);
2705  	if ((prot == SCSI_PROT_READ_STRIP) ||
2706  	    (prot == SCSI_PROT_WRITE_INSERT) ||
2707  	    (prot == SCSI_PROT_NORMAL))
2708  		goto out;
2709  
2710  	/* Currently the driver just supports ref_tag and guard_tag checking */
2711  	chk_ref = 1;
2712  	chk_app = 0;
2713  	chk_guard = 0;
2714  
2715  	/* Setup a ptr to the protection data provided by the SCSI host */
2716  	sgpe = scsi_prot_sglist(cmd);
2717  	protsegcnt = lpfc_cmd->prot_seg_cnt;
2718  
2719  	if (sgpe && protsegcnt) {
2720  
2721  		/*
2722  		 * We will only try to verify the guard tag if the segment
2723  		 * data length is a multiple of the blksize.
2724  		 */
2725  		sgde = scsi_sglist(cmd);
2726  		blksize = scsi_prot_interval(cmd);
2727  		data_src = (uint8_t *)sg_virt(sgde);
2728  		data_len = sg_dma_len(sgde);
2729  		if ((data_len & (blksize - 1)) == 0)
2730  			chk_guard = 1;
2731  
2732  		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2733  		start_ref_tag = scsi_prot_ref_tag(cmd);
2734  		start_app_tag = src->app_tag;
2735  		len = sg_dma_len(sgpe);
2736  		while (src && protsegcnt) {
2737  			while (len) {
2738  
2739  				/*
2740  				 * First check to see if a protection data
2741  				 * check is valid
2742  				 */
2743  				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2744  				    (src->app_tag == T10_PI_APP_ESCAPE)) {
2745  					start_ref_tag++;
2746  					goto skipit;
2747  				}
2748  
2749  				/* First Guard Tag checking */
2750  				if (chk_guard) {
2751  					guard_tag = src->guard_tag;
2752  					if (cmd->prot_flags
2753  					    & SCSI_PROT_IP_CHECKSUM)
2754  						sum = lpfc_bg_csum(data_src,
2755  								   blksize);
2756  					else
2757  						sum = lpfc_bg_crc(data_src,
2758  								  blksize);
2759  					if ((guard_tag != sum)) {
2760  						err_type = BGS_GUARD_ERR_MASK;
2761  						goto out;
2762  					}
2763  				}
2764  
2765  				/* Reference Tag checking */
2766  				ref_tag = be32_to_cpu(src->ref_tag);
2767  				if (chk_ref && (ref_tag != start_ref_tag)) {
2768  					err_type = BGS_REFTAG_ERR_MASK;
2769  					goto out;
2770  				}
2771  				start_ref_tag++;
2772  
2773  				/* App Tag checking */
2774  				app_tag = src->app_tag;
2775  				if (chk_app && (app_tag != start_app_tag)) {
2776  					err_type = BGS_APPTAG_ERR_MASK;
2777  					goto out;
2778  				}
2779  skipit:
2780  				len -= sizeof(struct scsi_dif_tuple);
2781  				if (len < 0)
2782  					len = 0;
2783  				src++;
2784  
2785  				data_src += blksize;
2786  				data_len -= blksize;
2787  
2788  				/*
2789  				 * Are we at the end of the Data segment?
2790  				 * The data segment is only used for Guard
2791  				 * tag checking.
2792  				 */
2793  				if (chk_guard && (data_len == 0)) {
2794  					chk_guard = 0;
2795  					sgde = sg_next(sgde);
2796  					if (!sgde)
2797  						goto out;
2798  
2799  					data_src = (uint8_t *)sg_virt(sgde);
2800  					data_len = sg_dma_len(sgde);
2801  					if ((data_len & (blksize - 1)) == 0)
2802  						chk_guard = 1;
2803  				}
2804  			}
2805  
2806  			/* Go to the next protection data segment */
2807  			sgpe = sg_next(sgpe);
2808  			if (sgpe) {
2809  				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2810  				len = sg_dma_len(sgpe);
2811  			} else {
2812  				src = NULL;
2813  			}
2814  			protsegcnt--;
2815  		}
2816  	}
2817  out:
2818  	if (err_type == BGS_GUARD_ERR_MASK) {
2819  		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2820  		set_host_byte(cmd, DID_ABORT);
2821  		phba->bg_guard_err_cnt++;
2822  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2823  				"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
2824  				scsi_prot_ref_tag(cmd),
2825  				sum, guard_tag);
2826  
2827  	} else if (err_type == BGS_REFTAG_ERR_MASK) {
2828  		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2829  		set_host_byte(cmd, DID_ABORT);
2830  
2831  		phba->bg_reftag_err_cnt++;
2832  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2833  				"9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
2834  				scsi_prot_ref_tag(cmd),
2835  				ref_tag, start_ref_tag);
2836  
2837  	} else if (err_type == BGS_APPTAG_ERR_MASK) {
2838  		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2839  		set_host_byte(cmd, DID_ABORT);
2840  
2841  		phba->bg_apptag_err_cnt++;
2842  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2843  				"9041 BLKGRD: reftag %x app_tag err %x != %x\n",
2844  				scsi_prot_ref_tag(cmd),
2845  				app_tag, start_app_tag);
2846  	}
2847  }
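/*
 * Illustrative walk of the loop above (not driver code): for a two-block
 * READ_PASS starting at ref tag 0x100 with 512-byte blocks, the scan
 * compares
 *
 *	tuple 0: ref_tag 0x100, guard over data bytes    0..511
 *	tuple 1: ref_tag 0x101, guard over data bytes  512..1023
 *
 * advancing src by one 8-byte scsi_dif_tuple and data_src by blksize per
 * block.  Tuples carrying T10_PI_REF_ESCAPE or T10_PI_APP_ESCAPE are
 * skipped, since those escape values mark blocks written with checking
 * disabled.
 */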
2848  
2849  /*
2850   * This function checks for BlockGuard errors detected by
2851   * the HBA.  In case of errors, the ASC/ASCQ fields in the
2852   * sense buffer will be set accordingly, paired with
2853   * ILLEGAL_REQUEST to signal to the kernel that the HBA
2854   * detected corruption.
2855   *
2856   * Returns:
2857   *  0 - No error found
2858   *  1 - BlockGuard error found
2859   * -1 - Internal error (bad profile, ...etc)
2860   */
2861  static int
2862  lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2863  		  struct lpfc_iocbq *pIocbOut)
2864  {
2865  	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2866  	struct sli3_bg_fields *bgf;
2867  	int ret = 0;
2868  	struct lpfc_wcqe_complete *wcqe;
2869  	u32 status;
2870  	u32 bghm = 0;
2871  	u32 bgstat = 0;
2872  	u64 failing_sector = 0;
2873  
2874  	if (phba->sli_rev == LPFC_SLI_REV4) {
2875  		wcqe = &pIocbOut->wcqe_cmpl;
2876  		status = bf_get(lpfc_wcqe_c_status, wcqe);
2877  
2878  		if (status == CQE_STATUS_DI_ERROR) {
2879  			/* Guard Check failed */
2880  			if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
2881  				bgstat |= BGS_GUARD_ERR_MASK;
2882  
2883  			/* AppTag Check failed */
2884  			if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
2885  				bgstat |= BGS_APPTAG_ERR_MASK;
2886  
2887  			/* RefTag Check failed */
2888  			if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
2889  				bgstat |= BGS_REFTAG_ERR_MASK;
2890  
2891  			/* Check to see if there was any good data before the
2892  			 * error
2893  			 */
2894  			if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2895  				bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2896  				bghm = wcqe->total_data_placed;
2897  			}
2898  
2899  			/*
2900  			 * Set ALL the error bits to indicate we don't know what
2901  			 * type of error it is.
2902  			 */
2903  			if (!bgstat)
2904  				bgstat |= (BGS_REFTAG_ERR_MASK |
2905  					   BGS_APPTAG_ERR_MASK |
2906  					   BGS_GUARD_ERR_MASK);
2907  		}
2908  
2909  	} else {
2910  		bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2911  		bghm = bgf->bghm;
2912  		bgstat = bgf->bgstat;
2913  	}
2914  
2915  	if (lpfc_bgs_get_invalid_prof(bgstat)) {
2916  		cmd->result = DID_ERROR << 16;
2917  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2918  				"9072 BLKGRD: Invalid BG Profile in cmd "
2919  				"0x%x reftag 0x%x blk cnt 0x%x "
2920  				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2921  				scsi_prot_ref_tag(cmd),
2922  				scsi_logical_block_count(cmd), bgstat, bghm);
2923  		ret = (-1);
2924  		goto out;
2925  	}
2926  
2927  	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2928  		cmd->result = DID_ERROR << 16;
2929  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2930  				"9073 BLKGRD: Invalid BG PDIF Block in cmd "
2931  				"0x%x reftag 0x%x blk cnt 0x%x "
2932  				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2933  				scsi_prot_ref_tag(cmd),
2934  				scsi_logical_block_count(cmd), bgstat, bghm);
2935  		ret = (-1);
2936  		goto out;
2937  	}
2938  
2939  	if (lpfc_bgs_get_guard_err(bgstat)) {
2940  		ret = 1;
2941  		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2942  		set_host_byte(cmd, DID_ABORT);
2943  		phba->bg_guard_err_cnt++;
2944  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2945  				"9055 BLKGRD: Guard Tag error in cmd "
2946  				"0x%x reftag 0x%x blk cnt 0x%x "
2947  				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2948  				scsi_prot_ref_tag(cmd),
2949  				scsi_logical_block_count(cmd), bgstat, bghm);
2950  	}
2951  
2952  	if (lpfc_bgs_get_reftag_err(bgstat)) {
2953  		ret = 1;
2954  		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2955  		set_host_byte(cmd, DID_ABORT);
2956  		phba->bg_reftag_err_cnt++;
2957  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2958  				"9056 BLKGRD: Ref Tag error in cmd "
2959  				"0x%x reftag 0x%x blk cnt 0x%x "
2960  				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2961  				scsi_prot_ref_tag(cmd),
2962  				scsi_logical_block_count(cmd), bgstat, bghm);
2963  	}
2964  
2965  	if (lpfc_bgs_get_apptag_err(bgstat)) {
2966  		ret = 1;
2967  		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2968  		set_host_byte(cmd, DID_ABORT);
2969  		phba->bg_apptag_err_cnt++;
2970  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2971  				"9061 BLKGRD: App Tag error in cmd "
2972  				"0x%x reftag 0x%x blk cnt 0x%x "
2973  				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2974  				scsi_prot_ref_tag(cmd),
2975  				scsi_logical_block_count(cmd), bgstat, bghm);
2976  	}
2977  
2978  	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2979  		/*
2980  		 * setup sense data descriptor 0 per SPC-4 as an information
2981  		 * field, and put the failing LBA in it.
2982  		 * This code assumes there was also a guard/app/ref tag error
2983  		 * indication.
2984  		 */
2985  		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
2986  		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
2987  		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
2988  		cmd->sense_buffer[10] = 0x80; /* Validity bit */
2989  
2990  		/* bghm is an "on the wire", FC frame based byte count */
2991  		switch (scsi_get_prot_op(cmd)) {
2992  		case SCSI_PROT_READ_INSERT:
2993  		case SCSI_PROT_WRITE_STRIP:
2994  			bghm /= cmd->device->sector_size;
2995  			break;
2996  		case SCSI_PROT_READ_STRIP:
2997  		case SCSI_PROT_WRITE_INSERT:
2998  		case SCSI_PROT_READ_PASS:
2999  		case SCSI_PROT_WRITE_PASS:
3000  			bghm /= (cmd->device->sector_size +
3001  				sizeof(struct scsi_dif_tuple));
3002  			break;
3003  		}
3004  
3005  		failing_sector = scsi_get_lba(cmd);
3006  		failing_sector += bghm;
3007  
3008  		/* Descriptor Information */
3009  		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3010  	}
3011  
3012  	if (!ret) {
3013  		/* No error was reported - problem in FW? */
3014  		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3015  				"9057 BLKGRD: Unknown error in cmd "
3016  				"0x%x reftag 0x%x blk cnt 0x%x "
3017  				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3018  				scsi_prot_ref_tag(cmd),
3019  				scsi_logical_block_count(cmd), bgstat, bghm);
3020  
3021  		/* Calculate what type of error it was */
3022  		lpfc_calc_bg_err(phba, lpfc_cmd);
3023  	}
3024  out:
3025  	return ret;
3026  }
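/*
 * Worked example of the failing-LBA math above (illustrative): on a
 * READ_PASS with 512-byte sectors, each wire block carries the sector
 * plus an 8-byte tuple, so bghm = 8320 good bytes placed gives
 *
 *	8320 / (512 + 8) = 16 blocks
 *	failing_sector = scsi_get_lba(cmd) + 16
 *
 * which put_unaligned_be64() then stores in sense_buffer[12..19] as the
 * SPC-4 information descriptor.
 */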
3027  
3028  /**
3029   * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3030   * @phba: The Hba for which this call is being executed.
3031   * @lpfc_cmd: The scsi buffer which is going to be mapped.
3032   *
3033   * This routine does the PCI DMA mapping for the scatter-gather list of the
3034   * scsi_cmnd field of @lpfc_cmd for a device with the SLI-4 interface spec.
3035   *
3036   * Return codes:
3037   *	2 - Error - Do not retry
3038   *	1 - Error - Retry
3039   *	0 - Success
3040   **/
3041  static int
3042  lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3043  {
3044  	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3045  	struct scatterlist *sgel = NULL;
3046  	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3047  	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3048  	struct sli4_sge *first_data_sgl;
3049  	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3050  	struct lpfc_vport *vport = phba->pport;
3051  	union lpfc_wqe128 *wqe = &pwqeq->wqe;
3052  	dma_addr_t physaddr;
3053  	uint32_t dma_len;
3054  	uint32_t dma_offset = 0;
3055  	int nseg, i, j;
3056  	struct ulp_bde64 *bde;
3057  	bool lsp_just_set = false;
3058  	struct sli4_hybrid_sgl *sgl_xtra = NULL;
3059  
3060  	/*
3061  	 * There are three possibilities here - use scatter-gather segment, use
3062  	 * the single mapping, or neither.  Start the lpfc command prep by
3063  	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3064  	 * data bde entry.
3065  	 */
3066  	if (scsi_sg_count(scsi_cmnd)) {
3067  		/*
3068  		 * The driver stores the segment count returned from dma_map_sg
3069  		 * because this is a count of dma-mappings used to map the use_sg
3070  		 * pages.  They are not guaranteed to be the same for those
3071  		 * architectures that implement an IOMMU.
3072  		 */
3073  
3074  		nseg = scsi_dma_map(scsi_cmnd);
3075  		if (unlikely(nseg <= 0))
3076  			return 1;
3077  		sgl += 1;
3078  		/* clear the last flag in the fcp_rsp map entry */
3079  		sgl->word2 = le32_to_cpu(sgl->word2);
3080  		bf_set(lpfc_sli4_sge_last, sgl, 0);
3081  		sgl->word2 = cpu_to_le32(sgl->word2);
3082  		sgl += 1;
3083  		first_data_sgl = sgl;
3084  		lpfc_cmd->seg_cnt = nseg;
3085  		if (!phba->cfg_xpsgl &&
3086  		    lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3087  			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3088  					"9074 BLKGRD:"
3089  					" %s: Too many sg segments from "
3090  					"dma_map_sg.  Config %d, seg_cnt %d\n",
3091  					__func__, phba->cfg_sg_seg_cnt,
3092  					lpfc_cmd->seg_cnt);
3093  			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3094  			lpfc_cmd->seg_cnt = 0;
3095  			scsi_dma_unmap(scsi_cmnd);
3096  			return 2;
3097  		}
3098  
3099  		/*
3100  		 * The driver established a maximum scatter-gather segment count
3101  		 * during probe that limits the number of sg elements in any
3102  		 * single scsi command.  Just run through the seg_cnt and format
3103  		 * the sge's.
3104  		 * When using SLI-3 the driver will try to fit all the BDEs into
3105  		 * the IOCB. If it can't then the BDEs get added to a BPL as it
3106  		 * does for SLI-2 mode.
3107  		 */
3108  
3109  		/* for tracking segment boundaries */
3110  		sgel = scsi_sglist(scsi_cmnd);
3111  		j = 2;
3112  		for (i = 0; i < nseg; i++) {
3113  			sgl->word2 = 0;
3114  			if (nseg == 1) {
3115  				bf_set(lpfc_sli4_sge_last, sgl, 1);
3116  				bf_set(lpfc_sli4_sge_type, sgl,
3117  				       LPFC_SGE_TYPE_DATA);
3118  			} else {
3119  				bf_set(lpfc_sli4_sge_last, sgl, 0);
3120  
3121  				/* do we need to expand the segment */
3122  				if (!lsp_just_set &&
3123  				    !((j + 1) % phba->border_sge_num) &&
3124  				    ((nseg - 1) != i)) {
3125  					/* set LSP type */
3126  					bf_set(lpfc_sli4_sge_type, sgl,
3127  					       LPFC_SGE_TYPE_LSP);
3128  
3129  					sgl_xtra = lpfc_get_sgl_per_hdwq(
3130  							phba, lpfc_cmd);
3131  
3132  					if (unlikely(!sgl_xtra)) {
3133  						lpfc_cmd->seg_cnt = 0;
3134  						scsi_dma_unmap(scsi_cmnd);
3135  						return 1;
3136  					}
3137  					sgl->addr_lo = cpu_to_le32(putPaddrLow(
3138  						       sgl_xtra->dma_phys_sgl));
3139  					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3140  						       sgl_xtra->dma_phys_sgl));
3141  
3142  				} else {
3143  					bf_set(lpfc_sli4_sge_type, sgl,
3144  					       LPFC_SGE_TYPE_DATA);
3145  				}
3146  			}
3147  
3148  			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3149  				     LPFC_SGE_TYPE_LSP)) {
3150  				if ((nseg - 1) == i)
3151  					bf_set(lpfc_sli4_sge_last, sgl, 1);
3152  
3153  				physaddr = sg_dma_address(sgel);
3154  				dma_len = sg_dma_len(sgel);
3155  				sgl->addr_lo = cpu_to_le32(putPaddrLow(
3156  							   physaddr));
3157  				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3158  							   physaddr));
3159  
3160  				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3161  				sgl->word2 = cpu_to_le32(sgl->word2);
3162  				sgl->sge_len = cpu_to_le32(dma_len);
3163  
3164  				dma_offset += dma_len;
3165  				sgel = sg_next(sgel);
3166  
3167  				sgl++;
3168  				lsp_just_set = false;
3169  
3170  			} else {
3171  				sgl->word2 = cpu_to_le32(sgl->word2);
3172  				sgl->sge_len = cpu_to_le32(
3173  						     phba->cfg_sg_dma_buf_size);
3174  
3175  				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3176  				i = i - 1;
3177  
3178  				lsp_just_set = true;
3179  			}
3180  
3181  			j++;
3182  		}
3183  
3184  		/* PBDE support for first data SGE only.
3185  		 * For FCoE, we key off Performance Hints.
3186  		 * For FC, we key off lpfc_enable_pbde.
3187  		 */
3188  		if (nseg == 1 &&
3189  		    ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3190  		     phba->cfg_enable_pbde)) {
3191  			/* Words 13-15 */
3192  			bde = (struct ulp_bde64 *)
3193  				&wqe->words[13];
3194  			bde->addrLow = first_data_sgl->addr_lo;
3195  			bde->addrHigh = first_data_sgl->addr_hi;
3196  			bde->tus.f.bdeSize =
3197  					le32_to_cpu(first_data_sgl->sge_len);
3198  			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3199  			bde->tus.w = cpu_to_le32(bde->tus.w);
3200  
3201  			/* Word 11 - set PBDE bit */
3202  			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3203  		} else {
3204  			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3205  			/* Word 11 - PBDE bit disabled by default template */
3206  		}
3207  	} else {
3208  		sgl += 1;
3209  		/* set the last flag in the fcp_rsp map entry */
3210  		sgl->word2 = le32_to_cpu(sgl->word2);
3211  		bf_set(lpfc_sli4_sge_last, sgl, 1);
3212  		sgl->word2 = cpu_to_le32(sgl->word2);
3213  
3214  		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3215  		    phba->cfg_enable_pbde) {
3216  			bde = (struct ulp_bde64 *)
3217  				&wqe->words[13];
3218  			memset(bde, 0, (sizeof(uint32_t) * 3));
3219  		}
3220  	}
3221  
3222  	/*
3223  	 * Finish initializing those IOCB fields that are dependent on the
3224  	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3225  	 * explicitly reinitialized since all iocb memory resources are
3226  	 * reused.
3227  	 */
3228  	if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN)
3229  		((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl =
3230  				cpu_to_be32(scsi_bufflen(scsi_cmnd));
3231  	else
3232  		fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3233  	/* Set first-burst provided it was successfully negotiated */
3234  	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
3235  	    vport->cfg_first_burst_size &&
3236  	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3237  		u32 init_len, total_len;
3238  
3239  		total_len = scsi_bufflen(scsi_cmnd);
3240  		init_len = min(total_len, vport->cfg_first_burst_size);
3241  
3242  		/* Word 4 & 5 */
3243  		wqe->fcp_iwrite.initial_xfer_len = init_len;
3244  		wqe->fcp_iwrite.total_xfer_len = total_len;
3245  	} else {
3246  		/* Word 4 */
3247  		wqe->fcp_iwrite.total_xfer_len =
3248  			be32_to_cpu(fcp_cmnd->fcpDl);
3249  	}
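	/*
	 * Example (values illustrative): a 1 MB write with
	 * cfg_first_burst_size of 65536 programs initial_xfer_len = 65536
	 * and total_xfer_len = 1048576; with first burst not negotiated,
	 * only the total length (fcpDl) is carried in word 4.
	 */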
3250  
3251  	/*
3252  	 * If the OAS driver feature is enabled and the lun is enabled for
3253  	 * OAS, set the oas iocb related flags.
3254  	 */
3255  	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3256  		scsi_cmnd->device->hostdata)->oas_enabled) {
3257  		lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3258  		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3259  			scsi_cmnd->device->hostdata)->priority;
3260  
3261  		/* Word 10 */
3262  		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3263  		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3264  
3265  		if (lpfc_cmd->cur_iocbq.priority)
3266  			bf_set(wqe_ccp, &wqe->generic.wqe_com,
3267  			       (lpfc_cmd->cur_iocbq.priority << 1));
3268  		else
3269  			bf_set(wqe_ccp, &wqe->generic.wqe_com,
3270  			       (phba->cfg_XLanePriority << 1));
3271  	}
3272  
3273  	return 0;
3274  }
3275  
3276  /**
3277   * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3278   * @phba: The Hba for which this call is being executed.
3279   * @lpfc_cmd: The scsi buffer which is going to be mapped.
3280   *
3281   * This is the protection/DIF aware version of
3282   * lpfc_scsi_prep_dma_buf().  The two routines may eventually be
3283   * combined, but for now they are kept separate.
3284   * Return codes:
3285   *	2 - Error - Do not retry
3286   *	1 - Error - Retry
3287   *	0 - Success
3288   **/
3289  static int
3290  lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3291  		struct lpfc_io_buf *lpfc_cmd)
3292  {
3293  	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3294  	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3295  	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3296  	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3297  	union lpfc_wqe128 *wqe = &pwqeq->wqe;
3298  	uint32_t num_sge = 0;
3299  	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3300  	int prot_group_type = 0;
3301  	int fcpdl;
3302  	int ret = 1;
3303  	struct lpfc_vport *vport = phba->pport;
3304  
3305  	/*
3306  	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3307  	 *  fcp_rsp regions to the first data sge entry
3308  	 */
3309  	if (scsi_sg_count(scsi_cmnd)) {
3310  		/*
3311  		 * The driver stores the segment count returned from dma_map_sg
3312  		 * because this is a count of dma-mappings used to map the use_sg
3313  		 * pages.  They are not guaranteed to be the same for those
3314  		 * architectures that implement an IOMMU.
3315  		 */
3316  		datasegcnt = dma_map_sg(&phba->pcidev->dev,
3317  					scsi_sglist(scsi_cmnd),
3318  					scsi_sg_count(scsi_cmnd), datadir);
3319  		if (unlikely(!datasegcnt))
3320  			return 1;
3321  
3322  		sgl += 1;
3323  		/* clear the last flag in the fcp_rsp map entry */
3324  		sgl->word2 = le32_to_cpu(sgl->word2);
3325  		bf_set(lpfc_sli4_sge_last, sgl, 0);
3326  		sgl->word2 = cpu_to_le32(sgl->word2);
3327  
3328  		sgl += 1;
3329  		lpfc_cmd->seg_cnt = datasegcnt;
3330  
3331  		/* First check if data segment count from SCSI Layer is good */
3332  		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3333  		    !phba->cfg_xpsgl) {
3334  			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3335  			ret = 2;
3336  			goto err;
3337  		}
3338  
3339  		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3340  
3341  		switch (prot_group_type) {
3342  		case LPFC_PG_TYPE_NO_DIF:
3343  			/* Here we need to add a DISEED to the count */
3344  			if (((lpfc_cmd->seg_cnt + 1) >
3345  					phba->cfg_total_seg_cnt) &&
3346  			    !phba->cfg_xpsgl) {
3347  				ret = 2;
3348  				goto err;
3349  			}
3350  
3351  			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3352  					datasegcnt, lpfc_cmd);
3353  
3354  			/* we should have 2 or more entries in buffer list */
3355  			if (num_sge < 2) {
3356  				ret = 2;
3357  				goto err;
3358  			}
3359  			break;
3360  
3361  		case LPFC_PG_TYPE_DIF_BUF:
3362  			/*
3363  			 * This type indicates that protection buffers are
3364  			 * passed to the driver, so that needs to be prepared
3365  			 * for DMA
3366  			 */
3367  			protsegcnt = dma_map_sg(&phba->pcidev->dev,
3368  					scsi_prot_sglist(scsi_cmnd),
3369  					scsi_prot_sg_count(scsi_cmnd), datadir);
3370  			if (unlikely(!protsegcnt)) {
3371  				scsi_dma_unmap(scsi_cmnd);
3372  				return 1;
3373  			}
3374  
3375  			lpfc_cmd->prot_seg_cnt = protsegcnt;
3376  			/*
3377  			 * There is a minimum of 3 SGEs used for every
3378  			 * protection data segment.
3379  			 */
3380  			if (((lpfc_cmd->prot_seg_cnt * 3) >
3381  					(phba->cfg_total_seg_cnt - 2)) &&
3382  			    !phba->cfg_xpsgl) {
3383  				ret = 2;
3384  				goto err;
3385  			}
3386  
3387  			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3388  					datasegcnt, protsegcnt, lpfc_cmd);
3389  
3390  			/* we should have 3 or more entries in buffer list */
3391  			if (num_sge < 3 ||
3392  			    (num_sge > phba->cfg_total_seg_cnt &&
3393  			     !phba->cfg_xpsgl)) {
3394  				ret = 2;
3395  				goto err;
3396  			}
3397  			break;
3398  
3399  		case LPFC_PG_TYPE_INVALID:
3400  		default:
3401  			scsi_dma_unmap(scsi_cmnd);
3402  			lpfc_cmd->seg_cnt = 0;
3403  
3404  			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3405  					"9083 Unexpected protection group %i\n",
3406  					prot_group_type);
3407  			return 2;
3408  		}
3409  	}
3410  
3411  	switch (scsi_get_prot_op(scsi_cmnd)) {
3412  	case SCSI_PROT_WRITE_STRIP:
3413  	case SCSI_PROT_READ_STRIP:
3414  		lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
3415  		break;
3416  	case SCSI_PROT_WRITE_INSERT:
3417  	case SCSI_PROT_READ_INSERT:
3418  		lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
3419  		break;
3420  	case SCSI_PROT_WRITE_PASS:
3421  	case SCSI_PROT_READ_PASS:
3422  		lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
3423  		break;
3424  	}
3425  
3426  	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3427  	if (lpfc_cmd->pCmd->cmd_len > LPFC_FCP_CDB_LEN)
3428  		((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl = cpu_to_be32(fcpdl);
3429  	else
3430  		fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
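	/*
	 * Example (assuming 512-byte sectors and 8-byte DIF tuples): when
	 * protection data travels on the wire, lpfc_bg_scsi_adjust_dl()
	 * grows the wire length, e.g. a 4096-byte transfer becomes
	 * 4096 + (4096 / 512) * 8 = 4160 bytes.
	 */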
3431  
3432  	/* Set first-burst provided it was successfully negotiated */
3433  	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
3434  	    vport->cfg_first_burst_size &&
3435  	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3436  		u32 init_len, total_len;
3437  
3438  		total_len = fcpdl;
3439  		init_len = min(total_len, vport->cfg_first_burst_size);
3440  
3441  		/* Word 4 & 5 */
3442  		wqe->fcp_iwrite.initial_xfer_len = init_len;
3443  		wqe->fcp_iwrite.total_xfer_len = total_len;
3444  	} else {
3445  		/* Word 4 */
3446  		wqe->fcp_iwrite.total_xfer_len = fcpdl;
3447  	}
3448  
3449  	/*
3450  	 * If the OAS driver feature is enabled and the lun is enabled for
3451  	 * OAS, set the oas iocb related flags.
3452  	 */
3453  	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3454  		scsi_cmnd->device->hostdata)->oas_enabled) {
3455  		lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3456  
3457  		/* Word 10 */
3458  		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3459  		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3460  		bf_set(wqe_ccp, &wqe->generic.wqe_com,
3461  		       (phba->cfg_XLanePriority << 1));
3462  	}
3463  
3464  	/* Word 7. DIF Flags */
3465  	if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
3466  		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3467  	else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
3468  		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3469  	else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
3470  		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3471  
3472  	lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
3473  				 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
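	/*
	 * For reference: INSERT means the HBA generates the 8-byte DIF
	 * tuples, STRIP means it verifies and removes them, and PASSTHRU
	 * means protection data flows end-to-end with the HBA checking it
	 * in flight.
	 */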
3474  
3475  	return 0;
3476  err:
3477  	if (lpfc_cmd->seg_cnt)
3478  		scsi_dma_unmap(scsi_cmnd);
3479  	if (lpfc_cmd->prot_seg_cnt)
3480  		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3481  			     scsi_prot_sg_count(scsi_cmnd),
3482  			     scsi_cmnd->sc_data_direction);
3483  
3484  	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3485  			"9084 Cannot setup S/G List for HBA "
3486  			"IO segs %d/%d SGL %d SCSI %d: %d %d %d\n",
3487  			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3488  			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3489  			prot_group_type, num_sge, ret);
3490  
3491  	lpfc_cmd->seg_cnt = 0;
3492  	lpfc_cmd->prot_seg_cnt = 0;
3493  	return ret;
3494  }
3495  
3496  /**
3497   * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3498   * @phba: The Hba for which this call is being executed.
3499   * @lpfc_cmd: The scsi buffer which is going to be mapped.
3500   *
3501   * This routine wraps the actual DMA mapping function pointer from the
3502   * lpfc_hba struct.
3503   *
3504   * Return codes:
3505   *	nonzero - Error (1 - retry; 2 - do not retry)
3506   *	0 - Success
3507   **/
3508  static inline int
3509  lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3510  {
3511  	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3512  }
3513  
3514  /**
3515   * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3516   * using BlockGuard.
3517   * @phba: The Hba for which this call is being executed.
3518   * @lpfc_cmd: The scsi buffer which is going to be mapped.
3519   *
3520   * This routine wraps the actual DMA mapping function pointer from the
3521   * lpfc_hba struct.
3522   *
3523   * Return codes:
3524   *	nonzero - Error (1 - retry; 2 - do not retry)
3525   *	0 - Success
3526   **/
3527  static inline int
3528  lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3529  {
3530  	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3531  }
3532  
3533  /**
3534   * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
3535   * buffer
3536   * @vport: Pointer to vport object.
3537   * @lpfc_cmd: The scsi buffer which is going to be mapped.
3538   * @tmo: Timeout value for IO
3539   *
3540   * This routine initializes IOCB/WQE data structure from scsi command
3541   *
3542   * Return codes:
3543   *	1 - Error
3544   *	0 - Success
3545   **/
3546  static inline int
3547  lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3548  			uint8_t tmo)
3549  {
3550  	return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3551  }
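/*
 * The wrappers above dispatch through per-HBA function pointers so one
 * hot path serves both SLI generations.  The pointers are presumably
 * wired up once at initialization, along the lines of (sketch):
 *
 *	phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
 *	phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
 *	phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
 *
 * for an SLI-4 HBA, with the _s3 variants used for SLI-3.
 */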
3552  
3553  /**
3554   * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3555   * @phba: Pointer to hba context object.
3556   * @vport: Pointer to vport object.
3557   * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3558   * @fcpi_parm: FCP Initiator parameter.
3559   *
3560   * This function posts an event when a SCSI command completes with an
3561   * error reported by the scsi device.
3562   **/
3563  static void
3564  lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3565  		struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3566  	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3567  	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3568  	uint32_t resp_info = fcprsp->rspStatus2;
3569  	uint32_t scsi_status = fcprsp->rspStatus3;
3570  	struct lpfc_fast_path_event *fast_path_evt = NULL;
3571  	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3572  	unsigned long flags;
3573  
3574  	if (!pnode)
3575  		return;
3576  
3577  	/* If there is queuefull or busy condition send a scsi event */
3578  	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3579  		(cmnd->result == SAM_STAT_BUSY)) {
3580  		fast_path_evt = lpfc_alloc_fast_evt(phba);
3581  		if (!fast_path_evt)
3582  			return;
3583  		fast_path_evt->un.scsi_evt.event_type =
3584  			FC_REG_SCSI_EVENT;
3585  		fast_path_evt->un.scsi_evt.subcategory =
3586  		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3587  		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3588  		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3589  		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3590  			&pnode->nlp_portname, sizeof(struct lpfc_name));
3591  		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3592  			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3593  	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3594  		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3595  		fast_path_evt = lpfc_alloc_fast_evt(phba);
3596  		if (!fast_path_evt)
3597  			return;
3598  		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3599  			FC_REG_SCSI_EVENT;
3600  		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3601  			LPFC_EVENT_CHECK_COND;
3602  		fast_path_evt->un.check_cond_evt.scsi_event.lun =
3603  			cmnd->device->lun;
3604  		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3605  			&pnode->nlp_portname, sizeof(struct lpfc_name));
3606  		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3607  			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3608  		fast_path_evt->un.check_cond_evt.sense_key =
3609  			cmnd->sense_buffer[2] & 0xf;
3610  		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3611  		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3612  	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3613  		     fcpi_parm &&
3614  		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3615  			((scsi_status == SAM_STAT_GOOD) &&
3616  			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
3617  		/*
3618  		 * If status is good or resid does not match with fcp_param and
3619  		 * there is valid fcpi_parm, then there is a read_check error
3620  		 */
3621  		fast_path_evt = lpfc_alloc_fast_evt(phba);
3622  		if (!fast_path_evt)
3623  			return;
3624  		fast_path_evt->un.read_check_error.header.event_type =
3625  			FC_REG_FABRIC_EVENT;
3626  		fast_path_evt->un.read_check_error.header.subcategory =
3627  			LPFC_EVENT_FCPRDCHKERR;
3628  		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3629  			&pnode->nlp_portname, sizeof(struct lpfc_name));
3630  		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3631  			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3632  		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3633  		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3634  		fast_path_evt->un.read_check_error.fcpiparam =
3635  			fcpi_parm;
3636  	} else
3637  		return;
3638  
3639  	fast_path_evt->vport = vport;
3640  	spin_lock_irqsave(&phba->hbalock, flags);
3641  	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3642  	spin_unlock_irqrestore(&phba->hbalock, flags);
3643  	lpfc_worker_wake_up(phba);
3644  	return;
3645  }
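/*
 * Note: the sense-buffer offsets used above follow fixed-format SCSI
 * sense data: byte 2 (low nibble) is the sense key, byte 12 the ASC,
 * and byte 13 the ASCQ.
 */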
3646  
3647  /**
3648   * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3649   * @phba: The HBA for which this call is being executed.
3650   * @psb: The scsi buffer which is going to be un-mapped.
3651   *
3652   * This routine does DMA un-mapping of the scatter-gather lists (data
3653   * and protection) of the scsi command held in @psb.
3654   **/
3655  static void
3656  lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3657  {
3658  	/*
3659  	 * There are only two special cases to consider.  (1) the scsi command
3660  	 * requested scatter-gather usage or (2) the scsi command allocated
3661  	 * a request buffer, but did not request use_sg.  There is a third
3662  	 * case, but it does not require resource deallocation.
3663  	 */
3664  	if (psb->seg_cnt > 0)
3665  		scsi_dma_unmap(psb->pCmd);
3666  	if (psb->prot_seg_cnt > 0)
3667  		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3668  				scsi_prot_sg_count(psb->pCmd),
3669  				psb->pCmd->sc_data_direction);
3670  }
3671  
3672  /**
3673   * lpfc_unblock_requests - allow further commands to be queued.
3674   * @phba: pointer to phba object
3675   *
3676   * For single vport, just call scsi_unblock_requests on physical port.
3677   * For multiple vports, send scsi_unblock_requests for all the vports.
3678   */
3679  void
3680  lpfc_unblock_requests(struct lpfc_hba *phba)
3681  {
3682  	struct lpfc_vport **vports;
3683  	struct Scsi_Host  *shost;
3684  	int i;
3685  
3686  	if (phba->sli_rev == LPFC_SLI_REV4 &&
3687  	    !phba->sli4_hba.max_cfg_param.vpi_used) {
3688  		shost = lpfc_shost_from_vport(phba->pport);
3689  		scsi_unblock_requests(shost);
3690  		return;
3691  	}
3692  
3693  	vports = lpfc_create_vport_work_array(phba);
3694  	if (vports != NULL)
3695  		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3696  			shost = lpfc_shost_from_vport(vports[i]);
3697  			scsi_unblock_requests(shost);
3698  		}
3699  	lpfc_destroy_vport_work_array(phba, vports);
3700  }
3701  
3702  /**
3703   * lpfc_block_requests - prevent further commands from being queued.
3704   * @phba: pointer to phba object
3705   *
3706   * For single vport, just call scsi_block_requests on physical port.
3707   * For multiple vports, send scsi_block_requests for all the vports.
3708   */
3709  void
3710  lpfc_block_requests(struct lpfc_hba *phba)
3711  {
3712  	struct lpfc_vport **vports;
3713  	struct Scsi_Host  *shost;
3714  	int i;
3715  
3716  	if (atomic_read(&phba->cmf_stop_io))
3717  		return;
3718  
3719  	if (phba->sli_rev == LPFC_SLI_REV4 &&
3720  	    !phba->sli4_hba.max_cfg_param.vpi_used) {
3721  		shost = lpfc_shost_from_vport(phba->pport);
3722  		scsi_block_requests(shost);
3723  		return;
3724  	}
3725  
3726  	vports = lpfc_create_vport_work_array(phba);
3727  	if (vports != NULL)
3728  		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3729  			shost = lpfc_shost_from_vport(vports[i]);
3730  			scsi_block_requests(shost);
3731  		}
3732  	lpfc_destroy_vport_work_array(phba, vports);
3733  }
3734  
3735  /**
3736   * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
3737   * @phba: The HBA for which this call is being executed.
3738   * @time: The latency of the IO that completed (in ns)
3739   * @size: The size of the IO that completed
3740   * @shost: SCSI host the IO completed on (NULL for a NVME IO)
3741   *
3742   * The routine adjusts the various Burst and Bandwidth counters used in
3743   * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
3744   * that means the IO was never issued to the HBA, so this routine is
3745   * just being called to clean up the counter from a previous
3746   * lpfc_update_cmf_cmd call.
3747   */
3748  int
3749  lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
3750  		     uint64_t time, uint32_t size, struct Scsi_Host *shost)
3751  {
3752  	struct lpfc_cgn_stat *cgs;
3753  
3754  	if (time != LPFC_CGN_NOT_SENT) {
3755  		/* lat is ns coming in, save latency in us */
3756  		if (time < 1000)
3757  			time = 1;
3758  		else
3759  			time = div_u64(time + 500, 1000); /* round it */
3760  
3761  		cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3762  		atomic64_add(size, &cgs->rcv_bytes);
3763  		atomic64_add(time, &cgs->rx_latency);
3764  		atomic_inc(&cgs->rx_io_cnt);
3765  	}
3766  	return 0;
3767  }
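/*
 * Worked example of the rounding above: 1,499 ns becomes
 * (1499 + 500) / 1000 = 1 us, 1,500 ns becomes 2 us, and anything under
 * 1,000 ns is clamped to 1 us so a fast IO never records zero latency.
 */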
3768  
3769  /**
3770   * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
3771   * @phba: The HBA for which this call is being executed.
3772   * @size: The size of the IO that will be issued
3773   *
3774   * The routine adjusts the various Burst and Bandwidth counters used in
3775   * Congestion management and E2E.
3776   */
3777  int
3778  lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
3779  {
3780  	uint64_t total;
3781  	struct lpfc_cgn_stat *cgs;
3782  	int cpu;
3783  
3784  	/* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
3785  	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
3786  	    phba->cmf_max_bytes_per_interval) {
3787  		total = 0;
3788  		for_each_present_cpu(cpu) {
3789  			cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3790  			total += atomic64_read(&cgs->total_bytes);
3791  		}
3792  		if (total >= phba->cmf_max_bytes_per_interval) {
3793  			if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
3794  				lpfc_block_requests(phba);
3795  				phba->cmf_last_ts =
3796  					lpfc_calc_cmf_latency(phba);
3797  			}
3798  			atomic_inc(&phba->cmf_busy);
3799  			return -EBUSY;
3800  		}
3801  		if (size > atomic_read(&phba->rx_max_read_cnt))
3802  			atomic_set(&phba->rx_max_read_cnt, size);
3803  	}
3804  
3805  	cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3806  	atomic64_add(size, &cgs->total_bytes);
3807  	return 0;
3808  }
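/*
 * In LPFC_CFG_MANAGED mode the per-CPU total_bytes counters are summed
 * on every submission; once the sum reaches cmf_max_bytes_per_interval
 * this routine blocks the SCSI layer and returns -EBUSY.  The counters
 * are presumably zeroed by the CMF interval timer before requests are
 * unblocked.
 */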
3809  
3810  /**
3811   * lpfc_handle_fcp_err - FCP response handler
3812   * @vport: The virtual port for which this call is being executed.
3813   * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3814   * @fcpi_parm: FCP Initiator parameter.
3815   *
3816   * This routine is called to process response IOCB with status field
3817   * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3818   * based upon SCSI and FCP error.
3819   **/
3820  static void
3821  lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3822  		    uint32_t fcpi_parm)
3823  {
3824  	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3825  	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3826  	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3827  	uint32_t resp_info = fcprsp->rspStatus2;
3828  	uint32_t scsi_status = fcprsp->rspStatus3;
3829  	uint32_t *lp;
3830  	uint32_t host_status = DID_OK;
3831  	uint32_t rsplen = 0;
3832  	uint32_t fcpDl;
3833  	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3834  
3835  
3836  	/*
3837  	 *  If this is a task management command, there is no
3838  	 *  scsi packet associated with this lpfc_cmd.  The driver
3839  	 *  consumes it.
3840  	 */
3841  	if (fcpcmd->fcpCntl2) {
3842  		scsi_status = 0;
3843  		goto out;
3844  	}
3845  
3846  	if (resp_info & RSP_LEN_VALID) {
3847  		rsplen = be32_to_cpu(fcprsp->rspRspLen);
3848  		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3849  			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3850  					 "2719 Invalid response length: "
3851  					 "tgt x%x lun x%llx cmnd x%x rsplen "
3852  					 "x%x\n", cmnd->device->id,
3853  					 cmnd->device->lun, cmnd->cmnd[0],
3854  					 rsplen);
3855  			host_status = DID_ERROR;
3856  			goto out;
3857  		}
3858  		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3859  			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3860  				 "2757 Protocol failure detected during "
3861  				 "processing of FCP I/O op: "
3862  				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3863  				 cmnd->device->id,
3864  				 cmnd->device->lun, cmnd->cmnd[0],
3865  				 fcprsp->rspInfo3);
3866  			host_status = DID_ERROR;
3867  			goto out;
3868  		}
3869  	}
3870  
3871  	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3872  		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3873  		if (snslen > SCSI_SENSE_BUFFERSIZE)
3874  			snslen = SCSI_SENSE_BUFFERSIZE;
3875  
3876  		if (resp_info & RSP_LEN_VALID)
3877  			rsplen = be32_to_cpu(fcprsp->rspRspLen);
3878  		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3879  	}
3880  	lp = (uint32_t *)cmnd->sense_buffer;
3881  
3882  	/* special handling for under run conditions */
3883  	if (!scsi_status && (resp_info & RESID_UNDER)) {
3884  		/* don't log underruns when only LOG_FCP is set... */
3885  		if (vport->cfg_log_verbose & LOG_FCP)
3886  			logit = LOG_FCP_ERROR;
3887  		/* ...unless the operator enabled LOG_FCP_UNDER */
3888  		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3889  			logit = LOG_FCP_UNDER;
3890  	}
3891  
3892  	lpfc_printf_vlog(vport, KERN_WARNING, logit,
3893  			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3894  			 "Data: x%x x%x x%x x%x x%x\n",
3895  			 cmnd->cmnd[0], scsi_status,
3896  			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3897  			 be32_to_cpu(fcprsp->rspResId),
3898  			 be32_to_cpu(fcprsp->rspSnsLen),
3899  			 be32_to_cpu(fcprsp->rspRspLen),
3900  			 fcprsp->rspInfo3);
3901  
3902  	scsi_set_resid(cmnd, 0);
3903  	if (cmnd->cmd_len > LPFC_FCP_CDB_LEN)
3904  		fcpDl = be32_to_cpu(((struct fcp_cmnd32 *)fcpcmd)->fcpDl);
3905  	else
3906  		fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3907  	if (resp_info & RESID_UNDER) {
3908  		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3909  
3910  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3911  				 "9025 FCP Underrun, expected %d, "
3912  				 "residual %d Data: x%x x%x x%x\n",
3913  				 fcpDl,
3914  				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3915  				 cmnd->underflow);
3916  
3917  		/*
3918  		 * If there is an under run, check if under run reported by
3919  		 * storage array is same as the under run reported by HBA.
3920  		 * If this is not same, there is a dropped frame.
3921  		 */
3922  		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3923  			lpfc_printf_vlog(vport, KERN_WARNING,
3924  					 LOG_FCP | LOG_FCP_ERROR,
3925  					 "9026 FCP Read Check Error "
3926  					 "and Underrun Data: x%x x%x x%x x%x\n",
3927  					 fcpDl,
3928  					 scsi_get_resid(cmnd), fcpi_parm,
3929  					 cmnd->cmnd[0]);
3930  			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3931  			host_status = DID_ERROR;
3932  		}
3933  		/*
3934  		 * The cmnd->underflow is the minimum number of bytes that must
3935  		 * be transferred for this command.  Provided a sense condition
3936  		 * is not present, make sure the actual amount transferred is at
3937  		 * least the underflow value or fail.
3938  		 */
3939  		if (!(resp_info & SNS_LEN_VALID) &&
3940  		    (scsi_status == SAM_STAT_GOOD) &&
3941  		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3942  		     < cmnd->underflow)) {
3943  			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3944  					 "9027 FCP command x%x residual "
3945  					 "underrun converted to error "
3946  					 "Data: x%x x%x x%x\n",
3947  					 cmnd->cmnd[0], scsi_bufflen(cmnd),
3948  					 scsi_get_resid(cmnd), cmnd->underflow);
3949  			host_status = DID_ERROR;
3950  		}
3951  	} else if (resp_info & RESID_OVER) {
3952  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3953  				 "9028 FCP command x%x residual overrun error. "
3954  				 "Data: x%x x%x\n", cmnd->cmnd[0],
3955  				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3956  		host_status = DID_ERROR;
3957  
3958  	/*
3959  	 * Check SLI validation that all the transfer was actually done
3960  	 * (fcpi_parm should be zero). Apply check only to reads.
3961  	 */
3962  	} else if (fcpi_parm) {
3963  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3964  				 "9029 FCP %s Check Error Data: "
3965  				 "x%x x%x x%x x%x x%x\n",
3966  				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3967  				 "Read" : "Write"),
3968  				 fcpDl, be32_to_cpu(fcprsp->rspResId),
3969  				 fcpi_parm, cmnd->cmnd[0], scsi_status);
3970  
3971  		/* There is some issue with the LPe12000 that causes it
3972  		 * to miscalculate the fcpi_parm and falsely trip this
3973  		 * recovery logic.  Detect this case and don't error when true.
3974  		 */
3975  		if (fcpi_parm > fcpDl)
3976  			goto out;
3977  
3978  		switch (scsi_status) {
3979  		case SAM_STAT_GOOD:
3980  		case SAM_STAT_CHECK_CONDITION:
3981  			/* Fabric dropped a data frame. Fail any successful
3982  			 * command in which we detected dropped frames.
3983  			 * A status of good or some check conditions could
3984  			 * be considered a successful command.
3985  			 */
3986  			host_status = DID_ERROR;
3987  			break;
3988  		}
3989  		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3990  	}
3991  
3992   out:
3993  	cmnd->result = host_status << 16 | scsi_status;
3994  	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
3995  }
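/*
 * Note: @fcpi_parm here is the HBA-computed residual (in the SLI-4
 * completion path it is total_xfer_len - total_data_placed); comparing
 * it against the target-reported rspResId is what exposes dropped data
 * frames on reads.
 */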
3996  
3997  /**
3998   * lpfc_fcp_io_cmd_wqe_cmpl - Complete an FCP IO
3999   * @phba: The hba for which this call is being executed.
4000   * @pwqeIn: The command WQE for the scsi cmnd.
4001   * @pwqeOut: Pointer to driver response WQE object.
4002   *
4003   * This routine assigns scsi command result by looking into response WQE
4004   * status field appropriately. This routine handles QUEUE FULL condition as
4005   * well by ramping down device queue depth.
4006   **/
4007  static void
4008  lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4009  			 struct lpfc_iocbq *pwqeOut)
4010  {
4011  	struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
4012  	struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
4013  	struct lpfc_vport *vport = pwqeIn->vport;
4014  	struct lpfc_rport_data *rdata;
4015  	struct lpfc_nodelist *ndlp;
4016  	struct scsi_cmnd *cmd;
4017  	unsigned long flags;
4018  	struct lpfc_fast_path_event *fast_path_evt;
4019  	struct Scsi_Host *shost;
4020  	u32 logit = LOG_FCP;
4021  	u32 idx;
4022  	u32 lat;
4023  	u8 wait_xb_clr = 0;
4024  
4025  	/* Sanity check on return of outstanding command */
4026  	if (!lpfc_cmd) {
4027  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4028  				 "9032 Null lpfc_cmd pointer. No "
4029  				 "release, skip completion\n");
4030  		return;
4031  	}
4032  
4033  	rdata = lpfc_cmd->rdata;
4034  	ndlp = rdata->pnode;
4035  
4036  	/* Sanity check on return of outstanding command */
4037  	cmd = lpfc_cmd->pCmd;
4038  	if (!cmd) {
4039  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4040  				 "9042 I/O completion: Not an active IO\n");
4041  		lpfc_release_scsi_buf(phba, lpfc_cmd);
4042  		return;
4043  	}
4044  	/* Guard against abort handler being called at same time */
4045  	spin_lock(&lpfc_cmd->buf_lock);
4046  	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4047  	if (phba->sli4_hba.hdwq)
4048  		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4049  
4050  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4051  	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4052  		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4053  #endif
4054  	shost = cmd->device->host;
4055  
4056  	lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe);
4057  	lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4058  
4059  	lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4060  	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4061  		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4062  		if (phba->cfg_fcp_wait_abts_rsp)
4063  			wait_xb_clr = 1;
4064  	}
4065  
4066  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4067  	if (lpfc_cmd->prot_data_type) {
4068  		struct scsi_dif_tuple *src = NULL;
4069  
4070  		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4071  		/*
4072  		 * Used to restore any changes to protection
4073  		 * data for error injection.
4074  		 */
4075  		switch (lpfc_cmd->prot_data_type) {
4076  		case LPFC_INJERR_REFTAG:
4077  			src->ref_tag =
4078  				lpfc_cmd->prot_data;
4079  			break;
4080  		case LPFC_INJERR_APPTAG:
4081  			src->app_tag =
4082  				(uint16_t)lpfc_cmd->prot_data;
4083  			break;
4084  		case LPFC_INJERR_GUARD:
4085  			src->guard_tag =
4086  				(uint16_t)lpfc_cmd->prot_data;
4087  			break;
4088  		default:
4089  			break;
4090  		}
4091  
4092  		lpfc_cmd->prot_data = 0;
4093  		lpfc_cmd->prot_data_type = 0;
4094  		lpfc_cmd->prot_data_segment = NULL;
4095  	}
4096  #endif
4097  	if (unlikely(lpfc_cmd->status)) {
4098  		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4099  		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4100  		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4101  		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4102  			logit = 0;
4103  		else
4104  			logit = LOG_FCP | LOG_FCP_UNDER;
4105  		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4106  				 "9034 FCP cmd x%x failed <%d/%lld> "
4107  				 "status: x%x result: x%x "
4108  				 "sid: x%x did: x%x oxid: x%x "
4109  				 "Data: x%x x%x x%x\n",
4110  				 cmd->cmnd[0],
4111  				 cmd->device ? cmd->device->id : 0xffff,
4112  				 cmd->device ? cmd->device->lun : 0xffff,
4113  				 lpfc_cmd->status, lpfc_cmd->result,
4114  				 vport->fc_myDID,
4115  				 (ndlp) ? ndlp->nlp_DID : 0,
4116  				 lpfc_cmd->cur_iocbq.sli4_xritag,
4117  				 wcqe->parameter, wcqe->total_data_placed,
4118  				 lpfc_cmd->cur_iocbq.iotag);
4119  	}
4120  
4121  	switch (lpfc_cmd->status) {
4122  	case CQE_STATUS_SUCCESS:
4123  		cmd->result = DID_OK << 16;
4124  		break;
4125  	case CQE_STATUS_FCP_RSP_FAILURE:
4126  		lpfc_handle_fcp_err(vport, lpfc_cmd,
4127  				    pwqeIn->wqe.fcp_iread.total_xfer_len -
4128  				    wcqe->total_data_placed);
4129  		break;
4130  	case CQE_STATUS_NPORT_BSY:
4131  	case CQE_STATUS_FABRIC_BSY:
4132  		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4133  		fast_path_evt = lpfc_alloc_fast_evt(phba);
4134  		if (!fast_path_evt)
4135  			break;
4136  		fast_path_evt->un.fabric_evt.event_type =
4137  			FC_REG_FABRIC_EVENT;
4138  		fast_path_evt->un.fabric_evt.subcategory =
4139  			(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4140  			LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4141  		if (ndlp) {
4142  			memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4143  			       &ndlp->nlp_portname,
4144  				sizeof(struct lpfc_name));
4145  			memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4146  			       &ndlp->nlp_nodename,
4147  				sizeof(struct lpfc_name));
4148  		}
4149  		fast_path_evt->vport = vport;
4150  		fast_path_evt->work_evt.evt =
4151  			LPFC_EVT_FASTPATH_MGMT_EVT;
4152  		spin_lock_irqsave(&phba->hbalock, flags);
4153  		list_add_tail(&fast_path_evt->work_evt.evt_listp,
4154  			      &phba->work_list);
4155  		spin_unlock_irqrestore(&phba->hbalock, flags);
4156  		lpfc_worker_wake_up(phba);
4157  		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4158  				 "9035 Fabric/Node busy FCP cmd x%x failed"
4159  				 " <%d/%lld> "
4160  				 "status: x%x result: x%x "
4161  				 "sid: x%x did: x%x oxid: x%x "
4162  				 "Data: x%x x%x x%x\n",
4163  				 cmd->cmnd[0],
4164  				 cmd->device ? cmd->device->id : 0xffff,
4165  				 cmd->device ? cmd->device->lun : 0xffff,
4166  				 lpfc_cmd->status, lpfc_cmd->result,
4167  				 vport->fc_myDID,
4168  				 (ndlp) ? ndlp->nlp_DID : 0,
4169  				 lpfc_cmd->cur_iocbq.sli4_xritag,
4170  				 wcqe->parameter,
4171  				 wcqe->total_data_placed,
4172  				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4173  		break;
4174  	case CQE_STATUS_DI_ERROR:
4175  		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
4176  			lpfc_cmd->result = IOERR_RX_DMA_FAILED;
4177  		else
4178  			lpfc_cmd->result = IOERR_TX_DMA_FAILED;
4179  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG,
4180  				 "9048 DI Error xri x%x status x%x DI ext "
4181  				 "status x%x data placed x%x\n",
4182  				 lpfc_cmd->cur_iocbq.sli4_xritag,
4183  				 lpfc_cmd->status, wcqe->parameter,
4184  				 wcqe->total_data_placed);
4185  		if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4186  			/* BG enabled cmd. Parse BG error */
4187  			lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
4188  			break;
4189  		}
4190  		cmd->result = DID_ERROR << 16;
4191  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4192  				 "9040 DI Error on unprotected cmd\n");
4193  		break;
4194  	case CQE_STATUS_REMOTE_STOP:
4195  		if (ndlp) {
4196  			/* This I/O was aborted by the target.  We don't
4197  			 * know the rxid, and because we did not send the
4198  			 * ABTS we cannot generate an RRQ.
4199  			 */
4200  			lpfc_set_rrq_active(phba, ndlp,
4201  					    lpfc_cmd->cur_iocbq.sli4_lxritag,
4202  					    0, 0);
4203  		}
4204  		fallthrough;
4205  	case CQE_STATUS_LOCAL_REJECT:
4206  		if (lpfc_cmd->result & IOERR_DRVR_MASK)
4207  			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4208  		if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4209  		    lpfc_cmd->result ==
4210  		    IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4211  		    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4212  		    lpfc_cmd->result ==
4213  		    IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4214  			cmd->result = DID_NO_CONNECT << 16;
4215  			break;
4216  		}
4217  		if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4218  		    lpfc_cmd->result == IOERR_LINK_DOWN ||
4219  		    lpfc_cmd->result == IOERR_NO_RESOURCES ||
4220  		    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4221  		    lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
4222  		    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4223  			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4224  			break;
4225  		}
4226  		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4227  				 "9036 Local Reject FCP cmd x%x failed"
4228  				 " <%d/%lld> "
4229  				 "status: x%x result: x%x "
4230  				 "sid: x%x did: x%x oxid: x%x "
4231  				 "Data: x%x x%x x%x\n",
4232  				 cmd->cmnd[0],
4233  				 cmd->device ? cmd->device->id : 0xffff,
4234  				 cmd->device ? cmd->device->lun : 0xffff,
4235  				 lpfc_cmd->status, lpfc_cmd->result,
4236  				 vport->fc_myDID,
4237  				 (ndlp) ? ndlp->nlp_DID : 0,
4238  				 lpfc_cmd->cur_iocbq.sli4_xritag,
4239  				 wcqe->parameter,
4240  				 wcqe->total_data_placed,
4241  				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4242  		fallthrough;
4243  	default:
4244  		cmd->result = DID_ERROR << 16;
4245  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4246  				 "9037 FCP Completion Error: xri %x "
4247  				 "status x%x result x%x [x%x] "
4248  				 "placed x%x\n",
4249  				 lpfc_cmd->cur_iocbq.sli4_xritag,
4250  				 lpfc_cmd->status, lpfc_cmd->result,
4251  				 wcqe->parameter,
4252  				 wcqe->total_data_placed);
4253  	}
4254  	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4255  		u32 *lp = (u32 *)cmd->sense_buffer;
4256  
4257  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4258  				 "9039 Iodone <%d/%llu> cmd x%px, error "
4259  				 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
4260  				 cmd->device->id, cmd->device->lun, cmd,
4261  				 cmd->result, *lp, *(lp + 3),
4262  				 (cmd->device->sector_size) ?
4263  				 (u64)scsi_get_lba(cmd) : 0,
4264  				 cmd->retries, scsi_get_resid(cmd));
4265  	}
4266  
4267  	if (vport->cfg_max_scsicmpl_time &&
4268  	    time_after(jiffies, lpfc_cmd->start_time +
4269  	    msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4270  		spin_lock_irqsave(shost->host_lock, flags);
4271  		if (ndlp) {
4272  			if (ndlp->cmd_qdepth >
4273  				atomic_read(&ndlp->cmd_pending) &&
4274  				(atomic_read(&ndlp->cmd_pending) >
4275  				LPFC_MIN_TGT_QDEPTH) &&
4276  				(cmd->cmnd[0] == READ_10 ||
4277  				cmd->cmnd[0] == WRITE_10))
4278  				ndlp->cmd_qdepth =
4279  					atomic_read(&ndlp->cmd_pending);
4280  
4281  			ndlp->last_change_time = jiffies;
4282  		}
4283  		spin_unlock_irqrestore(shost->host_lock, flags);
4284  	}
4285  	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4286  
4287  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4288  	if (lpfc_cmd->ts_cmd_start) {
4289  		lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4290  		lpfc_cmd->ts_data_io = ktime_get_ns();
4291  		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4292  		lpfc_io_ktime(phba, lpfc_cmd);
4293  	}
4294  #endif
4295  	if (likely(!wait_xb_clr))
4296  		lpfc_cmd->pCmd = NULL;
4297  	spin_unlock(&lpfc_cmd->buf_lock);
4298  
4299  	/* Check if IO qualified for CMF */
4300  	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
4301  	    cmd->sc_data_direction == DMA_FROM_DEVICE &&
4302  	    (scsi_sg_count(cmd))) {
4303  		/* Used when calculating average latency */
4304  		lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
4305  		lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4306  	}
4307  
4308  	if (wait_xb_clr)
4309  		goto out;
4310  
4311  	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
4312  	scsi_done(cmd);
4313  
4314  	/*
4315  	 * If there is an abort thread waiting for command completion
4316  	 * wake up the thread.
4317  	 */
4318  	spin_lock(&lpfc_cmd->buf_lock);
4319  	lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4320  	if (lpfc_cmd->waitq)
4321  		wake_up(lpfc_cmd->waitq);
4322  	spin_unlock(&lpfc_cmd->buf_lock);
4323  out:
4324  	lpfc_release_scsi_buf(phba, lpfc_cmd);
4325  }
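/*
 * Note: when the WCQE XB (exchange busy) bit is set and
 * cfg_fcp_wait_abts_rsp is enabled, wait_xb_clr defers the scsi_done()
 * upcall above; pCmd stays attached and the command is finished later,
 * presumably from the XRI-aborted handling, once the exchange is fully
 * terminated.
 */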
4326  
4327  /**
4328   * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4329   * @phba: The Hba for which this call is being executed.
4330   * @pIocbIn: The command IOCBQ for the scsi cmnd.
4331   * @pIocbOut: The response IOCBQ for the scsi cmnd.
4332   *
4333   * This routine assigns scsi command result by looking into response IOCB
4334   * status field appropriately. This routine handles QUEUE FULL condition as
4335   * well by ramping down device queue depth.
4336   **/
4337  static void
4338  lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4339  			struct lpfc_iocbq *pIocbOut)
4340  {
4341  	struct lpfc_io_buf *lpfc_cmd =
4342  		(struct lpfc_io_buf *) pIocbIn->io_buf;
4343  	struct lpfc_vport      *vport = pIocbIn->vport;
4344  	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4345  	struct lpfc_nodelist *pnode = rdata->pnode;
4346  	struct scsi_cmnd *cmd;
4347  	unsigned long flags;
4348  	struct lpfc_fast_path_event *fast_path_evt;
4349  	struct Scsi_Host *shost;
4350  	int idx;
4351  	uint32_t logit = LOG_FCP;
4352  
4353  	/* Guard against abort handler being called at same time */
4354  	spin_lock(&lpfc_cmd->buf_lock);
4355  
4356  	/* Sanity check on return of outstanding command */
4357  	cmd = lpfc_cmd->pCmd;
4358  	if (!cmd || !phba) {
4359  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4360  				 "2621 IO completion: Not an active IO\n");
4361  		spin_unlock(&lpfc_cmd->buf_lock);
4362  		return;
4363  	}
4364  
4365  	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4366  	if (phba->sli4_hba.hdwq)
4367  		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4368  
4369  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4370  	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4371  		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4372  #endif
4373  	shost = cmd->device->host;
4374  
4375  	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4376  	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4377  	/* pick up SLI4 exchange busy status from HBA */
4378  	lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4379  	if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
4380  		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4381  
4382  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4383  	if (lpfc_cmd->prot_data_type) {
4384  		struct scsi_dif_tuple *src = NULL;
4385  
4386  		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4387  		/*
4388  		 * Used to restore any changes to protection
4389  		 * data for error injection.
4390  		 */
4391  		switch (lpfc_cmd->prot_data_type) {
4392  		case LPFC_INJERR_REFTAG:
4393  			src->ref_tag =
4394  				lpfc_cmd->prot_data;
4395  			break;
4396  		case LPFC_INJERR_APPTAG:
4397  			src->app_tag =
4398  				(uint16_t)lpfc_cmd->prot_data;
4399  			break;
4400  		case LPFC_INJERR_GUARD:
4401  			src->guard_tag =
4402  				(uint16_t)lpfc_cmd->prot_data;
4403  			break;
4404  		default:
4405  			break;
4406  		}
4407  
4408  		lpfc_cmd->prot_data = 0;
4409  		lpfc_cmd->prot_data_type = 0;
4410  		lpfc_cmd->prot_data_segment = NULL;
4411  	}
4412  #endif
4413  
4414  	if (unlikely(lpfc_cmd->status)) {
4415  		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4416  		    (lpfc_cmd->result & IOERR_DRVR_MASK))
4417  			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4418  		else if (lpfc_cmd->status >= IOSTAT_CNT)
4419  			lpfc_cmd->status = IOSTAT_DEFAULT;
4420  		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4421  		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4422  		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4423  		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4424  			logit = 0;
4425  		else
4426  			logit = LOG_FCP | LOG_FCP_UNDER;
4427  		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4428  			 "9030 FCP cmd x%x failed <%d/%lld> "
4429  			 "status: x%x result: x%x "
4430  			 "sid: x%x did: x%x oxid: x%x "
4431  			 "Data: x%x x%x\n",
4432  			 cmd->cmnd[0],
4433  			 cmd->device ? cmd->device->id : 0xffff,
4434  			 cmd->device ? cmd->device->lun : 0xffff,
4435  			 lpfc_cmd->status, lpfc_cmd->result,
4436  			 vport->fc_myDID,
4437  			 (pnode) ? pnode->nlp_DID : 0,
4438  			 phba->sli_rev == LPFC_SLI_REV4 ?
4439  			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4440  			 pIocbOut->iocb.ulpContext,
4441  			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4442  
4443  		switch (lpfc_cmd->status) {
4444  		case IOSTAT_FCP_RSP_ERROR:
4445  			/* Call FCP RSP handler to determine result */
4446  			lpfc_handle_fcp_err(vport, lpfc_cmd,
4447  					    pIocbOut->iocb.un.fcpi.fcpi_parm);
4448  			break;
4449  		case IOSTAT_NPORT_BSY:
4450  		case IOSTAT_FABRIC_BSY:
4451  			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4452  			fast_path_evt = lpfc_alloc_fast_evt(phba);
4453  			if (!fast_path_evt)
4454  				break;
4455  			fast_path_evt->un.fabric_evt.event_type =
4456  				FC_REG_FABRIC_EVENT;
4457  			fast_path_evt->un.fabric_evt.subcategory =
4458  				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4459  				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4460  			if (pnode) {
4461  				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4462  					&pnode->nlp_portname,
4463  					sizeof(struct lpfc_name));
4464  				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4465  					&pnode->nlp_nodename,
4466  					sizeof(struct lpfc_name));
4467  			}
4468  			fast_path_evt->vport = vport;
4469  			fast_path_evt->work_evt.evt =
4470  				LPFC_EVT_FASTPATH_MGMT_EVT;
4471  			spin_lock_irqsave(&phba->hbalock, flags);
4472  			list_add_tail(&fast_path_evt->work_evt.evt_listp,
4473  				&phba->work_list);
4474  			spin_unlock_irqrestore(&phba->hbalock, flags);
4475  			lpfc_worker_wake_up(phba);
4476  			break;
4477  		case IOSTAT_LOCAL_REJECT:
4478  		case IOSTAT_REMOTE_STOP:
4479  			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4480  			    lpfc_cmd->result ==
4481  					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4482  			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4483  			    lpfc_cmd->result ==
4484  					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4485  				cmd->result = DID_NO_CONNECT << 16;
4486  				break;
4487  			}
4488  			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4489  			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
4490  			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4491  			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4492  				cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4493  				break;
4494  			}
4495  			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4496  			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4497  			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4498  				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4499  					/*
4500  					 * This is a response for a BG enabled
4501  					 * cmd. Parse BG error
4502  					 */
4503  					lpfc_parse_bg_err(phba, lpfc_cmd,
4504  							pIocbOut);
4505  					break;
4506  				} else {
4507  					lpfc_printf_vlog(vport, KERN_WARNING,
4508  							LOG_BG,
4509  							"9031 non-zero BGSTAT "
4510  							"on unprotected cmd\n");
4511  				}
4512  			}
4513  			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4514  				&& (phba->sli_rev == LPFC_SLI_REV4)
4515  				&& pnode) {
4516  				/* This IO was aborted by the target.  We don't
4517  				 * know the rxid, and because we did not send the
4518  				 * ABTS we cannot generate an RRQ.
4519  				 */
4520  				lpfc_set_rrq_active(phba, pnode,
4521  					lpfc_cmd->cur_iocbq.sli4_lxritag,
4522  					0, 0);
4523  			}
4524  			fallthrough;
4525  		default:
4526  			cmd->result = DID_ERROR << 16;
4527  			break;
4528  		}
4529  
4530  		if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4531  			cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4532  				      SAM_STAT_BUSY;
4533  	} else
4534  		cmd->result = DID_OK << 16;
4535  
4536  	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4537  		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4538  
4539  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4540  				 "0710 Iodone <%d/%llu> cmd x%px, error "
4541  				 "x%x SNS x%x x%x Data: x%x x%x\n",
4542  				 cmd->device->id, cmd->device->lun, cmd,
4543  				 cmd->result, *lp, *(lp + 3), cmd->retries,
4544  				 scsi_get_resid(cmd));
4545  	}
4546  
4547  	if (vport->cfg_max_scsicmpl_time &&
4548  	   time_after(jiffies, lpfc_cmd->start_time +
4549  		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4550  		spin_lock_irqsave(shost->host_lock, flags);
4551  		if (pnode) {
4552  			if (pnode->cmd_qdepth >
4553  				atomic_read(&pnode->cmd_pending) &&
4554  				(atomic_read(&pnode->cmd_pending) >
4555  				LPFC_MIN_TGT_QDEPTH) &&
4556  				((cmd->cmnd[0] == READ_10) ||
4557  				(cmd->cmnd[0] == WRITE_10)))
4558  				pnode->cmd_qdepth =
4559  					atomic_read(&pnode->cmd_pending);
4560  
4561  			pnode->last_change_time = jiffies;
4562  		}
4563  		spin_unlock_irqrestore(shost->host_lock, flags);
4564  	}
4565  	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4566  
4567  	lpfc_cmd->pCmd = NULL;
4568  	spin_unlock(&lpfc_cmd->buf_lock);
4569  
4570  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4571  	if (lpfc_cmd->ts_cmd_start) {
4572  		lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4573  		lpfc_cmd->ts_data_io = ktime_get_ns();
4574  		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4575  		lpfc_io_ktime(phba, lpfc_cmd);
4576  	}
4577  #endif
4578  
4579  	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
4580  	scsi_done(cmd);
4581  
4582  	/*
4583  	 * If there is an abort thread waiting for command completion
4584  	 * wake up the thread.
4585  	 */
4586  	spin_lock(&lpfc_cmd->buf_lock);
4587  	lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4588  	if (lpfc_cmd->waitq)
4589  		wake_up(lpfc_cmd->waitq);
4590  	spin_unlock(&lpfc_cmd->buf_lock);
4591  
4592  	lpfc_release_scsi_buf(phba, lpfc_cmd);
4593  }
4594  
4595  /**
4596   * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4597   * @vport: Pointer to vport object.
4598   * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4599   * @tmo: timeout value for the IO
4600   *
4601   * Based on the data-direction of the command, initialize the IOCB
4602   * in the I/O buffer. Fill in the IOCB fields which are independent
4603   * of the scsi buffer.
4604   *
4605   * RETURNS 0 - SUCCESS,
4606   **/
4607  static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4608  				      struct lpfc_io_buf *lpfc_cmd,
4609  				      uint8_t tmo)
4610  {
4611  	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4612  	struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4613  	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4614  	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4615  	struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4616  	int datadir = scsi_cmnd->sc_data_direction;
4617  	u32 fcpdl;
4618  
4619  	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4620  
4621  	/*
4622  	 * There are three possibilities here - use scatter-gather segment, use
4623  	 * the single mapping, or neither.  Start the lpfc command prep by
4624  	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4625  	 * data bde entry.
4626  	 */
4627  	if (scsi_sg_count(scsi_cmnd)) {
4628  		if (datadir == DMA_TO_DEVICE) {
4629  			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4630  			iocb_cmd->ulpPU = PARM_READ_CHECK;
4631  			if (vport->cfg_first_burst_size &&
4632  			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
4633  				u32 xrdy_len;
4634  
4635  				fcpdl = scsi_bufflen(scsi_cmnd);
4636  				xrdy_len = min(fcpdl,
4637  					       vport->cfg_first_burst_size);
4638  				piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4639  			}
4640  			fcp_cmnd->fcpCntl3 = WRITE_DATA;
4641  		} else {
4642  			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4643  			iocb_cmd->ulpPU = PARM_READ_CHECK;
4644  			fcp_cmnd->fcpCntl3 = READ_DATA;
4645  		}
4646  	} else {
4647  		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4648  		iocb_cmd->un.fcpi.fcpi_parm = 0;
4649  		iocb_cmd->ulpPU = 0;
4650  		fcp_cmnd->fcpCntl3 = 0;
4651  	}
4652  
4653  	/*
4654  	 * Finish initializing those IOCB fields that are independent
4655  	 * of the scsi_cmnd request_buffer
4656  	 */
4657  	piocbq->iocb.ulpContext = pnode->nlp_rpi;
4658  	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4659  		piocbq->iocb.ulpFCP2Rcvy = 1;
4660  	else
4661  		piocbq->iocb.ulpFCP2Rcvy = 0;
4662  
4663  	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4664  	piocbq->io_buf  = lpfc_cmd;
4665  	if (!piocbq->cmd_cmpl)
4666  		piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4667  	piocbq->iocb.ulpTimeout = tmo;
4668  	piocbq->vport = vport;
4669  	return 0;
4670  }
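/*
 * Note on first burst above: fcpi_XRdy pre-authorizes write data as if
 * an XFER_RDY for that many bytes had already arrived; e.g. (values
 * illustrative) a 1 MB write with cfg_first_burst_size of 65536 sets
 * fcpi_XRdy to 65536.
 */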
4671  
4672  /**
4673   * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4674   * @vport: Pointer to vport object.
4675   * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4676   * @tmo: timeout value for the IO
4677   *
4678   * Based on the data-direction of the command, copy the WQE template
4679   * into the I/O buffer's WQE. Fill in the WQE fields which are
4680   * independent of the scsi buffer.
4681   *
4682   * RETURNS 0 - SUCCESS,
4683   **/
4684  static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4685  				      struct lpfc_io_buf *lpfc_cmd,
4686  				      uint8_t tmo)
4687  {
4688  	struct lpfc_hba *phba = vport->phba;
4689  	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4690  	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4691  	struct lpfc_sli4_hdw_queue *hdwq = NULL;
4692  	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4693  	struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4694  	union lpfc_wqe128 *wqe = &pwqeq->wqe;
4695  	u16 idx = lpfc_cmd->hdwq_no;
4696  	int datadir = scsi_cmnd->sc_data_direction;
4697  
4698  	hdwq = &phba->sli4_hba.hdwq[idx];
4699  
4700  	/* Initialize 64 bytes only */
4701  	memset(wqe, 0, sizeof(union lpfc_wqe128));
4702  
4703  	/*
4704  	 * There are three possibilities here - use scatter-gather segment, use
4705  	 * the single mapping, or neither.
4706  	 */
4707  	if (scsi_sg_count(scsi_cmnd)) {
4708  		if (datadir == DMA_TO_DEVICE) {
4709  			/* From the iwrite template, initialize words 7 -  11 */
4710  			memcpy(&wqe->words[7],
4711  			       &lpfc_iwrite_cmd_template.words[7],
4712  			       sizeof(uint32_t) * 5);
4713  
4714  			fcp_cmnd->fcpCntl3 = WRITE_DATA;
4715  			if (hdwq)
4716  				hdwq->scsi_cstat.output_requests++;
4717  		} else {
4718  			/* From the iread template, initialize words 7 - 11 */
4719  			memcpy(&wqe->words[7],
4720  			       &lpfc_iread_cmd_template.words[7],
4721  			       sizeof(uint32_t) * 5);
4722  
4723  			/* Word 7 */
4724  			bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4725  
4726  			fcp_cmnd->fcpCntl3 = READ_DATA;
4727  			if (hdwq)
4728  				hdwq->scsi_cstat.input_requests++;
4729  
4730  			/* For a CMF Managed port, iod must be zero'ed */
4731  			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
4732  				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
4733  				       LPFC_WQE_IOD_NONE);
4734  		}
4735  
4736  		/* Additional FCP CDB length field calculation.
4737  		 * LPFC_FCP_CDB_LEN_32 minus the normal 16-byte CDB length
4738  		 * leaves 16 extra bytes; divide by 4 for the word count (4),
4739  		 * then shift left 2 past the RDDATA/WRDATA bits.
4740  		 */
4741  		if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN)
4742  			fcp_cmnd->fcpCntl3 |= 4 << 2;
4743  	} else {
4744  		/* From the icmnd template, initialize words 4 - 11 */
4745  		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4746  		       sizeof(uint32_t) * 8);
4747  
4748  		/* Word 7 */
4749  		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4750  
4751  		fcp_cmnd->fcpCntl3 = 0;
4752  		if (hdwq)
4753  			hdwq->scsi_cstat.control_requests++;
4754  	}
4755  
4756  	/*
4757  	 * Finish initializing those WQE fields that are independent
4758  	 * of the request_buffer
4759  	 */
4760  
4761  	 /* Word 3 */
4762  	bf_set(payload_offset_len, &wqe->fcp_icmd,
4763  	       sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4764  
4765  	/* Word 6 */
4766  	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4767  	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4768  	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4769  
4770  	/* Word 7 */
4771  	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4772  		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4773  
4774  	bf_set(wqe_class, &wqe->generic.wqe_com,
4775  	       (pnode->nlp_fcp_info & 0x0f));
4776  
4777  	 /* Word 8 */
4778  	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4779  
4780  	/* Word 9 */
4781  	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4782  
4783  	pwqeq->vport = vport;
4784  	pwqeq->io_buf = lpfc_cmd;
4785  	pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4786  	pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4787  
4788  	return 0;
4789  }
4790  
4791  /**
4792   * lpfc_scsi_prep_cmnd - Wrapper routine to convert a scsi cmnd to an FCP info unit
4793   * @vport: The virtual port for which this call is being executed.
4794   * @lpfc_cmd: The scsi command to be sent.
4795   * @pnode: Pointer to lpfc_nodelist.
4796   *
4797   * This routine initializes the fcp_cmnd and iocb data structures from the
4798   * scsi command, then calls the SLI-revision-specific buffer prep routine.
4799   **/
4800  static int
4801  lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4802  		    struct lpfc_nodelist *pnode)
4803  {
4804  	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4805  	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4806  	u8 *ptr;
4807  
4808  	if (!pnode)
4809  		return 0;
4810  
4811  	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4812  	/* clear task management bits */
4813  	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4814  
4815  	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4816  		       &lpfc_cmd->fcp_cmnd->fcp_lun);
4817  
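	/* Copy the CDB into the FCP_CMND payload and zero-pad any unused
	 * bytes of the fixed 16-byte CDB field.
	 */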
4818  	ptr = &((struct fcp_cmnd32 *)fcp_cmnd)->fcpCdb[0];
4819  	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4820  	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4821  		ptr += scsi_cmnd->cmd_len;
4822  		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4823  	}
4824  
4825  	fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4826  
4827  	lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4828  
4829  	return 0;
4830  }
4831  
4832  /**
4833   * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
4834   * @vport: The virtual port for which this call is being executed.
4835   * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4836   * @lun: Logical unit number.
4837   * @task_mgmt_cmd: SCSI task management command.
4838   *
4839   * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4840   * for device with SLI-3 interface spec.
4841   *
4842   * Return codes:
4843   *   0 - Error
4844   *   1 - Success
4845   **/
4846  static int
4847  lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
4848  				struct lpfc_io_buf *lpfc_cmd,
4849  				u64 lun, u8 task_mgmt_cmd)
4850  {
4851  	struct lpfc_iocbq *piocbq;
4852  	IOCB_t *piocb;
4853  	struct fcp_cmnd *fcp_cmnd;
4854  	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4855  	struct lpfc_nodelist *ndlp = rdata->pnode;
4856  
4857  	if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4858  		return 0;
4859  
4860  	piocbq = &(lpfc_cmd->cur_iocbq);
4861  	piocbq->vport = vport;
4862  
4863  	piocb = &piocbq->iocb;
4864  
4865  	fcp_cmnd = lpfc_cmd->fcp_cmnd;
4866  	/* Clear out any old data in the FCP command area */
4867  	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4868  	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4869  	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4870  	if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4871  		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4872  	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4873  	piocb->ulpContext = ndlp->nlp_rpi;
4874  	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4875  	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4876  	piocb->ulpPU = 0;
4877  	piocb->un.fcpi.fcpi_parm = 0;
4878  
4879  	/* ulpTimeout is only one byte */
4880  	if (lpfc_cmd->timeout > 0xff) {
4881  		/*
4882  		 * Do not timeout the command at the firmware level.
4883  		 * The driver will provide the timeout mechanism.
4884  		 */
4885  		piocb->ulpTimeout = 0;
4886  	} else
4887  		piocb->ulpTimeout = lpfc_cmd->timeout;
4888  
4889  	return 1;
4890  }
4891  
4892  /**
4893   * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
4894   * @vport: The virtual port for which this call is being executed.
4895   * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4896   * @lun: Logical unit number.
4897   * @task_mgmt_cmd: SCSI task management command.
4898   *
4899   * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4900   * for device with SLI-4 interface spec.
4901   *
4902   * Return codes:
4903   *   0 - Error
4904   *   1 - Success
4905   **/
4906  static int
4907  lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
4908  				struct lpfc_io_buf *lpfc_cmd,
4909  				u64 lun, u8 task_mgmt_cmd)
4910  {
4911  	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4912  	union lpfc_wqe128 *wqe = &pwqeq->wqe;
4913  	struct fcp_cmnd *fcp_cmnd;
4914  	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4915  	struct lpfc_nodelist *ndlp = rdata->pnode;
4916  
4917  	if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4918  		return 0;
4919  
4920  	pwqeq->vport = vport;
4921  	/* Zero the entire WQE before building it */
4922  	memset(wqe, 0, sizeof(union lpfc_wqe128));
4923  
4924  	/* From the icmnd template, initialize words 4 - 11 */
4925  	memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4926  	       sizeof(uint32_t) * 8);
4927  
4928  	fcp_cmnd = lpfc_cmd->fcp_cmnd;
4929  	/* Clear out any old data in the FCP command area */
4930  	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4931  	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4932  	fcp_cmnd->fcpCntl3 = 0;
4933  	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4934  
4935  	bf_set(payload_offset_len, &wqe->fcp_icmd,
4936  	       sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4937  	bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
4938  	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,  /* ulpContext */
4939  	       vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
4940  	bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
4941  	       ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
4942  	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
4943  	       (ndlp->nlp_fcp_info & 0x0f));
4944  
4945  	/* ulpTimeout is only one byte */
4946  	if (lpfc_cmd->timeout > 0xff) {
4947  		/*
4948  		 * Do not timeout the command at the firmware level.
4949  		 * The driver will provide the timeout mechanism.
4950  		 */
4951  		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
4952  	} else {
4953  		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
4954  	}
4955  
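	/* Embed the FCP_CMND payload in the WQE via lpfc_prep_embed_io(),
	 * then set the XRI, abort and request tags for this TMF.
	 */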
4956  	lpfc_prep_embed_io(vport->phba, lpfc_cmd);
4957  	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4958  	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4959  	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4960  
4961  	lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4962  
4963  	return 1;
4964  }
4965  
4966  /**
4967   * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4968   * @phba: The hba struct for which this call is being executed.
4969   * @dev_grp: The HBA PCI-Device group number.
4970   *
4971   * This routine sets up the SCSI interface API function jump table in @phba
4972   * struct.
4973   * Returns: 0 - success, -ENODEV - failure.
4974   **/
4975  int
4976  lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4977  {
4978  
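	/* The unprep routine is common to all SLI revisions; the remaining
	 * entry points are selected by the PCI device group below.
	 */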
4979  	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4980  
4981  	switch (dev_grp) {
4982  	case LPFC_PCI_DEV_LP:
4983  		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4984  		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4985  		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4986  		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4987  		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
4988  		phba->lpfc_scsi_prep_task_mgmt_cmd =
4989  					lpfc_scsi_prep_task_mgmt_cmd_s3;
4990  		break;
4991  	case LPFC_PCI_DEV_OC:
4992  		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4993  		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4994  		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4995  		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4996  		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
4997  		phba->lpfc_scsi_prep_task_mgmt_cmd =
4998  					lpfc_scsi_prep_task_mgmt_cmd_s4;
4999  		break;
5000  	default:
5001  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5002  				"1418 Invalid HBA PCI-device group: 0x%x\n",
5003  				dev_grp);
5004  		return -ENODEV;
5005  	}
5006  	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
5007  	return 0;
5008  }
5009  
5010  /**
5011   * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
5012   * @phba: The Hba for which this call is being executed.
5013   * @cmdiocbq: Pointer to lpfc_iocbq data structure.
5014   * @rspiocbq: Pointer to lpfc_iocbq data structure.
5015   *
5016   * This routine is the IOCB completion routine for the device reset and
5017   * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
5018   **/
5019  static void
5020  lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5021  			struct lpfc_iocbq *cmdiocbq,
5022  			struct lpfc_iocbq *rspiocbq)
5023  {
5024  	struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
5025  	if (lpfc_cmd)
5026  		lpfc_release_scsi_buf(phba, lpfc_cmd);
5027  	return;
5028  }
5029  
5030  /**
5031   * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
5032   *                             if issuing a pci_bus_reset is possibly unsafe
5033   * @phba: lpfc_hba pointer.
5034   *
5035   * Description:
5036   * Walks the bus_list to ensure it contains only PCI devices with the
5037   * Emulex vendor ID, device IDs that support hot reset, and at most one
5038   * occurrence of function 0.
5039   *
5040   * Returns:
5041   * -EBADSLT,  detected invalid device
5042   *      0,    successful
5043   */
5044  int
5045  lpfc_check_pci_resettable(struct lpfc_hba *phba)
5046  {
5047  	const struct pci_dev *pdev = phba->pcidev;
5048  	struct pci_dev *ptr = NULL;
5049  	u8 counter = 0;
5050  
5051  	/* Walk the list of devices on the pci_dev's bus */
5052  	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5053  		/* Check for Emulex Vendor ID */
5054  		if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5055  			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5056  					"8346 Non-Emulex vendor found: "
5057  					"0x%04x\n", ptr->vendor);
5058  			return -EBADSLT;
5059  		}
5060  
5061  		/* Check for valid Emulex Device ID */
5062  		if (phba->sli_rev != LPFC_SLI_REV4 ||
5063  		    test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
5064  			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5065  					"8347 Incapable PCI reset device: "
5066  					"0x%04x\n", ptr->device);
5067  			return -EBADSLT;
5068  		}
5069  
5070  		/* Check for only one function 0 ID to ensure only one HBA on
5071  		 * secondary bus
5072  		 */
5073  		if (ptr->devfn == 0) {
5074  			if (++counter > 1) {
5075  				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5076  						"8348 More than one device on "
5077  						"secondary bus found\n");
5078  				return -EBADSLT;
5079  			}
5080  		}
5081  	}
5082  
5083  	return 0;
5084  }
5085  
5086  /**
5087   * lpfc_info - Info entry point of scsi_host_template data structure
5088   * @host: The scsi host for which this call is being executed.
5089   *
5090   * This routine provides module information about hba.
5091   *
5092   * Return code:
5093   *   Pointer to char - Success.
5094   **/
5095  const char *
5096  lpfc_info(struct Scsi_Host *host)
5097  {
5098  	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5099  	struct lpfc_hba   *phba = vport->phba;
5100  	int link_speed = 0;
5101  	static char lpfcinfobuf[384];
5102  	char tmp[384] = {0};
5103  
5104  	memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5105  	if (phba && phba->pcidev) {
5106  		/* Model Description */
5107  		scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
5108  		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5109  		    sizeof(lpfcinfobuf))
5110  			goto buffer_done;
5111  
5112  		/* PCI Info */
5113  		scnprintf(tmp, sizeof(tmp),
5114  			  " on PCI bus %02x device %02x irq %d",
5115  			  phba->pcidev->bus->number, phba->pcidev->devfn,
5116  			  phba->pcidev->irq);
5117  		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5118  		    sizeof(lpfcinfobuf))
5119  			goto buffer_done;
5120  
5121  		/* Port Number */
5122  		if (phba->Port[0]) {
5123  			scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5124  			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5125  			    sizeof(lpfcinfobuf))
5126  				goto buffer_done;
5127  		}
5128  
5129  		/* Link Speed */
5130  		link_speed = lpfc_sli_port_speed_get(phba);
5131  		if (link_speed != 0) {
5132  			scnprintf(tmp, sizeof(tmp),
5133  				  " Logical Link Speed: %d Mbps", link_speed);
5134  			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5135  			    sizeof(lpfcinfobuf))
5136  				goto buffer_done;
5137  		}
5138  
5139  		/* PCI resettable */
5140  		if (!lpfc_check_pci_resettable(phba)) {
5141  			scnprintf(tmp, sizeof(tmp), " PCI resettable");
5142  			strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5143  		}
5144  	}
5145  
5146  buffer_done:
5147  	return lpfcinfobuf;
5148  }
5149  
5150  /**
5151   * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
5152   * @phba: The Hba for which this call is being executed.
5153   *
5154   * This routine rearms the fcp_poll_timer field of @phba using cfg_poll_tmo.
5155   * The default value of cfg_poll_tmo is 10 milliseconds.
5156   **/
5157  static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
5158  {
5159  	unsigned long  poll_tmo_expires =
5160  		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5161  
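	/* Only rearm while commands remain on the FCP ring's txcmplq;
	 * otherwise let the poll timer lapse.
	 */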
5162  	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5163  		mod_timer(&phba->fcp_poll_timer,
5164  			  poll_tmo_expires);
5165  }
5166  
5167  /**
5168   * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
5169   * @phba: The Hba for which this call is being executed.
5170   *
5171   * This routine starts the fcp_poll_timer of @phba.
5172   **/
5173  void lpfc_poll_start_timer(struct lpfc_hba *phba)
5174  {
5175  	lpfc_poll_rearm_timer(phba);
5176  }
5177  
5178  /**
5179   * lpfc_poll_timeout - Restart polling timer
5180   * @t: Timer construct where lpfc_hba data structure pointer is obtained.
5181   *
5182   * This routine restarts the fcp_poll timer when FCP ring polling is
5183   * enabled and the FCP ring interrupt is disabled.
5184   **/
5185  void lpfc_poll_timeout(struct timer_list *t)
5186  {
5187  	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5188  
5189  	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5190  		lpfc_sli_handle_fast_ring_event(phba,
5191  			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5192  
5193  		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5194  			lpfc_poll_rearm_timer(phba);
5195  	}
5196  }
5197  
5198  /*
5199   * lpfc_is_command_vm_io - get the UUID from blk cgroup
5200   * @cmd: Pointer to scsi_cmnd data structure
5201   * Returns UUID if present, otherwise NULL
5202   */
5203  static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5204  {
5205  	struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
5206  
5207  	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
5208  		return NULL;
5209  	return blkcg_get_fc_appid(bio);
5210  }
5211  
5212  /**
5213   * lpfc_queuecommand - scsi_host_template queuecommand entry point
5214   * @shost: kernel scsi host pointer.
5215   * @cmnd: Pointer to scsi_cmnd data structure.
5216   *
5217   * The driver registers this routine with the scsi midlayer to submit a
5218   * @cmnd for processing. It prepares an IOCB from the scsi command and
5219   * provides it to the firmware; scsi_done() is invoked once processing completes.
5220   *
5221   * Return value :
5222   *   0 - Success
5223   *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5224   **/
5225  static int
5226  lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5227  {
5228  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5229  	struct lpfc_hba   *phba = vport->phba;
5230  	struct lpfc_iocbq *cur_iocbq = NULL;
5231  	struct lpfc_rport_data *rdata;
5232  	struct lpfc_nodelist *ndlp;
5233  	struct lpfc_io_buf *lpfc_cmd;
5234  	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5235  	int err, idx;
5236  	u8 *uuid = NULL;
5237  	uint64_t start;
5238  
5239  	start = ktime_get_ns();
5240  	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5241  
5242  	/* sanity check on references */
5243  	if (unlikely(!rdata) || unlikely(!rport))
5244  		goto out_fail_command;
5245  
5246  	err = fc_remote_port_chkready(rport);
5247  	if (err) {
5248  		cmnd->result = err;
5249  		goto out_fail_command;
5250  	}
5251  	ndlp = rdata->pnode;
5252  
5253  	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5254  		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5255  
5256  		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5257  				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5258  				" op:%02x str=%s without registering for"
5259  				" BlockGuard - Rejecting command\n",
5260  				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5261  				dif_op_str[scsi_get_prot_op(cmnd)]);
5262  		goto out_fail_command;
5263  	}
5264  
5265  	/*
5266  	 * Catch race where our node has transitioned, but the
5267  	 * transport is still transitioning.
5268  	 */
5269  	if (!ndlp)
5270  		goto out_tgt_busy1;
5271  
5272  	/* Check if IO qualifies for CMF */
5273  	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
5274  	    cmnd->sc_data_direction == DMA_FROM_DEVICE &&
5275  	    (scsi_sg_count(cmnd))) {
5276  		/* Latency start time saved in rx_cmd_start later in routine */
5277  		err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5278  		if (err)
5279  			goto out_tgt_busy1;
5280  	}
5281  
5282  	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5283  		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5284  			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5285  					 "3377 Target Queue Full, scsi Id:%d "
5286  					 "Qdepth:%d Pending command:%d"
5287  					 " WWNN:%02x:%02x:%02x:%02x:"
5288  					 "%02x:%02x:%02x:%02x, "
5289  					 " WWPN:%02x:%02x:%02x:%02x:"
5290  					 "%02x:%02x:%02x:%02x",
5291  					 ndlp->nlp_sid, ndlp->cmd_qdepth,
5292  					 atomic_read(&ndlp->cmd_pending),
5293  					 ndlp->nlp_nodename.u.wwn[0],
5294  					 ndlp->nlp_nodename.u.wwn[1],
5295  					 ndlp->nlp_nodename.u.wwn[2],
5296  					 ndlp->nlp_nodename.u.wwn[3],
5297  					 ndlp->nlp_nodename.u.wwn[4],
5298  					 ndlp->nlp_nodename.u.wwn[5],
5299  					 ndlp->nlp_nodename.u.wwn[6],
5300  					 ndlp->nlp_nodename.u.wwn[7],
5301  					 ndlp->nlp_portname.u.wwn[0],
5302  					 ndlp->nlp_portname.u.wwn[1],
5303  					 ndlp->nlp_portname.u.wwn[2],
5304  					 ndlp->nlp_portname.u.wwn[3],
5305  					 ndlp->nlp_portname.u.wwn[4],
5306  					 ndlp->nlp_portname.u.wwn[5],
5307  					 ndlp->nlp_portname.u.wwn[6],
5308  					 ndlp->nlp_portname.u.wwn[7]);
5309  			goto out_tgt_busy2;
5310  		}
5311  	}
5312  
5313  	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5314  	if (lpfc_cmd == NULL) {
5315  		lpfc_rampdown_queue_depth(phba);
5316  
5317  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5318  				 "0707 driver's buffer pool is empty, "
5319  				 "IO busied\n");
5320  		goto out_host_busy;
5321  	}
5322  	lpfc_cmd->rx_cmd_start = start;
5323  
5324  	cur_iocbq = &lpfc_cmd->cur_iocbq;
5325  	/*
5326  	 * Store the midlayer's command structure for the completion phase
5327  	 * and complete the command initialization.
5328  	 */
5329  	lpfc_cmd->pCmd  = cmnd;
5330  	lpfc_cmd->rdata = rdata;
5331  	lpfc_cmd->ndlp = ndlp;
5332  	cur_iocbq->cmd_cmpl = NULL;
5333  	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
5334  
5335  	err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5336  	if (err)
5337  		goto out_host_busy_release_buf;
5338  
5339  	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5340  		if (vport->phba->cfg_enable_bg) {
5341  			lpfc_printf_vlog(vport,
5342  					 KERN_INFO, LOG_SCSI_CMD,
5343  					 "9033 BLKGRD: rcvd %s cmd:x%x "
5344  					 "reftag x%x cnt %u pt %x\n",
5345  					 dif_op_str[scsi_get_prot_op(cmnd)],
5346  					 cmnd->cmnd[0],
5347  					 scsi_prot_ref_tag(cmnd),
5348  					 scsi_logical_block_count(cmnd),
5349  					 scsi_get_prot_type(cmnd));
5350  		}
5351  		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5352  	} else {
5353  		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5354  	}
5355  
5356  	if (unlikely(err)) {
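		/* err 2 means the command itself could not be built (e.g. an
		 * unsupported protection setup); complete it with DID_ERROR
		 * rather than returning host-busy.
		 */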
5357  		if (err == 2) {
5358  			cmnd->result = DID_ERROR << 16;
5359  			goto out_fail_command_release_buf;
5360  		}
5361  		goto out_host_busy_free_buf;
5362  	}
5363  
5364  	/* check the necessary and sufficient condition to support VMID */
5365  	if (lpfc_is_vmid_enabled(phba) &&
5366  	    (ndlp->vmid_support ||
5367  	     phba->pport->vmid_priority_tagging ==
5368  	     LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
5369  		/* is the I/O generated by a VM, get the associated virtual */
5370  		/* entity id */
5371  		uuid = lpfc_is_command_vm_io(cmnd);
5372  
5373  		if (uuid) {
5374  			err = lpfc_vmid_get_appid(vport, uuid,
5375  					cmnd->sc_data_direction,
5376  					(union lpfc_vmid_io_tag *)
5377  						&cur_iocbq->vmid_tag);
5378  			if (!err)
5379  				cur_iocbq->cmd_flag |= LPFC_IO_VMID;
5380  		}
5381  	}
5382  
5383  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5384  	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5385  		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5386  #endif
5387  	/* Issue I/O to adapter */
5388  	err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
5389  				    SLI_IOCB_RET_IOCB);
5390  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5391  	if (start) {
5392  		lpfc_cmd->ts_cmd_start = start;
5393  		lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5394  		lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5395  	} else {
5396  		lpfc_cmd->ts_cmd_start = 0;
5397  	}
5398  #endif
5399  	if (err) {
5400  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5401  				 "3376 FCP could not issue iocb err %x "
5402  				 "FCP cmd x%x <%d/%llu> "
5403  				 "sid: x%x did: x%x oxid: x%x "
5404  				 "Data: x%x x%x x%x x%x\n",
5405  				 err, cmnd->cmnd[0],
5406  				 cmnd->device ? cmnd->device->id : 0xffff,
5407  				 cmnd->device ? cmnd->device->lun : (u64)-1,
5408  				 vport->fc_myDID, ndlp->nlp_DID,
5409  				 phba->sli_rev == LPFC_SLI_REV4 ?
5410  				 cur_iocbq->sli4_xritag : 0xffff,
5411  				 phba->sli_rev == LPFC_SLI_REV4 ?
5412  				 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5413  				 cur_iocbq->iocb.ulpContext,
5414  				 cur_iocbq->iotag,
5415  				 phba->sli_rev == LPFC_SLI_REV4 ?
5416  				 bf_get(wqe_tmo,
5417  					&cur_iocbq->wqe.generic.wqe_com) :
5418  				 cur_iocbq->iocb.ulpTimeout,
5419  				 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
5420  
5421  		goto out_host_busy_free_buf;
5422  	}
5423  
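	/* In SLI-3 polled mode, service any fast-ring completions now and
	 * rearm the poll timer if the FCP ring interrupt is disabled.
	 */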
5424  	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5425  		lpfc_sli_handle_fast_ring_event(phba,
5426  			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5427  
5428  		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5429  			lpfc_poll_rearm_timer(phba);
5430  	}
5431  
5432  	if (phba->cfg_xri_rebalancing)
5433  		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5434  
5435  	return 0;
5436  
5437   out_host_busy_free_buf:
5438  	idx = lpfc_cmd->hdwq_no;
5439  	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5440  	if (phba->sli4_hba.hdwq) {
5441  		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5442  		case WRITE_DATA:
5443  			phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5444  			break;
5445  		case READ_DATA:
5446  			phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5447  			break;
5448  		default:
5449  			phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5450  		}
5451  	}
5452   out_host_busy_release_buf:
5453  	lpfc_release_scsi_buf(phba, lpfc_cmd);
5454   out_host_busy:
5455  	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5456  			     shost);
5457  	return SCSI_MLQUEUE_HOST_BUSY;
5458  
5459   out_tgt_busy2:
5460  	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5461  			     shost);
5462   out_tgt_busy1:
5463  	return SCSI_MLQUEUE_TARGET_BUSY;
5464  
5465   out_fail_command_release_buf:
5466  	lpfc_release_scsi_buf(phba, lpfc_cmd);
5467  	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5468  			     shost);
5469  
5470   out_fail_command:
5471  	scsi_done(cmnd);
5472  	return 0;
5473  }
5474  
5475  /*
5476   * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
5477   * @vport: The virtual port for which this call is being executed.
5478   */
5479  void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5480  {
5481  	u32 bucket;
5482  	struct lpfc_vmid *cur;
5483  
5484  	if (vport->port_type == LPFC_PHYSICAL_PORT)
5485  		del_timer_sync(&vport->phba->inactive_vmid_poll);
5486  
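	/* Free the QFPA response, the priority tag ranges and the VMID
	 * table, then drop any entries still linked in the hash table.
	 */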
5487  	kfree(vport->qfpa_res);
5488  	kfree(vport->vmid_priority.vmid_range);
5489  	kfree(vport->vmid);
5490  
5491  	if (!hash_empty(vport->hash_table))
5492  		hash_for_each(vport->hash_table, bucket, cur, hnode)
5493  			hash_del(&cur->hnode);
5494  
5495  	vport->qfpa_res = NULL;
5496  	vport->vmid_priority.vmid_range = NULL;
5497  	vport->vmid = NULL;
5498  	vport->cur_vmid_cnt = 0;
5499  }
5500  
5501  /**
5502   * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
5503   * @cmnd: Pointer to scsi_cmnd data structure.
5504   *
5505   * This routine aborts @cmnd pending in base driver.
5506   *
5507   * Return code :
5508   *   0x2003 - Error
5509   *   0x2002 - Success
5510   **/
5511  static int
5512  lpfc_abort_handler(struct scsi_cmnd *cmnd)
5513  {
5514  	struct Scsi_Host  *shost = cmnd->device->host;
5515  	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5516  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5517  	struct lpfc_hba   *phba = vport->phba;
5518  	struct lpfc_iocbq *iocb;
5519  	struct lpfc_io_buf *lpfc_cmd;
5520  	int ret = SUCCESS, status = 0;
5521  	struct lpfc_sli_ring *pring_s4 = NULL;
5522  	struct lpfc_sli_ring *pring = NULL;
5523  	int ret_val;
5524  	unsigned long flags;
5525  	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5526  
5527  	status = fc_block_rport(rport);
5528  	if (status != 0 && status != SUCCESS)
5529  		return status;
5530  
5531  	lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5532  	if (!lpfc_cmd)
5533  		return ret;
5534  
5535  	/* Guard against IO completion being called at same time */
5536  	spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
5537  
5538  	spin_lock(&phba->hbalock);
5539  	/* driver-queued commands are in the process of being flushed */
5540  	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
5541  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5542  			"3168 SCSI Layer abort requested I/O has been "
5543  			"flushed by LLD.\n");
5544  		ret = FAILED;
5545  		goto out_unlock_hba;
5546  	}
5547  
5548  	if (!lpfc_cmd->pCmd) {
5549  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5550  			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5551  			 "x%x ID %d LUN %llu\n",
5552  			 SUCCESS, cmnd->device->id, cmnd->device->lun);
5553  		goto out_unlock_hba;
5554  	}
5555  
5556  	iocb = &lpfc_cmd->cur_iocbq;
5557  	if (phba->sli_rev == LPFC_SLI_REV4) {
5558  		/* if the io_wq & pring are gone, the port was reset. */
5559  		if (!phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq ||
5560  		    !phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring) {
5561  			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5562  					 "2877 SCSI Layer I/O Abort Request "
5563  					 "IO CMPL Status x%x ID %d LUN %llu "
5564  					 "HBA_SETUP %d\n", FAILED,
5565  					 cmnd->device->id,
5566  					 (u64)cmnd->device->lun,
5567  					 test_bit(HBA_SETUP, &phba->hba_flag));
5568  			ret = FAILED;
5569  			goto out_unlock_hba;
5570  		}
5571  		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5572  		spin_lock(&pring_s4->ring_lock);
5573  	}
5574  	/* the command is in the process of being cancelled */
5575  	if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
5576  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5577  			"3169 SCSI Layer abort requested I/O has been "
5578  			"cancelled by LLD.\n");
5579  		ret = FAILED;
5580  		goto out_unlock_ring;
5581  	}
5582  	/*
5583  	 * If pCmd field of the corresponding lpfc_io_buf structure
5584  	 * points to a different SCSI command, then the driver has
5585  	 * already completed this command, but the midlayer did not
5586  	 * see the completion before the eh fired. Just return SUCCESS.
5587  	 */
5588  	if (lpfc_cmd->pCmd != cmnd) {
5589  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5590  			"3170 SCSI Layer abort requested I/O has been "
5591  			"completed by LLD.\n");
5592  		goto out_unlock_ring;
5593  	}
5594  
5595  	WARN_ON(iocb->io_buf != lpfc_cmd);
5596  
5597  	/* abort issued in recovery is still in progress */
5598  	if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
5599  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5600  			 "3389 SCSI Layer I/O Abort Request is pending\n");
5601  		if (phba->sli_rev == LPFC_SLI_REV4)
5602  			spin_unlock(&pring_s4->ring_lock);
5603  		spin_unlock(&phba->hbalock);
5604  		spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5605  		goto wait_for_cmpl;
5606  	}
5607  
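	/* Publish the wait queue so the abort completion path can wake us
	 * once the I/O has been cleaned up.
	 */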
5608  	lpfc_cmd->waitq = &waitq;
5609  	if (phba->sli_rev == LPFC_SLI_REV4) {
5610  		spin_unlock(&pring_s4->ring_lock);
5611  		ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5612  						      lpfc_sli_abort_fcp_cmpl);
5613  	} else {
5614  		pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5615  		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5616  						     lpfc_sli_abort_fcp_cmpl);
5617  	}
5618  
5619  	/* Make sure HBA is alive */
5620  	lpfc_issue_hb_tmo(phba);
5621  
5622  	if (ret_val != IOCB_SUCCESS) {
5623  		/* Indicate the IO is not being aborted by the driver. */
5624  		lpfc_cmd->waitq = NULL;
5625  		ret = FAILED;
5626  		goto out_unlock_hba;
5627  	}
5628  
5629  	/* no longer need the lock after this point */
5630  	spin_unlock(&phba->hbalock);
5631  	spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5632  
5633  	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5634  		lpfc_sli_handle_fast_ring_event(phba,
5635  			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5636  
5637  wait_for_cmpl:
5638  	/*
5639  	 * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
5640  	 * for abort to complete.
5641  	 */
5642  	wait_event_timeout(waitq,
5643  			  (lpfc_cmd->pCmd != cmnd),
5644  			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
5645  
5646  	spin_lock(&lpfc_cmd->buf_lock);
5647  
5648  	if (lpfc_cmd->pCmd == cmnd) {
5649  		ret = FAILED;
5650  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5651  				 "0748 abort handler timed out waiting "
5652  				 "for aborting I/O (xri:x%x) to complete: "
5653  				 "ret %#x, ID %d, LUN %llu\n",
5654  				 iocb->sli4_xritag, ret,
5655  				 cmnd->device->id, cmnd->device->lun);
5656  	}
5657  
5658  	lpfc_cmd->waitq = NULL;
5659  
5660  	spin_unlock(&lpfc_cmd->buf_lock);
5661  	goto out;
5662  
5663  out_unlock_ring:
5664  	if (phba->sli_rev == LPFC_SLI_REV4)
5665  		spin_unlock(&pring_s4->ring_lock);
5666  out_unlock_hba:
5667  	spin_unlock(&phba->hbalock);
5668  	spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5669  out:
5670  	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5671  			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5672  			 "LUN %llu\n", ret, cmnd->device->id,
5673  			 cmnd->device->lun);
5674  	return ret;
5675  }
5676  
5677  static char *
5678  lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
5679  {
5680  	switch (task_mgmt_cmd) {
5681  	case FCP_ABORT_TASK_SET:
5682  		return "ABORT_TASK_SET";
5683  	case FCP_CLEAR_TASK_SET:
5684  		return "FCP_CLEAR_TASK_SET";
5685  	case FCP_BUS_RESET:
5686  		return "FCP_BUS_RESET";
5687  	case FCP_LUN_RESET:
5688  		return "FCP_LUN_RESET";
5689  	case FCP_TARGET_RESET:
5690  		return "FCP_TARGET_RESET";
5691  	case FCP_CLEAR_ACA:
5692  		return "FCP_CLEAR_ACA";
5693  	case FCP_TERMINATE_TASK:
5694  		return "FCP_TERMINATE_TASK";
5695  	default:
5696  		return "unknown";
5697  	}
5698  }
5699  
5700  
5701  /**
5702   * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
5703   * @vport: The virtual port for which this call is being executed.
5704   * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
5705   *
5706   * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded.
5707   *
5708   * Return code :
5709   *   0x2003 - Error
5710   *   0x2002 - Success
5711   **/
5712  static int
5713  lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5714  {
5715  	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5716  	uint32_t rsp_info;
5717  	uint32_t rsp_len;
5718  	uint8_t  rsp_info_code;
5719  	int ret = FAILED;
5720  
5721  
5722  	if (fcprsp == NULL)
5723  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5724  				 "0703 fcp_rsp is missing\n");
5725  	else {
5726  		rsp_info = fcprsp->rspStatus2;
5727  		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5728  		rsp_info_code = fcprsp->rspInfo3;
5729  
5730  
5731  		lpfc_printf_vlog(vport, KERN_INFO,
5732  				 LOG_FCP,
5733  				 "0706 fcp_rsp valid 0x%x,"
5734  				 " rsp len=%d code 0x%x\n",
5735  				 rsp_info,
5736  				 rsp_len, rsp_info_code);
5737  
5738  		/* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
5739  		 * field specifies the number of valid bytes of FCP_RSP_INFO.
5740  		 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
5741  		 */
5742  		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5743  		    ((rsp_len == 8) || (rsp_len == 4))) {
5744  			switch (rsp_info_code) {
5745  			case RSP_NO_FAILURE:
5746  				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5747  						 "0715 Task Mgmt No Failure\n");
5748  				ret = SUCCESS;
5749  				break;
5750  			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5751  				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5752  						 "0716 Task Mgmt Target "
5753  						"reject\n");
5754  				break;
5755  			case RSP_TM_NOT_COMPLETED: /* TM failed */
5756  				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5757  						 "0717 Task Mgmt Target "
5758  						"failed TM\n");
5759  				break;
5760  			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5761  				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5762  						 "0718 Task Mgmt to invalid "
5763  						"LUN\n");
5764  				break;
5765  			}
5766  		}
5767  	}
5768  	return ret;
5769  }
5770  
5771  
5772  /**
5773   * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5774   * @vport: The virtual port for which this call is being executed.
5775   * @rport: Pointer to remote port
5776   * @tgt_id: Target ID of remote device.
5777   * @lun_id: Lun number for the TMF
5778   * @task_mgmt_cmd: type of TMF to send
5779   *
5780   * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5781   * a remote port.
5782   *
5783   * Return Code:
5784   *   0x2003 - Error
5785   *   0x2002 - Success.
5786   **/
5787  static int
5788  lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
5789  		   unsigned int tgt_id, uint64_t lun_id,
5790  		   uint8_t task_mgmt_cmd)
5791  {
5792  	struct lpfc_hba   *phba = vport->phba;
5793  	struct lpfc_io_buf *lpfc_cmd;
5794  	struct lpfc_iocbq *iocbq;
5795  	struct lpfc_iocbq *iocbqrsp;
5796  	struct lpfc_rport_data *rdata;
5797  	struct lpfc_nodelist *pnode;
5798  	int ret;
5799  	int status;
5800  
5801  	rdata = rport->dd_data;
5802  	if (!rdata || !rdata->pnode)
5803  		return FAILED;
5804  	pnode = rdata->pnode;
5805  
5806  	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
5807  	if (lpfc_cmd == NULL)
5808  		return FAILED;
5809  	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5810  	lpfc_cmd->rdata = rdata;
5811  	lpfc_cmd->pCmd = NULL;
5812  	lpfc_cmd->ndlp = pnode;
5813  
5814  	status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5815  						    task_mgmt_cmd);
5816  	if (!status) {
5817  		lpfc_release_scsi_buf(phba, lpfc_cmd);
5818  		return FAILED;
5819  	}
5820  
5821  	iocbq = &lpfc_cmd->cur_iocbq;
5822  	iocbqrsp = lpfc_sli_get_iocbq(phba);
5823  	if (iocbqrsp == NULL) {
5824  		lpfc_release_scsi_buf(phba, lpfc_cmd);
5825  		return FAILED;
5826  	}
5827  	iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
5828  	iocbq->vport = vport;
5829  
5830  	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5831  			 "0702 Issue %s to TGT %d LUN %llu "
5832  			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5833  			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5834  			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5835  			 iocbq->cmd_flag);
5836  
5837  	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5838  					  iocbq, iocbqrsp, lpfc_cmd->timeout);
5839  	if ((status != IOCB_SUCCESS) ||
5840  	    (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
5841  		if (status != IOCB_SUCCESS ||
5842  		    get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
5843  			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5844  					 "0727 TMF %s to TGT %d LUN %llu "
5845  					 "failed (%d, %d) cmd_flag x%x\n",
5846  					 lpfc_taskmgmt_name(task_mgmt_cmd),
5847  					 tgt_id, lun_id,
5848  					 get_job_ulpstatus(phba, iocbqrsp),
5849  					 get_job_word4(phba, iocbqrsp),
5850  					 iocbq->cmd_flag);
5851  		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
5852  		if (status == IOCB_SUCCESS) {
5853  			if (get_job_ulpstatus(phba, iocbqrsp) ==
5854  			    IOSTAT_FCP_RSP_ERROR)
5855  				/* Something in the FCP_RSP was invalid.
5856  				 * Check conditions */
5857  				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5858  			else
5859  				ret = FAILED;
5860  		} else if ((status == IOCB_TIMEDOUT) ||
5861  			   (status == IOCB_ABORTED)) {
5862  			ret = TIMEOUT_ERROR;
5863  		} else {
5864  			ret = FAILED;
5865  		}
5866  	} else
5867  		ret = SUCCESS;
5868  
5869  	lpfc_sli_release_iocbq(phba, iocbqrsp);
5870  
5871  	if (status != IOCB_TIMEDOUT)
5872  		lpfc_release_scsi_buf(phba, lpfc_cmd);
5873  
5874  	return ret;
5875  }
5876  
5877  /**
5878   * lpfc_chk_tgt_mapped - Wait for a scsi target (rport) to become mapped
5879   * @vport: The virtual port to check on
5880   * @rport: Pointer to fc_rport data structure.
5881   *
5882   * This routine delays until the scsi target (aka rport) for the
5883   * command exists (is present and logged in) or we declare it non-existent.
5884   *
5885   * Return code :
5886   *  0x2003 - Error
5887   *  0x2002 - Success
5888   **/
5889  static int
5890  lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
5891  {
5892  	struct lpfc_rport_data *rdata;
5893  	struct lpfc_nodelist *pnode = NULL;
5894  	unsigned long later;
5895  
5896  	rdata = rport->dd_data;
5897  	if (!rdata) {
5898  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5899  			"0797 Tgt Map rport failure: rdata x%px\n", rdata);
5900  		return FAILED;
5901  	}
5902  	pnode = rdata->pnode;
5903  
5904  	/*
5905  	 * If target is not in a MAPPED state, delay until
5906  	 * target is rediscovered or devloss timeout expires.
5907  	 */
5908  	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5909  	while (time_after(later, jiffies)) {
5910  		if (!pnode)
5911  			return FAILED;
5912  		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5913  			return SUCCESS;
5914  		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5915  		rdata = rport->dd_data;
5916  		if (!rdata)
5917  			return FAILED;
5918  		pnode = rdata->pnode;
5919  	}
5920  	if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5921  		return FAILED;
5922  	return SUCCESS;
5923  }
5924  
5925  /**
5926   * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset TMF
5927   * @vport: The virtual port (scsi_host) for the flush context
5928   * @tgt_id: If aborting by Target context - specifies the target id
5929   * @lun_id: If aborting by Lun context - specifies the lun id
5930   * @context: specifies the context level to flush at.
5931   *
5932   * After a reset condition via TMF, we need to flush orphaned i/o
5933   * contexts from the adapter. This routine aborts any contexts
5934   * outstanding, then waits for their completions. The wait is
5935   * bounded by devloss_tmo.
5936   *
5937   * Return code :
5938   *  0x2003 - Error
5939   *  0x2002 - Success
5940   **/
5941  static int
5942  lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5943  			uint64_t lun_id, lpfc_ctx_cmd context)
5944  {
5945  	struct lpfc_hba   *phba = vport->phba;
5946  	unsigned long later;
5947  	int cnt;
5948  
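	/* Count the I/Os still outstanding for this context; if any exist,
	 * abort them, then poll until they drain or the 2 x devloss_tmo
	 * window expires.
	 */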
5949  	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5950  	if (cnt)
5951  		lpfc_sli_abort_taskmgmt(vport,
5952  					&phba->sli.sli3_ring[LPFC_FCP_RING],
5953  					tgt_id, lun_id, context);
5954  	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5955  	while (time_after(later, jiffies) && cnt) {
5956  		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5957  		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5958  	}
5959  	if (cnt) {
5960  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5961  			"0724 I/O flush failure for context %s : cnt x%x\n",
5962  			((context == LPFC_CTX_LUN) ? "LUN" :
5963  			 ((context == LPFC_CTX_TGT) ? "TGT" :
5964  			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5965  			cnt);
5966  		return FAILED;
5967  	}
5968  	return SUCCESS;
5969  }
5970  
5971  /**
5972   * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5973   * @cmnd: Pointer to scsi_cmnd data structure.
5974   *
5975   * This routine does a device reset by sending a LUN_RESET task management
5976   * command.
5977   *
5978   * Return code :
5979   *  0x2003 - Error
5980   *  0x2002 - Success
5981   **/
5982  static int
5983  lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5984  {
5985  	struct Scsi_Host  *shost = cmnd->device->host;
5986  	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5987  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5988  	struct lpfc_rport_data *rdata;
5989  	struct lpfc_nodelist *pnode;
5990  	unsigned tgt_id = cmnd->device->id;
5991  	uint64_t lun_id = cmnd->device->lun;
5992  	struct lpfc_scsi_event_header scsi_event;
5993  	int status;
5994  	u32 logit = LOG_FCP;
5995  
5996  	if (!rport)
5997  		return FAILED;
5998  
5999  	rdata = rport->dd_data;
6000  	if (!rdata || !rdata->pnode) {
6001  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6002  				 "0798 Device Reset rdata failure: rdata x%px\n",
6003  				 rdata);
6004  		return FAILED;
6005  	}
6006  	pnode = rdata->pnode;
6007  	status = fc_block_rport(rport);
6008  	if (status != 0 && status != SUCCESS)
6009  		return status;
6010  
6011  	status = lpfc_chk_tgt_mapped(vport, rport);
6012  	if (status == FAILED) {
6013  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6014  			"0721 Device Reset rport failure: rdata x%px\n", rdata);
6015  		return FAILED;
6016  	}
6017  
6018  	scsi_event.event_type = FC_REG_SCSI_EVENT;
6019  	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
6020  	scsi_event.lun = lun_id;
6021  	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6022  	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6023  
6024  	fc_host_post_vendor_event(shost, fc_get_event_number(),
6025  		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6026  
6027  	status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
6028  						FCP_LUN_RESET);
6029  	if (status != SUCCESS)
6030  		logit =  LOG_TRACE_EVENT;
6031  
6032  	lpfc_printf_vlog(vport, KERN_ERR, logit,
6033  			 "0713 SCSI layer issued Device Reset (%d, %llu) "
6034  			 "return x%x\n", tgt_id, lun_id, status);
6035  
6036  	/*
6037  	 * We have to clean up the i/o, as it may have been orphaned by the
6038  	 * TMF; or, if the TMF failed, it may be in an indeterminate state.
6039  	 * So, continue on.
6040  	 * We will report success if all the i/o aborts successfully.
6041  	 */
6042  	if (status == SUCCESS)
6043  		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6044  						LPFC_CTX_LUN);
6045  
6046  	return status;
6047  }
6048  
6049  /**
6050   * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
6051   * @cmnd: Pointer to scsi_cmnd data structure.
6052   *
6053   * This routine does a target reset by sending a TARGET_RESET task management
6054   * command.
6055   *
6056   * Return code :
6057   *  0x2003 - Error
6058   *  0x2002 - Success
6059   **/
6060  static int
6061  lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
6062  {
6063  	struct Scsi_Host  *shost = cmnd->device->host;
6064  	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
6065  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6066  	struct lpfc_rport_data *rdata;
6067  	struct lpfc_nodelist *pnode;
6068  	unsigned tgt_id = cmnd->device->id;
6069  	uint64_t lun_id = cmnd->device->lun;
6070  	struct lpfc_scsi_event_header scsi_event;
6071  	int status;
6072  	u32 logit = LOG_FCP;
6073  	u32 dev_loss_tmo = vport->cfg_devloss_tmo;
6074  	unsigned long flags;
6075  	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
6076  
6077  	if (!rport)
6078  		return FAILED;
6079  
6080  	rdata = rport->dd_data;
6081  	if (!rdata || !rdata->pnode) {
6082  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6083  				 "0799 Target Reset rdata failure: rdata x%px\n",
6084  				 rdata);
6085  		return FAILED;
6086  	}
6087  	pnode = rdata->pnode;
6088  	status = fc_block_rport(rport);
6089  	if (status != 0 && status != SUCCESS)
6090  		return status;
6091  
6092  	status = lpfc_chk_tgt_mapped(vport, rport);
6093  	if (status == FAILED) {
6094  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6095  			"0722 Target Reset rport failure: rdata x%px\n", rdata);
6096  		if (pnode) {
6097  			spin_lock_irqsave(&pnode->lock, flags);
6098  			pnode->nlp_flag &= ~NLP_NPR_ADISC;
6099  			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6100  			spin_unlock_irqrestore(&pnode->lock, flags);
6101  		}
6102  		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6103  					  LPFC_CTX_TGT);
6104  		return FAST_IO_FAIL;
6105  	}
6106  
6107  	scsi_event.event_type = FC_REG_SCSI_EVENT;
6108  	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
6109  	scsi_event.lun = 0;
6110  	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6111  	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6112  
6113  	fc_host_post_vendor_event(shost, fc_get_event_number(),
6114  		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6115  
6116  	status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
6117  					FCP_TARGET_RESET);
6118  	if (status != SUCCESS) {
6119  		logit = LOG_TRACE_EVENT;
6120  
6121  		/* Issue LOGO, if no LOGO is outstanding */
6122  		spin_lock_irqsave(&pnode->lock, flags);
6123  		if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
6124  		    !pnode->logo_waitq) {
6125  			pnode->logo_waitq = &waitq;
6126  			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6127  			pnode->nlp_flag |= NLP_ISSUE_LOGO;
6128  			pnode->save_flags |= NLP_WAIT_FOR_LOGO;
6129  			spin_unlock_irqrestore(&pnode->lock, flags);
6130  			lpfc_unreg_rpi(vport, pnode);
6131  			wait_event_timeout(waitq,
6132  					   (!(pnode->save_flags &
6133  					      NLP_WAIT_FOR_LOGO)),
6134  					   msecs_to_jiffies(dev_loss_tmo *
6135  							    1000));
6136  
6137  			if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
6138  				lpfc_printf_vlog(vport, KERN_ERR, logit,
6139  						 "0725 SCSI layer TGTRST "
6140  						 "failed & LOGO TMO (%d, %llu) "
6141  						 "return x%x\n",
6142  						 tgt_id, lun_id, status);
6143  				spin_lock_irqsave(&pnode->lock, flags);
6144  				pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
6145  			} else {
6146  				spin_lock_irqsave(&pnode->lock, flags);
6147  			}
6148  			pnode->logo_waitq = NULL;
6149  			spin_unlock_irqrestore(&pnode->lock, flags);
6150  			status = SUCCESS;
6151  
6152  		} else {
6153  			spin_unlock_irqrestore(&pnode->lock, flags);
6154  			status = FAILED;
6155  		}
6156  	}
6157  
6158  	lpfc_printf_vlog(vport, KERN_ERR, logit,
6159  			 "0723 SCSI layer issued Target Reset (%d, %llu) "
6160  			 "return x%x\n", tgt_id, lun_id, status);
6161  
6162  	/*
6163  	 * We have to clean up the i/o, as it may have been orphaned by the
6164  	 * TMF; or, if the TMF failed, it may be in an indeterminate state.
6165  	 * So, continue on.
6166  	 * We will report success if all the i/o aborts successfully.
6167  	 */
6168  	if (status == SUCCESS)
6169  		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6170  					  LPFC_CTX_TGT);
6171  	return status;
6172  }
6173  
6174  /**
6175   * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
6176   * @cmnd: Pointer to scsi_cmnd data structure.
6177   *
6178   * This routine does a host reset of the adapter port. It brings the HBA
6179   * offline, performs a board restart, and then brings the board back online.
6180   * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
6181   * all outstanding SCSI commands to the host, with the errors returned to
6182   * the SCSI mid-level. As this is the SCSI mid-level's last resort of error
6183   * handling, it returns an error only if resetting the adapter is
6184   * unsuccessful; in all other cases it returns success.
6185   *
6186   * Return code :
6187   *  0x2003 - Error
6188   *  0x2002 - Success
6189   **/
6190  static int
6191  lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6192  {
6193  	struct Scsi_Host *shost = cmnd->device->host;
6194  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6195  	struct lpfc_hba *phba = vport->phba;
6196  	int rc, ret = SUCCESS;
6197  
6198  	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6199  			 "3172 SCSI layer issued Host Reset Data:\n");
6200  
6201  	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6202  	lpfc_offline(phba);
6203  	rc = lpfc_sli_brdrestart(phba);
6204  	if (rc)
6205  		goto error;
6206  
6207  	/* Wait for successful restart of adapter */
6208  	if (phba->sli_rev < LPFC_SLI_REV4) {
6209  		rc = lpfc_sli_chipset_init(phba);
6210  		if (rc)
6211  			goto error;
6212  	}
6213  
6214  	rc = lpfc_online(phba);
6215  	if (rc)
6216  		goto error;
6217  
6218  	lpfc_unblock_mgmt_io(phba);
6219  
6220  	return ret;
6221  error:
6222  	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6223  			 "3323 Failed host reset\n");
6224  	lpfc_unblock_mgmt_io(phba);
6225  	return FAILED;
6226  }
6227  
6228  /**
6229   * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
6230   * @sdev: Pointer to scsi_device.
6231   *
6232   * This routine populates cmds_per_lun + 2 scsi_bufs into this host's globally
6233   * available list of scsi buffers. It also makes sure no more scsi buffers are
6234   * allocated than the HBA limit conveyed to the midlayer. This list of scsi
6235   * buffers exists for the lifetime of the driver.
6236   *
6237   * Return codes:
6238   *   non-0 - Error
6239   *   0 - Success
6240   **/
6241  static int
6242  lpfc_slave_alloc(struct scsi_device *sdev)
6243  {
6244  	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6245  	struct lpfc_hba   *phba = vport->phba;
6246  	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
6247  	uint32_t total = 0;
6248  	uint32_t num_to_alloc = 0;
6249  	int num_allocated = 0;
6250  	uint32_t sdev_cnt;
6251  	struct lpfc_device_data *device_data;
6252  	unsigned long flags;
6253  	struct lpfc_name target_wwpn;
6254  
6255  	if (!rport || fc_remote_port_chkready(rport))
6256  		return -ENXIO;
6257  
6258  	if (phba->cfg_fof) {
6259  
6260  		/*
6261  		 * Check to see if the device data structure for the lun
6262  		 * exists.  If not, create one.
6263  		 */
6264  
6265  		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6266  		spin_lock_irqsave(&phba->devicelock, flags);
6267  		device_data = __lpfc_get_device_data(phba,
6268  						     &phba->luns,
6269  						     &vport->fc_portname,
6270  						     &target_wwpn,
6271  						     sdev->lun);
6272  		if (!device_data) {
6273  			spin_unlock_irqrestore(&phba->devicelock, flags);
6274  			device_data = lpfc_create_device_data(phba,
6275  							&vport->fc_portname,
6276  							&target_wwpn,
6277  							sdev->lun,
6278  							phba->cfg_XLanePriority,
6279  							true);
6280  			if (!device_data)
6281  				return -ENOMEM;
6282  			spin_lock_irqsave(&phba->devicelock, flags);
6283  			list_add_tail(&device_data->listentry, &phba->luns);
6284  		}
6285  		device_data->rport_data = rport->dd_data;
6286  		device_data->available = true;
6287  		spin_unlock_irqrestore(&phba->devicelock, flags);
6288  		sdev->hostdata = device_data;
6289  	} else {
6290  		sdev->hostdata = rport->dd_data;
6291  	}
6292  	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6293  
6294  	/* For SLI4, all IO buffers are pre-allocated */
6295  	if (phba->sli_rev == LPFC_SLI_REV4)
6296  		return 0;
6297  
6298  	/* This code path is now ONLY for SLI3 adapters */
6299  
6300  	/*
6301  	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
6302  	 * available list of scsi buffers.  Don't allocate more than the
6303  	 * HBA limit conveyed to the midlayer via the host structure.  The
6304  	 * formula accounts for the lun_queue_depth + error handlers + 1
6305  	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
6306  	 */
6307  	total = phba->total_scsi_bufs;
6308  	num_to_alloc = vport->cfg_lun_queue_depth + 2;
6309  
6310  	/* If allocated buffers are enough do nothing */
6311  	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
6312  		return 0;
6313  
6314  	/* Allow some exchanges to be available always to complete discovery */
6315  	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
6316  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6317  				 "0704 At limitation of %d preallocated "
6318  				 "command buffers\n", total);
6319  		return 0;
6321  	} else if (total + num_to_alloc >
6322  		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
6323  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6324  				 "0705 Allocation request of %d "
6325  				 "command buffers will exceed max of %d.  "
6326  				 "Reducing allocation request to %d.\n",
6327  				 num_to_alloc, phba->cfg_hba_queue_depth,
6328  				 (phba->cfg_hba_queue_depth - total));
6329  		num_to_alloc = phba->cfg_hba_queue_depth - total;
6330  	}
6331  	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6332  	if (num_to_alloc != num_allocated) {
6333  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6334  				 "0708 Allocation request of %d "
6335  				 "command buffers did not succeed.  "
6336  				 "Allocated %d buffers.\n",
6337  				 num_to_alloc, num_allocated);
6338  	}
6339  	if (num_allocated > 0)
6340  		phba->total_scsi_bufs += num_allocated;
6341  	return 0;
6342  }
6343  
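/*
 * Worked example of the SLI3 preallocation math above (hypothetical
 * values, not driver defaults): with cfg_lun_queue_depth = 30 and
 * cfg_hba_queue_depth = 8192, each new scsi_device requests
 * 30 + 2 = 32 buffers.  With 10 devices attached, nothing more is
 * allocated while 10 * 32 < total_scsi_bufs; once the pool reaches
 * 8192 - LPFC_DISC_IOCB_BUFF_COUNT no further buffers are added, and a
 * request that would cross that mark is trimmed so the pool never
 * exceeds 8192.
 */
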
6344  /**
6345   * lpfc_slave_configure - scsi_host_template slave_configure entry point
6346   * @sdev: Pointer to scsi_device.
6347   *
6348   * This routine configures the following items:
6349   *   - Tag command queuing support for @sdev if supported.
6350   *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
6351   *
6352   * Return codes:
6353   *   0 - Success
6354   **/
6355  static int
6356  lpfc_slave_configure(struct scsi_device *sdev)
6357  {
6358  	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6359  	struct lpfc_hba   *phba = vport->phba;
6360  
6361  	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6362  
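	/*
	 * In polled completion mode, service the FCP ring here and, if
	 * ring interrupts are disabled, re-arm the poll timer so
	 * completions continue to be reaped without an interrupt.
	 */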
6363  	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6364  		lpfc_sli_handle_fast_ring_event(phba,
6365  			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6366  		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6367  			lpfc_poll_rearm_timer(phba);
6368  	}
6369  
6370  	return 0;
6371  }
6372  
6373  /**
6374   * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6375   * @sdev: Pointer to scsi_device.
6376   *
6377   * This routine sets the @sdev hostdata field to null.
6378   **/
6379  static void
6380  lpfc_slave_destroy(struct scsi_device *sdev)
6381  {
6382  	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6383  	struct lpfc_hba   *phba = vport->phba;
6384  	unsigned long flags;
6385  	struct lpfc_device_data *device_data = sdev->hostdata;
6386  
6387  	atomic_dec(&phba->sdev_cnt);
6388  	if ((phba->cfg_fof) && (device_data)) {
6389  		spin_lock_irqsave(&phba->devicelock, flags);
6390  		device_data->available = false;
6391  		if (!device_data->oas_enabled)
6392  			lpfc_delete_device_data(phba, device_data);
6393  		spin_unlock_irqrestore(&phba->devicelock, flags);
6394  	}
6395  	sdev->hostdata = NULL;
6396  	return;
6397  }
6398  
6399  /**
6400   * lpfc_create_device_data - creates and initializes device data structure for OAS
6401   * @phba: Pointer to host bus adapter structure.
6402   * @vport_wwpn: Pointer to vport's wwpn information
6403   * @target_wwpn: Pointer to target's wwpn information
6404   * @lun: Lun on target
6405   * @pri: Priority
6406   * @atomic_create: Flag to indicate if memory should be allocated using the
6407   *		  GFP_ATOMIC flag or not.
6408   *
6409   * This routine creates a device data structure which will contain identifying
6410   * information for the device (host wwpn, target wwpn, lun), state of OAS,
6411   * whether or not the corresponding lun is available to the system,
6412   * and pointer to the rport data.
6413   *
6414   * Return codes:
6415   *   NULL - Error
6416   *   Pointer to lpfc_device_data - Success
6417   **/
6418  struct lpfc_device_data*
6419  lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6420  			struct lpfc_name *target_wwpn, uint64_t lun,
6421  			uint32_t pri, bool atomic_create)
6422  {
6423  
6424  	struct lpfc_device_data *lun_info;
6425  	int memory_flags;
6426  
6427  	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6428  	    !(phba->cfg_fof))
6429  		return NULL;
6430  
6431  	/* Attempt to create the device data to contain lun info */
6432  
6433  	if (atomic_create)
6434  		memory_flags = GFP_ATOMIC;
6435  	else
6436  		memory_flags = GFP_KERNEL;
6437  	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6438  	if (!lun_info)
6439  		return NULL;
6440  	INIT_LIST_HEAD(&lun_info->listentry);
6441  	lun_info->rport_data  = NULL;
6442  	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6443  	       sizeof(struct lpfc_name));
6444  	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6445  	       sizeof(struct lpfc_name));
6446  	lun_info->device_id.lun = lun;
6447  	lun_info->oas_enabled = false;
6448  	lun_info->priority = pri;
6449  	lun_info->available = false;
6450  	return lun_info;
6451  }
6452  
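/*
 * Example (a sketch, not taken verbatim from the driver): a caller in
 * process context that may block can pass atomic_create = false so the
 * mempool allocation uses GFP_KERNEL:
 *
 *	lun_info = lpfc_create_device_data(phba, &vport->fc_portname,
 *					   &target_wwpn, sdev->lun,
 *					   phba->cfg_XLanePriority, false);
 *
 * Callers that hold phba->devicelock or otherwise run in atomic context
 * must pass true instead, as lpfc_enable_oas_lun() does.
 */
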
6453  /**
6454   * lpfc_delete_device_data - frees a device data structure for OAS
6455   * @phba: Pointer to host bus adapter structure.
6456   * @lun_info: Pointer to device data structure to free.
6457   *
6458   * This routine frees the previously allocated device data structure passed.
6459   *
6460   **/
6461  void
6462  lpfc_delete_device_data(struct lpfc_hba *phba,
6463  			struct lpfc_device_data *lun_info)
6464  {
6465  
6466  	if (unlikely(!phba) || !lun_info ||
6467  	    !(phba->cfg_fof))
6468  		return;
6469  
6470  	if (!list_empty(&lun_info->listentry))
6471  		list_del(&lun_info->listentry);
6472  	mempool_free(lun_info, phba->device_data_mem_pool);
6473  	return;
6474  }
6475  
6476  /**
6477   * __lpfc_get_device_data - returns the device data for the specified lun
6478   * @phba: Pointer to host bus adapter structure.
6479   * @list: Point to list to search.
6480   * @vport_wwpn: Pointer to vport's wwpn information
6481   * @target_wwpn: Pointer to target's wwpn information
6482   * @lun: Lun on target
6483   *
6484   * This routine searches the list passed for the specified lun's device data.
6485   * This function does not take locks; it is the responsibility of the caller
6486   * to ensure the proper lock is held before calling the function.
6487   *
6488   * Return codes:
6489   *   NULL - Error
6490   *   Pointer to lpfc_device_data - Success
6491   **/
6492  struct lpfc_device_data*
6493  __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6494  		       struct lpfc_name *vport_wwpn,
6495  		       struct lpfc_name *target_wwpn, uint64_t lun)
6496  {
6497  
6498  	struct lpfc_device_data *lun_info;
6499  
6500  	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6501  	    !phba->cfg_fof)
6502  		return NULL;
6503  
6504  	/* Check to see if the lun is already enabled for OAS. */
6505  
6506  	list_for_each_entry(lun_info, list, listentry) {
6507  		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6508  			    sizeof(struct lpfc_name)) == 0) &&
6509  		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6510  			    sizeof(struct lpfc_name)) == 0) &&
6511  		    (lun_info->device_id.lun == lun))
6512  			return lun_info;
6513  	}
6514  
6515  	return NULL;
6516  }
6517  
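/*
 * Example lookup (a sketch): the caller supplies the locking, e.g.
 *
 *	spin_lock_irqsave(&phba->devicelock, flags);
 *	lun_info = __lpfc_get_device_data(phba, &phba->luns,
 *					  &vport->fc_portname,
 *					  &target_wwpn, sdev->lun);
 *	spin_unlock_irqrestore(&phba->devicelock, flags);
 *
 * as done in lpfc_slave_alloc() above.
 */
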
6518  /**
6519   * lpfc_find_next_oas_lun - searches for the next oas lun
6520   * @phba: Pointer to host bus adapter structure.
6521   * @vport_wwpn: Pointer to vport's wwpn information
6522   * @target_wwpn: Pointer to target's wwpn information
6523   * @starting_lun: Pointer to the lun to start searching for
6524   * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
6525   * @found_target_wwpn: Pointer to the found lun's target wwpn information
6526   * @found_lun: Pointer to the found lun.
6527   * @found_lun_status: Pointer to status of the found lun.
6528   * @found_lun_pri: Pointer to priority of the found lun.
6529   *
6530   * This routine searches the luns list for the specified lun
6531   * or the first lun for the vport/target.  If the vport wwpn contains
6532   * a zero value then a specific vport is not specified. In this case
6533   * any vport which contains the lun will be considered a match.  If the
6534   * target wwpn contains a zero value then a specific target is not specified.
6535   * In this case any target which contains the lun will be considered a
6536   * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
6537   * are returned.  The function will also return the next lun if available.
6538   * If the next lun is not found, the starting_lun parameter will be set to
6539   * NO_MORE_OAS_LUN.
6540   *
6541   * Return codes:
6542   *   true - A matching OAS lun was found
6543   *   false - No matching lun was found, or error
6544   **/
6545  bool
6546  lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6547  		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6548  		       struct lpfc_name *found_vport_wwpn,
6549  		       struct lpfc_name *found_target_wwpn,
6550  		       uint64_t *found_lun,
6551  		       uint32_t *found_lun_status,
6552  		       uint32_t *found_lun_pri)
6553  {
6554  
6555  	unsigned long flags;
6556  	struct lpfc_device_data *lun_info;
6557  	struct lpfc_device_id *device_id;
6558  	uint64_t lun;
6559  	bool found = false;
6560  
6561  	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6562  	    !starting_lun || !found_vport_wwpn ||
6563  	    !found_target_wwpn || !found_lun || !found_lun_status ||
6564  	    !found_lun_pri || (*starting_lun == NO_MORE_OAS_LUN) ||
6565  	    !phba->cfg_fof)
6566  		return false;
6567  
6568  	lun = *starting_lun;
6569  	*found_lun = NO_MORE_OAS_LUN;
6570  	*starting_lun = NO_MORE_OAS_LUN;
6571  
6572  	/* Search for the lun, or the lun closest in value */
6573  
6574  	spin_lock_irqsave(&phba->devicelock, flags);
6575  	list_for_each_entry(lun_info, &phba->luns, listentry) {
6576  		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6577  		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6578  			    sizeof(struct lpfc_name)) == 0)) &&
6579  		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6580  		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6581  			    sizeof(struct lpfc_name)) == 0)) &&
6582  		    (lun_info->oas_enabled)) {
6583  			device_id = &lun_info->device_id;
6584  			if ((!found) &&
6585  			    ((lun == FIND_FIRST_OAS_LUN) ||
6586  			     (device_id->lun == lun))) {
6587  				*found_lun = device_id->lun;
6588  				memcpy(found_vport_wwpn,
6589  				       &device_id->vport_wwpn,
6590  				       sizeof(struct lpfc_name));
6591  				memcpy(found_target_wwpn,
6592  				       &device_id->target_wwpn,
6593  				       sizeof(struct lpfc_name));
6594  				if (lun_info->available)
6595  					*found_lun_status =
6596  						OAS_LUN_STATUS_EXISTS;
6597  				else
6598  					*found_lun_status = 0;
6599  				*found_lun_pri = lun_info->priority;
6600  				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6601  					memset(vport_wwpn, 0x0,
6602  					       sizeof(struct lpfc_name));
6603  				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6604  					memset(target_wwpn, 0x0,
6605  					       sizeof(struct lpfc_name));
6606  				found = true;
6607  			} else if (found) {
6608  				*starting_lun = device_id->lun;
6609  				memcpy(vport_wwpn, &device_id->vport_wwpn,
6610  				       sizeof(struct lpfc_name));
6611  				memcpy(target_wwpn, &device_id->target_wwpn,
6612  				       sizeof(struct lpfc_name));
6613  				break;
6614  			}
6615  		}
6616  	}
6617  	spin_unlock_irqrestore(&phba->devicelock, flags);
6618  	return found;
6619  }
6620  
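/*
 * Example traversal (a sketch; all variables are hypothetical and
 * caller-owned): every OAS-enabled lun can be visited by seeding the
 * search with FIND_FIRST_OAS_LUN and feeding the returned next-lun
 * value back in until it comes back as NO_MORE_OAS_LUN:
 *
 *	uint64_t lun = FIND_FIRST_OAS_LUN;
 *
 *	while (lun != NO_MORE_OAS_LUN &&
 *	       lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn,
 *				      &lun, &found_vport_wwpn,
 *				      &found_target_wwpn, &found_lun,
 *				      &found_lun_status, &found_lun_pri))
 *		handle_oas_lun(found_lun);	(handle_oas_lun is made up)
 */
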
6621  /**
6622   * lpfc_enable_oas_lun - enables a lun for OAS operations
6623   * @phba: Pointer to host bus adapter structure.
6624   * @vport_wwpn: Pointer to vport's wwpn information
6625   * @target_wwpn: Pointer to target's wwpn information
6626   * @lun: Lun
6627   * @pri: Priority
6628   *
6629   * This routine enables a lun for OAS operations.  The routine does so
6630   * as follows:
6631   *
6632   *   1) Checks to see if the device data for the lun has been created.
6633   *   2) If found, sets the OAS enabled flag if not set and returns.
6634   *   3) Otherwise, creates a device data structure.
6635   *   4) If successfully created, indicates the device data is for an OAS lun,
6636   *   indicates the lun is not available, and adds it to the list of luns.
6637   *
6638   * Return codes:
6639   *   false - Error
6640   *   true - Success
6641   **/
6642  bool
6643  lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6644  		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6645  {
6646  
6647  	struct lpfc_device_data *lun_info;
6648  	unsigned long flags;
6649  
6650  	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6651  	    !phba->cfg_fof)
6652  		return false;
6653  
6654  	spin_lock_irqsave(&phba->devicelock, flags);
6655  
6656  	/* Check to see if the device data for the lun has been created */
6657  	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
6658  					  target_wwpn, lun);
6659  	if (lun_info) {
6660  		if (!lun_info->oas_enabled)
6661  			lun_info->oas_enabled = true;
6662  		lun_info->priority = pri;
6663  		spin_unlock_irqrestore(&phba->devicelock, flags);
6664  		return true;
6665  	}
6666  
6667  	/* Create a lun info structure and add it to the list of luns */
6668  	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6669  					   pri, true);
6670  	if (lun_info) {
6671  		lun_info->oas_enabled = true;
6672  		lun_info->priority = pri;
6673  		lun_info->available = false;
6674  		list_add_tail(&lun_info->listentry, &phba->luns);
6675  		spin_unlock_irqrestore(&phba->devicelock, flags);
6676  		return true;
6677  	}
6678  	spin_unlock_irqrestore(&phba->devicelock, flags);
6679  	return false;
6680  }
6681  
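/*
 * Example pairing (a sketch; the wwpn/lun values are assumed to be
 * resolved by the caller): OAS is switched on and later off for a lun
 * using the same vport/target/lun identity tuple:
 *
 *	if (lpfc_enable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun, pri))
 *		the lun is now flagged for OAS
 *	...
 *	if (lpfc_disable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun, pri))
 *		the flag is cleared; device data is freed if unavailable
 */
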
6682  /**
6683   * lpfc_disable_oas_lun - disables a lun for OAS operations
6684   * @phba: Pointer to host bus adapter structure.
6685   * @vport_wwpn: Pointer to vport's wwpn information
6686   * @target_wwpn: Pointer to target's wwpn information
6687   * @lun: Lun
6688   * @pri: Priority
6689   *
6690   * This routine disables a lun for OAS operations.  The routine does so
6691   * as follows:
6692   *
6693   *   1) Checks to see if the device data for the lun is created.
6694   *   2) If present, clears the flag indicating this lun is for OAS.
6695   *   3) If the lun is not available to the system, the device data is
6696   *   freed.
6697   *
6698   * Return codes:
6699   *   false - Error
6700   *   true - Success
6701   **/
6702  bool
6703  lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6704  		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6705  {
6706  
6707  	struct lpfc_device_data *lun_info;
6708  	unsigned long flags;
6709  
6710  	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6711  	    !phba->cfg_fof)
6712  		return false;
6713  
6714  	spin_lock_irqsave(&phba->devicelock, flags);
6715  
6716  	/* Check to see if the lun is available. */
6717  	lun_info = __lpfc_get_device_data(phba,
6718  					  &phba->luns, vport_wwpn,
6719  					  target_wwpn, lun);
6720  	if (lun_info) {
6721  		lun_info->oas_enabled = false;
6722  		lun_info->priority = pri;
6723  		if (!lun_info->available)
6724  			lpfc_delete_device_data(phba, lun_info);
6725  		spin_unlock_irqrestore(&phba->devicelock, flags);
6726  		return true;
6727  	}
6728  
6729  	spin_unlock_irqrestore(&phba->devicelock, flags);
6730  	return false;
6731  }
6732  
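/*
 * Stub entry points for lpfc_template_nvme below: an NVMe-only port
 * exposes no SCSI devices, so any queued command is pushed back to the
 * midlayer and SCSI device allocation is refused.
 */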
6733  static int
6734  lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
6735  {
6736  	return SCSI_MLQUEUE_HOST_BUSY;
6737  }
6738  
6739  static int
6740  lpfc_no_slave(struct scsi_device *sdev)
6741  {
6742  	return -ENODEV;
6743  }
6744  
6745  struct scsi_host_template lpfc_template_nvme = {
6746  	.module			= THIS_MODULE,
6747  	.name			= LPFC_DRIVER_NAME,
6748  	.proc_name		= LPFC_DRIVER_NAME,
6749  	.info			= lpfc_info,
6750  	.queuecommand		= lpfc_no_command,
6751  	.slave_alloc		= lpfc_no_slave,
6752  	.slave_configure	= lpfc_no_slave,
6753  	.scan_finished		= lpfc_scan_finished,
6754  	.this_id		= -1,
6755  	.sg_tablesize		= 1,
6756  	.cmd_per_lun		= 1,
6757  	.shost_groups		= lpfc_hba_groups,
6758  	.max_sectors		= 0xFFFFFFFF,
6759  	.vendor_id		= LPFC_NL_VENDOR_ID,
6760  	.track_queue_depth	= 0,
6761  };
6762  
6763  struct scsi_host_template lpfc_template = {
6764  	.module			= THIS_MODULE,
6765  	.name			= LPFC_DRIVER_NAME,
6766  	.proc_name		= LPFC_DRIVER_NAME,
6767  	.info			= lpfc_info,
6768  	.queuecommand		= lpfc_queuecommand,
6769  	.eh_timed_out		= fc_eh_timed_out,
6770  	.eh_should_retry_cmd    = fc_eh_should_retry_cmd,
6771  	.eh_abort_handler	= lpfc_abort_handler,
6772  	.eh_device_reset_handler = lpfc_device_reset_handler,
6773  	.eh_target_reset_handler = lpfc_target_reset_handler,
6774  	.eh_host_reset_handler  = lpfc_host_reset_handler,
6775  	.slave_alloc		= lpfc_slave_alloc,
6776  	.slave_configure	= lpfc_slave_configure,
6777  	.slave_destroy		= lpfc_slave_destroy,
6778  	.scan_finished		= lpfc_scan_finished,
6779  	.this_id		= -1,
6780  	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
6781  	.cmd_per_lun		= LPFC_CMD_PER_LUN,
6782  	.shost_groups		= lpfc_hba_groups,
6783  	.max_sectors		= 0xFFFFFFFF,
6784  	.vendor_id		= LPFC_NL_VENDOR_ID,
6785  	.change_queue_depth	= scsi_change_queue_depth,
6786  	.track_queue_depth	= 1,
6787  };
6788  
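/*
 * Vports share the physical HBA, so the vport template below leaves the
 * bus and host reset handlers unset; a full host reset is only exposed
 * through lpfc_template above.
 */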
6789  struct scsi_host_template lpfc_vport_template = {
6790  	.module			= THIS_MODULE,
6791  	.name			= LPFC_DRIVER_NAME,
6792  	.proc_name		= LPFC_DRIVER_NAME,
6793  	.info			= lpfc_info,
6794  	.queuecommand		= lpfc_queuecommand,
6795  	.eh_timed_out		= fc_eh_timed_out,
6796  	.eh_should_retry_cmd    = fc_eh_should_retry_cmd,
6797  	.eh_abort_handler	= lpfc_abort_handler,
6798  	.eh_device_reset_handler = lpfc_device_reset_handler,
6799  	.eh_target_reset_handler = lpfc_target_reset_handler,
6800  	.eh_bus_reset_handler	= NULL,
6801  	.eh_host_reset_handler	= NULL,
6802  	.slave_alloc		= lpfc_slave_alloc,
6803  	.slave_configure	= lpfc_slave_configure,
6804  	.slave_destroy		= lpfc_slave_destroy,
6805  	.scan_finished		= lpfc_scan_finished,
6806  	.this_id		= -1,
6807  	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
6808  	.cmd_per_lun		= LPFC_CMD_PER_LUN,
6809  	.shost_groups		= lpfc_vport_groups,
6810  	.max_sectors		= 0xFFFFFFFF,
6811  	.vendor_id		= 0,
6812  	.change_queue_depth	= scsi_change_queue_depth,
6813  	.track_queue_depth	= 1,
6814  };
6815