Lines Matching +full:data +full:path
// SPDX-License-Identifier: GPL-2.0
/*
 * CCW device PGID and path verification I/O handling.
 */
/*
 * Process path verification data and report result.
 */
/* in verify_done() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;
        int mpath = cdev->private->flags.mpath;
        int pgroup = cdev->private->flags.pgroup;
        ...
        if (sch->config.mp != mpath) {
                sch->config.mp = mpath;
        ...
                      "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
                      sch->vpm);
/* in nop_build_cp() */
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->dma_area->iccws;

        cp->cmd_code = CCW_CMD_NOOP;
        cp->cda = 0;
        cp->count = 0;
        cp->flags = CCW_FLAG_SLI;
        req->cp = cp;
/*
 * Perform NOOP on a single path.
 */
/* in nop_do() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
                              ~cdev->private->path_noirq_mask);
        if (!req->lpm)
        ...
        verify_done(cdev, sch->vpm ? 0 : -EACCES);
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
        ...
        /* Only subchannel status might indicate a path error. */
        if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        ...
                sch->vpm |= req->lpm;
        ...
        case -ETIME:
                cdev->private->path_noirq_mask |= req->lpm;
        ...
        case -EACCES:
                cdev->private->path_notoper_mask |= req->lpm;
        ...
        /* Continue on the next path. */
        req->lpm >>= 1;
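The NOOP, SET PGID and SENSE PGID requests in this file all run in single-path mode: req->lpm starts at 0x80 (path 0, the leftmost bit), each callback shifts it right by one path, and lpm_adjust()/pathmask_to_pos() skip to the next usable bit and turn it into an array index. A minimal standalone sketch of that mask walk, in plain user-space C with an illustrative path mask (not the kernel helpers):

/* Sketch: walking an 8-bit channel-path mask one path at a time. */
#include <stdio.h>

/* Advance lpm to the next single-bit path that is also set in mask. */
static unsigned char next_path(unsigned char lpm, unsigned char mask)
{
        while (lpm && !(lpm & mask))
                lpm >>= 1;
        return lpm;
}

/* Map a single-bit path mask to a position: 0x80 -> 0, 0x01 -> 7. */
static int path_pos(unsigned char lpm)
{
        return 8 - __builtin_ffs(lpm);
}

int main(void)
{
        unsigned char usable = 0xc1;    /* paths 0, 1 and 7 usable */
        unsigned char lpm = 0x80;       /* start with path 0 */

        while ((lpm = next_path(lpm, usable))) {
                printf("trying path %d (mask 0x%02x)\n", path_pos(lpm), lpm);
                lpm >>= 1;              /* continue on the next path */
        }
        return 0;
}

This prints paths 0, 1 and 7 in that order; in the driver the same walk ends when the mask runs out, at which point verify_done() reports the accumulated sch->vpm.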
/*
 * Create channel program to perform SET PGID on a single path.
 */
/* in spid_build_cp() */
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->dma_area->iccws;
        int i = pathmask_to_pos(req->lpm);
        struct pgid *pgid = &cdev->private->dma_area->pgid[i];

        pgid->inf.fc = fn;
        cp->cmd_code = CCW_CMD_SET_PGID;
        cp->cda = virt_to_dma32(pgid);
        cp->count = sizeof(*pgid);
        cp->flags = CCW_FLAG_SLI;
        req->cp = cp;
static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
        ...
        /* We don't know the path groups' state. Abort. */
        ...
        /* Path groups have been reset. Restart path verification but ... */
        cdev->private->flags.pgid_unknown = 0;
/*
 * Reset pathgroups and restart path verification, leave unusable paths out.
 */
/* in pgid_wipeout_start() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;
        struct ccw_request *req = &cdev->private->req;
        ...
                      id->ssid, id->devno, cdev->private->pgid_valid_mask,
                      cdev->private->path_noirq_mask);

        /* Initialize request data. */
        ...
        req->timeout = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm = sch->schib.pmcw.pam;
        req->callback = pgid_wipeout_callback;
        ...
        if (cdev->private->flags.mpath)
/*
 * Perform establish/resign SET PGID on a single path.
 */
/* in spid_do() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        ...
        /* Use next available path that is not already in correct state. */
        req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
        if (!req->lpm)
        ...
        if (req->lpm & sch->opm)
        ...
        if (cdev->private->flags.mpath)
        ...
        if (cdev->private->flags.pgid_unknown) {
        ...
        verify_done(cdev, sch->vpm ? 0 : -EACCES);
/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        ...
                sch->vpm |= req->lpm & sch->opm;
        ...
        case -ETIME:
                cdev->private->flags.pgid_unknown = 1;
                cdev->private->path_noirq_mask |= req->lpm;
        ...
        case -EACCES:
                cdev->private->path_notoper_mask |= req->lpm;
        ...
        case -EOPNOTSUPP:
                if (cdev->private->flags.mpath) {
        ...
                        cdev->private->flags.mpath = 0;
        ...
                cdev->private->flags.pgroup = 0;
        ...
        req->lpm >>= 1;
/* in spid_start() */
        struct ccw_request *req = &cdev->private->req;

        /* Initialize request data. */
        ...
        req->timeout = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm = 0x80;
        req->singlepath = 1;
        req->callback = spid_callback;
/* in pgid_cmp() */
                      sizeof(struct pgid) - 1);
/*
 * Determine pathgroup state from PGID data.
 */
/* in pgid_analyze() */
        struct pgid *pgid = &cdev->private->dma_area->pgid[0];
        ...
                if ((cdev->private->pgid_valid_mask & lpm) == 0)
        ...
                if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
        ...
                first = &channel_subsystems[0]->global_pgid;
/* in pgid_to_donepm() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        ...
                if ((cdev->private->pgid_valid_mask & lpm) == 0)
        ...
                pgid = &cdev->private->dma_area->pgid[i];
                if (sch->opm & lpm) {
                        if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
        ...
                        if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
        ...
                if (cdev->private->flags.mpath) {
                        if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
        ...
                        if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
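Condensed, the per-path test above amounts to: a path counts as done when its SENSE PGID state1 matches its operational state (grouped on operational paths, ungrouped on the rest) and its state3 matches the requested multipath mode. A standalone sketch of that predicate; the enum values here are illustrative placeholders, not the kernel's SNID_* constants:

/* Sketch of the pgid_to_donepm() per-path decision (placeholder enums). */
#include <stdbool.h>
#include <stdio.h>

enum state1 { UNGROUPED, GROUPED };
enum state3 { SINGLE_PATH, MULTI_PATH };

static bool path_done(bool operational, enum state1 s1,
                      bool mpath, enum state3 s3)
{
        /* Grouped on operational paths, ungrouped on the rest. */
        if (s1 != (operational ? GROUPED : UNGROUPED))
                return false;
        /* Path-group mode must match the requested multipath setting. */
        return s3 == (mpath ? MULTI_PATH : SINGLE_PATH);
}

int main(void)
{
        printf("%d\n", path_done(true, GROUPED, true, MULTI_PATH));   /* 1 */
        printf("%d\n", path_done(false, GROUPED, true, MULTI_PATH));  /* 0 */
        return 0;
}

Paths that pass the test are collected into the done path mask; paths whose PGID was never read (pgid_valid_mask bit clear) are skipped up front, as in the fragment above.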
/* in pgid_fill() */
                memcpy(&cdev->private->dma_area->pgid[i], pgid,
/*
 * Process SENSE PGID data and report result.
 */
/* in snid_done() */
        struct ccw_dev_id *id = &cdev->private->dev_id;
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        ...
        if (reserved == cdev->private->pgid_valid_mask)
                rc = -EUSERS;
        ...
                rc = -EOPNOTSUPP;
        ...
                sch->vpm = donepm & sch->opm;
                cdev->private->pgid_reset_mask |= reset;
                cdev->private->pgid_todo_mask &=
                        ~(donepm | cdev->private->path_noirq_mask);
        ...
                      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
                      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
                      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
        ...
                if (cdev->private->flags.pgid_unknown) {
        ...
                if (cdev->private->pgid_todo_mask == 0) {
                        verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
        ...
                /* Perform path-grouping. */
        ...
        case -EOPNOTSUPP:
                /* Path-grouping not supported. */
                cdev->private->flags.pgroup = 0;
                cdev->private->flags.mpath = 0;
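The mask arithmetic in snid_done() is easy to check by hand: the verified path mask is the set of grouped paths that are also operational, and the to-do mask (which started out as the PAM in verify_start()) drops every path that is already grouped or that never presented an interrupt. A small standalone example with made-up mask values, not taken from a real trace:

/* Sketch of the snid_done() mask bookkeeping with illustrative values. */
#include <stdio.h>

int main(void)
{
        unsigned char pam        = 0xf0;        /* physically available paths */
        unsigned char opm        = 0xe0;        /* currently operational paths */
        unsigned char donepm     = 0xc0;        /* paths already grouped */
        unsigned char noirq_mask = 0x10;        /* paths that timed out */

        unsigned char vpm  = donepm & opm;                  /* verified paths */
        unsigned char todo = pam & ~(donepm | noirq_mask);  /* still need SET PGID */

        printf("vpm=%02x todo=%02x\n", vpm, todo);          /* vpm=c0 todo=20 */
        return 0;
}

If the to-do mask comes out zero, verify_done() is called right away; otherwise spid_start() begins SET PGID on the remaining paths, exactly as the rc == 0 branch above does.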
/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
/* in snid_build_cp() */
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->dma_area->iccws;
        int i = pathmask_to_pos(req->lpm);
        ...
        cp->cmd_code = CCW_CMD_SENSE_PGID;
        cp->cda = virt_to_dma32(&cdev->private->dma_area->pgid[i]);
        cp->count = sizeof(struct pgid);
        cp->flags = CCW_FLAG_SLI;
        req->cp = cp;
/*
 * Perform SENSE PGID on a single path.
 */
/* in snid_do() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        ...
        req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
                              ~cdev->private->path_noirq_mask);
        if (!req->lpm)
        ...
        if (cdev->private->pgid_valid_mask)
        ...
        else if (cdev->private->path_noirq_mask)
                ret = -ETIME;
        ...
                ret = -EACCES;
/*
 * Process SENSE PGID request result for single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct ccw_request *req = &cdev->private->req;
        ...
                cdev->private->pgid_valid_mask |= req->lpm;
        ...
        case -ETIME:
                cdev->private->flags.pgid_unknown = 1;
                cdev->private->path_noirq_mask |= req->lpm;
        ...
        case -EACCES:
                cdev->private->path_notoper_mask |= req->lpm;
        ...
        /* Continue on the next path. */
        req->lpm >>= 1;
/*
 * Perform path verification.
 */
/* in verify_start() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        struct ccw_dev_id *devid = &cdev->private->dev_id;

        sch->vpm = 0;
        sch->lpm = sch->schib.pmcw.pam;

        /* Initialize PGID data. */
        memset(cdev->private->dma_area->pgid, 0,
               sizeof(cdev->private->dma_area->pgid));
        cdev->private->pgid_valid_mask = 0;
        cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
        cdev->private->path_notoper_mask = 0;

        /* Initialize request data. */
        ...
        req->timeout = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm = 0x80;
        req->singlepath = 1;
        if (cdev->private->flags.pgroup) {
        ...
                req->callback = snid_callback;
        ...
                req->filter = nop_filter;
                req->callback = nop_callback;
/**
 * ccw_device_verify_start - perform path verification
 * ...
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 */
/* in ccw_device_verify_start() */
        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
        /*
         * ...
         * They may change in the course of path verification.
         */
        cdev->private->flags.pgroup = cdev->private->options.pgroup;
        cdev->private->flags.mpath = cdev->private->options.mpath;
        cdev->private->flags.doverify = 0;
        cdev->private->path_noirq_mask = 0;
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;
        ...
        cdev->private->flags.mpath = 0;
        if (sch->config.mp) {
                sch->config.mp = 0;
        ...
        CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
/**
 * ccw_device_disband_start - disband pathgroup
 * ...
 */
/* in ccw_device_disband_start() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        ...
        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
        ...
        req->timeout = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm = sch->schib.pmcw.pam & sch->opm;
        req->singlepath = 1;
        req->callback = disband_callback;
        ...
        if (cdev->private->flags.mpath)
/* in stlck_build_cp() */
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->dma_area->iccws;
        ...
        req->cp = cp;
static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct stlck_data *sdata = data;

        sdata->rc = rc;
        complete(&sdata->done);
/**
 * ccw_device_stlck_start - perform unconditional release
 * ...
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 * ...
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
        ...
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        ...
        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
        ...
        req->timeout = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm = sch->schib.pmcw.pam & sch->opm;
        req->data = data;
        req->callback = stlck_callback;
/* in ccw_device_stlck() */
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct stlck_data data;
        ...
        if (cdev->drv) {
                if (!cdev->private->options.force)
                        return -EINVAL;
        ...
                return -ENOMEM;
        init_completion(&data.done);
        data.rc = -EIO;
        spin_lock_irq(&sch->lock);
        ...
        cdev->private->state = DEV_STATE_STEAL_LOCK;
        ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
        spin_unlock_irq(&sch->lock);
        ...
        if (wait_for_completion_interruptible(&data.done)) {
        ...
                spin_lock_irq(&sch->lock);
        ...
                spin_unlock_irq(&sch->lock);
                wait_for_completion(&data.done);
        ...
        rc = data.rc;
        ...
        spin_lock_irq(&sch->lock);
        ...
        cdev->private->state = DEV_STATE_BOXED;
        ...
        spin_unlock_irq(&sch->lock);