1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4   *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5   *		    Carsten Otte <Cotte@de.ibm.com>
6   *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7   * Bugreports.to..: <Linux390@de.ibm.com>
8   * Copyright IBM Corp. 1999, 2009
9   */
10  
11  #include <linux/kmod.h>
12  #include <linux/init.h>
13  #include <linux/interrupt.h>
14  #include <linux/ctype.h>
15  #include <linux/major.h>
16  #include <linux/slab.h>
17  #include <linux/hdreg.h>
18  #include <linux/async.h>
19  #include <linux/mutex.h>
20  #include <linux/debugfs.h>
21  #include <linux/seq_file.h>
22  #include <linux/vmalloc.h>
23  
24  #include <asm/ccwdev.h>
25  #include <asm/ebcdic.h>
26  #include <asm/idals.h>
27  #include <asm/itcw.h>
28  #include <asm/diag.h>
29  
30  #include "dasd_int.h"
31  /*
32   * SECTION: Constant definitions to be used within this file
33   */
34  #define DASD_CHANQ_MAX_SIZE 4
35  
36  #define DASD_DIAG_MOD		"dasd_diag_mod"
37  
38  /*
39   * SECTION: exported variables of dasd.c
40   */
41  debug_info_t *dasd_debug_area;
42  EXPORT_SYMBOL(dasd_debug_area);
43  static struct dentry *dasd_debugfs_root_entry;
44  struct dasd_discipline *dasd_diag_discipline_pointer;
45  EXPORT_SYMBOL(dasd_diag_discipline_pointer);
46  void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
47  
48  MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
49  MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
50  		   " Copyright IBM Corp. 2000");
51  MODULE_LICENSE("GPL");
52  
53  /*
54   * SECTION: prototypes for static functions of dasd.c
55   */
56  static int dasd_flush_block_queue(struct dasd_block *);
57  static void dasd_device_tasklet(unsigned long);
58  static void dasd_block_tasklet(unsigned long);
59  static void do_kick_device(struct work_struct *);
60  static void do_reload_device(struct work_struct *);
61  static void do_requeue_requests(struct work_struct *);
62  static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
63  static void dasd_device_timeout(struct timer_list *);
64  static void dasd_block_timeout(struct timer_list *);
65  static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
66  static void dasd_profile_init(struct dasd_profile *, struct dentry *);
67  static void dasd_profile_exit(struct dasd_profile *);
68  static void dasd_hosts_init(struct dentry *, struct dasd_device *);
69  static void dasd_hosts_exit(struct dasd_device *);
70  static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
71  				   unsigned int);
72  /*
73   * SECTION: Operations on the device structure.
74   */
75  static wait_queue_head_t dasd_init_waitq;
76  static wait_queue_head_t dasd_flush_wq;
77  static wait_queue_head_t generic_waitq;
78  static wait_queue_head_t shutdown_waitq;
79  
80  /*
81   * Allocate memory for a new device structure.
82   */
83  struct dasd_device *dasd_alloc_device(void)
84  {
85  	struct dasd_device *device;
86  
87  	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
88  	if (!device)
89  		return ERR_PTR(-ENOMEM);
90  
91  	/* Get two pages for normal block device operations. */
92  	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
93  	if (!device->ccw_mem) {
94  		kfree(device);
95  		return ERR_PTR(-ENOMEM);
96  	}
97  	/* Get one page for error recovery. */
98  	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
99  	if (!device->erp_mem) {
100  		free_pages((unsigned long) device->ccw_mem, 1);
101  		kfree(device);
102  		return ERR_PTR(-ENOMEM);
103  	}
104  	/* Get two pages for ese format. */
105  	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
106  	if (!device->ese_mem) {
107  		free_page((unsigned long) device->erp_mem);
108  		free_pages((unsigned long) device->ccw_mem, 1);
109  		kfree(device);
110  		return ERR_PTR(-ENOMEM);
111  	}
112  
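	/*
	 * Carve the preallocated pages into chunk lists; dasd_smalloc_request()
	 * and friends later hand out request memory from these chunks under
	 * mem_lock, so no further page allocations are needed in atomic context.
	 */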
113  	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
114  	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
115  	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
116  	spin_lock_init(&device->mem_lock);
117  	atomic_set(&device->tasklet_scheduled, 0);
118  	tasklet_init(&device->tasklet, dasd_device_tasklet,
119  		     (unsigned long) device);
120  	INIT_LIST_HEAD(&device->ccw_queue);
121  	timer_setup(&device->timer, dasd_device_timeout, 0);
122  	INIT_WORK(&device->kick_work, do_kick_device);
123  	INIT_WORK(&device->reload_device, do_reload_device);
124  	INIT_WORK(&device->requeue_requests, do_requeue_requests);
125  	device->state = DASD_STATE_NEW;
126  	device->target = DASD_STATE_NEW;
127  	mutex_init(&device->state_mutex);
128  	spin_lock_init(&device->profile.lock);
129  	return device;
130  }
131  
132  /*
133   * Free memory of a device structure.
134   */
135  void dasd_free_device(struct dasd_device *device)
136  {
137  	kfree(device->private);
138  	free_pages((unsigned long) device->ese_mem, 1);
139  	free_page((unsigned long) device->erp_mem);
140  	free_pages((unsigned long) device->ccw_mem, 1);
141  	kfree(device);
142  }
143  
144  /*
145   * Allocate memory for a new block structure.
146   */
147  struct dasd_block *dasd_alloc_block(void)
148  {
149  	struct dasd_block *block;
150  
151  	block = kzalloc(sizeof(*block), GFP_ATOMIC);
152  	if (!block)
153  		return ERR_PTR(-ENOMEM);
154  	/* open_count = 0 means device online but not in use */
155  	atomic_set(&block->open_count, -1);
156  
157  	atomic_set(&block->tasklet_scheduled, 0);
158  	tasklet_init(&block->tasklet, dasd_block_tasklet,
159  		     (unsigned long) block);
160  	INIT_LIST_HEAD(&block->ccw_queue);
161  	spin_lock_init(&block->queue_lock);
162  	INIT_LIST_HEAD(&block->format_list);
163  	spin_lock_init(&block->format_lock);
164  	timer_setup(&block->timer, dasd_block_timeout, 0);
165  	spin_lock_init(&block->profile.lock);
166  
167  	return block;
168  }
169  EXPORT_SYMBOL_GPL(dasd_alloc_block);
170  
171  /*
172   * Free memory of a block structure.
173   */
174  void dasd_free_block(struct dasd_block *block)
175  {
176  	kfree(block);
177  }
178  EXPORT_SYMBOL_GPL(dasd_free_block);
179  
180  /*
181   * Make a new device known to the system.
182   */
183  static int dasd_state_new_to_known(struct dasd_device *device)
184  {
185  	/*
186  	 * As long as the device is not in state DASD_STATE_NEW we want to
187  	 * keep the reference count > 0.
188  	 */
189  	dasd_get_device(device);
190  	device->state = DASD_STATE_KNOWN;
191  	return 0;
192  }
193  
194  /*
195   * Let the system forget about a device.
196   */
197  static int dasd_state_known_to_new(struct dasd_device *device)
198  {
199  	/* Disable extended error reporting for this device. */
200  	dasd_eer_disable(device);
201  	device->state = DASD_STATE_NEW;
202  
203  	/* Give up reference we took in dasd_state_new_to_known. */
204  	dasd_put_device(device);
205  	return 0;
206  }
207  
208  static struct dentry *dasd_debugfs_setup(const char *name,
209  					 struct dentry *base_dentry)
210  {
211  	struct dentry *pde;
212  
213  	if (!base_dentry)
214  		return NULL;
215  	pde = debugfs_create_dir(name, base_dentry);
216  	if (!pde || IS_ERR(pde))
217  		return NULL;
218  	return pde;
219  }
220  
221  /*
222   * Request the irq line for the device.
223   */
224  static int dasd_state_known_to_basic(struct dasd_device *device)
225  {
226  	struct dasd_block *block = device->block;
227  	int rc = 0;
228  
229  	/* Allocate and register gendisk structure. */
230  	if (block) {
231  		rc = dasd_gendisk_alloc(block);
232  		if (rc)
233  			return rc;
234  		block->debugfs_dentry =
235  			dasd_debugfs_setup(block->gdp->disk_name,
236  					   dasd_debugfs_root_entry);
237  		dasd_profile_init(&block->profile, block->debugfs_dentry);
238  		if (dasd_global_profile_level == DASD_PROFILE_ON)
239  			dasd_profile_on(&device->block->profile);
240  	}
241  	device->debugfs_dentry =
242  		dasd_debugfs_setup(dev_name(&device->cdev->dev),
243  				   dasd_debugfs_root_entry);
244  	dasd_profile_init(&device->profile, device->debugfs_dentry);
245  	dasd_hosts_init(device->debugfs_dentry, device);
246  
247  	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
248  	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
249  					    8 * sizeof(long));
250  	debug_register_view(device->debug_area, &debug_sprintf_view);
251  	debug_set_level(device->debug_area, DBF_WARNING);
252  	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
253  
254  	device->state = DASD_STATE_BASIC;
255  
256  	return rc;
257  }
258  
259  /*
260   * Release the irq line for the device. Terminate any running i/o.
261   */
262  static int dasd_state_basic_to_known(struct dasd_device *device)
263  {
264  	int rc;
265  
266  	if (device->discipline->basic_to_known) {
267  		rc = device->discipline->basic_to_known(device);
268  		if (rc)
269  			return rc;
270  	}
271  
272  	if (device->block) {
273  		dasd_profile_exit(&device->block->profile);
274  		debugfs_remove(device->block->debugfs_dentry);
275  		dasd_gendisk_free(device->block);
276  		dasd_block_clear_timer(device->block);
277  	}
278  	rc = dasd_flush_device_queue(device);
279  	if (rc)
280  		return rc;
281  	dasd_device_clear_timer(device);
282  	dasd_profile_exit(&device->profile);
283  	dasd_hosts_exit(device);
284  	debugfs_remove(device->debugfs_dentry);
285  	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
286  	if (device->debug_area != NULL) {
287  		debug_unregister(device->debug_area);
288  		device->debug_area = NULL;
289  	}
290  	device->state = DASD_STATE_KNOWN;
291  	return 0;
292  }
293  
294  /*
295   * Do the initial analysis. The do_analysis function may return
296   * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
297   * until the discipline decides to continue the startup sequence
298   * by calling the function dasd_change_state. The eckd discipline
299   * uses this to start a ccw that detects the format. The completion
300   * interrupt for this detection ccw uses the kernel event daemon to
301   * trigger the call to dasd_change_state. All this is done in the
302   * discipline code, see dasd_eckd.c.
303   * After the analysis ccw is done (do_analysis returned 0) the block
304   * device is setup.
305   * In case the analysis returns an error, the device setup is stopped
306   * (a fake disk was already added to allow formatting).
307   */
308  static int dasd_state_basic_to_ready(struct dasd_device *device)
309  {
310  	struct dasd_block *block = device->block;
311  	struct queue_limits lim;
312  	int rc = 0;
313  
314  	/* make disk known with correct capacity */
315  	if (!block) {
316  		device->state = DASD_STATE_READY;
317  		goto out;
318  	}
319  
320  	if (block->base->discipline->do_analysis != NULL)
321  		rc = block->base->discipline->do_analysis(block);
322  	if (rc) {
323  		if (rc == -EAGAIN)
324  			return rc;
325  		device->state = DASD_STATE_UNFMT;
326  		kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
327  			       KOBJ_CHANGE);
328  		goto out;
329  	}
330  
331  	lim = queue_limits_start_update(block->gdp->queue);
332  	lim.max_dev_sectors = device->discipline->max_sectors(block);
333  	lim.max_hw_sectors = lim.max_dev_sectors;
334  	lim.logical_block_size = block->bp_block;
335  
336  	if (device->discipline->has_discard) {
337  		unsigned int max_bytes;
338  
339  		lim.discard_granularity = block->bp_block;
340  
341  		/* Calculate max_discard_sectors and make it PAGE aligned */
342  		max_bytes = USHRT_MAX * block->bp_block;
343  		max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
344  
345  		lim.max_hw_discard_sectors = max_bytes / block->bp_block;
346  		lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors;
347  	}
348  	rc = queue_limits_commit_update(block->gdp->queue, &lim);
349  	if (rc)
350  		return rc;
351  
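	/* gendisk capacity is in 512-byte sectors; s2b_shift converts device blocks to sectors */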
352  	set_capacity(block->gdp, block->blocks << block->s2b_shift);
353  	device->state = DASD_STATE_READY;
354  
355  	rc = dasd_scan_partitions(block);
356  	if (rc) {
357  		device->state = DASD_STATE_BASIC;
358  		return rc;
359  	}
360  
361  out:
362  	if (device->discipline->basic_to_ready)
363  		rc = device->discipline->basic_to_ready(device);
364  	return rc;
365  }
366  
367  static inline
368  int _wait_for_empty_queues(struct dasd_device *device)
369  {
370  	if (device->block)
371  		return list_empty(&device->ccw_queue) &&
372  			list_empty(&device->block->ccw_queue);
373  	else
374  		return list_empty(&device->ccw_queue);
375  }
376  
377  /*
378   * Remove device from block device layer. Destroy dirty buffers.
379   * Forget format information. Check if the target level is basic
380   * and if it is create fake disk for formatting.
381   */
382  static int dasd_state_ready_to_basic(struct dasd_device *device)
383  {
384  	int rc;
385  
386  	device->state = DASD_STATE_BASIC;
387  	if (device->block) {
388  		struct dasd_block *block = device->block;
389  		rc = dasd_flush_block_queue(block);
390  		if (rc) {
391  			device->state = DASD_STATE_READY;
392  			return rc;
393  		}
394  		dasd_destroy_partitions(block);
395  		block->blocks = 0;
396  		block->bp_block = 0;
397  		block->s2b_shift = 0;
398  	}
399  	return 0;
400  }
401  
402  /*
403   * Back to basic.
404   */
405  static int dasd_state_unfmt_to_basic(struct dasd_device *device)
406  {
407  	device->state = DASD_STATE_BASIC;
408  	return 0;
409  }
410  
411  /*
412   * Make the device online and schedule the bottom half to start
413   * the requeueing of requests from the linux request queue to the
414   * ccw queue.
415   */
416  static int
417  dasd_state_ready_to_online(struct dasd_device * device)
418  {
419  	device->state = DASD_STATE_ONLINE;
420  	if (device->block) {
421  		dasd_schedule_block_bh(device->block);
422  		if ((device->features & DASD_FEATURE_USERAW)) {
423  			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
424  					KOBJ_CHANGE);
425  			return 0;
426  		}
427  		disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
428  			    KOBJ_CHANGE);
429  	}
430  	return 0;
431  }
432  
433  /*
434   * Stop the requeueing of requests again.
435   */
436  static int dasd_state_online_to_ready(struct dasd_device *device)
437  {
438  	int rc;
439  
440  	if (device->discipline->online_to_ready) {
441  		rc = device->discipline->online_to_ready(device);
442  		if (rc)
443  			return rc;
444  	}
445  
446  	device->state = DASD_STATE_READY;
447  	if (device->block && !(device->features & DASD_FEATURE_USERAW))
448  		disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
449  			    KOBJ_CHANGE);
450  	return 0;
451  }
452  
453  /*
454   * Device startup state changes.
455   */
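/*
 * Walk the device up towards its target state, one step at a time:
 * NEW -> KNOWN -> BASIC -> READY -> ONLINE.  A device that ended up in
 * UNFMT cannot be raised any further and yields -EPERM.
 */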
456  static int dasd_increase_state(struct dasd_device *device)
457  {
458  	int rc;
459  
460  	rc = 0;
461  	if (device->state == DASD_STATE_NEW &&
462  	    device->target >= DASD_STATE_KNOWN)
463  		rc = dasd_state_new_to_known(device);
464  
465  	if (!rc &&
466  	    device->state == DASD_STATE_KNOWN &&
467  	    device->target >= DASD_STATE_BASIC)
468  		rc = dasd_state_known_to_basic(device);
469  
470  	if (!rc &&
471  	    device->state == DASD_STATE_BASIC &&
472  	    device->target >= DASD_STATE_READY)
473  		rc = dasd_state_basic_to_ready(device);
474  
475  	if (!rc &&
476  	    device->state == DASD_STATE_UNFMT &&
477  	    device->target > DASD_STATE_UNFMT)
478  		rc = -EPERM;
479  
480  	if (!rc &&
481  	    device->state == DASD_STATE_READY &&
482  	    device->target >= DASD_STATE_ONLINE)
483  		rc = dasd_state_ready_to_online(device);
484  
485  	return rc;
486  }
487  
488  /*
489   * Device shutdown state changes.
490   */
491  static int dasd_decrease_state(struct dasd_device *device)
492  {
493  	int rc;
494  
495  	rc = 0;
496  	if (device->state == DASD_STATE_ONLINE &&
497  	    device->target <= DASD_STATE_READY)
498  		rc = dasd_state_online_to_ready(device);
499  
500  	if (!rc &&
501  	    device->state == DASD_STATE_READY &&
502  	    device->target <= DASD_STATE_BASIC)
503  		rc = dasd_state_ready_to_basic(device);
504  
505  	if (!rc &&
506  	    device->state == DASD_STATE_UNFMT &&
507  	    device->target <= DASD_STATE_BASIC)
508  		rc = dasd_state_unfmt_to_basic(device);
509  
510  	if (!rc &&
511  	    device->state == DASD_STATE_BASIC &&
512  	    device->target <= DASD_STATE_KNOWN)
513  		rc = dasd_state_basic_to_known(device);
514  
515  	if (!rc &&
516  	    device->state == DASD_STATE_KNOWN &&
517  	    device->target <= DASD_STATE_NEW)
518  		rc = dasd_state_known_to_new(device);
519  
520  	return rc;
521  }
522  
523  /*
524   * This is the main startup/shutdown routine.
525   */
526  static void dasd_change_state(struct dasd_device *device)
527  {
528  	int rc;
529  
530  	if (device->state == device->target)
531  		/* Already where we want to go today... */
532  		return;
533  	if (device->state < device->target)
534  		rc = dasd_increase_state(device);
535  	else
536  		rc = dasd_decrease_state(device);
537  	if (rc == -EAGAIN)
538  		return;
539  	if (rc)
540  		device->target = device->state;
541  
542  	/* let user-space know that the device status changed */
543  	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
544  
545  	if (device->state == device->target)
546  		wake_up(&dasd_init_waitq);
547  }
548  
549  /*
550   * Kick starter for devices that did not complete the startup/shutdown
551   * procedure or were sleeping because of a pending state.
552   * dasd_kick_device will schedule a call to do_kick_device to the kernel
553   * event daemon.
554   */
555  static void do_kick_device(struct work_struct *work)
556  {
557  	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
558  	mutex_lock(&device->state_mutex);
559  	dasd_change_state(device);
560  	mutex_unlock(&device->state_mutex);
561  	dasd_schedule_device_bh(device);
562  	dasd_put_device(device);
563  }
564  
565  void dasd_kick_device(struct dasd_device *device)
566  {
567  	dasd_get_device(device);
568  	/* queue call to dasd_kick_device to the kernel event daemon. */
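	/* do_kick_device drops the reference; if the work was already queued, drop it here */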
569  	if (!schedule_work(&device->kick_work))
570  		dasd_put_device(device);
571  }
572  EXPORT_SYMBOL(dasd_kick_device);
573  
574  /*
575   * dasd_reload_device will schedule a call to do_reload_device to the kernel
576   * event daemon.
577   */
578  static void do_reload_device(struct work_struct *work)
579  {
580  	struct dasd_device *device = container_of(work, struct dasd_device,
581  						  reload_device);
582  	device->discipline->reload(device);
583  	dasd_put_device(device);
584  }
585  
586  void dasd_reload_device(struct dasd_device *device)
587  {
588  	dasd_get_device(device);
589  	/* queue call to dasd_reload_device to the kernel event daemon. */
590  	if (!schedule_work(&device->reload_device))
591  		dasd_put_device(device);
592  }
593  EXPORT_SYMBOL(dasd_reload_device);
594  
595  /*
596   * Set the target state for a device and start the state change.
597   */
598  void dasd_set_target_state(struct dasd_device *device, int target)
599  {
600  	dasd_get_device(device);
601  	mutex_lock(&device->state_mutex);
602  	/* If we are in probeonly mode stop at DASD_STATE_READY. */
603  	if (dasd_probeonly && target > DASD_STATE_READY)
604  		target = DASD_STATE_READY;
605  	if (device->target != target) {
606  		if (device->state == target)
607  			wake_up(&dasd_init_waitq);
608  		device->target = target;
609  	}
610  	if (device->state != device->target)
611  		dasd_change_state(device);
612  	mutex_unlock(&device->state_mutex);
613  	dasd_put_device(device);
614  }
615  
616  /*
617   * Enable a device and wait until it has reached its target state.
618   */
619  static inline int _wait_for_device(struct dasd_device *device)
620  {
621  	return (device->state == device->target);
622  }
623  
624  void dasd_enable_device(struct dasd_device *device)
625  {
626  	dasd_set_target_state(device, DASD_STATE_ONLINE);
627  	if (device->state <= DASD_STATE_KNOWN)
628  		/* No discipline for device found. */
629  		dasd_set_target_state(device, DASD_STATE_NEW);
630  	/* Now wait for the devices to come up. */
631  	wait_event(dasd_init_waitq, _wait_for_device(device));
632  
633  	dasd_reload_device(device);
634  	if (device->discipline->kick_validate)
635  		device->discipline->kick_validate(device);
636  }
637  EXPORT_SYMBOL(dasd_enable_device);
638  
639  /*
640   * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
641   */
642  
643  unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
644  
645  #ifdef CONFIG_DASD_PROFILE
646  struct dasd_profile dasd_global_profile = {
647  	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
648  };
649  static struct dentry *dasd_debugfs_global_entry;
650  
651  /*
652   * Add profiling information for cqr before execution.
653   */
654  static void dasd_profile_start(struct dasd_block *block,
655  			       struct dasd_ccw_req *cqr,
656  			       struct request *req)
657  {
658  	struct list_head *l;
659  	unsigned int counter;
660  	struct dasd_device *device;
661  
662  	/* count the length of the chanq for statistics */
663  	counter = 0;
664  	if (dasd_global_profile_level || block->profile.data)
665  		list_for_each(l, &block->ccw_queue)
666  			if (++counter >= 31)
667  				break;
668  
669  	spin_lock(&dasd_global_profile.lock);
670  	if (dasd_global_profile.data) {
671  		dasd_global_profile.data->dasd_io_nr_req[counter]++;
672  		if (rq_data_dir(req) == READ)
673  			dasd_global_profile.data->dasd_read_nr_req[counter]++;
674  	}
675  	spin_unlock(&dasd_global_profile.lock);
676  
677  	spin_lock(&block->profile.lock);
678  	if (block->profile.data) {
679  		block->profile.data->dasd_io_nr_req[counter]++;
680  		if (rq_data_dir(req) == READ)
681  			block->profile.data->dasd_read_nr_req[counter]++;
682  	}
683  	spin_unlock(&block->profile.lock);
684  
685  	/*
686  	 * We count the request for the start device, even though it may run on
687  	 * some other device due to error recovery. This way we make sure that
688  	 * we count each request only once.
689  	 */
690  	device = cqr->startdev;
691  	if (!device->profile.data)
692  		return;
693  
694  	spin_lock(get_ccwdev_lock(device->cdev));
695  	counter = 1; /* request is not yet queued on the start device */
696  	list_for_each(l, &device->ccw_queue)
697  		if (++counter >= 31)
698  			break;
699  	spin_unlock(get_ccwdev_lock(device->cdev));
700  
701  	spin_lock(&device->profile.lock);
702  	device->profile.data->dasd_io_nr_req[counter]++;
703  	if (rq_data_dir(req) == READ)
704  		device->profile.data->dasd_read_nr_req[counter]++;
705  	spin_unlock(&device->profile.lock);
706  }
707  
708  /*
709   * Add profiling information for cqr after execution.
710   */
711  
712  #define dasd_profile_counter(value, index)			   \
713  {								   \
714  	for (index = 0; index < 31 && value >> (2+index); index++) \
715  		;						   \
716  }
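/*
 * dasd_profile_counter() maps a value to one of 32 logarithmic histogram
 * buckets: values 0-3 land in bucket 0 and each doubling moves one bucket
 * up, capped at 31 (e.g. a value of 100 ends up in bucket 5).
 */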
717  
718  static void dasd_profile_end_add_data(struct dasd_profile_info *data,
719  				      int is_alias,
720  				      int is_tpm,
721  				      int is_read,
722  				      long sectors,
723  				      int sectors_ind,
724  				      int tottime_ind,
725  				      int tottimeps_ind,
726  				      int strtime_ind,
727  				      int irqtime_ind,
728  				      int irqtimeps_ind,
729  				      int endtime_ind)
730  {
731  	/* in case of an overflow, reset the whole profile */
732  	if (data->dasd_io_reqs == UINT_MAX) {
733  			memset(data, 0, sizeof(*data));
734  			ktime_get_real_ts64(&data->starttod);
735  	}
736  	data->dasd_io_reqs++;
737  	data->dasd_io_sects += sectors;
738  	if (is_alias)
739  		data->dasd_io_alias++;
740  	if (is_tpm)
741  		data->dasd_io_tpm++;
742  
743  	data->dasd_io_secs[sectors_ind]++;
744  	data->dasd_io_times[tottime_ind]++;
745  	data->dasd_io_timps[tottimeps_ind]++;
746  	data->dasd_io_time1[strtime_ind]++;
747  	data->dasd_io_time2[irqtime_ind]++;
748  	data->dasd_io_time2ps[irqtimeps_ind]++;
749  	data->dasd_io_time3[endtime_ind]++;
750  
751  	if (is_read) {
752  		data->dasd_read_reqs++;
753  		data->dasd_read_sects += sectors;
754  		if (is_alias)
755  			data->dasd_read_alias++;
756  		if (is_tpm)
757  			data->dasd_read_tpm++;
758  		data->dasd_read_secs[sectors_ind]++;
759  		data->dasd_read_times[tottime_ind]++;
760  		data->dasd_read_time1[strtime_ind]++;
761  		data->dasd_read_time2[irqtime_ind]++;
762  		data->dasd_read_time3[endtime_ind]++;
763  	}
764  }
765  
766  static void dasd_profile_end(struct dasd_block *block,
767  			     struct dasd_ccw_req *cqr,
768  			     struct request *req)
769  {
770  	unsigned long strtime, irqtime, endtime, tottime;
771  	unsigned long tottimeps, sectors;
772  	struct dasd_device *device;
773  	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
774  	int irqtime_ind, irqtimeps_ind, endtime_ind;
775  	struct dasd_profile_info *data;
776  
777  	device = cqr->startdev;
778  	if (!(dasd_global_profile_level ||
779  	      block->profile.data ||
780  	      device->profile.data))
781  		return;
782  
783  	sectors = blk_rq_sectors(req);
784  	if (!cqr->buildclk || !cqr->startclk ||
785  	    !cqr->stopclk || !cqr->endclk ||
786  	    !sectors)
787  		return;
788  
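	/*
	 * The clock values are TOD clock stamps; shifting the deltas right by
	 * 12 bits converts them to microseconds.
	 */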
789  	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
790  	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
791  	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
792  	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
793  	tottimeps = tottime / sectors;
794  
795  	dasd_profile_counter(sectors, sectors_ind);
796  	dasd_profile_counter(tottime, tottime_ind);
797  	dasd_profile_counter(tottimeps, tottimeps_ind);
798  	dasd_profile_counter(strtime, strtime_ind);
799  	dasd_profile_counter(irqtime, irqtime_ind);
800  	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
801  	dasd_profile_counter(endtime, endtime_ind);
802  
803  	spin_lock(&dasd_global_profile.lock);
804  	if (dasd_global_profile.data) {
805  		data = dasd_global_profile.data;
806  		data->dasd_sum_times += tottime;
807  		data->dasd_sum_time_str += strtime;
808  		data->dasd_sum_time_irq += irqtime;
809  		data->dasd_sum_time_end += endtime;
810  		dasd_profile_end_add_data(dasd_global_profile.data,
811  					  cqr->startdev != block->base,
812  					  cqr->cpmode == 1,
813  					  rq_data_dir(req) == READ,
814  					  sectors, sectors_ind, tottime_ind,
815  					  tottimeps_ind, strtime_ind,
816  					  irqtime_ind, irqtimeps_ind,
817  					  endtime_ind);
818  	}
819  	spin_unlock(&dasd_global_profile.lock);
820  
821  	spin_lock(&block->profile.lock);
822  	if (block->profile.data) {
823  		data = block->profile.data;
824  		data->dasd_sum_times += tottime;
825  		data->dasd_sum_time_str += strtime;
826  		data->dasd_sum_time_irq += irqtime;
827  		data->dasd_sum_time_end += endtime;
828  		dasd_profile_end_add_data(block->profile.data,
829  					  cqr->startdev != block->base,
830  					  cqr->cpmode == 1,
831  					  rq_data_dir(req) == READ,
832  					  sectors, sectors_ind, tottime_ind,
833  					  tottimeps_ind, strtime_ind,
834  					  irqtime_ind, irqtimeps_ind,
835  					  endtime_ind);
836  	}
837  	spin_unlock(&block->profile.lock);
838  
839  	spin_lock(&device->profile.lock);
840  	if (device->profile.data) {
841  		data = device->profile.data;
842  		data->dasd_sum_times += tottime;
843  		data->dasd_sum_time_str += strtime;
844  		data->dasd_sum_time_irq += irqtime;
845  		data->dasd_sum_time_end += endtime;
846  		dasd_profile_end_add_data(device->profile.data,
847  					  cqr->startdev != block->base,
848  					  cqr->cpmode == 1,
849  					  rq_data_dir(req) == READ,
850  					  sectors, sectors_ind, tottime_ind,
851  					  tottimeps_ind, strtime_ind,
852  					  irqtime_ind, irqtimeps_ind,
853  					  endtime_ind);
854  	}
855  	spin_unlock(&device->profile.lock);
856  }
857  
858  void dasd_profile_reset(struct dasd_profile *profile)
859  {
860  	struct dasd_profile_info *data;
861  
862  	spin_lock_bh(&profile->lock);
863  	data = profile->data;
864  	if (!data) {
865  		spin_unlock_bh(&profile->lock);
866  		return;
867  	}
868  	memset(data, 0, sizeof(*data));
869  	ktime_get_real_ts64(&data->starttod);
870  	spin_unlock_bh(&profile->lock);
871  }
872  
873  int dasd_profile_on(struct dasd_profile *profile)
874  {
875  	struct dasd_profile_info *data;
876  
877  	data = kzalloc(sizeof(*data), GFP_KERNEL);
878  	if (!data)
879  		return -ENOMEM;
880  	spin_lock_bh(&profile->lock);
881  	if (profile->data) {
882  		spin_unlock_bh(&profile->lock);
883  		kfree(data);
884  		return 0;
885  	}
886  	ktime_get_real_ts64(&data->starttod);
887  	profile->data = data;
888  	spin_unlock_bh(&profile->lock);
889  	return 0;
890  }
891  
892  void dasd_profile_off(struct dasd_profile *profile)
893  {
894  	spin_lock_bh(&profile->lock);
895  	kfree(profile->data);
896  	profile->data = NULL;
897  	spin_unlock_bh(&profile->lock);
898  }
899  
900  char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
901  {
902  	char *buffer;
903  
904  	buffer = vmalloc(user_len + 1);
905  	if (buffer == NULL)
906  		return ERR_PTR(-ENOMEM);
907  	if (copy_from_user(buffer, user_buf, user_len) != 0) {
908  		vfree(buffer);
909  		return ERR_PTR(-EFAULT);
910  	}
911  	/* got the string, now strip linefeed. */
912  	if (buffer[user_len - 1] == '\n')
913  		buffer[user_len - 1] = 0;
914  	else
915  		buffer[user_len] = 0;
916  	return buffer;
917  }
918  
919  static ssize_t dasd_stats_write(struct file *file,
920  				const char __user *user_buf,
921  				size_t user_len, loff_t *pos)
922  {
923  	char *buffer, *str;
924  	int rc;
925  	struct seq_file *m = (struct seq_file *)file->private_data;
926  	struct dasd_profile *prof = m->private;
927  
928  	if (user_len > 65536)
929  		user_len = 65536;
930  	buffer = dasd_get_user_string(user_buf, user_len);
931  	if (IS_ERR(buffer))
932  		return PTR_ERR(buffer);
933  
934  	str = skip_spaces(buffer);
935  	rc = user_len;
936  	if (strncmp(str, "reset", 5) == 0) {
937  		dasd_profile_reset(prof);
938  	} else if (strncmp(str, "on", 2) == 0) {
939  		rc = dasd_profile_on(prof);
940  		if (rc)
941  			goto out;
942  		rc = user_len;
943  		if (prof == &dasd_global_profile) {
944  			dasd_profile_reset(prof);
945  			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
946  		}
947  	} else if (strncmp(str, "off", 3) == 0) {
948  		if (prof == &dasd_global_profile)
949  			dasd_global_profile_level = DASD_PROFILE_OFF;
950  		dasd_profile_off(prof);
951  	} else
952  		rc = -EINVAL;
953  out:
954  	vfree(buffer);
955  	return rc;
956  }
957  
958  static void dasd_stats_array(struct seq_file *m, unsigned int *array)
959  {
960  	int i;
961  
962  	for (i = 0; i < 32; i++)
963  		seq_printf(m, "%u ", array[i]);
964  	seq_putc(m, '\n');
965  }
966  
967  static void dasd_stats_seq_print(struct seq_file *m,
968  				 struct dasd_profile_info *data)
969  {
970  	seq_printf(m, "start_time %lld.%09ld\n",
971  		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
972  	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
973  	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
974  	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
975  	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
976  	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
977  		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
978  	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
979  		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
980  	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
981  		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
982  	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
983  		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
984  	seq_puts(m, "histogram_sectors ");
985  	dasd_stats_array(m, data->dasd_io_secs);
986  	seq_puts(m, "histogram_io_times ");
987  	dasd_stats_array(m, data->dasd_io_times);
988  	seq_puts(m, "histogram_io_times_weighted ");
989  	dasd_stats_array(m, data->dasd_io_timps);
990  	seq_puts(m, "histogram_time_build_to_ssch ");
991  	dasd_stats_array(m, data->dasd_io_time1);
992  	seq_puts(m, "histogram_time_ssch_to_irq ");
993  	dasd_stats_array(m, data->dasd_io_time2);
994  	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
995  	dasd_stats_array(m, data->dasd_io_time2ps);
996  	seq_puts(m, "histogram_time_irq_to_end ");
997  	dasd_stats_array(m, data->dasd_io_time3);
998  	seq_puts(m, "histogram_ccw_queue_length ");
999  	dasd_stats_array(m, data->dasd_io_nr_req);
1000  	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
1001  	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
1002  	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
1003  	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
1004  	seq_puts(m, "histogram_read_sectors ");
1005  	dasd_stats_array(m, data->dasd_read_secs);
1006  	seq_puts(m, "histogram_read_times ");
1007  	dasd_stats_array(m, data->dasd_read_times);
1008  	seq_puts(m, "histogram_read_time_build_to_ssch ");
1009  	dasd_stats_array(m, data->dasd_read_time1);
1010  	seq_puts(m, "histogram_read_time_ssch_to_irq ");
1011  	dasd_stats_array(m, data->dasd_read_time2);
1012  	seq_puts(m, "histogram_read_time_irq_to_end ");
1013  	dasd_stats_array(m, data->dasd_read_time3);
1014  	seq_puts(m, "histogram_read_ccw_queue_length ");
1015  	dasd_stats_array(m, data->dasd_read_nr_req);
1016  }
1017  
1018  static int dasd_stats_show(struct seq_file *m, void *v)
1019  {
1020  	struct dasd_profile *profile;
1021  	struct dasd_profile_info *data;
1022  
1023  	profile = m->private;
1024  	spin_lock_bh(&profile->lock);
1025  	data = profile->data;
1026  	if (!data) {
1027  		spin_unlock_bh(&profile->lock);
1028  		seq_puts(m, "disabled\n");
1029  		return 0;
1030  	}
1031  	dasd_stats_seq_print(m, data);
1032  	spin_unlock_bh(&profile->lock);
1033  	return 0;
1034  }
1035  
1036  static int dasd_stats_open(struct inode *inode, struct file *file)
1037  {
1038  	struct dasd_profile *profile = inode->i_private;
1039  	return single_open(file, dasd_stats_show, profile);
1040  }
1041  
1042  static const struct file_operations dasd_stats_raw_fops = {
1043  	.owner		= THIS_MODULE,
1044  	.open		= dasd_stats_open,
1045  	.read		= seq_read,
1046  	.llseek		= seq_lseek,
1047  	.release	= single_release,
1048  	.write		= dasd_stats_write,
1049  };
1050  
1051  static void dasd_profile_init(struct dasd_profile *profile,
1052  			      struct dentry *base_dentry)
1053  {
1054  	umode_t mode;
1055  	struct dentry *pde;
1056  
1057  	if (!base_dentry)
1058  		return;
1059  	profile->dentry = NULL;
1060  	profile->data = NULL;
1061  	mode = (S_IRUSR | S_IWUSR | S_IFREG);
1062  	pde = debugfs_create_file("statistics", mode, base_dentry,
1063  				  profile, &dasd_stats_raw_fops);
1064  	if (pde && !IS_ERR(pde))
1065  		profile->dentry = pde;
1066  	return;
1067  }
1068  
1069  static void dasd_profile_exit(struct dasd_profile *profile)
1070  {
1071  	dasd_profile_off(profile);
1072  	debugfs_remove(profile->dentry);
1073  	profile->dentry = NULL;
1074  }
1075  
1076  static void dasd_statistics_removeroot(void)
1077  {
1078  	dasd_global_profile_level = DASD_PROFILE_OFF;
1079  	dasd_profile_exit(&dasd_global_profile);
1080  	debugfs_remove(dasd_debugfs_global_entry);
1081  	debugfs_remove(dasd_debugfs_root_entry);
1082  }
1083  
1084  static void dasd_statistics_createroot(void)
1085  {
1086  	struct dentry *pde;
1087  
1088  	dasd_debugfs_root_entry = NULL;
1089  	pde = debugfs_create_dir("dasd", NULL);
1090  	if (!pde || IS_ERR(pde))
1091  		goto error;
1092  	dasd_debugfs_root_entry = pde;
1093  	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
1094  	if (!pde || IS_ERR(pde))
1095  		goto error;
1096  	dasd_debugfs_global_entry = pde;
1097  	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
1098  	return;
1099  
1100  error:
1101  	DBF_EVENT(DBF_ERR, "%s",
1102  		  "Creation of the dasd debugfs interface failed");
1103  	dasd_statistics_removeroot();
1104  	return;
1105  }
1106  
1107  #else
1108  #define dasd_profile_start(block, cqr, req) do {} while (0)
1109  #define dasd_profile_end(block, cqr, req) do {} while (0)
1110  
1111  static void dasd_statistics_createroot(void)
1112  {
1113  	return;
1114  }
1115  
1116  static void dasd_statistics_removeroot(void)
1117  {
1118  	return;
1119  }
1120  
1121  static void dasd_profile_init(struct dasd_profile *profile,
1122  			      struct dentry *base_dentry)
1123  {
1124  	return;
1125  }
1126  
1127  static void dasd_profile_exit(struct dasd_profile *profile)
1128  {
1129  	return;
1130  }
1131  
1132  int dasd_profile_on(struct dasd_profile *profile)
1133  {
1134  	return 0;
1135  }
1136  
1137  #endif				/* CONFIG_DASD_PROFILE */
1138  
1139  static int dasd_hosts_show(struct seq_file *m, void *v)
1140  {
1141  	struct dasd_device *device;
1142  	int rc = -EOPNOTSUPP;
1143  
1144  	device = m->private;
1145  	dasd_get_device(device);
1146  
1147  	if (device->discipline->hosts_print)
1148  		rc = device->discipline->hosts_print(device, m);
1149  
1150  	dasd_put_device(device);
1151  	return rc;
1152  }
1153  
1154  DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
1155  
1156  static void dasd_hosts_exit(struct dasd_device *device)
1157  {
1158  	debugfs_remove(device->hosts_dentry);
1159  	device->hosts_dentry = NULL;
1160  }
1161  
1162  static void dasd_hosts_init(struct dentry *base_dentry,
1163  			    struct dasd_device *device)
1164  {
1165  	struct dentry *pde;
1166  	umode_t mode;
1167  
1168  	if (!base_dentry)
1169  		return;
1170  
1171  	mode = S_IRUSR | S_IFREG;
1172  	pde = debugfs_create_file("host_access_list", mode, base_dentry,
1173  				  device, &dasd_hosts_fops);
1174  	if (pde && !IS_ERR(pde))
1175  		device->hosts_dentry = pde;
1176  }
1177  
1178  struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
1179  					  struct dasd_device *device,
1180  					  struct dasd_ccw_req *cqr)
1181  {
1182  	unsigned long flags;
1183  	char *data, *chunk;
1184  	int size = 0;
1185  
1186  	if (cplength > 0)
1187  		size += cplength * sizeof(struct ccw1);
1188  	if (datasize > 0)
1189  		size += datasize;
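	/* no cqr supplied: reserve space for one at the start of the chunk, rounded to 8 bytes */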
1190  	if (!cqr)
1191  		size += (sizeof(*cqr) + 7L) & -8L;
1192  
1193  	spin_lock_irqsave(&device->mem_lock, flags);
1194  	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
1195  	spin_unlock_irqrestore(&device->mem_lock, flags);
1196  	if (!chunk)
1197  		return ERR_PTR(-ENOMEM);
1198  	if (!cqr) {
1199  		cqr = (void *) data;
1200  		data += (sizeof(*cqr) + 7L) & -8L;
1201  	}
1202  	memset(cqr, 0, sizeof(*cqr));
1203  	cqr->mem_chunk = chunk;
1204  	if (cplength > 0) {
1205  		cqr->cpaddr = data;
1206  		data += cplength * sizeof(struct ccw1);
1207  		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1208  	}
1209  	if (datasize > 0) {
1210  		cqr->data = data;
1211   		memset(cqr->data, 0, datasize);
1212  	}
1213  	cqr->magic = magic;
1214  	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1215  	dasd_get_device(device);
1216  	return cqr;
1217  }
1218  EXPORT_SYMBOL(dasd_smalloc_request);
1219  
1220  struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
1221  					  int datasize,
1222  					  struct dasd_device *device)
1223  {
1224  	struct dasd_ccw_req *cqr;
1225  	unsigned long flags;
1226  	int size, cqr_size;
1227  	char *data;
1228  
1229  	cqr_size = (sizeof(*cqr) + 7L) & -8L;
1230  	size = cqr_size;
1231  	if (cplength > 0)
1232  		size += cplength * sizeof(struct ccw1);
1233  	if (datasize > 0)
1234  		size += datasize;
1235  
1236  	spin_lock_irqsave(&device->mem_lock, flags);
1237  	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
1238  	spin_unlock_irqrestore(&device->mem_lock, flags);
1239  	if (!cqr)
1240  		return ERR_PTR(-ENOMEM);
1241  	memset(cqr, 0, sizeof(*cqr));
1242  	data = (char *)cqr + cqr_size;
1243  	cqr->cpaddr = NULL;
1244  	if (cplength > 0) {
1245  		cqr->cpaddr = data;
1246  		data += cplength * sizeof(struct ccw1);
1247  		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1248  	}
1249  	cqr->data = NULL;
1250  	if (datasize > 0) {
1251  		cqr->data = data;
1252  		memset(cqr->data, 0, datasize);
1253  	}
1254  
1255  	cqr->magic = magic;
1256  	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1257  	dasd_get_device(device);
1258  
1259  	return cqr;
1260  }
1261  EXPORT_SYMBOL(dasd_fmalloc_request);
1262  
1263  void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1264  {
1265  	unsigned long flags;
1266  
1267  	spin_lock_irqsave(&device->mem_lock, flags);
1268  	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
1269  	spin_unlock_irqrestore(&device->mem_lock, flags);
1270  	dasd_put_device(device);
1271  }
1272  EXPORT_SYMBOL(dasd_sfree_request);
1273  
1274  void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1275  {
1276  	unsigned long flags;
1277  
1278  	spin_lock_irqsave(&device->mem_lock, flags);
1279  	dasd_free_chunk(&device->ese_chunks, cqr);
1280  	spin_unlock_irqrestore(&device->mem_lock, flags);
1281  	dasd_put_device(device);
1282  }
1283  EXPORT_SYMBOL(dasd_ffree_request);
1284  
1285  /*
1286   * Check discipline magic in cqr.
1287   */
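/*
 * The magic is the discipline name in EBCDIC, stored as a 32-bit value when
 * the request was allocated, so a mismatch indicates a foreign or corrupted
 * request.
 */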
1288  static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
1289  {
1290  	struct dasd_device *device;
1291  
1292  	if (cqr == NULL)
1293  		return -EINVAL;
1294  	device = cqr->startdev;
1295  	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
1296  		DBF_DEV_EVENT(DBF_WARNING, device,
1297  			    " dasd_ccw_req 0x%08x magic doesn't match"
1298  			    " discipline 0x%08x",
1299  			    cqr->magic,
1300  			    *(unsigned int *) device->discipline->name);
1301  		return -EINVAL;
1302  	}
1303  	return 0;
1304  }
1305  
1306  /*
1307   * Terminate the current i/o and set the request to clear_pending.
1308   * Timer keeps device running.
1309   * ccw_device_clear can fail if the i/o subsystem
1310   * is in a bad mood.
1311   */
1312  int dasd_term_IO(struct dasd_ccw_req *cqr)
1313  {
1314  	struct dasd_device *device;
1315  	int retries, rc;
1316  
1317  	/* Check the cqr */
1318  	rc = dasd_check_cqr(cqr);
1319  	if (rc)
1320  		return rc;
1321  	retries = 0;
1322  	device = (struct dasd_device *) cqr->startdev;
1323  	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
1324  		rc = ccw_device_clear(device->cdev, (long) cqr);
1325  		switch (rc) {
1326  		case 0:	/* termination successful */
1327  			cqr->status = DASD_CQR_CLEAR_PENDING;
1328  			cqr->stopclk = get_tod_clock();
1329  			cqr->starttime = 0;
1330  			DBF_DEV_EVENT(DBF_DEBUG, device,
1331  				      "terminate cqr %p successful",
1332  				      cqr);
1333  			break;
1334  		case -ENODEV:
1335  			DBF_DEV_EVENT(DBF_ERR, device, "%s",
1336  				      "device gone, retry");
1337  			break;
1338  		case -EINVAL:
1339  			/*
1340  			 * device not valid so no I/O could be running
1341  			 * handle CQR as termination successful
1342  			 */
1343  			cqr->status = DASD_CQR_CLEARED;
1344  			cqr->stopclk = get_tod_clock();
1345  			cqr->starttime = 0;
1346  			/* no retries for invalid devices */
1347  			cqr->retries = -1;
1348  			DBF_DEV_EVENT(DBF_ERR, device, "%s",
1349  				      "EINVAL, handle as terminated");
1350  			/* fake rc to success */
1351  			rc = 0;
1352  			break;
1353  		default:
1354  			dev_err(&device->cdev->dev,
1355  				"Unexpected error during request termination %d\n", rc);
1356  			BUG();
1357  			break;
1358  		}
1359  		retries++;
1360  	}
1361  	dasd_schedule_device_bh(device);
1362  	return rc;
1363  }
1364  EXPORT_SYMBOL(dasd_term_IO);
1365  
1366  /*
1367   * Start the i/o. This start_IO can fail if the channel is really busy.
1368   * In that case set up a timer to start the request later.
1369   */
1370  int dasd_start_IO(struct dasd_ccw_req *cqr)
1371  {
1372  	struct dasd_device *device;
1373  	int rc;
1374  
1375  	/* Check the cqr */
1376  	rc = dasd_check_cqr(cqr);
1377  	if (rc) {
1378  		cqr->intrc = rc;
1379  		return rc;
1380  	}
1381  	device = (struct dasd_device *) cqr->startdev;
1382  	if (((cqr->block &&
1383  	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
1384  	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
1385  	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1386  		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
1387  			      "because of stolen lock", cqr);
1388  		cqr->status = DASD_CQR_ERROR;
1389  		cqr->intrc = -EPERM;
1390  		return -EPERM;
1391  	}
1392  	if (cqr->retries < 0) {
1393  		dev_err(&device->cdev->dev,
1394  			"Start I/O ran out of retries\n");
1395  		cqr->status = DASD_CQR_ERROR;
1396  		return -EIO;
1397  	}
1398  	cqr->startclk = get_tod_clock();
1399  	cqr->starttime = jiffies;
1400  	cqr->retries--;
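	/*
	 * Restrict the logical path mask to the currently operational paths;
	 * if none of the requested paths are left, fall back to all
	 * operational paths.
	 */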
1401  	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1402  		cqr->lpm &= dasd_path_get_opm(device);
1403  		if (!cqr->lpm)
1404  			cqr->lpm = dasd_path_get_opm(device);
1405  	}
1406  	/*
1407  	 * remember the amount of formatted tracks to prevent double format on
1408  	 * ESE devices
1409  	 */
1410  	if (cqr->block)
1411  		cqr->trkcount = atomic_read(&cqr->block->trkcount);
1412  
1413  	if (cqr->cpmode == 1) {
1414  		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
1415  					 (long) cqr, cqr->lpm);
1416  	} else {
1417  		rc = ccw_device_start(device->cdev, cqr->cpaddr,
1418  				      (long) cqr, cqr->lpm, 0);
1419  	}
1420  	switch (rc) {
1421  	case 0:
1422  		cqr->status = DASD_CQR_IN_IO;
1423  		break;
1424  	case -EBUSY:
1425  		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1426  			      "start_IO: device busy, retry later");
1427  		break;
1428  	case -EACCES:
1429  		/* -EACCES indicates that the request used only a subset of the
1430  		 * available paths and all these paths are gone. If the lpm of
1431  		 * this request was only a subset of the opm (e.g. the ppm) then
1432  		 * we just do a retry with all available paths.
1433  		 * If we already use the full opm, something is amiss, and we
1434  		 * need a full path verification.
1435  		 */
1436  		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1437  			DBF_DEV_EVENT(DBF_WARNING, device,
1438  				      "start_IO: selected paths gone (%x)",
1439  				      cqr->lpm);
1440  		} else if (cqr->lpm != dasd_path_get_opm(device)) {
1441  			cqr->lpm = dasd_path_get_opm(device);
1442  			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
1443  				      "start_IO: selected paths gone,"
1444  				      " retry on all paths");
1445  		} else {
1446  			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1447  				      "start_IO: all paths in opm gone,"
1448  				      " do path verification");
1449  			dasd_generic_last_path_gone(device);
1450  			dasd_path_no_path(device);
1451  			dasd_path_set_tbvpm(device,
1452  					  ccw_device_get_path_mask(
1453  						  device->cdev));
1454  		}
1455  		break;
1456  	case -ENODEV:
1457  		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1458  			      "start_IO: -ENODEV device gone, retry");
1459  		/* this is equivalent to CC=3 for SSCH report this to EER */
1460  		dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
1461  		break;
1462  	case -EIO:
1463  		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1464  			      "start_IO: -EIO device gone, retry");
1465  		break;
1466  	case -EINVAL:
1467  		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1468  			      "start_IO: -EINVAL device currently "
1469  			      "not accessible");
1470  		break;
1471  	default:
1472  		dev_err(&device->cdev->dev,
1473  			"Unexpected error during request start %d", rc);
1474  		BUG();
1475  		break;
1476  	}
1477  	cqr->intrc = rc;
1478  	return rc;
1479  }
1480  EXPORT_SYMBOL(dasd_start_IO);
1481  
1482  /*
1483   * Timeout function for dasd devices. This is used for different purposes
1484   *  1) missing interrupt handler for normal operation
1485   *  2) delayed start of request where start_IO failed with -EBUSY
1486   *  3) timeout for missing state change interrupts
1487   * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
1488   * DASD_CQR_QUEUED for 2) and 3).
1489   */
1490  static void dasd_device_timeout(struct timer_list *t)
1491  {
1492  	unsigned long flags;
1493  	struct dasd_device *device;
1494  
1495  	device = from_timer(device, t, timer);
1496  	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1497  	/* re-activate request queue */
1498  	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1499  	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1500  	dasd_schedule_device_bh(device);
1501  }
1502  
1503  /*
1504   * Setup timeout for a device in jiffies.
1505   */
1506  void dasd_device_set_timer(struct dasd_device *device, int expires)
1507  {
1508  	if (expires == 0)
1509  		del_timer(&device->timer);
1510  	else
1511  		mod_timer(&device->timer, jiffies + expires);
1512  }
1513  EXPORT_SYMBOL(dasd_device_set_timer);
1514  
1515  /*
1516   * Clear timeout for a device.
1517   */
1518  void dasd_device_clear_timer(struct dasd_device *device)
1519  {
1520  	del_timer(&device->timer);
1521  }
1522  EXPORT_SYMBOL(dasd_device_clear_timer);
1523  
1524  static void dasd_handle_killed_request(struct ccw_device *cdev,
1525  				       unsigned long intparm)
1526  {
1527  	struct dasd_ccw_req *cqr;
1528  	struct dasd_device *device;
1529  
1530  	if (!intparm)
1531  		return;
1532  	cqr = (struct dasd_ccw_req *) intparm;
1533  	if (cqr->status != DASD_CQR_IN_IO) {
1534  		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1535  				"invalid status in handle_killed_request: "
1536  				"%02x", cqr->status);
1537  		return;
1538  	}
1539  
1540  	device = dasd_device_from_cdev_locked(cdev);
1541  	if (IS_ERR(device)) {
1542  		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1543  				"unable to get device from cdev");
1544  		return;
1545  	}
1546  
1547  	if (!cqr->startdev ||
1548  	    device != cqr->startdev ||
1549  	    strncmp(cqr->startdev->discipline->ebcname,
1550  		    (char *) &cqr->magic, 4)) {
1551  		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1552  				"invalid device in request");
1553  		dasd_put_device(device);
1554  		return;
1555  	}
1556  
1557  	/* Schedule request to be retried. */
1558  	cqr->status = DASD_CQR_QUEUED;
1559  
1560  	dasd_device_clear_timer(device);
1561  	dasd_schedule_device_bh(device);
1562  	dasd_put_device(device);
1563  }
1564  
1565  void dasd_generic_handle_state_change(struct dasd_device *device)
1566  {
1567  	/* First of all start sense subsystem status request. */
1568  	dasd_eer_snss(device);
1569  
1570  	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1571  	dasd_schedule_device_bh(device);
1572  	if (device->block) {
1573  		dasd_schedule_block_bh(device->block);
1574  		if (device->block->gdp)
1575  			blk_mq_run_hw_queues(device->block->gdp->queue, true);
1576  	}
1577  }
1578  EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
1579  
1580  static int dasd_check_hpf_error(struct irb *irb)
1581  {
1582  	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
1583  	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
1584  	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
1585  }
1586  
1587  static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
1588  {
1589  	struct dasd_device *device = NULL;
1590  	u8 *sense = NULL;
1591  
1592  	if (!block)
1593  		return 0;
1594  	device = block->base;
1595  	if (!device || !device->discipline->is_ese)
1596  		return 0;
1597  	if (!device->discipline->is_ese(device))
1598  		return 0;
1599  
1600  	sense = dasd_get_sense(irb);
1601  	if (!sense)
1602  		return 0;
1603  
1604  	if (sense[1] & SNS1_NO_REC_FOUND)
1605  		return 1;
1606  
1607  	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
1608  	    scsw_is_tm(&irb->scsw) &&
1609  	    !(sense[2] & SNS2_ENV_DATA_PRESENT))
1610  		return 1;
1611  
1612  	return 0;
1613  }
1614  
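/*
 * Check the sense data for the out-of-space condition of a thin-provisioned
 * (ESE) extent pool: equipment check, permanent error, write inhibited and
 * sense byte 25 set to 0x01.
 */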
1615  static int dasd_ese_oos_cond(u8 *sense)
1616  {
1617  	return sense[0] & SNS0_EQUIPMENT_CHECK &&
1618  		sense[1] & SNS1_PERM_ERR &&
1619  		sense[1] & SNS1_WRITE_INHIBITED &&
1620  		sense[25] == 0x01;
1621  }
1622  
1623  /*
1624   * Interrupt handler for "normal" ssch-io based dasd devices.
1625   */
1626  void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1627  		      struct irb *irb)
1628  {
1629  	struct dasd_ccw_req *cqr, *next, *fcqr;
1630  	struct dasd_device *device;
1631  	unsigned long now;
1632  	int nrf_suppressed = 0;
1633  	int it_suppressed = 0;
1634  	struct request *req;
1635  	u8 *sense = NULL;
1636  	int expires;
1637  
1638  	cqr = (struct dasd_ccw_req *) intparm;
1639  	if (IS_ERR(irb)) {
1640  		switch (PTR_ERR(irb)) {
1641  		case -EIO:
1642  			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1643  				device = cqr->startdev;
1644  				cqr->status = DASD_CQR_CLEARED;
1645  				dasd_device_clear_timer(device);
1646  				wake_up(&dasd_flush_wq);
1647  				dasd_schedule_device_bh(device);
1648  				return;
1649  			}
1650  			break;
1651  		case -ETIMEDOUT:
1652  			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1653  					"request timed out\n", __func__);
1654  			break;
1655  		default:
1656  			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1657  					"unknown error %ld\n", __func__,
1658  					PTR_ERR(irb));
1659  		}
1660  		dasd_handle_killed_request(cdev, intparm);
1661  		return;
1662  	}
1663  
1664  	now = get_tod_clock();
1665  	/* check for conditions that should be handled immediately */
1666  	if (!cqr ||
1667  	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1668  	      scsw_cstat(&irb->scsw) == 0)) {
1669  		if (cqr)
1670  			memcpy(&cqr->irb, irb, sizeof(*irb));
1671  		device = dasd_device_from_cdev_locked(cdev);
1672  		if (IS_ERR(device))
1673  			return;
1674  		/* ignore unsolicited interrupts for DIAG discipline */
1675  		if (device->discipline == dasd_diag_discipline_pointer) {
1676  			dasd_put_device(device);
1677  			return;
1678  		}
1679  
1680  		/*
1681  		 * In some cases 'File Protected' or 'No Record Found' errors
1682  		 * might be expected and debug log messages for the
1683  		 * corresponding interrupts shouldn't be written then.
1684  		 * Check if either of the according suppress bits is set.
1685  		 */
1686  		sense = dasd_get_sense(irb);
1687  		if (sense) {
1688  			it_suppressed =	(sense[1] & SNS1_INV_TRACK_FORMAT) &&
1689  				!(sense[2] & SNS2_ENV_DATA_PRESENT) &&
1690  				test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
1691  			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
1692  				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1693  
1694  			/*
1695  			 * Extent pool probably out-of-space.
1696  			 * Stop device and check exhaust level.
1697  			 */
1698  			if (dasd_ese_oos_cond(sense)) {
1699  				dasd_generic_space_exhaust(device, cqr);
1700  				device->discipline->ext_pool_exhaust(device, cqr);
1701  				dasd_put_device(device);
1702  				return;
1703  			}
1704  		}
1705  		if (!(it_suppressed || nrf_suppressed))
1706  			device->discipline->dump_sense_dbf(device, irb, "int");
1707  
1708  		if (device->features & DASD_FEATURE_ERPLOG)
1709  			device->discipline->dump_sense(device, cqr, irb);
1710  		device->discipline->check_for_device_change(device, cqr, irb);
1711  		dasd_put_device(device);
1712  	}
1713  
1714  	/* check for attention message */
1715  	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1716  		device = dasd_device_from_cdev_locked(cdev);
1717  		if (!IS_ERR(device)) {
1718  			device->discipline->check_attention(device,
1719  							    irb->esw.esw1.lpum);
1720  			dasd_put_device(device);
1721  		}
1722  	}
1723  
1724  	if (!cqr)
1725  		return;
1726  
1727  	device = (struct dasd_device *) cqr->startdev;
1728  	if (!device ||
1729  	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1730  		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1731  				"invalid device in request");
1732  		return;
1733  	}
1734  
1735  	if (dasd_ese_needs_format(cqr->block, irb)) {
1736  		req = dasd_get_callback_data(cqr);
1737  		if (!req) {
1738  			cqr->status = DASD_CQR_ERROR;
1739  			return;
1740  		}
1741  		if (rq_data_dir(req) == READ) {
1742  			device->discipline->ese_read(cqr, irb);
1743  			cqr->status = DASD_CQR_SUCCESS;
1744  			cqr->stopclk = now;
1745  			dasd_device_clear_timer(device);
1746  			dasd_schedule_device_bh(device);
1747  			return;
1748  		}
1749  		fcqr = device->discipline->ese_format(device, cqr, irb);
1750  		if (IS_ERR(fcqr)) {
1751  			if (PTR_ERR(fcqr) == -EINVAL) {
1752  				cqr->status = DASD_CQR_ERROR;
1753  				return;
1754  			}
1755  			/*
1756  			 * If we can't format now, let the request go
1757  			 * one extra round. Maybe we can format later.
1758  			 */
1759  			cqr->status = DASD_CQR_QUEUED;
1760  			dasd_schedule_device_bh(device);
1761  			return;
1762  		} else {
1763  			fcqr->status = DASD_CQR_QUEUED;
1764  			cqr->status = DASD_CQR_QUEUED;
1765  			list_add(&fcqr->devlist, &device->ccw_queue);
1766  			dasd_schedule_device_bh(device);
1767  			return;
1768  		}
1769  	}
1770  
1771  	/* Check for clear pending */
1772  	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1773  	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1774  		cqr->status = DASD_CQR_CLEARED;
1775  		dasd_device_clear_timer(device);
1776  		wake_up(&dasd_flush_wq);
1777  		dasd_schedule_device_bh(device);
1778  		return;
1779  	}
1780  
1781  	/* check status - the request might have been killed by dyn detach */
1782  	if (cqr->status != DASD_CQR_IN_IO) {
1783  		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1784  			      "status %02x", dev_name(&cdev->dev), cqr->status);
1785  		return;
1786  	}
1787  
1788  	next = NULL;
1789  	expires = 0;
1790  	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1791  	    scsw_cstat(&irb->scsw) == 0) {
1792  		/* request was completed successfully */
1793  		cqr->status = DASD_CQR_SUCCESS;
1794  		cqr->stopclk = now;
1795  		/* Start first request on queue if possible -> fast_io. */
1796  		if (cqr->devlist.next != &device->ccw_queue) {
1797  			next = list_entry(cqr->devlist.next,
1798  					  struct dasd_ccw_req, devlist);
1799  		}
1800  	} else {  /* error */
1801  		/* check for HPF error
1802  		 * call discipline function to requeue all requests
1803  		 * and disable HPF accordingly
1804  		 */
1805  		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
1806  		    device->discipline->handle_hpf_error)
1807  			device->discipline->handle_hpf_error(device, irb);
1808  		/*
1809  		 * If we don't want complex ERP for this request, then just
1810  		 * reset this and retry it in the fastpath
1811  		 */
1812  		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1813  		    cqr->retries > 0) {
1814  			if (cqr->lpm == dasd_path_get_opm(device))
1815  				DBF_DEV_EVENT(DBF_DEBUG, device,
1816  					      "default ERP in fastpath "
1817  					      "(%i retries left)",
1818  					      cqr->retries);
1819  			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1820  				cqr->lpm = dasd_path_get_opm(device);
1821  			cqr->status = DASD_CQR_QUEUED;
1822  			next = cqr;
1823  		} else
1824  			cqr->status = DASD_CQR_ERROR;
1825  	}
1826  	if (next && (next->status == DASD_CQR_QUEUED) &&
1827  	    (!device->stopped)) {
1828  		if (device->discipline->start_IO(next) == 0)
1829  			expires = next->expires;
1830  	}
1831  	if (expires != 0)
1832  		dasd_device_set_timer(device, expires);
1833  	else
1834  		dasd_device_clear_timer(device);
1835  	dasd_schedule_device_bh(device);
1836  }
1837  EXPORT_SYMBOL(dasd_int_handler);
1838  
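/*
 * Unit check handler called by the common I/O layer for unsolicited
 * unit checks.  Pass the sense data to the discipline so that device
 * changes can be detected; the interrupt is always retried.
 */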
1839  enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1840  {
1841  	struct dasd_device *device;
1842  
1843  	device = dasd_device_from_cdev_locked(cdev);
1844  
1845  	if (IS_ERR(device))
1846  		goto out;
1847  	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1848  	   device->state != device->target ||
1849  	   !device->discipline->check_for_device_change){
1850  		dasd_put_device(device);
1851  		goto out;
1852  	}
1853  	if (device->discipline->dump_sense_dbf)
1854  		device->discipline->dump_sense_dbf(device, irb, "uc");
1855  	device->discipline->check_for_device_change(device, NULL, irb);
1856  	dasd_put_device(device);
1857  out:
1858  	return UC_TODO_RETRY;
1859  }
1860  EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1861  
1862  /*
1863   * If we have an error on a dasd_block layer request then we cancel
1864   * and return all further requests from the same dasd_block as well.
1865   */
1866  static void __dasd_device_recovery(struct dasd_device *device,
1867  				   struct dasd_ccw_req *ref_cqr)
1868  {
1869  	struct list_head *l, *n;
1870  	struct dasd_ccw_req *cqr;
1871  
1872  	/*
1873  	 * Only requeue requests that came from the dasd_block layer.
1874  	 */
1875  	if (!ref_cqr->block)
1876  		return;
1877  
1878  	list_for_each_safe(l, n, &device->ccw_queue) {
1879  		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1880  		if (cqr->status == DASD_CQR_QUEUED &&
1881  		    ref_cqr->block == cqr->block) {
1882  			cqr->status = DASD_CQR_CLEARED;
1883  		}
1884  	}
1885  }
1886  
1887  /*
1888   * Remove those ccw requests from the queue that need to be returned
1889   * to the upper layer.
1890   */
1891  static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1892  					    struct list_head *final_queue)
1893  {
1894  	struct list_head *l, *n;
1895  	struct dasd_ccw_req *cqr;
1896  
1897  	/* Process request with final status. */
1898  	list_for_each_safe(l, n, &device->ccw_queue) {
1899  		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1900  
1901  		/* Skip any non-final request. */
1902  		if (cqr->status == DASD_CQR_QUEUED ||
1903  		    cqr->status == DASD_CQR_IN_IO ||
1904  		    cqr->status == DASD_CQR_CLEAR_PENDING)
1905  			continue;
1906  		if (cqr->status == DASD_CQR_ERROR) {
1907  			__dasd_device_recovery(device, cqr);
1908  		}
1909  		/* Rechain finished requests to final queue */
1910  		list_move_tail(&cqr->devlist, final_queue);
1911  	}
1912  }
1913  
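/*
 * Translate the final status of a request (SUCCESS, ERROR or CLEARED)
 * into DONE, NEED_ERP or TERMINATED and call its callback function.
 */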
1914  static void __dasd_process_cqr(struct dasd_device *device,
1915  			       struct dasd_ccw_req *cqr)
1916  {
1917  	switch (cqr->status) {
1918  	case DASD_CQR_SUCCESS:
1919  		cqr->status = DASD_CQR_DONE;
1920  		break;
1921  	case DASD_CQR_ERROR:
1922  		cqr->status = DASD_CQR_NEED_ERP;
1923  		break;
1924  	case DASD_CQR_CLEARED:
1925  		cqr->status = DASD_CQR_TERMINATED;
1926  		break;
1927  	default:
1928  		dev_err(&device->cdev->dev,
1929  			"Unexpected CQR status %02x", cqr->status);
1930  		BUG();
1931  	}
1932  	if (cqr->callback)
1933  		cqr->callback(cqr, cqr->callback_data);
1934  }
1935  
1936  /*
1937   * the cqrs from the final queue are returned to the upper layer
1938   * by setting a dasd_block state and calling the callback function
1939   */
1940  static void __dasd_device_process_final_queue(struct dasd_device *device,
1941  					      struct list_head *final_queue)
1942  {
1943  	struct list_head *l, *n;
1944  	struct dasd_ccw_req *cqr;
1945  	struct dasd_block *block;
1946  
1947  	list_for_each_safe(l, n, final_queue) {
1948  		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1949  		list_del_init(&cqr->devlist);
1950  		block = cqr->block;
1951  		if (!block) {
1952  			__dasd_process_cqr(device, cqr);
1953  		} else {
1954  			spin_lock_bh(&block->queue_lock);
1955  			__dasd_process_cqr(device, cqr);
1956  			spin_unlock_bh(&block->queue_lock);
1957  		}
1958  	}
1959  }
1960  
1961  /*
1962   * check if device should be autoquiesced due to too many timeouts
1963   */
1964  static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device,
1965  						    struct dasd_ccw_req *cqr)
1966  {
1967  	if ((device->default_retries - cqr->retries) >= device->aq_timeouts)
1968  		dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS);
1969  }
1970  
1971  /*
1972   * Take a look at the first request on the ccw queue and check
1973   * if it reached its expire time. If so, terminate the IO.
1974   */
1975  static void __dasd_device_check_expire(struct dasd_device *device)
1976  {
1977  	struct dasd_ccw_req *cqr;
1978  
1979  	if (list_empty(&device->ccw_queue))
1980  		return;
1981  	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1982  	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1983  	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1984  		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1985  			/*
1986  			 * IO in safe offline processing should not
1987  			 * run out of retries
1988  			 */
1989  			cqr->retries++;
1990  		}
1991  		if (device->discipline->term_IO(cqr) != 0) {
1992  			/* Hmpf, try again in 5 sec */
1993  			dev_err(&device->cdev->dev,
1994  				"CQR timed out (%lus) but cannot be ended, retrying in 5s\n",
1995  				(cqr->expires / HZ));
1996  			cqr->expires += 5*HZ;
1997  			dasd_device_set_timer(device, 5*HZ);
1998  		} else {
1999  			dev_err(&device->cdev->dev,
2000  				"CQR timed out (%lus), %i retries remaining\n",
2001  				(cqr->expires / HZ), cqr->retries);
2002  		}
2003  		__dasd_device_check_autoquiesce_timeout(device, cqr);
2004  	}
2005  }
2006  
2007  /*
2008   * Return 1 if the device is not eligible for I/O.
2009   */
2010  static int __dasd_device_is_unusable(struct dasd_device *device,
2011  				     struct dasd_ccw_req *cqr)
2012  {
2013  	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);
2014  
2015  	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
2016  	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
2017  		/*
2018  		 * dasd is being set offline
2019  		 * but it is not a safe offline, during which we have to allow I/O
2020  		 */
2021  		return 1;
2022  	}
2023  	if (device->stopped) {
2024  		if (device->stopped & mask) {
2025  			/* stopped and CQR will not change that. */
2026  			return 1;
2027  		}
2028  		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2029  			/* CQR is not able to change device to
2030  			 * operational. */
2031  			return 1;
2032  		}
2033  		/* CQR required to get device operational. */
2034  	}
2035  	return 0;
2036  }
2037  
2038  /*
2039   * Take a look at the first request on the ccw queue and check
2040   * if it needs to be started.
2041   */
2042  static void __dasd_device_start_head(struct dasd_device *device)
2043  {
2044  	struct dasd_ccw_req *cqr;
2045  	int rc;
2046  
2047  	if (list_empty(&device->ccw_queue))
2048  		return;
2049  	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2050  	if (cqr->status != DASD_CQR_QUEUED)
2051  		return;
2052  	/* if device is not usable return request to upper layer */
2053  	if (__dasd_device_is_unusable(device, cqr)) {
2054  		cqr->intrc = -EAGAIN;
2055  		cqr->status = DASD_CQR_CLEARED;
2056  		dasd_schedule_device_bh(device);
2057  		return;
2058  	}
2059  
2060  	rc = device->discipline->start_IO(cqr);
2061  	if (rc == 0)
2062  		dasd_device_set_timer(device, cqr->expires);
2063  	else if (rc == -EACCES) {
2064  		dasd_schedule_device_bh(device);
2065  	} else
2066  		/* Hmpf, try again in 1/2 sec */
2067  		dasd_device_set_timer(device, 50);
2068  }
2069  
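/*
 * Check for paths with pending path verification or fibre channel
 * endpoint security events and hand them to the discipline's path
 * event handler; retry later if the handler cannot process them now.
 */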
2070  static void __dasd_device_check_path_events(struct dasd_device *device)
2071  {
2072  	__u8 tbvpm, fcsecpm;
2073  	int rc;
2074  
2075  	tbvpm = dasd_path_get_tbvpm(device);
2076  	fcsecpm = dasd_path_get_fcsecpm(device);
2077  
2078  	if (!tbvpm && !fcsecpm)
2079  		return;
2080  
2081  	if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
2082  		return;
2083  
2084  	dasd_path_clear_all_verify(device);
2085  	dasd_path_clear_all_fcsec(device);
2086  
2087  	rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
2088  	if (rc) {
2089  		dasd_path_add_tbvpm(device, tbvpm);
2090  		dasd_path_add_fcsecpm(device, fcsecpm);
2091  		dasd_device_set_timer(device, 50);
2092  	}
2093  }
2094  
2095  /*
2096   * Go through all requests on the dasd_device request queue,
2097   * terminate them on the cdev if necessary, and return them to the
2098   * submitting layer via callback.
2099   * Note:
2100   * Make sure that all 'submitting layers' still exist when
2101   * this function is called! In other words, when 'device' is a base
2102   * device, then all block layer requests must already have been removed
2103   * via dasd_flush_block_queue.
2104   */
2105  int dasd_flush_device_queue(struct dasd_device *device)
2106  {
2107  	struct dasd_ccw_req *cqr, *n;
2108  	int rc;
2109  	struct list_head flush_queue;
2110  
2111  	INIT_LIST_HEAD(&flush_queue);
2112  	spin_lock_irq(get_ccwdev_lock(device->cdev));
2113  	rc = 0;
2114  	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2115  		/* Check status and move request to flush_queue */
2116  		switch (cqr->status) {
2117  		case DASD_CQR_IN_IO:
2118  			rc = device->discipline->term_IO(cqr);
2119  			if (rc) {
2120  				/* unable to terminate request */
2121  				dev_err(&device->cdev->dev,
2122  					"Flushing the DASD request queue failed\n");
2123  				/* stop flush processing */
2124  				goto finished;
2125  			}
2126  			break;
2127  		case DASD_CQR_QUEUED:
2128  			cqr->stopclk = get_tod_clock();
2129  			cqr->status = DASD_CQR_CLEARED;
2130  			break;
2131  		default: /* no need to modify the others */
2132  			break;
2133  		}
2134  		list_move_tail(&cqr->devlist, &flush_queue);
2135  	}
2136  finished:
2137  	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2138  	/*
2139  	 * After this point all requests must be in state CLEAR_PENDING,
2140  	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
2141  	 * one of the others.
2142  	 */
2143  	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
2144  		wait_event(dasd_flush_wq,
2145  			   (cqr->status != DASD_CQR_CLEAR_PENDING));
2146  	/*
2147  	 * Now set each request back to TERMINATED, DONE or NEED_ERP
2148  	 * and call the callback function of flushed requests
2149  	 */
2150  	__dasd_device_process_final_queue(device, &flush_queue);
2151  	return rc;
2152  }
2153  EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2154  
2155  /*
2156   * Acquire the device lock and process queues for the device.
2157   */
2158  static void dasd_device_tasklet(unsigned long data)
2159  {
2160  	struct dasd_device *device = (struct dasd_device *) data;
2161  	struct list_head final_queue;
2162  
2163  	atomic_set (&device->tasklet_scheduled, 0);
2164  	INIT_LIST_HEAD(&final_queue);
2165  	spin_lock_irq(get_ccwdev_lock(device->cdev));
2166  	/* Check expire time of first request on the ccw queue. */
2167  	__dasd_device_check_expire(device);
2168  	/* find final requests on ccw queue */
2169  	__dasd_device_process_ccw_queue(device, &final_queue);
2170  	__dasd_device_check_path_events(device);
2171  	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2172  	/* Now call the callback function of requests with final status */
2173  	__dasd_device_process_final_queue(device, &final_queue);
2174  	spin_lock_irq(get_ccwdev_lock(device->cdev));
2175  	/* Now check if the head of the ccw queue needs to be started. */
2176  	__dasd_device_start_head(device);
2177  	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2178  	if (waitqueue_active(&shutdown_waitq))
2179  		wake_up(&shutdown_waitq);
2180  	dasd_put_device(device);
2181  }
2182  
2183  /*
2184   * Schedule a run of the device tasklet (dasd_device_tasklet).
2185   */
2186  void dasd_schedule_device_bh(struct dasd_device *device)
2187  {
2188  	/* Protect against rescheduling. */
2189  	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
2190  		return;
2191  	dasd_get_device(device);
2192  	tasklet_hi_schedule(&device->tasklet);
2193  }
2194  EXPORT_SYMBOL(dasd_schedule_device_bh);
2195  
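/*
 * Add the given bits to the stop mask of the device.
 */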
2196  void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2197  {
2198  	device->stopped |= bits;
2199  }
2200  EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
2201  
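/*
 * Remove the given bits from the stop mask of the device and wake up
 * waiters once the device is no longer stopped.
 */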
2202  void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
2203  {
2204  	device->stopped &= ~bits;
2205  	if (!device->stopped)
2206  		wake_up(&generic_waitq);
2207  }
2208  EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
2209  
2210  /*
2211   * Queue a request to the head of the device ccw_queue.
2212   * Start the I/O if possible.
2213   */
2214  void dasd_add_request_head(struct dasd_ccw_req *cqr)
2215  {
2216  	struct dasd_device *device;
2217  	unsigned long flags;
2218  
2219  	device = cqr->startdev;
2220  	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2221  	cqr->status = DASD_CQR_QUEUED;
2222  	list_add(&cqr->devlist, &device->ccw_queue);
2223  	/* let the bh start the request to keep them in order */
2224  	dasd_schedule_device_bh(device);
2225  	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2226  }
2227  EXPORT_SYMBOL(dasd_add_request_head);
2228  
2229  /*
2230   * Queue a request to the tail of the device ccw_queue.
2231   * Start the I/O if possible.
2232   */
2233  void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2234  {
2235  	struct dasd_device *device;
2236  	unsigned long flags;
2237  
2238  	device = cqr->startdev;
2239  	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2240  	cqr->status = DASD_CQR_QUEUED;
2241  	list_add_tail(&cqr->devlist, &device->ccw_queue);
2242  	/* let the bh start the request to keep them in order */
2243  	dasd_schedule_device_bh(device);
2244  	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2245  }
2246  EXPORT_SYMBOL(dasd_add_request_tail);
2247  
2248  /*
2249   * Wakeup helper for the 'sleep_on' functions.
2250   */
2251  void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2252  {
2253  	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2254  	cqr->callback_data = DASD_SLEEPON_END_TAG;
2255  	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2256  	wake_up(&generic_waitq);
2257  }
2258  EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
2259  
2260  static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2261  {
2262  	struct dasd_device *device;
2263  	int rc;
2264  
2265  	device = cqr->startdev;
2266  	spin_lock_irq(get_ccwdev_lock(device->cdev));
2267  	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
2268  	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2269  	return rc;
2270  }
2271  
2272  /*
2273   * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
2274   */
2275  static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
2276  {
2277  	struct dasd_device *device;
2278  	dasd_erp_fn_t erp_fn;
2279  
2280  	if (cqr->status == DASD_CQR_FILLED)
2281  		return 0;
2282  	device = cqr->startdev;
2283  	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2284  		if (cqr->status == DASD_CQR_TERMINATED) {
2285  			device->discipline->handle_terminated_request(cqr);
2286  			return 1;
2287  		}
2288  		if (cqr->status == DASD_CQR_NEED_ERP) {
2289  			erp_fn = device->discipline->erp_action(cqr);
2290  			erp_fn(cqr);
2291  			return 1;
2292  		}
2293  		if (cqr->status == DASD_CQR_FAILED)
2294  			dasd_log_sense(cqr, &cqr->irb);
2295  		if (cqr->refers) {
2296  			__dasd_process_erp(device, cqr);
2297  			return 1;
2298  		}
2299  	}
2300  	return 0;
2301  }
2302  
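/*
 * Check whether the sleep_on loop has to keep waiting for the request,
 * i.e. the request (or its ERP chain) has not yet reached a final state.
 */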
2303  static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
2304  {
2305  	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2306  		if (cqr->refers) /* erp is not done yet */
2307  			return 1;
2308  		return ((cqr->status != DASD_CQR_DONE) &&
2309  			(cqr->status != DASD_CQR_FAILED));
2310  	} else
2311  		return (cqr->status == DASD_CQR_FILLED);
2312  }
2313  
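/*
 * Common code for the dasd_sleep_on variants: queue the request to the
 * device, perform error recovery where necessary and wait
 * (interruptibly if requested) until the request reaches a final state.
 */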
2314  static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2315  {
2316  	struct dasd_device *device;
2317  	int rc;
2318  	struct list_head ccw_queue;
2319  	struct dasd_ccw_req *cqr;
2320  
2321  	INIT_LIST_HEAD(&ccw_queue);
2322  	maincqr->status = DASD_CQR_FILLED;
2323  	device = maincqr->startdev;
2324  	list_add(&maincqr->blocklist, &ccw_queue);
2325  	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
2326  	     cqr = list_first_entry(&ccw_queue,
2327  				    struct dasd_ccw_req, blocklist)) {
2328  
2329  		if (__dasd_sleep_on_erp(cqr))
2330  			continue;
2331  		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2332  			continue;
2333  		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2334  		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2335  			cqr->status = DASD_CQR_FAILED;
2336  			cqr->intrc = -EPERM;
2337  			continue;
2338  		}
2339  		/* Non-temporary stop condition will trigger fail fast */
2340  		if (device->stopped & ~DASD_STOPPED_PENDING &&
2341  		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2342  		    !dasd_eer_enabled(device) && device->aq_mask == 0) {
2343  			cqr->status = DASD_CQR_FAILED;
2344  			cqr->intrc = -ENOLINK;
2345  			continue;
2346  		}
2347  		/*
2348  		 * Don't try to start requests if device is in
2349  		 * offline processing, it might wait forever
2350  		 */
2351  		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2352  			cqr->status = DASD_CQR_FAILED;
2353  			cqr->intrc = -ENODEV;
2354  			continue;
2355  		}
2356  		/*
2357  		 * Don't try to start requests if device is stopped
2358  		 * except path verification requests
2359  		 */
2360  		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2361  			if (interruptible) {
2362  				rc = wait_event_interruptible(
2363  					generic_waitq, !(device->stopped));
2364  				if (rc == -ERESTARTSYS) {
2365  					cqr->status = DASD_CQR_FAILED;
2366  					maincqr->intrc = rc;
2367  					continue;
2368  				}
2369  			} else
2370  				wait_event(generic_waitq, !(device->stopped));
2371  		}
2372  		if (!cqr->callback)
2373  			cqr->callback = dasd_wakeup_cb;
2374  
2375  		cqr->callback_data = DASD_SLEEPON_START_TAG;
2376  		dasd_add_request_tail(cqr);
2377  		if (interruptible) {
2378  			rc = wait_event_interruptible(
2379  				generic_waitq, _wait_for_wakeup(cqr));
2380  			if (rc == -ERESTARTSYS) {
2381  				dasd_cancel_req(cqr);
2382  				/* wait (non-interruptible) for final status */
2383  				wait_event(generic_waitq,
2384  					   _wait_for_wakeup(cqr));
2385  				cqr->status = DASD_CQR_FAILED;
2386  				maincqr->intrc = rc;
2387  				continue;
2388  			}
2389  		} else
2390  			wait_event(generic_waitq, _wait_for_wakeup(cqr));
2391  	}
2392  
2393  	maincqr->endclk = get_tod_clock();
2394  	if ((maincqr->status != DASD_CQR_DONE) &&
2395  	    (maincqr->intrc != -ERESTARTSYS))
2396  		dasd_log_sense(maincqr, &maincqr->irb);
2397  	if (maincqr->status == DASD_CQR_DONE)
2398  		rc = 0;
2399  	else if (maincqr->intrc)
2400  		rc = maincqr->intrc;
2401  	else
2402  		rc = -EIO;
2403  	return rc;
2404  }
2405  
2406  static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
2407  {
2408  	struct dasd_ccw_req *cqr;
2409  
2410  	list_for_each_entry(cqr, ccw_queue, blocklist) {
2411  		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
2412  			return 0;
2413  	}
2414  
2415  	return 1;
2416  }
2417  
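/*
 * Common code for the dasd_sleep_on_queue variants: start all requests
 * on the list, wait for their completion and perform error recovery
 * where needed.
 */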
2418  static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
2419  {
2420  	struct dasd_device *device;
2421  	struct dasd_ccw_req *cqr, *n;
2422  	u8 *sense = NULL;
2423  	int rc;
2424  
2425  retry:
2426  	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2427  		device = cqr->startdev;
2428  		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2429  			continue;
2430  
2431  		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2432  		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2433  			cqr->status = DASD_CQR_FAILED;
2434  			cqr->intrc = -EPERM;
2435  			continue;
2436  		}
2437  		/* Non-temporary stop condition will trigger fail fast */
2438  		if (device->stopped & ~DASD_STOPPED_PENDING &&
2439  		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2440  		    !dasd_eer_enabled(device)) {
2441  			cqr->status = DASD_CQR_FAILED;
2442  			cqr->intrc = -EAGAIN;
2443  			continue;
2444  		}
2445  
2446  		/* Don't try to start requests if device is stopped */
2447  		if (interruptible) {
2448  			rc = wait_event_interruptible(
2449  				generic_waitq, !device->stopped);
2450  			if (rc == -ERESTARTSYS) {
2451  				cqr->status = DASD_CQR_FAILED;
2452  				cqr->intrc = rc;
2453  				continue;
2454  			}
2455  		} else
2456  			wait_event(generic_waitq, !(device->stopped));
2457  
2458  		if (!cqr->callback)
2459  			cqr->callback = dasd_wakeup_cb;
2460  		cqr->callback_data = DASD_SLEEPON_START_TAG;
2461  		dasd_add_request_tail(cqr);
2462  	}
2463  
2464  	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
2465  
2466  	rc = 0;
2467  	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2468  		/*
2469  		 * In some cases certain errors might be expected and
2470  		 * error recovery would be unnecessary in these cases.
2471  		 * Check if the according suppress bit is set.
2472  		 */
2473  		sense = dasd_get_sense(&cqr->irb);
2474  		if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
2475  		    !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
2476  		    test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags))
2477  			continue;
2478  		if (sense && (sense[1] & SNS1_NO_REC_FOUND) &&
2479  		    test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags))
2480  			continue;
2481  		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
2482  		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2483  			continue;
2484  
2485  		/*
2486  		 * For alias devices, simplify error recovery and
2487  		 * return to the upper layer;
2488  		 * do not skip ERP requests.
2489  		 */
2490  		if (cqr->startdev != cqr->basedev && !cqr->refers &&
2491  		    (cqr->status == DASD_CQR_TERMINATED ||
2492  		     cqr->status == DASD_CQR_NEED_ERP))
2493  			return -EAGAIN;
2494  
2495  		/* normal recovery for basedev IO */
2496  		if (__dasd_sleep_on_erp(cqr))
2497  			/* handle erp first */
2498  			goto retry;
2499  	}
2500  
2501  	return 0;
2502  }
2503  
2504  /*
2505   * Queue a request to the tail of the device ccw_queue and wait for
2506   * its completion.
2507   */
2508  int dasd_sleep_on(struct dasd_ccw_req *cqr)
2509  {
2510  	return _dasd_sleep_on(cqr, 0);
2511  }
2512  EXPORT_SYMBOL(dasd_sleep_on);
2513  
2514  /*
2515   * Start requests from a ccw_queue and wait for their completion.
2516   */
2517  int dasd_sleep_on_queue(struct list_head *ccw_queue)
2518  {
2519  	return _dasd_sleep_on_queue(ccw_queue, 0);
2520  }
2521  EXPORT_SYMBOL(dasd_sleep_on_queue);
2522  
2523  /*
2524   * Start requests from a ccw_queue and wait interruptible for their completion.
2525   */
2526  int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
2527  {
2528  	return _dasd_sleep_on_queue(ccw_queue, 1);
2529  }
2530  EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
2531  
2532  /*
2533   * Queue a request to the tail of the device ccw_queue and wait
2534   * interruptibly for its completion.
2535   */
2536  int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2537  {
2538  	return _dasd_sleep_on(cqr, 1);
2539  }
2540  EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2541  
2542  /*
2543   * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
2544   * for eckd devices) the currently running request has to be terminated
2545   * and be put back to status queued, before the special request is added
2546   * to the head of the queue. Then the special request is waited on normally.
2547   */
2548  static inline int _dasd_term_running_cqr(struct dasd_device *device)
2549  {
2550  	struct dasd_ccw_req *cqr;
2551  	int rc;
2552  
2553  	if (list_empty(&device->ccw_queue))
2554  		return 0;
2555  	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2556  	rc = device->discipline->term_IO(cqr);
2557  	if (!rc)
2558  		/*
2559  		 * CQR terminated because a more important request is pending.
2560  		 * Undo decreasing of retry counter because this is
2561  		 * not an error case.
2562  		 */
2563  		cqr->retries++;
2564  	return rc;
2565  }
2566  
2567  int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2568  {
2569  	struct dasd_device *device;
2570  	int rc;
2571  
2572  	device = cqr->startdev;
2573  	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2574  	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2575  		cqr->status = DASD_CQR_FAILED;
2576  		cqr->intrc = -EPERM;
2577  		return -EIO;
2578  	}
2579  	spin_lock_irq(get_ccwdev_lock(device->cdev));
2580  	rc = _dasd_term_running_cqr(device);
2581  	if (rc) {
2582  		spin_unlock_irq(get_ccwdev_lock(device->cdev));
2583  		return rc;
2584  	}
2585  	cqr->callback = dasd_wakeup_cb;
2586  	cqr->callback_data = DASD_SLEEPON_START_TAG;
2587  	cqr->status = DASD_CQR_QUEUED;
2588  	/*
2589  	 * add new request as second
2590  	 * first the terminated cqr needs to be finished
2591  	 */
2592  	list_add(&cqr->devlist, device->ccw_queue.next);
2593  
2594  	/* let the bh start the request to keep them in order */
2595  	dasd_schedule_device_bh(device);
2596  
2597  	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2598  
2599  	wait_event(generic_waitq, _wait_for_wakeup(cqr));
2600  
2601  	if (cqr->status == DASD_CQR_DONE)
2602  		rc = 0;
2603  	else if (cqr->intrc)
2604  		rc = cqr->intrc;
2605  	else
2606  		rc = -EIO;
2607  
2608  	/* kick tasklets */
2609  	dasd_schedule_device_bh(device);
2610  	if (device->block)
2611  		dasd_schedule_block_bh(device->block);
2612  
2613  	return rc;
2614  }
2615  EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2616  
2617  /*
2618   * Cancels a request that was started with dasd_sleep_on_req.
2619   * This is useful for timing out requests. The request will be
2620   * terminated if it is currently in I/O.
2621   * Returns 0 if request termination was successful
2622   *	   negative error code if termination failed
2623   * Cancellation of a request is an asynchronous operation! The calling
2624   * function has to wait until the request is properly returned via callback.
2625   */
2626  static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
2627  {
2628  	struct dasd_device *device = cqr->startdev;
2629  	int rc = 0;
2630  
2631  	switch (cqr->status) {
2632  	case DASD_CQR_QUEUED:
2633  		/* request was not started - just set to cleared */
2634  		cqr->status = DASD_CQR_CLEARED;
2635  		break;
2636  	case DASD_CQR_IN_IO:
2637  		/* request in IO - terminate IO and release again */
2638  		rc = device->discipline->term_IO(cqr);
2639  		if (rc) {
2640  			dev_err(&device->cdev->dev,
2641  				"Cancelling request failed with rc=%d\n", rc);
2642  		} else {
2643  			cqr->stopclk = get_tod_clock();
2644  		}
2645  		break;
2646  	default: /* already finished or clear pending - do nothing */
2647  		break;
2648  	}
2649  	dasd_schedule_device_bh(device);
2650  	return rc;
2651  }
2652  
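/*
 * Cancel a request while holding the ccw device lock.
 */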
2653  int dasd_cancel_req(struct dasd_ccw_req *cqr)
2654  {
2655  	struct dasd_device *device = cqr->startdev;
2656  	unsigned long flags;
2657  	int rc;
2658  
2659  	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2660  	rc = __dasd_cancel_req(cqr);
2661  	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2662  	return rc;
2663  }
2664  
2665  /*
2666   * SECTION: Operations of the dasd_block layer.
2667   */
2668  
2669  /*
2670   * Timeout function for dasd_block. This is used when the block layer
2671   * is waiting for something that may not come reliably (e.g. a state
2672   * change interrupt)
2673   */
2674  static void dasd_block_timeout(struct timer_list *t)
2675  {
2676  	unsigned long flags;
2677  	struct dasd_block *block;
2678  
2679  	block = from_timer(block, t, timer);
2680  	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
2681  	/* re-activate request queue */
2682  	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
2683  	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
2684  	dasd_schedule_block_bh(block);
2685  	blk_mq_run_hw_queues(block->gdp->queue, true);
2686  }
2687  
2688  /*
2689   * Setup timeout for a dasd_block in jiffies.
2690   */
2691  void dasd_block_set_timer(struct dasd_block *block, int expires)
2692  {
2693  	if (expires == 0)
2694  		del_timer(&block->timer);
2695  	else
2696  		mod_timer(&block->timer, jiffies + expires);
2697  }
2698  EXPORT_SYMBOL(dasd_block_set_timer);
2699  
2700  /*
2701   * Clear timeout for a dasd_block.
2702   */
2703  void dasd_block_clear_timer(struct dasd_block *block)
2704  {
2705  	del_timer(&block->timer);
2706  }
2707  EXPORT_SYMBOL(dasd_block_clear_timer);
2708  
2709  /*
2710   * Process finished error recovery ccw.
2711   */
2712  static void __dasd_process_erp(struct dasd_device *device,
2713  			       struct dasd_ccw_req *cqr)
2714  {
2715  	dasd_erp_fn_t erp_fn;
2716  
2717  	if (cqr->status == DASD_CQR_DONE)
2718  		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
2719  	else
2720  		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
2721  	erp_fn = device->discipline->erp_postaction(cqr);
2722  	erp_fn(cqr);
2723  }
2724  
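/*
 * Free the ccw request and complete, requeue or fail the corresponding
 * block layer request with a matching blk_status_t.
 */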
2725  static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
2726  {
2727  	struct request *req;
2728  	blk_status_t error = BLK_STS_OK;
2729  	unsigned int proc_bytes;
2730  	int status;
2731  
2732  	req = (struct request *) cqr->callback_data;
2733  	dasd_profile_end(cqr->block, cqr, req);
2734  
2735  	proc_bytes = cqr->proc_bytes;
2736  	status = cqr->block->base->discipline->free_cp(cqr, req);
2737  	if (status < 0)
2738  		error = errno_to_blk_status(status);
2739  	else if (status == 0) {
2740  		switch (cqr->intrc) {
2741  		case -EPERM:
2742  			/*
2743  			 * DASD doesn't implement SCSI/NVMe reservations, but it
2744  			 * implements a locking scheme similar to them. We
2745  			 * return this error when we no longer have the lock.
2746  			 */
2747  			error = BLK_STS_RESV_CONFLICT;
2748  			break;
2749  		case -ENOLINK:
2750  			error = BLK_STS_TRANSPORT;
2751  			break;
2752  		case -ETIMEDOUT:
2753  			error = BLK_STS_TIMEOUT;
2754  			break;
2755  		default:
2756  			error = BLK_STS_IOERR;
2757  			break;
2758  		}
2759  	}
2760  
2761  	/*
2762  	 * We need to take care for ETIMEDOUT errors here since the
2763  	 * complete callback does not get called in this case.
2764  	 * Take care of all errors here and avoid additional code to
2765  	 * transfer the error value to the complete callback.
2766  	 */
2767  	if (error) {
2768  		blk_mq_end_request(req, error);
2769  		blk_mq_run_hw_queues(req->q, true);
2770  	} else {
2771  		/*
2772  		 * Partially completed requests can happen with ESE devices.
2773  		 * During read we might have gotten a NRF error and have to
2774  		 * complete a request partially.
2775  		 */
2776  		if (proc_bytes) {
2777  			blk_update_request(req, BLK_STS_OK, proc_bytes);
2778  			blk_mq_requeue_request(req, true);
2779  		} else if (likely(!blk_should_fake_timeout(req->q))) {
2780  			blk_mq_complete_request(req);
2781  		}
2782  	}
2783  }
2784  
2785  /*
2786   * Process ccw request queue.
2787   */
2788  static void __dasd_process_block_ccw_queue(struct dasd_block *block,
2789  					   struct list_head *final_queue)
2790  {
2791  	struct list_head *l, *n;
2792  	struct dasd_ccw_req *cqr;
2793  	dasd_erp_fn_t erp_fn;
2794  	unsigned long flags;
2795  	struct dasd_device *base = block->base;
2796  
2797  restart:
2798  	/* Process request with final status. */
2799  	list_for_each_safe(l, n, &block->ccw_queue) {
2800  		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2801  		if (cqr->status != DASD_CQR_DONE &&
2802  		    cqr->status != DASD_CQR_FAILED &&
2803  		    cqr->status != DASD_CQR_NEED_ERP &&
2804  		    cqr->status != DASD_CQR_TERMINATED)
2805  			continue;
2806  
2807  		if (cqr->status == DASD_CQR_TERMINATED) {
2808  			base->discipline->handle_terminated_request(cqr);
2809  			goto restart;
2810  		}
2811  
2812  		/*  Process requests that may be recovered */
2813  		if (cqr->status == DASD_CQR_NEED_ERP) {
2814  			erp_fn = base->discipline->erp_action(cqr);
2815  			if (IS_ERR(erp_fn(cqr)))
2816  				continue;
2817  			goto restart;
2818  		}
2819  
2820  		/* log sense for fatal error */
2821  		if (cqr->status == DASD_CQR_FAILED) {
2822  			dasd_log_sense(cqr, &cqr->irb);
2823  		}
2824  
2825  		/*
2826  		 * First call extended error reporting and check for autoquiesce
2827  		 */
2828  		spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
2829  		if (cqr->status == DASD_CQR_FAILED &&
2830  		    dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) {
2831  			cqr->status = DASD_CQR_FILLED;
2832  			cqr->retries = 255;
2833  			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
2834  			goto restart;
2835  		}
2836  		spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
2837  
2838  		/* Process finished ERP request. */
2839  		if (cqr->refers) {
2840  			__dasd_process_erp(base, cqr);
2841  			goto restart;
2842  		}
2843  
2844  		/* Rechain finished requests to final queue */
2845  		cqr->endclk = get_tod_clock();
2846  		list_move_tail(&cqr->blocklist, final_queue);
2847  	}
2848  }
2849  
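/*
 * Callback for requests that were built from block layer requests:
 * hand the finished request back to the dasd_block tasklet.
 */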
2850  static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
2851  {
2852  	dasd_schedule_block_bh(cqr->block);
2853  }
2854  
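/*
 * Start all requests in state FILLED on the corresponding dasd_device,
 * unless the device is stopped or the request has to fail immediately.
 */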
2855  static void __dasd_block_start_head(struct dasd_block *block)
2856  {
2857  	struct dasd_ccw_req *cqr;
2858  
2859  	if (list_empty(&block->ccw_queue))
2860  		return;
2861  	/* We always begin with the first requests on the queue, as some
2862  	 * of the previously started requests have to be enqueued on a
2863  	 * dasd_device again for error recovery.
2864  	 */
2865  	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2866  		if (cqr->status != DASD_CQR_FILLED)
2867  			continue;
2868  		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
2869  		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2870  			cqr->status = DASD_CQR_FAILED;
2871  			cqr->intrc = -EPERM;
2872  			dasd_schedule_block_bh(block);
2873  			continue;
2874  		}
2875  		/* Non-temporary stop condition will trigger fail fast */
2876  		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2877  		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2878  		    !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) {
2879  			cqr->status = DASD_CQR_FAILED;
2880  			cqr->intrc = -ENOLINK;
2881  			dasd_schedule_block_bh(block);
2882  			continue;
2883  		}
2884  		/* Don't try to start requests if device is stopped */
2885  		if (block->base->stopped)
2886  			return;
2887  
2888  		/* just a fail safe check, should not happen */
2889  		if (!cqr->startdev)
2890  			cqr->startdev = block->base;
2891  
2892  		/* make sure that the requests we submit find their way back */
2893  		cqr->callback = dasd_return_cqr_cb;
2894  
2895  		dasd_add_request_tail(cqr);
2896  	}
2897  }
2898  
2899  /*
2900   * Central dasd_block layer routine. Takes requests from the generic
2901   * block layer request queue, creates ccw requests, enqueues them on
2902   * a dasd_device and processes ccw requests that have been returned.
2903   */
2904  static void dasd_block_tasklet(unsigned long data)
2905  {
2906  	struct dasd_block *block = (struct dasd_block *) data;
2907  	struct list_head final_queue;
2908  	struct list_head *l, *n;
2909  	struct dasd_ccw_req *cqr;
2910  	struct dasd_queue *dq;
2911  
2912  	atomic_set(&block->tasklet_scheduled, 0);
2913  	INIT_LIST_HEAD(&final_queue);
2914  	spin_lock_irq(&block->queue_lock);
2915  	/* Finish off requests on ccw queue */
2916  	__dasd_process_block_ccw_queue(block, &final_queue);
2917  	spin_unlock_irq(&block->queue_lock);
2918  
2919  	/* Now call the callback function of requests with final status */
2920  	list_for_each_safe(l, n, &final_queue) {
2921  		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2922  		dq = cqr->dq;
2923  		spin_lock_irq(&dq->lock);
2924  		list_del_init(&cqr->blocklist);
2925  		__dasd_cleanup_cqr(cqr);
2926  		spin_unlock_irq(&dq->lock);
2927  	}
2928  
2929  	spin_lock_irq(&block->queue_lock);
2930  	/* Now check if the head of the ccw queue needs to be started. */
2931  	__dasd_block_start_head(block);
2932  	spin_unlock_irq(&block->queue_lock);
2933  
2934  	if (waitqueue_active(&shutdown_waitq))
2935  		wake_up(&shutdown_waitq);
2936  	dasd_put_device(block->base);
2937  }
2938  
2939  static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2940  {
2941  	wake_up(&dasd_flush_wq);
2942  }
2943  
2944  /*
2945   * Requeue a request back to the block request queue
2946   * only works for block requests
2947   */
2948  static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
2949  {
2950  	struct request *req;
2951  
2952  	/*
2953  	 * If the request is an ERP request there is nothing to requeue.
2954  	 * This will be done with the remaining original request.
2955  	 */
2956  	if (cqr->refers)
2957  		return;
2958  	spin_lock_irq(&cqr->dq->lock);
2959  	req = (struct request *) cqr->callback_data;
2960  	blk_mq_requeue_request(req, true);
2961  	spin_unlock_irq(&cqr->dq->lock);
2962  
2963  	return;
2964  }
2965  
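/*
 * Cancel all requests on the dasd_block queue, terminating started I/O
 * if necessary, and move them (including their ERP chains) to the
 * given flush queue.
 */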
2966  static int _dasd_requests_to_flushqueue(struct dasd_block *block,
2967  					struct list_head *flush_queue)
2968  {
2969  	struct dasd_ccw_req *cqr, *n;
2970  	unsigned long flags;
2971  	int rc, i;
2972  
2973  	spin_lock_irqsave(&block->queue_lock, flags);
2974  	rc = 0;
2975  restart:
2976  	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2977  		/* if this request currently owned by a dasd_device cancel it */
2978  		if (cqr->status >= DASD_CQR_QUEUED)
2979  			rc = dasd_cancel_req(cqr);
2980  		if (rc < 0)
2981  			break;
2982  		/* Rechain request (including erp chain) so it won't be
2983  		 * touched by the dasd_block_tasklet anymore.
2984  		 * Replace the callback so we notice when the request
2985  		 * is returned from the dasd_device layer.
2986  		 */
2987  		cqr->callback = _dasd_wake_block_flush_cb;
2988  		for (i = 0; cqr; cqr = cqr->refers, i++)
2989  			list_move_tail(&cqr->blocklist, flush_queue);
2990  		if (i > 1)
2991  			/* moved more than one request - need to restart */
2992  			goto restart;
2993  	}
2994  	spin_unlock_irqrestore(&block->queue_lock, flags);
2995  
2996  	return rc;
2997  }
2998  
2999  /*
3000   * Go through all requests on the dasd_block request queue, cancel them
3001   * on the respective dasd_device, and return them to the generic
3002   * block layer.
3003   */
3004  static int dasd_flush_block_queue(struct dasd_block *block)
3005  {
3006  	struct dasd_ccw_req *cqr, *n;
3007  	struct list_head flush_queue;
3008  	unsigned long flags;
3009  	int rc;
3010  
3011  	INIT_LIST_HEAD(&flush_queue);
3012  	rc = _dasd_requests_to_flushqueue(block, &flush_queue);
3013  
3014  	/* Now call the callback function of flushed requests */
3015  restart_cb:
3016  	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
3017  		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3018  		/* Process finished ERP request. */
3019  		if (cqr->refers) {
3020  			spin_lock_bh(&block->queue_lock);
3021  			__dasd_process_erp(block->base, cqr);
3022  			spin_unlock_bh(&block->queue_lock);
3023  			/* restart list_for_xx loop since dasd_process_erp
3024  			 * might remove multiple elements */
3025  			goto restart_cb;
3026  		}
3027  		/* call the callback function */
3028  		spin_lock_irqsave(&cqr->dq->lock, flags);
3029  		cqr->endclk = get_tod_clock();
3030  		list_del_init(&cqr->blocklist);
3031  		__dasd_cleanup_cqr(cqr);
3032  		spin_unlock_irqrestore(&cqr->dq->lock, flags);
3033  	}
3034  	return rc;
3035  }
3036  
3037  /*
3038   * Schedule a run of the block tasklet (dasd_block_tasklet).
3039   */
3040  void dasd_schedule_block_bh(struct dasd_block *block)
3041  {
3042  	/* Protect against rescheduling. */
3043  	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
3044  		return;
3045  	/* life cycle of block is bound to its base device */
3046  	dasd_get_device(block->base);
3047  	tasklet_hi_schedule(&block->tasklet);
3048  }
3049  EXPORT_SYMBOL(dasd_schedule_block_bh);
3050  
3051  
3052  /*
3053   * SECTION: external block device operations
3054   * (request queue handling, open, release, etc.)
3055   */
3056  
3057  /*
3058   * DASD request queue function. Called by the block layer (blk-mq).
3059   */
3060  static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
3061  				    const struct blk_mq_queue_data *qd)
3062  {
3063  	struct dasd_block *block = hctx->queue->queuedata;
3064  	struct dasd_queue *dq = hctx->driver_data;
3065  	struct request *req = qd->rq;
3066  	struct dasd_device *basedev;
3067  	struct dasd_ccw_req *cqr;
3068  	blk_status_t rc = BLK_STS_OK;
3069  
3070  	basedev = block->base;
3071  	spin_lock_irq(&dq->lock);
3072  	if (basedev->state < DASD_STATE_READY ||
3073  	    test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
3074  		DBF_DEV_EVENT(DBF_ERR, basedev,
3075  			      "device not ready for request %p", req);
3076  		rc = BLK_STS_IOERR;
3077  		goto out;
3078  	}
3079  
3080  	/*
3081  	 * if device is stopped do not fetch new requests
3082  	 * except failfast is active which will let requests fail
3083  	 * immediately in __dasd_block_start_head()
3084  	 */
3085  	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
3086  		DBF_DEV_EVENT(DBF_ERR, basedev,
3087  			      "device stopped request %p", req);
3088  		rc = BLK_STS_RESOURCE;
3089  		goto out;
3090  	}
3091  
3092  	if (basedev->features & DASD_FEATURE_READONLY &&
3093  	    rq_data_dir(req) == WRITE) {
3094  		DBF_DEV_EVENT(DBF_ERR, basedev,
3095  			      "Rejecting write request %p", req);
3096  		rc = BLK_STS_IOERR;
3097  		goto out;
3098  	}
3099  
3100  	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
3101  	    (basedev->features & DASD_FEATURE_FAILFAST ||
3102  	     blk_noretry_request(req))) {
3103  		DBF_DEV_EVENT(DBF_ERR, basedev,
3104  			      "Rejecting failfast request %p", req);
3105  		rc = BLK_STS_IOERR;
3106  		goto out;
3107  	}
3108  
3109  	cqr = basedev->discipline->build_cp(basedev, block, req);
3110  	if (IS_ERR(cqr)) {
3111  		if (PTR_ERR(cqr) == -EBUSY ||
3112  		    PTR_ERR(cqr) == -ENOMEM ||
3113  		    PTR_ERR(cqr) == -EAGAIN) {
3114  			rc = BLK_STS_RESOURCE;
3115  			goto out;
3116  		}
3117  		DBF_DEV_EVENT(DBF_ERR, basedev,
3118  			      "CCW creation failed (rc=%ld) on request %p",
3119  			      PTR_ERR(cqr), req);
3120  		rc = BLK_STS_IOERR;
3121  		goto out;
3122  	}
3123  	/*
3124  	 *  Note: callback is set to dasd_return_cqr_cb in
3125  	 * __dasd_block_start_head to cover erp requests as well
3126  	 */
3127  	cqr->callback_data = req;
3128  	cqr->status = DASD_CQR_FILLED;
3129  	cqr->dq = dq;
3130  
3131  	blk_mq_start_request(req);
3132  	spin_lock(&block->queue_lock);
3133  	list_add_tail(&cqr->blocklist, &block->ccw_queue);
3134  	INIT_LIST_HEAD(&cqr->devlist);
3135  	dasd_profile_start(block, cqr, req);
3136  	dasd_schedule_block_bh(block);
3137  	spin_unlock(&block->queue_lock);
3138  
3139  out:
3140  	spin_unlock_irq(&dq->lock);
3141  	return rc;
3142  }
3143  
3144  /*
3145   * Block timeout callback, called from the block layer
3146   *
3147   * Return values:
3148   * BLK_EH_RESET_TIMER if the request should be left running
3149   * BLK_EH_DONE if the request is handled or terminated
3150   *		      by the driver.
3151   */
3152  enum blk_eh_timer_return dasd_times_out(struct request *req)
3153  {
3154  	struct dasd_block *block = req->q->queuedata;
3155  	struct dasd_device *device;
3156  	struct dasd_ccw_req *cqr;
3157  	unsigned long flags;
3158  	int rc = 0;
3159  
3160  	cqr = blk_mq_rq_to_pdu(req);
3161  	if (!cqr)
3162  		return BLK_EH_DONE;
3163  
3164  	spin_lock_irqsave(&cqr->dq->lock, flags);
3165  	device = cqr->startdev ? cqr->startdev : block->base;
3166  	if (!device->blk_timeout) {
3167  		spin_unlock_irqrestore(&cqr->dq->lock, flags);
3168  		return BLK_EH_RESET_TIMER;
3169  	}
3170  	DBF_DEV_EVENT(DBF_WARNING, device,
3171  		      " dasd_times_out cqr %p status %x",
3172  		      cqr, cqr->status);
3173  
3174  	spin_lock(&block->queue_lock);
3175  	spin_lock(get_ccwdev_lock(device->cdev));
3176  	cqr->retries = -1;
3177  	cqr->intrc = -ETIMEDOUT;
3178  	if (cqr->status >= DASD_CQR_QUEUED) {
3179  		rc = __dasd_cancel_req(cqr);
3180  	} else if (cqr->status == DASD_CQR_FILLED ||
3181  		   cqr->status == DASD_CQR_NEED_ERP) {
3182  		cqr->status = DASD_CQR_TERMINATED;
3183  	} else if (cqr->status == DASD_CQR_IN_ERP) {
3184  		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
3185  
3186  		list_for_each_entry_safe(searchcqr, nextcqr,
3187  					 &block->ccw_queue, blocklist) {
3188  			tmpcqr = searchcqr;
3189  			while (tmpcqr->refers)
3190  				tmpcqr = tmpcqr->refers;
3191  			if (tmpcqr != cqr)
3192  				continue;
3193  			/* searchcqr is an ERP request for cqr */
3194  			searchcqr->retries = -1;
3195  			searchcqr->intrc = -ETIMEDOUT;
3196  			if (searchcqr->status >= DASD_CQR_QUEUED) {
3197  				rc = __dasd_cancel_req(searchcqr);
3198  			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
3199  				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
3200  				searchcqr->status = DASD_CQR_TERMINATED;
3201  				rc = 0;
3202  			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
3203  				/*
3204  				 * Shouldn't happen; most recent ERP
3205  				 * request is at the front of queue
3206  				 */
3207  				continue;
3208  			}
3209  			break;
3210  		}
3211  	}
3212  	spin_unlock(get_ccwdev_lock(device->cdev));
3213  	dasd_schedule_block_bh(block);
3214  	spin_unlock(&block->queue_lock);
3215  	spin_unlock_irqrestore(&cqr->dq->lock, flags);
3216  
3217  	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
3218  }
3219  
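/*
 * Allocate and initialize the per-hardware-queue data used by the
 * blk-mq request function.
 */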
3220  static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
3221  			  unsigned int idx)
3222  {
3223  	struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
3224  
3225  	if (!dq)
3226  		return -ENOMEM;
3227  
3228  	spin_lock_init(&dq->lock);
3229  	hctx->driver_data = dq;
3230  
3231  	return 0;
3232  }
3233  
3234  static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
3235  {
3236  	kfree(hctx->driver_data);
3237  	hctx->driver_data = NULL;
3238  }
3239  
3240  static void dasd_request_done(struct request *req)
3241  {
3242  	blk_mq_end_request(req, 0);
3243  	blk_mq_run_hw_queues(req->q, true);
3244  }
3245  
3246  struct blk_mq_ops dasd_mq_ops = {
3247  	.queue_rq = do_dasd_request,
3248  	.complete = dasd_request_done,
3249  	.timeout = dasd_times_out,
3250  	.init_hctx = dasd_init_hctx,
3251  	.exit_hctx = dasd_exit_hctx,
3252  };
3253  
3254  static int dasd_open(struct gendisk *disk, blk_mode_t mode)
3255  {
3256  	struct dasd_device *base;
3257  	int rc;
3258  
3259  	base = dasd_device_from_gendisk(disk);
3260  	if (!base)
3261  		return -ENODEV;
3262  
3263  	atomic_inc(&base->block->open_count);
3264  	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
3265  		rc = -ENODEV;
3266  		goto unlock;
3267  	}
3268  
3269  	if (!try_module_get(base->discipline->owner)) {
3270  		rc = -EINVAL;
3271  		goto unlock;
3272  	}
3273  
3274  	if (dasd_probeonly) {
3275  		dev_info(&base->cdev->dev,
3276  			 "Accessing the DASD failed because it is in "
3277  			 "probeonly mode\n");
3278  		rc = -EPERM;
3279  		goto out;
3280  	}
3281  
3282  	if (base->state <= DASD_STATE_BASIC) {
3283  		DBF_DEV_EVENT(DBF_ERR, base, " %s",
3284  			      " Cannot open unrecognized device");
3285  		rc = -ENODEV;
3286  		goto out;
3287  	}
3288  	if ((mode & BLK_OPEN_WRITE) &&
3289  	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
3290  	     (base->features & DASD_FEATURE_READONLY))) {
3291  		rc = -EROFS;
3292  		goto out;
3293  	}
3294  	dasd_put_device(base);
3295  	return 0;
3296  
3297  out:
3298  	module_put(base->discipline->owner);
3299  unlock:
3300  	atomic_dec(&base->block->open_count);
3301  	dasd_put_device(base);
3302  	return rc;
3303  }
3304  
3305  static void dasd_release(struct gendisk *disk)
3306  {
3307  	struct dasd_device *base = dasd_device_from_gendisk(disk);
3308  	if (base) {
3309  		atomic_dec(&base->block->open_count);
3310  		module_put(base->discipline->owner);
3311  		dasd_put_device(base);
3312  	}
3313  }
3314  
3315  /*
3316   * Return disk geometry.
3317   */
3318  static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3319  {
3320  	struct dasd_device *base;
3321  
3322  	base = dasd_device_from_gendisk(bdev->bd_disk);
3323  	if (!base)
3324  		return -ENODEV;
3325  
3326  	if (!base->discipline ||
3327  	    !base->discipline->fill_geometry) {
3328  		dasd_put_device(base);
3329  		return -EINVAL;
3330  	}
3331  	base->discipline->fill_geometry(base->block, geo);
3332  	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
3333  	dasd_put_device(base);
3334  	return 0;
3335  }
3336  
3337  const struct block_device_operations
3338  dasd_device_operations = {
3339  	.owner		= THIS_MODULE,
3340  	.open		= dasd_open,
3341  	.release	= dasd_release,
3342  	.ioctl		= dasd_ioctl,
3343  	.compat_ioctl	= dasd_ioctl,
3344  	.getgeo		= dasd_getgeo,
3345  	.set_read_only	= dasd_set_read_only,
3346  };
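
/*
 * From user space the .getgeo callback above is reached through the
 * HDIO_GETGEO ioctl on the block device node. Illustrative user-space
 * sketch (the device node name is just an example):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/hdreg.h>
 *
 *	struct hd_geometry geo;
 *	int fd = open("/dev/dasda", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &geo) == 0)
 *		printf("%u cylinders, %u heads, %u sectors, start %lu\n",
 *		       geo.cylinders, geo.heads, geo.sectors, geo.start);
 */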
3347  
3348  /*******************************************************************************
3349   * end of block device operations
3350   */
3351  
3352  static void
3353  dasd_exit(void)
3354  {
3355  #ifdef CONFIG_PROC_FS
3356  	dasd_proc_exit();
3357  #endif
3358  	dasd_eer_exit();
3359  	kmem_cache_destroy(dasd_page_cache);
3360  	dasd_page_cache = NULL;
3361  	dasd_gendisk_exit();
3362  	dasd_devmap_exit();
3363  	if (dasd_debug_area != NULL) {
3364  		debug_unregister(dasd_debug_area);
3365  		dasd_debug_area = NULL;
3366  	}
3367  	dasd_statistics_removeroot();
3368  }
3369  
3370  /*
3371   * SECTION: common functions for ccw_driver use
3372   */
3373  
3374  /*
3375   * Is the device read-only?
3376   * Note that this function does not report the setting of the
3377   * readonly device attribute, but how it is configured in z/VM.
3378   */
3379  int dasd_device_is_ro(struct dasd_device *device)
3380  {
3381  	struct ccw_dev_id dev_id;
3382  	struct diag210 diag_data;
3383  	int rc;
3384  
3385  	if (!MACHINE_IS_VM)
3386  		return 0;
3387  	ccw_device_get_id(device->cdev, &dev_id);
3388  	memset(&diag_data, 0, sizeof(diag_data));
3389  	diag_data.vrdcdvno = dev_id.devno;
3390  	diag_data.vrdclen = sizeof(diag_data);
3391  	rc = diag210(&diag_data);
3392  	if (rc == 0 || rc == 2) {
3393  		return diag_data.vrdcvfla & 0x80;
3394  	} else {
3395  		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
3396  			  dev_id.devno, rc);
3397  		return 0;
3398  	}
3399  }
3400  EXPORT_SYMBOL_GPL(dasd_device_is_ro);
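
/*
 * Disciplines typically consult this while checking the device; a hedged
 * sketch of the usual pattern (the actual call sites are in the
 * discipline check_device routines):
 *
 *	if (dasd_device_is_ro(device))
 *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 */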
3401  
3402  static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
3403  {
3404  	struct ccw_device *cdev = data;
3405  	int ret;
3406  
3407  	ret = ccw_device_set_online(cdev);
3408  	if (ret)
3409  		dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
3410  }
3411  
3412  /*
3413   * Initial attempt at a probe function. This can be simplified once
3414   * the other detection code is gone.
3415   */
3416  int dasd_generic_probe(struct ccw_device *cdev)
3417  {
3418  	cdev->handler = &dasd_int_handler;
3419  
3420  	/*
3421  	 * Automatically online either all dasd devices (dasd_autodetect)
3422  	 * or all devices specified with dasd= parameters during
3423  	 * initial probe.
3424  	 */
3425  	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
3426  	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
3427  		async_schedule(dasd_generic_auto_online, cdev);
3428  	return 0;
3429  }
3430  EXPORT_SYMBOL_GPL(dasd_generic_probe);
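
/*
 * The generic callbacks in this section are hooked into a discipline's
 * ccw_driver. A hedged sketch of the usual wiring, with dasd_xxx_* as
 * placeholders for the discipline-specific parts (see dasd_eckd.c or
 * dasd_fba.c for the real tables):
 *
 *	static struct ccw_driver dasd_xxx_driver = {
 *		.ids	     = dasd_xxx_ids,
 *		.probe	     = dasd_xxx_probe,
 *		.remove	     = dasd_generic_remove,
 *		.set_online  = dasd_xxx_set_online,
 *		.set_offline = dasd_generic_set_offline,
 *		.notify	     = dasd_generic_notify,
 *		.path_event  = dasd_generic_path_event,
 *		.shutdown    = dasd_generic_shutdown,
 *	};
 *
 * The discipline probe callback then simply calls dasd_generic_probe()
 * after doing its own setup.
 */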
3431  
3432  void dasd_generic_free_discipline(struct dasd_device *device)
3433  {
3434  	/* Forget the discipline information. */
3435  	if (device->discipline) {
3436  		if (device->discipline->uncheck_device)
3437  			device->discipline->uncheck_device(device);
3438  		module_put(device->discipline->owner);
3439  		device->discipline = NULL;
3440  	}
3441  	if (device->base_discipline) {
3442  		module_put(device->base_discipline->owner);
3443  		device->base_discipline = NULL;
3444  	}
3445  }
3446  EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
3447  
3448  /*
3449   * This will one day be called from a global not_oper handler.
3450   * It is also used by driver_unregister during module unload.
3451   */
3452  void dasd_generic_remove(struct ccw_device *cdev)
3453  {
3454  	struct dasd_device *device;
3455  	struct dasd_block *block;
3456  
3457  	device = dasd_device_from_cdev(cdev);
3458  	if (IS_ERR(device))
3459  		return;
3460  
3461  	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3462  	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3463  		/* Already doing offline processing */
3464  		dasd_put_device(device);
3465  		return;
3466  	}
3467  	/*
3468  	 * This device is removed unconditionally. Set offline
3469  	 * flag to prevent dasd_open from opening it while it is
3470  	 * not quite down yet.
3471  	 */
3472  	dasd_set_target_state(device, DASD_STATE_NEW);
3473  	cdev->handler = NULL;
3474  	/* dasd_delete_device destroys the device reference. */
3475  	block = device->block;
3476  	dasd_delete_device(device);
3477  	/*
3478  	 * The life cycle of block is bound to the device, so delete it
3479  	 * after the device has been safely removed.
3480  	 */
3481  	if (block)
3482  		dasd_free_block(block);
3483  }
3484  EXPORT_SYMBOL_GPL(dasd_generic_remove);
3485  
3486  /*
3487   * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
3488   * the device is detected for the first time and is supposed to be used
3489   * or the user has started activation through sysfs.
3490   */
3491  int dasd_generic_set_online(struct ccw_device *cdev,
3492  			    struct dasd_discipline *base_discipline)
3493  {
3494  	struct dasd_discipline *discipline;
3495  	struct dasd_device *device;
3496  	struct device *dev;
3497  	int rc;
3498  
3499  	dev = &cdev->dev;
3500  
3501  	/* first online clears initial online feature flag */
3502  	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
3503  	device = dasd_create_device(cdev);
3504  	if (IS_ERR(device))
3505  		return PTR_ERR(device);
3506  
3507  	discipline = base_discipline;
3508  	if (device->features & DASD_FEATURE_USEDIAG) {
3509  		if (!dasd_diag_discipline_pointer) {
3510  			/* Try to load the required module. */
3511  			rc = request_module(DASD_DIAG_MOD);
3512  			if (rc) {
3513  				dev_warn(dev, "Setting the DASD online failed "
3514  					 "because the required module %s "
3515  					 "could not be loaded (rc=%d)\n",
3516  					 DASD_DIAG_MOD, rc);
3517  				dasd_delete_device(device);
3518  				return -ENODEV;
3519  			}
3520  		}
3521  		/* Module init could have failed, so check again here after
3522  		 * request_module(). */
3523  		if (!dasd_diag_discipline_pointer) {
3524  			dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
3525  			dasd_delete_device(device);
3526  			return -ENODEV;
3527  		}
3528  		discipline = dasd_diag_discipline_pointer;
3529  	}
3530  	if (!try_module_get(base_discipline->owner)) {
3531  		dasd_delete_device(device);
3532  		return -EINVAL;
3533  	}
3534  	device->base_discipline = base_discipline;
3535  	if (!try_module_get(discipline->owner)) {
3536  		dasd_delete_device(device);
3537  		return -EINVAL;
3538  	}
3539  	device->discipline = discipline;
3540  
3541  	/* check_device will allocate block device if necessary */
3542  	rc = discipline->check_device(device);
3543  	if (rc) {
3544  		dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
3545  			 discipline->name, rc);
3546  		dasd_delete_device(device);
3547  		return rc;
3548  	}
3549  
3550  	dasd_set_target_state(device, DASD_STATE_ONLINE);
3551  	if (device->state <= DASD_STATE_KNOWN) {
3552  		dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
3553  		rc = -ENODEV;
3554  		dasd_set_target_state(device, DASD_STATE_NEW);
3555  		if (device->block)
3556  			dasd_free_block(device->block);
3557  		dasd_delete_device(device);
3558  	} else {
3559  		dev_dbg(dev, "dasd_generic device found\n");
3560  	}
3561  
3562  	wait_event(dasd_init_waitq, _wait_for_device(device));
3563  
3564  	dasd_put_device(device);
3565  	return rc;
3566  }
3567  EXPORT_SYMBOL_GPL(dasd_generic_set_online);
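
/*
 * A discipline usually wraps this in its own set_online callback; a hedged
 * sketch with dasd_xxx_discipline as a placeholder (the FBA discipline, for
 * example, does little more than this):
 *
 *	static int dasd_xxx_set_online(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_set_online(cdev, &dasd_xxx_discipline);
 *	}
 */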
3568  
3569  int dasd_generic_set_offline(struct ccw_device *cdev)
3570  {
3571  	int max_count, open_count, rc;
3572  	struct dasd_device *device;
3573  	struct dasd_block *block;
3574  	unsigned long flags;
3575  	struct device *dev;
3576  
3577  	dev = &cdev->dev;
3578  
3579  	rc = 0;
3580  	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3581  	device = dasd_device_from_cdev_locked(cdev);
3582  	if (IS_ERR(device)) {
3583  		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3584  		return PTR_ERR(device);
3585  	}
3586  
3587  	/*
3588  	 * We must make sure that this device is currently not in use.
3589  	 * The open_count is increased for every opener; that includes
3590  	 * the blkdev_get in dasd_scan_partitions. We are only interested
3591  	 * in the other openers.
3592  	 */
3593  	if (device->block) {
3594  		max_count = device->block->bdev_file ? 0 : -1;
3595  		open_count = atomic_read(&device->block->open_count);
3596  		if (open_count > max_count) {
3597  			if (open_count > 0)
3598  				dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
3599  					 open_count);
3600  			else
3601  				dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
3602  			rc = -EBUSY;
3603  			goto out_err;
3604  		}
3605  	}
3606  
3607  	/*
3608  	 * Test if the offline processing is already running and exit if so.
3609  	 * If a safe offline is being processed, this can only be a normal
3610  	 * offline request that should be allowed to overtake the safe
3611  	 * offline and cancel any I/O we no longer want to wait for.
3612  	 */
3613  	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3614  		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3615  			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
3616  				  &device->flags);
3617  		} else {
3618  			rc = -EBUSY;
3619  			goto out_err;
3620  		}
3621  	}
3622  	set_bit(DASD_FLAG_OFFLINE, &device->flags);
3623  
3624  	/*
3625  	 * If safe_offline is requested, set the safe_offline_running flag
3626  	 * and clear safe_offline so that a subsequent normal offline
3627  	 * can overtake the safe_offline processing.
3628  	 */
3629  	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3630  	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3631  		/* need to unlock here to wait for outstanding I/O */
3632  		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3633  		/*
3634  		 * If we want to set the device safe offline, all I/O
3635  		 * operations should be finished before continuing the offline
3636  		 * process, so sync the bdev first and then wait for our
3637  		 * queues to become empty.
3638  		 */
3639  		if (device->block && device->block->bdev_file)
3640  			bdev_mark_dead(file_bdev(device->block->bdev_file), false);
3641  		dasd_schedule_device_bh(device);
3642  		rc = wait_event_interruptible(shutdown_waitq,
3643  					      _wait_for_empty_queues(device));
3644  		if (rc != 0)
3645  			goto interrupted;
3646  
3647  		/*
3648  		 * Check if a normal offline process overtook the safe offline
3649  		 * processing. In that case simply do nothing besides
3650  		 * returning that we got interrupted.
3651  		 * Otherwise mark safe offline as no longer running and
3652  		 * continue with the normal offline.
3653  		 */
3654  		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3655  		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3656  			rc = -ERESTARTSYS;
3657  			goto out_err;
3658  		}
3659  		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3660  	}
3661  	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3662  
3663  	dasd_set_target_state(device, DASD_STATE_NEW);
3664  	/* dasd_delete_device destroys the device reference. */
3665  	block = device->block;
3666  	dasd_delete_device(device);
3667  	/*
3668  	 * The life cycle of block is bound to the device, so delete it
3669  	 * after the device has been safely removed.
3670  	 */
3671  	if (block)
3672  		dasd_free_block(block);
3673  
3674  	return 0;
3675  
3676  interrupted:
3677  	/* interrupted by signal */
3678  	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3679  	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3680  	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3681  out_err:
3682  	dasd_put_device(device);
3683  	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3684  	return rc;
3685  }
3686  EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3687  
3688  int dasd_generic_last_path_gone(struct dasd_device *device)
3689  {
3690  	struct dasd_ccw_req *cqr;
3691  
3692  	dev_warn(&device->cdev->dev, "No operational channel path is left "
3693  		 "for the device\n");
3694  	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
3695  	/* First call extended error reporting and check for autoquiesce. */
3696  	dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
3697  
3698  	if (device->state < DASD_STATE_BASIC)
3699  		return 0;
3700  	/* Device is active. We want to keep it. */
3701  	list_for_each_entry(cqr, &device->ccw_queue, devlist)
3702  		if ((cqr->status == DASD_CQR_IN_IO) ||
3703  		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
3704  			cqr->status = DASD_CQR_QUEUED;
3705  			cqr->retries++;
3706  		}
3707  	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
3708  	dasd_device_clear_timer(device);
3709  	dasd_schedule_device_bh(device);
3710  	return 1;
3711  }
3712  EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
3713  
3714  int dasd_generic_path_operational(struct dasd_device *device)
3715  {
3716  	dev_info(&device->cdev->dev, "A channel path to the device has become "
3717  		 "operational\n");
3718  	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
3719  	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
3720  	dasd_schedule_device_bh(device);
3721  	if (device->block) {
3722  		dasd_schedule_block_bh(device->block);
3723  		if (device->block->gdp)
3724  			blk_mq_run_hw_queues(device->block->gdp->queue, true);
3725  	}
3726  
3727  	if (!device->stopped)
3728  		wake_up(&generic_waitq);
3729  
3730  	return 1;
3731  }
3732  EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
3733  
3734  int dasd_generic_notify(struct ccw_device *cdev, int event)
3735  {
3736  	struct dasd_device *device;
3737  	int ret;
3738  
3739  	device = dasd_device_from_cdev_locked(cdev);
3740  	if (IS_ERR(device))
3741  		return 0;
3742  	ret = 0;
3743  	switch (event) {
3744  	case CIO_GONE:
3745  	case CIO_BOXED:
3746  	case CIO_NO_PATH:
3747  		dasd_path_no_path(device);
3748  		ret = dasd_generic_last_path_gone(device);
3749  		break;
3750  	case CIO_OPER:
3751  		ret = 1;
3752  		if (dasd_path_get_opm(device))
3753  			ret = dasd_generic_path_operational(device);
3754  		break;
3755  	}
3756  	dasd_put_device(device);
3757  	return ret;
3758  }
3759  EXPORT_SYMBOL_GPL(dasd_generic_notify);
3760  
3761  void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3762  {
3763  	struct dasd_device *device;
3764  	int chp, oldopm, hpfpm, ifccpm;
3765  
3766  	device = dasd_device_from_cdev_locked(cdev);
3767  	if (IS_ERR(device))
3768  		return;
3769  
3770  	oldopm = dasd_path_get_opm(device);
3771  	for (chp = 0; chp < 8; chp++) {
3772  		if (path_event[chp] & PE_PATH_GONE) {
3773  			dasd_path_notoper(device, chp);
3774  		}
3775  		if (path_event[chp] & PE_PATH_AVAILABLE) {
3776  			dasd_path_available(device, chp);
3777  			dasd_schedule_device_bh(device);
3778  		}
3779  		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
3780  			if (!dasd_path_is_operational(device, chp) &&
3781  			    !dasd_path_need_verify(device, chp)) {
3782  				/*
3783  				 * We cannot establish a pathgroup on an
3784  				 * unavailable path, so trigger a path
3785  				 * verification first.
3786  				 */
3787  				dasd_path_available(device, chp);
3788  				dasd_schedule_device_bh(device);
3789  			}
3790  			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3791  				      "Pathgroup re-established\n");
3792  			if (device->discipline->kick_validate)
3793  				device->discipline->kick_validate(device);
3794  		}
3795  		if (path_event[chp] & PE_PATH_FCES_EVENT) {
3796  			dasd_path_fcsec_update(device, chp);
3797  			dasd_schedule_device_bh(device);
3798  		}
3799  	}
3800  	hpfpm = dasd_path_get_hpfpm(device);
3801  	ifccpm = dasd_path_get_ifccpm(device);
3802  	if (!dasd_path_get_opm(device) && hpfpm) {
3803  		/*
3804  		 * The device has no operational paths, but at least one path
3805  		 * is disabled due to HPF errors.
3806  		 * Disable HPF entirely and use the path(s) again.
3807  		 */
3808  		if (device->discipline->disable_hpf)
3809  			device->discipline->disable_hpf(device);
3810  		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
3811  		dasd_path_set_tbvpm(device, hpfpm);
3812  		dasd_schedule_device_bh(device);
3813  		dasd_schedule_requeue(device);
3814  	} else if (!dasd_path_get_opm(device) && ifccpm) {
3815  		/*
3816  		 * The device has no operational paths, but at least one path
3817  		 * is disabled due to IFCC errors.
3818  		 * Trigger path verification on the paths with IFCC errors.
3819  		 */
3820  		dasd_path_set_tbvpm(device, ifccpm);
3821  		dasd_schedule_device_bh(device);
3822  	}
3823  	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
3824  		dev_warn(&device->cdev->dev,
3825  			 "No verified channel paths remain for the device\n");
3826  		DBF_DEV_EVENT(DBF_WARNING, device,
3827  			      "%s", "last verified path gone");
3828  		/* First call extended error reporting and check for autoquiesce. */
3829  		dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
3830  		dasd_device_set_stop_bits(device,
3831  					  DASD_STOPPED_DC_WAIT);
3832  	}
3833  	dasd_put_device(device);
3834  }
3835  EXPORT_SYMBOL_GPL(dasd_generic_path_event);
3836  
3837  int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
3838  {
3839  	if (!dasd_path_get_opm(device) && lpm) {
3840  		dasd_path_set_opm(device, lpm);
3841  		dasd_generic_path_operational(device);
3842  	} else
3843  		dasd_path_add_opm(device, lpm);
3844  	return 0;
3845  }
3846  EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
3847  
3848  void dasd_generic_space_exhaust(struct dasd_device *device,
3849  				struct dasd_ccw_req *cqr)
3850  {
3851  	/* First call extended error reporting and check for autoquiesce. */
3852  	dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC);
3853  
3854  	if (device->state < DASD_STATE_BASIC)
3855  		return;
3856  
3857  	if (cqr->status == DASD_CQR_IN_IO ||
3858  	    cqr->status == DASD_CQR_CLEAR_PENDING) {
3859  		cqr->status = DASD_CQR_QUEUED;
3860  		cqr->retries++;
3861  	}
3862  	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
3863  	dasd_device_clear_timer(device);
3864  	dasd_schedule_device_bh(device);
3865  }
3866  EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
3867  
3868  void dasd_generic_space_avail(struct dasd_device *device)
3869  {
3870  	dev_info(&device->cdev->dev, "Extent pool space is available\n");
3871  	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
3872  
3873  	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
3874  	dasd_schedule_device_bh(device);
3875  
3876  	if (device->block) {
3877  		dasd_schedule_block_bh(device->block);
3878  		if (device->block->gdp)
3879  			blk_mq_run_hw_queues(device->block->gdp->queue, true);
3880  	}
3881  	if (!device->stopped)
3882  		wake_up(&generic_waitq);
3883  }
3884  EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
3885  
3886  /*
3887   * clear active requests and requeue them to block layer if possible
3888   */
3889  int dasd_generic_requeue_all_requests(struct dasd_device *device)
3890  {
3891  	struct dasd_block *block = device->block;
3892  	struct list_head requeue_queue;
3893  	struct dasd_ccw_req *cqr, *n;
3894  	int rc;
3895  
3896  	if (!block)
3897  		return 0;
3898  
3899  	INIT_LIST_HEAD(&requeue_queue);
3900  	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
3901  
3902  	/* Now call the callback function of flushed requests */
3903  restart_cb:
3904  	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
3905  		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3906  		/* Process finished ERP request. */
3907  		if (cqr->refers) {
3908  			spin_lock_bh(&block->queue_lock);
3909  			__dasd_process_erp(block->base, cqr);
3910  			spin_unlock_bh(&block->queue_lock);
3911  			/* restart the list_for_each_entry_safe loop since
3912  			 * __dasd_process_erp might remove multiple elements
3913  			 */
3914  			goto restart_cb;
3915  		}
3916  		_dasd_requeue_request(cqr);
3917  		list_del_init(&cqr->blocklist);
3918  		cqr->block->base->discipline->free_cp(
3919  			cqr, (struct request *) cqr->callback_data);
3920  	}
3921  	dasd_schedule_device_bh(device);
3922  	return rc;
3923  }
3924  EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);
3925  
3926  static void do_requeue_requests(struct work_struct *work)
3927  {
3928  	struct dasd_device *device = container_of(work, struct dasd_device,
3929  						  requeue_requests);
3930  	dasd_generic_requeue_all_requests(device);
3931  	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
3932  	if (device->block)
3933  		dasd_schedule_block_bh(device->block);
3934  	dasd_put_device(device);
3935  }
3936  
3937  void dasd_schedule_requeue(struct dasd_device *device)
3938  {
3939  	dasd_get_device(device);
3940  	/* Queue the requeue_requests work to the kernel event daemon. */
3941  	if (!schedule_work(&device->requeue_requests))
3942  		dasd_put_device(device);
3943  }
3944  EXPORT_SYMBOL(dasd_schedule_requeue);
3945  
3946  static int dasd_handle_autoquiesce(struct dasd_device *device,
3947  				   struct dasd_ccw_req *cqr,
3948  				   unsigned int reason)
3949  {
3950  	/* in any case write eer message with reason */
3951  	if (dasd_eer_enabled(device))
3952  		dasd_eer_write(device, cqr, reason);
3953  
3954  	if (!test_bit(reason, &device->aq_mask))
3955  		return 0;
3956  
3957  	/* notify eer about autoquiesce */
3958  	if (dasd_eer_enabled(device))
3959  		dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
3960  
3961  	dev_info(&device->cdev->dev,
3962  		 "The DASD has been put in the quiesce state\n");
3963  	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
3964  
3965  	if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
3966  		dasd_schedule_requeue(device);
3967  
3968  	return 1;
3969  }
3970  
3971  static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
3972  						   int rdc_buffer_size,
3973  						   int magic)
3974  {
3975  	struct dasd_ccw_req *cqr;
3976  	struct ccw1 *ccw;
3977  
3978  	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
3979  				   NULL);
3980  
3981  	if (IS_ERR(cqr)) {
3982  		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3983  				"Could not allocate RDC request");
3984  		return cqr;
3985  	}
3986  
3987  	ccw = cqr->cpaddr;
3988  	ccw->cmd_code = CCW_CMD_RDC;
3989  	ccw->cda = virt_to_dma32(cqr->data);
3990  	ccw->flags = 0;
3991  	ccw->count = rdc_buffer_size;
3992  	cqr->startdev = device;
3993  	cqr->memdev = device;
3994  	cqr->expires = 10*HZ;
3995  	cqr->retries = 256;
3996  	cqr->buildclk = get_tod_clock();
3997  	cqr->status = DASD_CQR_FILLED;
3998  	return cqr;
3999  }
4000  
4001  
4002  int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
4003  				void *rdc_buffer, int rdc_buffer_size)
4004  {
4005  	int ret;
4006  	struct dasd_ccw_req *cqr;
4007  
4008  	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
4009  	if (IS_ERR(cqr))
4010  		return PTR_ERR(cqr);
4011  
4012  	ret = dasd_sleep_on(cqr);
4013  	if (ret == 0)
4014  		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
4015  	dasd_sfree_request(cqr, cqr->memdev);
4016  	return ret;
4017  }
4018  EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
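
/*
 * Typical use from a discipline's device checking path; a hedged sketch
 * (buffer type and magic follow the ECKD discipline, the exact call is in
 * dasd_eckd.c):
 *
 *	struct dasd_eckd_characteristics rdc_data;
 *	int rc;
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					 &rdc_data, sizeof(rdc_data));
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Read device characteristics failed, rc=%d\n", rc);
 */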
4019  
4020  /*
4021   *   In command mode and transport mode we need to look for sense
4022   *   data in different places. The sense data itself is always
4023   *   an array of 32 bytes, so we can unify the sense data access
4024   *   for both modes.
4025   */
4026  char *dasd_get_sense(struct irb *irb)
4027  {
4028  	struct tsb *tsb = NULL;
4029  	char *sense = NULL;
4030  
4031  	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
4032  		if (irb->scsw.tm.tcw)
4033  			tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
4034  		if (tsb && tsb->length == 64 && tsb->flags)
4035  			switch (tsb->flags & 0x07) {
4036  			case 1:	/* tsa_iostat */
4037  				sense = tsb->tsa.iostat.sense;
4038  				break;
4039  			case 2: /* tsa_ddpc */
4040  				sense = tsb->tsa.ddpc.sense;
4041  				break;
4042  			default:
4043  				/* currently we don't use interrogate data */
4044  				break;
4045  			}
4046  	} else if (irb->esw.esw0.erw.cons) {
4047  		sense = irb->ecw;
4048  	}
4049  	return sense;
4050  }
4051  EXPORT_SYMBOL_GPL(dasd_get_sense);
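
/*
 * Callers treat the return value as a pointer to the 32 byte sense buffer,
 * or NULL if no sense data is available. A hedged sketch of the common
 * pattern in the interrupt and ERP paths:
 *
 *	char *sense = dasd_get_sense(irb);
 *
 *	if (sense && (sense[0] & 0x80))
 *		handle_command_reject(device, cqr);
 *
 * Here 0x80 stands for the command reject bit in sense byte 0, and
 * handle_command_reject() is a hypothetical helper; both are for
 * illustration only.
 */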
4052  
4053  void dasd_generic_shutdown(struct ccw_device *cdev)
4054  {
4055  	struct dasd_device *device;
4056  
4057  	device = dasd_device_from_cdev(cdev);
4058  	if (IS_ERR(device))
4059  		return;
4060  
4061  	if (device->block)
4062  		dasd_schedule_block_bh(device->block);
4063  
4064  	dasd_schedule_device_bh(device);
4065  
4066  	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
4067  }
4068  EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
4069  
4070  static int __init dasd_init(void)
4071  {
4072  	int rc;
4073  
4074  	init_waitqueue_head(&dasd_init_waitq);
4075  	init_waitqueue_head(&dasd_flush_wq);
4076  	init_waitqueue_head(&generic_waitq);
4077  	init_waitqueue_head(&shutdown_waitq);
4078  
4079  	/* register 'common' DASD debug area, used for all DBF_XXX calls */
4080  	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
4081  	if (dasd_debug_area == NULL) {
4082  		rc = -ENOMEM;
4083  		goto failed;
4084  	}
4085  	debug_register_view(dasd_debug_area, &debug_sprintf_view);
4086  	debug_set_level(dasd_debug_area, DBF_WARNING);
4087  
4088  	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
4089  
4090  	dasd_diag_discipline_pointer = NULL;
4091  
4092  	dasd_statistics_createroot();
4093  
4094  	rc = dasd_devmap_init();
4095  	if (rc)
4096  		goto failed;
4097  	rc = dasd_gendisk_init();
4098  	if (rc)
4099  		goto failed;
4100  	rc = dasd_parse();
4101  	if (rc)
4102  		goto failed;
4103  	rc = dasd_eer_init();
4104  	if (rc)
4105  		goto failed;
4106  #ifdef CONFIG_PROC_FS
4107  	rc = dasd_proc_init();
4108  	if (rc)
4109  		goto failed;
4110  #endif
4111  
4112  	return 0;
4113  failed:
4114  	pr_info("The DASD device driver could not be initialized\n");
4115  	dasd_exit();
4116  	return rc;
4117  }
4118  
4119  module_init(dasd_init);
4120  module_exit(dasd_exit);
4121