// SPDX-License-Identifier: GPL-2.0
/*
 * finite state machine for device handling
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);

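/*
 * Note: booting with the "ccw_timeout_log" parameter on the kernel command
 * line enables the verbose timeout dump below; there is no switch to turn
 * it off again at runtime.
 */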
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       dma32_to_virt(orb->tm.tcw),
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if (dma32_to_virt(orb->cmd.cpa) ==
		    &private->dma_area->sense_ccw ||
		    dma32_to_virt(orb->cmd.cpa) ==
		    cdev->private->dma_area->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       dma32_to_virt(orb->cmd.cpa),
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
void
ccw_device_timeout(struct timer_list *t)
{
	struct ccw_device_private *priv = from_timer(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set (or clear) the device timeout; expires is a relative value in
 * jiffies, 0 cancels a pending timer.
 */
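/*
 * Example (values as used by callers in this file):
 *
 *	ccw_device_set_timeout(cdev, 3 * HZ);	- arm a three second timeout
 *	ccw_device_set_timeout(cdev, 0);	- cancel a pending timeout
 */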
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0)
		del_timer(&cdev->private->timer);
	else
		mod_timer(&cdev->private->timer, jiffies + expires);
}

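/*
 * Terminate the current I/O by escalating through cancel, halt and clear
 * subchannel (see cio_cancel_halt_clear()); iretry counts down the attempts
 * that are left. As used in this file, -EBUSY means the operation is still
 * in progress and the callers re-arm a timer to retry.
 */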
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);

	if (ret == -EIO)
		CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);

	return ret;
}

void ccw_device_update_sense_data(struct ccw_device *cdev)
{
	memset(&cdev->id, 0, sizeof(cdev->id));
	cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
	cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
	cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
	cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}

int ccw_device_test_sense_data(struct ccw_device *cdev)
{
	return cdev->id.cu_type ==
		cdev->private->dma_area->senseid.cu_type &&
		cdev->id.cu_model ==
		cdev->private->dma_area->senseid.cu_model &&
		cdev->id.dev_type ==
		cdev->private->dma_area->senseid.dev_type &&
		cdev->id.dev_model ==
		cdev->private->dma_area->senseid.dev_model;
}

/*
 * The machine won't give us any notification by machine check if a chpid
 * has been varied online on the SE, so we have to find out by magic (i.e.
 * driving the channel subsystem to device selection and updating our path
 * masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/**
 * ccw_device_notify() - inform the device's driver about an event
 * @cdev: device for which an event occurred
 * @event: event that occurred
 *
 * Returns:
 *   -%EINVAL if the device is offline or has no driver.
 *   -%EOPNOTSUPP if the device's driver has no notifier registered.
 *   %NOTIFY_OK if the driver wants to keep the device.
 *   %NOTIFY_BAD if the driver doesn't want to keep the device.
 */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
	int ret = -EINVAL;

	if (!cdev->drv)
		goto out;
	if (!cdev->online)
		goto out;
	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      event);
	if (!cdev->drv->notify) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (cdev->drv->notify(cdev, event))
		ret = NOTIFY_OK;
	else
		ret = NOTIFY_BAD;
out:
	return ret;
}

static void ccw_device_oper_notify(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
		/* Re-enable channel measurements, if needed. */
		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
		/* Save indication for new paths. */
		cdev->private->path_new_mask = sch->vpm;
		return;
	}
	/* Driver doesn't want device back. */
	ccw_device_set_notoper(cdev);
	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}

/*
 * Start device recognition.
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch))) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}

/*
 * Handle events for states that use the ccw request infrastructure.
 */
static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
	switch (e) {
	case DEV_EVENT_NOTOPER:
		ccw_request_notoper(cdev);
		break;
	case DEV_EVENT_INTERRUPT:
		ccw_request_handler(cdev);
		break;
	case DEV_EVENT_TIMEOUT:
		ccw_request_timeout(cdev);
		break;
	default:
		break;
	}
}

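/*
 * Note: path_event[] carries one PE_* bitmask per channel path, indexed
 * like the chpid array in the pmcw (path 0 corresponds to mask 0x80).
 */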
static void ccw_device_report_path_events(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int path_event[8];
	int chp, mask;

	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		path_event[chp] = PE_NONE;
		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
			path_event[chp] |= PE_PATH_GONE;
		if (mask & cdev->private->path_new_mask & sch->vpm)
			path_event[chp] |= PE_PATH_AVAILABLE;
		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
	}
	if (cdev->online && cdev->drv->path_event)
		cdev->drv->path_event(cdev, path_event);
}

static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
	cdev->private->path_gone_mask = 0;
	cdev->private->path_new_mask = 0;
	cdev->private->pgid_reset_mask = 0;
}

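/*
 * Build a fake irb that looks like a start function with deferred cc 1
 * (start pending, status pending). It is delivered to the driver after
 * path verification completes, for an I/O request that was accepted while
 * verification was still running (see the fake_irb handling in
 * ccw_device_verify_done() below).
 */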
static void create_fake_irb(struct irb *irb, int type)
{
	memset(irb, 0, sizeof(*irb));
	if (type == FAKE_CMD_IRB) {
		struct cmd_scsw *scsw = &irb->scsw.cmd;

		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	} else if (type == FAKE_TM_IRB) {
		struct tm_scsw *scsw = &irb->scsw.tm;

		scsw->x = 1;
		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	}
}

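/*
 * A "broken" path is one that is in principle usable (pam & opm) but did
 * not make it into the verified path mask. Trigger the recovery thread
 * when new broken paths show up, so that path verification is retried
 * later on.
 */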
static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;

	if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
		ccw_device_schedule_recovery();

	cdev->private->path_broken_mask = broken_paths;
}

void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno,
				      cdev->private->intparm,
				      cdev->private->flags.fake_irb);
			create_fake_irb(&cdev->private->dma_area->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->dma_area->irb);
			memset(&cdev->private->dma_area->irb, 0,
			       sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		ccw_device_handle_broken_paths(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Start initial path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	else
		ccw_device_set_disconnected(cdev);
}

/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel, we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
	     SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until the final status has been delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}

/*
 * Handle path verification event in boxed state.
 */
static void ccw_device_boxed_verify(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (cdev->online) {
		if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch)))
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		else
			ccw_device_online_verify(cdev, dev_event);
	} else
		css_schedule_eval(sch->schid);
}

/*
 * Pass interrupt to device driver.
 */
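/*
 * Returns 1 if the accumulated irb was delivered to the driver's interrupt
 * handler (and cleared afterwards), 0 if the interrupt was filtered out
 * according to the conditions listed below.
 */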
static int ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * we allow for the device action handler if:
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	if (ending_status)
		ccw_device_set_timeout(cdev, 0);

	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->dma_area->irb);

	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
	return 1;
}

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->dma_area->irb, irb,
			       sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0)
			cdev->private->state = DEV_STATE_W4SENSE;
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
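/*
 * As elsewhere in this file, -EBUSY from ccw_device_cancel_halt_clear()
 * means the kill is still in progress, so re-arm the timer and continue in
 * the TIMEOUT_KILL state; any other error makes the device not operational.
 */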
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -ETIMEDOUT;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

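/*
 * Forcibly terminate any I/O in flight. async_kill_io_rc is the error the
 * driver's handler will see as an ERR_PTR() intparm once the kill has
 * completed, either directly below or later from the DEV_STATE_TIMEOUT_KILL
 * handlers above.
 */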
void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -EIO;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch)) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;
	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}

void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}

static void ccw_device_disabled_irq(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_NOT_OPER;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
	} else {
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * device statemachine
 */
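/*
 * The table below is indexed by [current device state][incoming event];
 * dev_fsm_event() looks up and calls the matching handler. Callers are
 * expected to hold the ccwlock (see e.g. ccw_device_timeout() above).
 */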
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, /* FIXME */
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);