/*
 * bus driver for ccw devices
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>

#include <asm/ccwdev.h>
#include <asm/param.h>		/* HZ */

#include "cio_debug.h"
#include "blacklist.h"
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;
/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}
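/*
 * Illustrative sketch (not part of this file): the kind of id table a
 * ccw driver supplies for the matching above. All type/model numbers
 * here are made up.
 */
#if 0
static struct ccw_device_id example_match_ids[] = {
	{ CCW_DEVICE(0x1234, 0x56) },	/* control unit type/model only */
	{ CCW_DEVICE_DEVTYPE(0x1234, 0x56, 0x7890, 0x12) },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, example_match_ids);
#endif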
/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
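/*
 * A small usage sketch (hypothetical id values): with cu_type 0x3088,
 * cu_model 0x60 and no device type, the buffer below ends up holding
 * "ccw:t3088m60dtdm" - the same format used for the MODALIAS uevent
 * variable.
 */
#if 0
static void example_alias(struct ccw_device_id *id)
{
	char buf[30];

	snprint_alias(buf, sizeof(buf), id, "");
}
#endif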
/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}
static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};
int __init io_subchannel_init(void)
{
	int ret;

	setup_timer(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}
/************************ device handling **************************/

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
				id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}
int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}
static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
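#if 0
/*
 * Usage sketch (hypothetical helper): a driver taking its own device
 * offline, e.g. from an error handler. Must run in process context
 * without the ccw device lock held, per the comment above.
 */
static void example_fatal_error(struct ccw_device *cdev)
{
	if (ccw_device_set_offline(cdev))
		dev_warn(&cdev->dev, "setting device offline failed\n");
}
#endif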
/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
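#if 0
/*
 * Sketch of the driver-side hooks invoked above (hypothetical
 * "example" driver): set_online() is called after the common code has
 * enabled the device, set_offline() before it disables it again.
 */
static int example_set_online(struct ccw_device *cdev)
{
	/* Start driver I/O, allocate queues etc. */
	return 0;
}

static int example_set_offline(struct ccw_device *cdev)
{
	/* Quiesce driver I/O; a non-zero return aborts the offline. */
	return 0;
}
#endif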
static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
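/*
 * Note: besides "0" and "1", online_store() accepts "force", e.g.
 *	echo force > /sys/bus/ccw/devices/0.0.4711/online
 * (device address hypothetical), which additionally tries to break a
 * boxed device's reservation via ccw_device_stlck() before onlining.
 */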
static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}
static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int ccw_device_add(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;

	dev->bus = &ccw_bus_type;
	return device_add(dev);
}
static int match_dev_id(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found, its reference count is increased and it is
 *  returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
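#if 0
/*
 * Usage sketch (hypothetical ssid/devno): look up a device by its id
 * and drop the reference that get_ccwdev_by_dev_id() took when done.
 */
static void example_lookup(void)
{
	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x4711 };
	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);

	if (cdev) {
		/* ... use cdev ... */
		put_device(&cdev->dev);
	}
}
#endif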
static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}
static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}
/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}
static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}
/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}
static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}
static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		if (dev_get_uevent_suppress(&sch->dev)) {
			/* should always be the case for the console */
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}
static int
io_subchannel_remove (struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}
static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}
static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}
static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		/* fall through */
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}
static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}
static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}
enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}
/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}
static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}
#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}
struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv) {
		put_device(&sch->dev);
		return ERR_PTR(-ENOMEM);
	}
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;
}
void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}
/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay_simple(100);
	}
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif
/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strcmp(bus_id, dev_name(dev)) == 0);
}

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
				 __ccwdev_check_busid);

	return dev ? to_ccwdev(dev) : NULL;
}
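#if 0
/*
 * Usage sketch (hypothetical driver and bus id): find "0.0.4711"
 * among the devices bound to a given driver; the reference obtained
 * must be dropped with put_device() when done.
 */
static void example_find(struct ccw_driver *example_driver)
{
	struct ccw_device *cdev;

	cdev = get_ccwdev_by_busid(example_driver, "0.0.4711");
	if (cdev) {
		/* ... use cdev ... */
		put_device(&cdev->dev);
	}
}
#endif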
/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}
static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	spin_unlock_irq(cdev->ccwlock);
	__disable_cmf(cdev);

	return 0;
}
static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}
static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}
static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}
static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}
static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}
static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}
static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;
	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}
	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}
static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};
/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
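#if 0
/*
 * Minimal registration sketch (hypothetical "example" driver with
 * made-up ids; set_online/set_offline as sketched further above):
 * everything a bus client needs for ccw_bus_match() and
 * ccw_device_probe() to bind it to devices.
 */
static struct ccw_device_id example_ids[] = {
	{ CCW_DEVICE(0x1234, 0x56) },
	{ /* end of list */ },
};

static int example_probe(struct ccw_device *cdev)
{
	return 0;	/* accept the device */
}

static struct ccw_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
	.ids		= example_ids,
	.probe		= example_probe,
	.set_online	= example_set_online,
	.set_offline	= example_set_offline,
};

static int __init example_init(void)
{
	return ccw_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	ccw_driver_unregister(&example_driver);
}
module_exit(example_exit);
#endif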
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		/* fall-through */
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}
/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
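#if 0
/*
 * Usage sketch: request deferred unregistration of a device; the
 * ccwlock must be held, as in purge_fn() above.
 */
static void example_unreg(struct ccw_device *cdev)
{
	spin_lock_irq(cdev->ccwlock);
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	spin_unlock_irq(cdev->ccwlock);
}
#endif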
/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
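#if 0
/*
 * Usage sketch: a driver triggering model-dependent logging when it
 * sees an inconsistent device state.
 */
static void example_log_state(struct ccw_device *cdev)
{
	if (ccw_device_siosl(cdev))
		dev_warn(&cdev->dev, "could not trigger logging\n");
}
#endif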
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);