/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"

struct srp_host_attrs {
        atomic_t next_port_id;
};
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 8

struct srp_internal {
        struct scsi_transport_template t;
        struct srp_function_template *f;

        struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

        struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
        struct transport_container rport_attr_cont;
};

static int scsi_is_srp_rport(const struct device *dev);

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)

static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
        return dev_to_shost(r->dev.parent);
}

static int find_child_rport(struct device *dev, void *data)
{
        struct device **child = data;

        if (scsi_is_srp_rport(dev)) {
                WARN_ON_ONCE(*child);
                *child = dev;
        }
        return 0;
}

static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
        struct device *child = NULL;

        WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
                                           find_child_rport) < 0);
        return child ? dev_to_rport(child) : NULL;
}

/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths in a timely
 * manner. Hence do not allow all three parameters to be disabled
 * simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
{
        if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
                return -EINVAL;
        if (reconnect_delay == 0)
                return -EINVAL;
        if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                return -EINVAL;
        if (fast_io_fail_tmo < 0 &&
            dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                return -EINVAL;
        if (dev_loss_tmo >= LONG_MAX / HZ)
                return -EINVAL;
        if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
            fast_io_fail_tmo >= dev_loss_tmo)
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
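
/*
 * Illustrative examples of the rules above (validity of the first call also
 * depends on the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT):
 *
 *      srp_tmo_valid(10, 15, 600)  -- typically valid: reconnect every 10 s,
 *                                     fail I/O fast after 15 s, give up on
 *                                     the port after 600 s
 *      srp_tmo_valid(-1, -1, -1)   -- always invalid: all three mechanisms
 *                                     disabled, so multipath could never
 *                                     detect a failed path
 *      srp_tmo_valid(10, 60, 15)   -- always invalid: fast_io_fail_tmo must
 *                                     be smaller than dev_loss_tmo
 */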

static int srp_host_setup(struct transport_container *tc, struct device *dev,
                          struct device *cdev)
{
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

        atomic_set(&srp_host->next_port_id, 0);
        return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
                               NULL, NULL);

static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
                               NULL, NULL, NULL);

#define SRP_PID(p) \
        (p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
        (p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
        (p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
        (p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]

#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
        "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
                  char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
        u32 value;
        char *name;
} srp_rport_role_names[] = {
        {SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
        {SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
                     char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int i;
        char *name = NULL;

        for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
                if (srp_rport_role_names[i].value == rport->roles) {
                        name = srp_rport_role_names[i].name;
                        break;
                }
        return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static ssize_t store_srp_rport_delete(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct srp_internal *i = to_srp_internal(shost->transportt);

        if (i->f->rport_delete) {
                i->f->rport_delete(rport);
                return count;
        } else {
                return -ENOSYS;
        }
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);

static ssize_t show_srp_rport_state(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        static const char *const state_name[] = {
                [SRP_RPORT_RUNNING] = "running",
                [SRP_RPORT_BLOCKED] = "blocked",
                [SRP_RPORT_FAIL_FAST] = "fail-fast",
                [SRP_RPORT_LOST] = "lost",
        };
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        enum srp_rport_state state = rport->state;

        return sprintf(buf, "%s\n",
                       (unsigned)state < ARRAY_SIZE(state_name) ?
                       state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);

static ssize_t srp_show_tmo(char *buf, int tmo)
{
        return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}

int srp_parse_tmo(int *tmo, const char *buf)
{
        int res = 0;

        if (strncmp(buf, "off", 3) != 0)
                res = kstrtoint(buf, 0, tmo);
        else
                *tmo = -1;

        return res;
}
EXPORT_SYMBOL(srp_parse_tmo);
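
/*
 * The sysfs store handlers below all follow the same pattern: parse the new
 * value with srp_parse_tmo(), validate the resulting combination with
 * srp_tmo_valid() and only then update the rport. From user space (the rport
 * name is illustrative):
 *
 *      echo 15  > /sys/class/srp_remote_ports/port-1:1/fast_io_fail_tmo
 *      echo off > /sys/class/srp_remote_ports/port-1:1/fast_io_fail_tmo
 *
 * "off" stores -1, which disables the corresponding timer.
 */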

static ssize_t show_reconnect_delay(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->reconnect_delay);
}

static ssize_t store_reconnect_delay(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, const size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res, delay;

        res = srp_parse_tmo(&delay, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
                            rport->dev_loss_tmo);
        if (res)
                goto out;

        if (rport->reconnect_delay <= 0 && delay > 0 &&
            rport->state != SRP_RPORT_RUNNING) {
                queue_delayed_work(system_long_wq, &rport->reconnect_work,
                                   delay * HZ);
        } else if (delay <= 0) {
                cancel_delayed_work(&rport->reconnect_work);
        }
        rport->reconnect_delay = delay;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
                   store_reconnect_delay);

static ssize_t show_failed_reconnects(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return sprintf(buf, "%d\n", rport->failed_reconnects);
}

static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);

static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
                                               struct device_attribute *attr,
                                               char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}

static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res;
        int fast_io_fail_tmo;

        res = srp_parse_tmo(&fast_io_fail_tmo, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
                            rport->dev_loss_tmo);
        if (res)
                goto out;
        rport->fast_io_fail_tmo = fast_io_fail_tmo;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
                   show_srp_rport_fast_io_fail_tmo,
                   store_srp_rport_fast_io_fail_tmo);

static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->dev_loss_tmo);
}

static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res;
        int dev_loss_tmo;

        res = srp_parse_tmo(&dev_loss_tmo, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
                            dev_loss_tmo);
        if (res)
                goto out;
        rport->dev_loss_tmo = dev_loss_tmo;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
                   show_srp_rport_dev_loss_tmo,
                   store_srp_rport_dev_loss_tmo);
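
/*
 * Summary of the rport state transitions enforced by srp_rport_set_state()
 * below: SRP_RPORT_BLOCKED may only be entered from SRP_RPORT_RUNNING, no
 * transition leaves SRP_RPORT_LOST, and every other transition is permitted.
 */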

static int srp_rport_set_state(struct srp_rport *rport,
                               enum srp_rport_state new_state)
{
        enum srp_rport_state old_state = rport->state;

        lockdep_assert_held(&rport->mutex);

        switch (new_state) {
        case SRP_RPORT_RUNNING:
                switch (old_state) {
                case SRP_RPORT_LOST:
                        goto invalid;
                default:
                        break;
                }
                break;
        case SRP_RPORT_BLOCKED:
                switch (old_state) {
                case SRP_RPORT_RUNNING:
                        break;
                default:
                        goto invalid;
                }
                break;
        case SRP_RPORT_FAIL_FAST:
                switch (old_state) {
                case SRP_RPORT_LOST:
                        goto invalid;
                default:
                        break;
                }
                break;
        case SRP_RPORT_LOST:
                break;
        }
        rport->state = new_state;
        return 0;

invalid:
        return -EINVAL;
}

/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, reconnect_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        int delay, res;

        res = srp_reconnect_rport(rport);
        if (res != 0) {
                shost_printk(KERN_ERR, shost,
                             "reconnect attempt %d failed (%d)\n",
                             ++rport->failed_reconnects, res);
                delay = rport->reconnect_delay *
                        min(100, max(1, rport->failed_reconnects - 10));
                if (delay > 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->reconnect_work, delay * HZ);
        }
}

/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
 *
 * To do: add support for scsi-mq in this function.
 */
static int scsi_request_fn_active(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;
        struct request_queue *q;
        int request_fn_active = 0;

        shost_for_each_device(sdev, shost) {
                q = sdev->request_queue;

                spin_lock_irq(q->queue_lock);
                request_fn_active += q->request_fn_active;
                spin_unlock_irq(q->queue_lock);
        }

        return request_fn_active;
}

/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
{
        while (scsi_request_fn_active(shost))
                msleep(20);
}

static void __rport_fail_io_fast(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i;

        lockdep_assert_held(&rport->mutex);

        if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
                return;
        scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

        /* Involve the LLD if possible to terminate all I/O on the rport. */
        i = to_srp_internal(shost->transportt);
        if (i->f->terminate_rport_io) {
                srp_wait_for_queuecommand(shost);
                i->f->terminate_rport_io(rport);
        }
}

/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, fast_io_fail_work);
        struct Scsi_Host *shost = rport_to_shost(rport);

        pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
                dev_name(&rport->dev), dev_name(&shost->shost_gendev));

        mutex_lock(&rport->mutex);
        if (rport->state == SRP_RPORT_BLOCKED)
                __rport_fail_io_fast(rport);
        mutex_unlock(&rport->mutex);
}

/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, dev_loss_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i = to_srp_internal(shost->transportt);

        pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
                dev_name(&rport->dev), dev_name(&shost->shost_gendev));

        mutex_lock(&rport->mutex);
        WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
        scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
        mutex_unlock(&rport->mutex);

        i->f->rport_delete(rport);
}

static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        int delay, fast_io_fail_tmo, dev_loss_tmo;

        lockdep_assert_held(&rport->mutex);

        delay = rport->reconnect_delay;
        fast_io_fail_tmo = rport->fast_io_fail_tmo;
        dev_loss_tmo = rport->dev_loss_tmo;
        pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
                 rport->state);

        if (rport->state == SRP_RPORT_LOST)
                return;
        if (delay > 0)
                queue_delayed_work(system_long_wq, &rport->reconnect_work,
                                   1UL * delay * HZ);
        if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
            srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
                pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
                         rport->state);
                scsi_target_block(&shost->shost_gendev);
                if (fast_io_fail_tmo >= 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->fast_io_fail_work,
                                           1UL * fast_io_fail_tmo * HZ);
                if (dev_loss_tmo >= 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->dev_loss_work,
                                           1UL * dev_loss_tmo * HZ);
        }
}

/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
        mutex_lock(&rport->mutex);
        __srp_start_tl_fail_timers(rport);
        mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);
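
/*
 * A sketch of how an initiator LLD might invoke srp_start_tl_fail_timers()
 * from its transport-error handler (the my_* names are illustrative):
 *
 *      static void my_handle_transport_error(struct my_target *target)
 *      {
 *              srp_start_tl_fail_timers(target->rport);
 *      }
 *
 * ib_srp, for example, calls it when the RDMA channel breaks.
 */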

/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target
 *   port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i = to_srp_internal(shost->transportt);
        struct scsi_device *sdev;
        int res;

        pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
        scsi_target_block(&shost->shost_gendev);
        srp_wait_for_queuecommand(shost);
        res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
        pr_debug("%s (state %d): transport.reconnect() returned %d\n",
                 dev_name(&shost->shost_gendev), rport->state, res);
        if (res == 0) {
                cancel_delayed_work(&rport->fast_io_fail_work);
                cancel_delayed_work(&rport->dev_loss_work);

                rport->failed_reconnects = 0;
                srp_rport_set_state(rport, SRP_RPORT_RUNNING);
                scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
                /*
                 * If the SCSI error handler has offlined one or more devices,
                 * invoking scsi_target_unblock() won't change the state of
                 * these devices into running so do that explicitly.
                 */
                spin_lock_irq(shost->host_lock);
                __shost_for_each_device(sdev, shost)
                        if (sdev->sdev_state == SDEV_OFFLINE)
                                sdev->sdev_state = SDEV_RUNNING;
                spin_unlock_irq(shost->host_lock);
        } else if (rport->state == SRP_RPORT_RUNNING) {
                /*
                 * srp_reconnect_rport() has been invoked with fast_io_fail
                 * and dev_loss off. Mark the port as failed and start the TL
                 * failure timers if these had not yet been started.
                 */
                __rport_fail_io_fast(rport);
                scsi_target_unblock(&shost->shost_gendev,
                                    SDEV_TRANSPORT_OFFLINE);
                __srp_start_tl_fail_timers(rport);
        } else if (rport->state != SRP_RPORT_BLOCKED) {
                scsi_target_unblock(&shost->shost_gendev,
                                    SDEV_TRANSPORT_OFFLINE);
        }
        mutex_unlock(&rport->mutex);

out:
        return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);
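
/*
 * A sketch of how an LLD can drive srp_reconnect_rport() from the SCSI EH;
 * ib_srp's host reset handler, for example, essentially boils down to the
 * following (host_to_target() is an ib_srp helper, shown for illustration):
 *
 *      static int my_eh_host_reset_handler(struct scsi_cmnd *scmd)
 *      {
 *              struct srp_target_port *target =
 *                      host_to_target(scmd->device->host);
 *
 *              return srp_reconnect_rport(target->rport) == 0 ?
 *                      SUCCESS : FAILED;
 *      }
 */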

/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (BLK_EH_NOT_HANDLED).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
        struct srp_internal *i = to_srp_internal(shost->transportt);
        struct srp_rport *rport = shost_to_rport(shost);

        pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
        return rport && rport->fast_io_fail_tmo < 0 &&
                rport->dev_loss_tmo < 0 &&
                i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
                BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}

static void srp_rport_release(struct device *dev)
{
        struct srp_rport *rport = dev_to_rport(dev);

        put_device(dev->parent);
        kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
        return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
                           struct device *dev)
{
        struct Scsi_Host *shost;
        struct srp_internal *i;

        if (!scsi_is_srp_rport(dev))
                return 0;

        shost = dev_to_shost(dev->parent);
        if (!shost->transportt)
                return 0;
        if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
                return 0;

        i = to_srp_internal(shost->transportt);
        return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
        struct Scsi_Host *shost;
        struct srp_internal *i;

        if (!scsi_is_host_device(dev))
                return 0;

        shost = dev_to_shost(dev);
        if (!shost->transportt)
                return 0;
        if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
                return 0;

        i = to_srp_internal(shost->transportt);
        return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
        get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);

/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
        put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);

/**
 * srp_rport_add - add an SRP remote port to the device hierarchy
 * @shost: scsi host the remote port is connected to.
 * @ids: The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
                                struct srp_rport_identifiers *ids)
{
        struct srp_rport *rport;
        struct device *parent = &shost->shost_gendev;
        struct srp_internal *i = to_srp_internal(shost->transportt);
        int id, ret;

        rport = kzalloc(sizeof(*rport), GFP_KERNEL);
        if (!rport)
                return ERR_PTR(-ENOMEM);

        mutex_init(&rport->mutex);

        device_initialize(&rport->dev);

        rport->dev.parent = get_device(parent);
        rport->dev.release = srp_rport_release;

        memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
        rport->roles = ids->roles;

        if (i->f->reconnect)
                rport->reconnect_delay = i->f->reconnect_delay ?
                        *i->f->reconnect_delay : 10;
        INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
        rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
                *i->f->fast_io_fail_tmo : 15;
        rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
        INIT_DELAYED_WORK(&rport->fast_io_fail_work,
                          rport_fast_io_fail_timedout);
        INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

        id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
        dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

        transport_setup_device(&rport->dev);

        ret = device_add(&rport->dev);
        if (ret) {
                transport_destroy_device(&rport->dev);
                put_device(&rport->dev);
                return ERR_PTR(ret);
        }

        transport_add_device(&rport->dev);
        transport_configure_device(&rport->dev);

        return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);
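
/*
 * A sketch of typical srp_rport_add() usage in an initiator LLD (error
 * handling abbreviated; all names except the transport symbols are
 * illustrative):
 *
 *      struct srp_rport_identifiers ids;
 *      struct srp_rport *rport;
 *
 *      memcpy(ids.port_id, target_port_id, sizeof(ids.port_id));
 *      ids.roles = SRP_RPORT_ROLE_TARGET;
 *      rport = srp_rport_add(shost, &ids);
 *      if (IS_ERR(rport))
 *              return PTR_ERR(rport);
 *      rport->lld_data = target;       -- back-pointer for the callbacks
 */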

/**
 * srp_rport_del - remove an SRP remote port
 * @rport: SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
        struct device *dev = &rport->dev;

        transport_remove_device(dev);
        device_del(dev);
        transport_destroy_device(dev);

        put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
        if (scsi_is_srp_rport(dev))
                srp_rport_del(dev_to_rport(dev));
        return 0;
}

/**
 * srp_remove_host - tear down a Scsi_Host's SRP data structures
 * @shost: Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host() for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
        device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 */
void srp_stop_rport_timers(struct srp_rport *rport)
{
        mutex_lock(&rport->mutex);
        if (rport->state == SRP_RPORT_BLOCKED)
                __rport_fail_io_fast(rport);
        srp_rport_set_state(rport, SRP_RPORT_LOST);
        mutex_unlock(&rport->mutex);

        cancel_delayed_work_sync(&rport->reconnect_work);
        cancel_delayed_work_sync(&rport->fast_io_fail_work);
        cancel_delayed_work_sync(&rport->dev_loss_work);
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
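
/*
 * A sketch of the teardown ordering implied by the comment above; the LLD
 * must keep its own references so that rport and shost stay alive while
 * srp_stop_rport_timers() runs:
 *
 *      srp_rport_get(rport);
 *      srp_remove_host(shost);
 *      scsi_remove_host(shost);
 *      srp_stop_rport_timers(rport);
 *      ...
 *      srp_rport_put(rport);
 */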

static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
                                 int result)
{
        struct srp_internal *i = to_srp_internal(shost->transportt);

        return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
}

static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
{
        struct srp_internal *i = to_srp_internal(shost->transportt);

        return i->f->it_nexus_response(shost, nexus, result);
}

/**
 * srp_attach_transport - instantiate SRP transport template
 * @ft: SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
        int count;
        struct srp_internal *i;

        i = kzalloc(sizeof(*i), GFP_KERNEL);
        if (!i)
                return NULL;

        i->t.eh_timed_out = srp_timed_out;

        i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
        i->t.it_nexus_response = srp_it_nexus_response;

        i->t.host_size = sizeof(struct srp_host_attrs);
        i->t.host_attrs.ac.attrs = &i->host_attrs[0];
        i->t.host_attrs.ac.class = &srp_host_class.class;
        i->t.host_attrs.ac.match = srp_host_match;
        i->host_attrs[0] = NULL;
        transport_container_register(&i->t.host_attrs);

        i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
        i->rport_attr_cont.ac.class = &srp_rport_class.class;
        i->rport_attr_cont.ac.match = srp_rport_match;

        count = 0;
        i->rport_attrs[count++] = &dev_attr_port_id;
        i->rport_attrs[count++] = &dev_attr_roles;
        if (ft->has_rport_state) {
                i->rport_attrs[count++] = &dev_attr_state;
                i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
                i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
        }
        if (ft->reconnect) {
                i->rport_attrs[count++] = &dev_attr_reconnect_delay;
                i->rport_attrs[count++] = &dev_attr_failed_reconnects;
        }
        if (ft->rport_delete)
                i->rport_attrs[count++] = &dev_attr_delete;
        i->rport_attrs[count++] = NULL;
        BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

        transport_container_register(&i->rport_attr_cont);

        i->f = ft;

        return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
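
/*
 * A sketch of how an initiator LLD instantiates the template during module
 * init (the my_* names are illustrative; the field names come from
 * struct srp_function_template):
 *
 *      static struct srp_function_template my_srp_ft = {
 *              .has_rport_state        = true,
 *              .reset_timer_if_blocked = true,
 *              .reconnect_delay        = &my_reconnect_delay,
 *              .fast_io_fail_tmo       = &my_fast_io_fail_tmo,
 *              .dev_loss_tmo           = &my_dev_loss_tmo,
 *              .reconnect              = my_rport_reconnect,
 *              .terminate_rport_io     = my_rport_terminate_io,
 *              .rport_delete           = my_rport_delete,
 *      };
 *
 *      my_transport_template = srp_attach_transport(&my_srp_ft);
 *      if (!my_transport_template)
 *              return -ENOMEM;
 */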

/**
 * srp_release_transport - release SRP transport template instance
 * @t: transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
        struct srp_internal *i = to_srp_internal(t);

        transport_container_unregister(&i->t.host_attrs);
        transport_container_unregister(&i->rport_attr_cont);

        kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
        int ret;

        ret = transport_class_register(&srp_host_class);
        if (ret)
                return ret;
        ret = transport_class_register(&srp_rport_class);
        if (ret)
                goto unregister_host_class;

        return 0;
unregister_host_class:
        transport_class_unregister(&srp_host_class);
        return ret;
}

static void __exit srp_transport_exit(void)
{
        transport_class_unregister(&srp_host_class);
        transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);