/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>
#define DRV_MODULE_NAME		"sunvdc"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.2"
#define DRV_MODULE_RELDATE	"November 24, 2014"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define VDC_TX_RING_SIZE	512

#define WAITING_FOR_LINK_UP	0x01
#define WAITING_FOR_TX_SPACE	0x02
#define WAITING_FOR_GEN_CMD	0x04
#define WAITING_FOR_ANY		-1

#define VDC_MAX_RETRIES		10

static struct workqueue_struct *sunvdc_wq;
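
/* Each slot in the TX descriptor ring has a matching vdc_req_entry so a
 * completed descriptor can be tied back to the block request it carried.
 */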
struct vdc_req_entry {
	struct request		*req;
};
struct vdc_port {
	struct vio_driver_state	vio;

	struct gendisk		*disk;

	struct vdc_completion	*cmp;

	u64			req_id;
	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];

	unsigned long		ring_cookies;

	u64			max_xfer_size;
	u32			vdisk_block_size;

	u64			ldc_timeout;
	struct timer_list	ldc_reset_timer;
	struct work_struct	ldc_reset_work;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64			operations;
	u64			vdisk_size;
	u8			vdisk_type;
	u8			vdisk_mtype;

	char			disk_name[32];
};
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
static void vdc_ldc_reset_timer(unsigned long _arg);
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}
/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 1 },
	{ .major = 1, .minor = 0 },
};
static inline int vdc_version_supported(struct vdc_port *port,
					u16 major, u16 minor)
{
	return port->vio.ver.major == major && port->vio.ver.minor >= minor;
}
#define VDCBLK_NAME	"vdisk"
static int vdc_major;
#define PARTITION_SHIFT	3
static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
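
/* The virtual disk has no real CHS geometry; synthesize one from the
 * capacity so HDIO_GETGEO has something sensible to report.
 */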
static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t nsect = get_capacity(disk);
	sector_t cylinders = nsect;

	geo->heads = 0xff;
	geo->sectors = 0x3f;
	sector_div(cylinders, geo->heads * geo->sectors);
	geo->cylinders = cylinders;
	if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
		geo->cylinders = 0xffff;

	return 0;
}
/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
 * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
 * Needed to be able to install inside an ldom from an iso image.
 */
static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned command, unsigned long argument)
{
	int i;
	struct gendisk *disk;

	switch (command) {
	case CDROMMULTISESSION:
		pr_debug(PFX "Multisession CDs not supported\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY:
		disk = bdev->bd_disk;

		if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
			return 0;
		return -EINVAL;

	default:
		pr_debug(PFX "ioctl %08x not supported\n", command);
		return -EINVAL;
	}
}
static const struct block_device_operations vdc_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= vdc_getgeo,
	.ioctl		= vdc_ioctl,
};
static void vdc_blk_queue_start(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* restart blk queue when ring is half emptied. also called after
	 * handshake completes, so check for initial handshake before we've
	 * allocated a disk.
	 */
	if (port->disk && blk_queue_stopped(port->disk->queue) &&
	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
		blk_start_queue(port->disk->queue);
	}
}
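
/* Wake up whoever is sleeping on vio->cmp, but only if they are waiting
 * for the event named by @waiting_for (or for anything at all when
 * WAITING_FOR_ANY is passed).
 */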
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}
static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);

	del_timer(&port->ldc_reset_timer);
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
	vdc_blk_queue_start(port);
}
static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}
static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}
static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_type = pkt->vdisk_type;
		if (vdc_version_supported(port, 1, 1)) {
			port->vdisk_size = pkt->vdisk_size;
			port->vdisk_mtype = pkt->vdisk_mtype;
		}
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;

		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}
static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
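
/* Retire one descriptor the server has marked DONE: unmap its LDC
 * cookies, advance the consumer index, and complete the block request
 * (or the pending generic command) that was attached to it.
 */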
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = vio_dring_next(dr, index);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);

	vdc_blk_queue_start(port);
}
static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}
static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}
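
/* LDC event callback.  Link state changes are handed to the VIO core
 * (resets additionally schedule ldc_reset_work), while DATA_READY drains
 * incoming messages and dispatches ACK/NACK and control packets.
 */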
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET)) {
		vio_link_state_change(vio, event);
		queue_work(sunvdc_wq, &port->ldc_reset_work);
		goto out;
	}

	if (unlikely(event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		goto out;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		pr_warn(PFX "Unexpected LDC event %d\n", event);
		goto out;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
out:
	spin_unlock_irqrestore(&vio->lock, flags);
}
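
/* Notify the server that new descriptors are ready, retrying with
 * exponential backoff while the LDC channel keeps returning -EAGAIN.
 */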
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= dr->prod,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VDC_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	if (err == -ENOTCONN)
		vdc_ldc_reset(port);
	return err;
}
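
/* Map one block request into the current TX descriptor: build a
 * scatterlist, export it over LDC, fill in the vio_disk_desc and kick
 * the server via __vdc_tx_trigger().
 */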
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
	}

	return err;
}
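
/* Legacy request_fn entry point: issue requests until the TX ring fills
 * up or a send fails, at which point the queue is stopped and restarted
 * later from vdc_blk_queue_start().
 */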
static void do_vdc_request(struct request_queue *rq)
{
	struct request *req;

	while ((req = blk_peek_request(rq)) != NULL) {
		struct vdc_port *port;
		struct vio_dring_state *dr;

		port = req->rq_disk->private_data;
		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
		if (unlikely(vdc_tx_dring_avail(dr) < 1))
			goto wait;

		blk_start_request(req);

		if (__send_request(req) < 0) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}
	}
}
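
/* Issue a synchronous, non-block-I/O disk operation (VTOC, geometry,
 * write-cache state, etc.) through a single bounce buffer mapped over
 * LDC, and sleep until the server ACKs the descriptor.
 */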
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	if (!(((u64)1 << (u64)op) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_BREAD:
	case VD_OP_BWRITE:
	default:
		return -EINVAL;

	case VD_OP_FLUSH:
		op_len = 0;
		map_perm = 0;
		break;

	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_GET_EFI:
	case VD_OP_SET_EFI:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* XXX If we want to use this code generically we have to
	 * XXX handle TX ring exhaustion etc.
	 */
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}
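
/* Allocate the exported descriptor ring shared with the server; each
 * entry carries enough trans cookies for a maximal transfer.
 */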
static int vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}
static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}
static int vdc_port_up(struct vdc_port *port)
{
	struct vio_completion comp;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);
	wait_for_completion(&comp.com);
	return comp.err;
}
static void vdc_port_down(struct vdc_port *port)
{
	ldc_disconnect(port->vio.lp);
	ldc_unbind(port->vio.lp);
	vdc_free_tx_ring(port);
	vio_ldc_free(&port->vio);
}
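
/* Bring the port up, learn the disk size (from the handshake on v1.1+,
 * otherwise via VD_OP_GET_DISKGEOM) and register the gendisk and its
 * request queue.
 */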
static int probe_disk(struct vdc_port *port)
{
	struct request_queue *q;
	struct gendisk *g;
	int err;

	err = vdc_port_up(port);
	if (err)
		return err;

	if (vdc_version_supported(port, 1, 1)) {
		/* vdisk_size should be set during the handshake, if it wasn't
		 * then the underlying disk is reserved by another system
		 */
		if (port->vdisk_size == -1)
			return -ENODEV;
	} else {
		struct vio_disk_geom geom;

		err = generic_request(port, VD_OP_GET_DISKGEOM,
				      &geom, sizeof(geom));
		if (err < 0) {
			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
			       "error %d\n", err);
			return err;
		}
		port->vdisk_size = ((u64)geom.num_cyl *
				    (u64)geom.num_hd *
				    (u64)geom.num_sec);
	}

	q = blk_init_queue(do_vdc_request, &port->vio.lock);
	if (!q) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return -ENOMEM;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_max_segment_size(q, PAGE_SIZE);

	blk_queue_max_segments(q, port->ring_cookies);
	blk_queue_max_hw_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;

	set_capacity(g, port->vdisk_size);

	if (vdc_version_supported(port, 1, 1)) {
		switch (port->vdisk_mtype) {
		case VD_MEDIA_TYPE_CD:
			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_DVD:
			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_FIXED:
			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
			break;
		}
	}

	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
		g->disk_name,
		port->vdisk_size, (port->vdisk_size >> (20 - 9)),
		port->vio.ver.major, port->vio.ver.minor);

	device_add_disk(&port->vio.vdev->dev, g);

	return 0;
}
static struct ldc_channel_config vdc_ldc_cfg = {
	.event_cb	= vdc_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};
static struct vio_driver_ops vdc_vio_ops = {
	.send_attr		= vdc_send_attr,
	.handle_attr		= vdc_handle_attr,
	.handshake_complete	= vdc_handshake_complete,
};
static void print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}
static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vdc_port *port;
	int err;
	const u64 *ldc_timeout;

	print_version();

	hp = mdesc_grab();

	err = -ENODEV;
	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
		       vdev->dev_no);
		goto err_out_release_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
		goto err_out_release_mdesc;
	}

	if (vdev->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + ((int)vdev->dev_no / 26) - 1,
			 'a' + ((int)vdev->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
	port->vdisk_size = -1;

	/* Actual wall time may be double due to do_generic_file_read() doing
	 * a readahead I/O first, and once that fails it will try to read a
	 * single page.
	 */
	ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
	port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
	setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
		    (unsigned long)port);
	INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
	if (err)
		goto err_out_free_port;

	port->vdisk_block_size = 512;
	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
	port->ring_cookies = ((port->max_xfer_size *
			       port->vdisk_block_size) / PAGE_SIZE) + 2;

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
	if (err)
		goto err_out_free_ldc;

	err = probe_disk(port);
	if (err)
		goto err_out_free_tx_ring;

	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);

	return 0;

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_release_mdesc:
	mdesc_release(hp);
	return err;
}
static int vdc_port_remove(struct vio_dev *vdev)
{
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		unsigned long flags;

		spin_lock_irqsave(&port->vio.lock, flags);
		blk_stop_queue(port->disk->queue);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		flush_work(&port->ldc_reset_work);
		del_timer_sync(&port->ldc_reset_timer);
		del_timer_sync(&port->vio.timer);

		del_gendisk(port->disk);
		blk_cleanup_queue(port->disk->queue);
		put_disk(port->disk);
		port->disk = NULL;

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}
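
/* After an LDC reset, descriptors that were posted but never completed
 * are unmapped and their block requests put back on the queue.
 */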
static void vdc_requeue_inflight(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	int idx;

	for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
		struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
		struct vdc_req_entry *rqe = &port->rq_arr[idx];
		struct request *req;

		ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
		desc->hdr.state = VIO_DESC_FREE;
		dr->cons = vio_dring_next(dr, idx);

		req = rqe->req;
		if (req == NULL) {
			vdc_end_special(port, desc);
			continue;
		}

		rqe->req = NULL;
		blk_requeue_request(port->disk->queue, req);
	}
}
static void vdc_queue_drain(struct vdc_port *port)
{
	struct request *req;

	while ((req = blk_fetch_request(port->disk->queue)) != NULL)
		__blk_end_request_all(req, -EIO);
}
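
/* If the channel stays down past the MD-supplied vdc-timeout, give up on
 * queued I/O and fail it rather than blocking forever.
 */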
static void vdc_ldc_reset_timer(unsigned long _arg)
{
	struct vdc_port *port = (struct vdc_port *) _arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;

	spin_lock_irqsave(&vio->lock, flags);
	if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
		pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
			port->disk_name, port->ldc_timeout);
		vdc_queue_drain(port);
		vdc_blk_queue_start(port);
	}
	spin_unlock_irqrestore(&vio->lock, flags);
}
static void vdc_ldc_reset_work(struct work_struct *work)
{
	struct vdc_port *port;
	struct vio_driver_state *vio;
	unsigned long flags;

	port = container_of(work, struct vdc_port, ldc_reset_work);
	vio = &port->vio;

	spin_lock_irqsave(&vio->lock, flags);
	vdc_ldc_reset(port);
	spin_unlock_irqrestore(&vio->lock, flags);
}
static void vdc_ldc_reset(struct vdc_port *port)
{
	int err;

	assert_spin_locked(&port->vio.lock);

	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
	blk_stop_queue(port->disk->queue);
	vdc_requeue_inflight(port);
	vdc_port_down(port);

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err) {
		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
		return;
	}

	err = vdc_alloc_tx_ring(port);
	if (err) {
		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
		goto err_free_ldc;
	}

	if (port->ldc_timeout)
		mod_timer(&port->ldc_reset_timer,
			  round_jiffies(jiffies + HZ * port->ldc_timeout));
	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
	return;

err_free_ldc:
	vio_ldc_free(&port->vio);
}
static const struct vio_device_id vdc_port_match[] = {
	{
		.type = "vdc-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);
static struct vio_driver vdc_port_driver = {
	.id_table	= vdc_port_match,
	.probe		= vdc_port_probe,
	.remove		= vdc_port_remove,
	.name		= "vdc_port",
};
static int __init vdc_init(void)
{
	int err;

	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
	if (!sunvdc_wq)
		return -ENOMEM;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_free_wq;

	vdc_major = err;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_blkdev;

	return 0;

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_free_wq:
	destroy_workqueue(sunvdc_wq);
	return err;
}
static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	destroy_workqueue(sunvdc_wq);
}
module_init(vdc_init);
module_exit(vdc_exit);