/* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */
7 #include <linux/kernel.h>
8 #include <linux/hdreg.h>
9 #include <linux/blk-mq.h>
10 #include <linux/backing-dev.h>
12 #include <linux/ioctl.h>
13 #include <linux/slab.h>
14 #include <linux/ratelimit.h>
15 #include <linux/netdevice.h>
16 #include <linux/mutex.h>
17 #include <linux/export.h>
18 #include <linux/moduleparam.h>
19 #include <linux/debugfs.h>
/* Serializes open() against device teardown (see aoeblk_open). */
static DEFINE_MUTEX(aoeblk_mutex);
/* Slab cache backing each device's buf mempool (created in aoeblk_init). */
static struct kmem_cache *buf_pool_cache;
/* Root "aoe" debugfs directory; NULL until aoeblk_init creates it. */
static struct dentry *aoe_debugfs_dir;

/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
	"When nonzero, set the maximum number of sectors per I/O request");
33 static ssize_t aoedisk_show_state(struct device *dev,
34 struct device_attribute *attr, char *page)
36 struct gendisk *disk = dev_to_disk(dev);
37 struct aoedev *d = disk->private_data;
39 return sysfs_emit(page, "%s%s\n",
40 (d->flags & DEVFL_UP) ? "up" : "down",
41 (d->flags & DEVFL_KICKME) ? ",kickme" :
42 (d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
43 /* I'd rather see nopen exported so we can ditch closewait */
45 static ssize_t aoedisk_show_mac(struct device *dev,
46 struct device_attribute *attr, char *page)
48 struct gendisk *disk = dev_to_disk(dev);
49 struct aoedev *d = disk->private_data;
50 struct aoetgt *t = d->targets[0];
53 return sysfs_emit(page, "none\n");
54 return sysfs_emit(page, "%pm\n", t->addr);
56 static ssize_t aoedisk_show_netif(struct device *dev,
57 struct device_attribute *attr, char *page)
59 struct gendisk *disk = dev_to_disk(dev);
60 struct aoedev *d = disk->private_data;
61 struct net_device *nds[8], **nd, **nnd, **ne;
62 struct aoetgt **t, **te;
63 struct aoeif *ifp, *e;
66 memset(nds, 0, sizeof nds);
68 ne = nd + ARRAY_SIZE(nds);
71 for (; t < te && *t; t++) {
74 for (; ifp < e && ifp->nd; ifp++) {
75 for (nnd = nds; nnd < nd; nnd++)
78 if (nnd == nd && nd != ne)
86 return sysfs_emit(page, "none\n");
87 for (p = page; nd < ne; nd++)
88 p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
89 p == page ? "" : ",", (*nd)->name);
90 p += scnprintf(p, PAGE_SIZE - (p-page), "\n");
93 /* firmware version */
94 static ssize_t aoedisk_show_fwver(struct device *dev,
95 struct device_attribute *attr, char *page)
97 struct gendisk *disk = dev_to_disk(dev);
98 struct aoedev *d = disk->private_data;
100 return sysfs_emit(page, "0x%04x\n", (unsigned int) d->fw_ver);
102 static ssize_t aoedisk_show_payload(struct device *dev,
103 struct device_attribute *attr, char *page)
105 struct gendisk *disk = dev_to_disk(dev);
106 struct aoedev *d = disk->private_data;
108 return sysfs_emit(page, "%lu\n", d->maxbcnt);
111 static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
114 struct aoetgt **t, **te;
115 struct aoeif *ifp, *ife;
120 seq_printf(s, "rttavg: %d rttdev: %d\n",
121 d->rttavg >> RTTSCALE,
122 d->rttdev >> RTTDSCALE);
123 seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
124 seq_printf(s, "kicked: %ld\n", d->kicked);
125 seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
126 seq_printf(s, "ref: %ld\n", d->ref);
128 spin_lock_irqsave(&d->lock, flags);
130 te = t + d->ntargets;
131 for (; t < te && *t; t++) {
133 seq_printf(s, "falloc: %ld\n", (*t)->falloc);
134 seq_printf(s, "ffree: %p\n",
135 list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
136 seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
137 (*t)->maxout, (*t)->nframes);
138 seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
139 seq_printf(s, "\ttaint:%d\n", (*t)->taint);
140 seq_printf(s, "\tr:%d\n", (*t)->rpkts);
141 seq_printf(s, "\tw:%d\n", (*t)->wpkts);
143 ife = ifp + ARRAY_SIZE((*t)->ifs);
144 for (; ifp->nd && ifp < ife; ifp++) {
145 seq_printf(s, "%c%s", c, ifp->nd->name);
150 spin_unlock_irqrestore(&d->lock, flags);
155 static int aoe_debugfs_open(struct inode *inode, struct file *file)
157 return single_open(file, aoedisk_debugfs_show, inode->i_private);
160 static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
161 static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
162 static DEVICE_ATTR(netif, 0444, aoedisk_show_netif, NULL);
163 static struct device_attribute dev_attr_firmware_version = {
164 .attr = { .name = "firmware-version", .mode = 0444 },
165 .show = aoedisk_show_fwver,
167 static DEVICE_ATTR(payload, 0444, aoedisk_show_payload, NULL);
169 static struct attribute *aoe_attrs[] = {
170 &dev_attr_state.attr,
172 &dev_attr_netif.attr,
173 &dev_attr_firmware_version.attr,
174 &dev_attr_payload.attr,
178 static const struct attribute_group aoe_attr_group = {
182 static const struct attribute_group *aoe_attr_groups[] = {
187 static const struct file_operations aoe_debugfs_fops = {
188 .open = aoe_debugfs_open,
191 .release = single_release,
195 aoedisk_add_debugfs(struct aoedev *d)
199 if (aoe_debugfs_dir == NULL)
201 p = strchr(d->gd->disk_name, '/');
203 p = d->gd->disk_name;
207 d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
211 aoedisk_rm_debugfs(struct aoedev *d)
213 debugfs_remove(d->debugfs);
218 aoeblk_open(struct block_device *bdev, fmode_t mode)
220 struct aoedev *d = bdev->bd_disk->private_data;
223 if (!virt_addr_valid(d)) {
224 pr_crit("aoe: invalid device pointer in %s\n",
229 if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
232 mutex_lock(&aoeblk_mutex);
233 spin_lock_irqsave(&d->lock, flags);
234 if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
236 spin_unlock_irqrestore(&d->lock, flags);
237 mutex_unlock(&aoeblk_mutex);
240 spin_unlock_irqrestore(&d->lock, flags);
241 mutex_unlock(&aoeblk_mutex);
246 aoeblk_release(struct gendisk *disk, fmode_t mode)
248 struct aoedev *d = disk->private_data;
251 spin_lock_irqsave(&d->lock, flags);
253 if (--d->nopen == 0) {
254 spin_unlock_irqrestore(&d->lock, flags);
255 aoecmd_cfg(d->aoemajor, d->aoeminor);
258 spin_unlock_irqrestore(&d->lock, flags);
261 static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
262 const struct blk_mq_queue_data *bd)
264 struct aoedev *d = hctx->queue->queuedata;
266 spin_lock_irq(&d->lock);
268 if ((d->flags & DEVFL_UP) == 0) {
269 pr_info_ratelimited("aoe: device %ld.%d is not up\n",
270 d->aoemajor, d->aoeminor);
271 spin_unlock_irq(&d->lock);
272 blk_mq_start_request(bd->rq);
273 return BLK_STS_IOERR;
276 list_add_tail(&bd->rq->queuelist, &d->rq_list);
278 spin_unlock_irq(&d->lock);
283 aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
285 struct aoedev *d = bdev->bd_disk->private_data;
287 if ((d->flags & DEVFL_UP) == 0) {
288 printk(KERN_ERR "aoe: disk not up\n");
292 geo->cylinders = d->geo.cylinders;
293 geo->heads = d->geo.heads;
294 geo->sectors = d->geo.sectors;
299 aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
306 d = bdev->bd_disk->private_data;
307 if ((d->flags & DEVFL_UP) == 0) {
308 pr_err("aoe: disk not up\n");
312 if (cmd == HDIO_GET_IDENTITY) {
313 if (!copy_to_user((void __user *) arg, &d->ident,
319 /* udev calls scsi_id, which uses SG_IO, resulting in noise */
321 pr_info("aoe: unknown ioctl 0x%x\n", cmd);
326 static const struct block_device_operations aoe_bdops = {
328 .release = aoeblk_release,
329 .ioctl = aoeblk_ioctl,
330 .compat_ioctl = blkdev_compat_ptr_ioctl,
331 .getgeo = aoeblk_getgeo,
332 .owner = THIS_MODULE,
335 static const struct blk_mq_ops aoeblk_mq_ops = {
336 .queue_rq = aoeblk_queue_rq,
339 /* blk_mq_alloc_disk and add_disk can sleep */
341 aoeblk_gdalloc(void *vp)
343 struct aoedev *d = vp;
346 struct blk_mq_tag_set *set;
351 spin_lock_irqsave(&d->lock, flags);
352 if (d->flags & DEVFL_GDALLOC
353 && !(d->flags & DEVFL_TKILL)
354 && !(d->flags & DEVFL_GD_NOW))
355 d->flags |= DEVFL_GD_NOW;
358 spin_unlock_irqrestore(&d->lock, flags);
362 mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
365 printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
366 d->aoemajor, d->aoeminor);
371 set->ops = &aoeblk_mq_ops;
372 set->cmd_size = sizeof(struct aoe_req);
373 set->nr_hw_queues = 1;
374 set->queue_depth = 128;
375 set->numa_node = NUMA_NO_NODE;
376 set->flags = BLK_MQ_F_SHOULD_MERGE;
377 err = blk_mq_alloc_tag_set(set);
379 pr_err("aoe: cannot allocate tag set for %ld.%d\n",
380 d->aoemajor, d->aoeminor);
384 gd = blk_mq_alloc_disk(set, d);
386 pr_err("aoe: cannot allocate block queue for %ld.%d\n",
387 d->aoemajor, d->aoeminor);
391 spin_lock_irqsave(&d->lock, flags);
392 WARN_ON(!(d->flags & DEVFL_GD_NOW));
393 WARN_ON(!(d->flags & DEVFL_GDALLOC));
394 WARN_ON(d->flags & DEVFL_TKILL);
396 WARN_ON(d->flags & DEVFL_UP);
397 blk_queue_max_hw_sectors(gd->queue, BLK_DEF_MAX_SECTORS);
398 blk_queue_io_opt(gd->queue, SZ_2M);
403 blk_queue_max_hw_sectors(gd->queue, aoe_maxsectors);
404 gd->major = AOE_MAJOR;
405 gd->first_minor = d->sysminor;
406 gd->minors = AOE_PARTITIONS;
407 gd->fops = &aoe_bdops;
408 gd->private_data = d;
409 set_capacity(gd, d->ssize);
410 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
411 d->aoemajor, d->aoeminor);
413 d->flags &= ~DEVFL_GDALLOC;
414 d->flags |= DEVFL_UP;
416 spin_unlock_irqrestore(&d->lock, flags);
418 err = device_add_disk(NULL, gd, aoe_attr_groups);
420 goto out_disk_cleanup;
421 aoedisk_add_debugfs(d);
423 spin_lock_irqsave(&d->lock, flags);
424 WARN_ON(!(d->flags & DEVFL_GD_NOW));
425 d->flags &= ~DEVFL_GD_NOW;
426 spin_unlock_irqrestore(&d->lock, flags);
430 blk_cleanup_disk(gd);
432 blk_mq_free_tag_set(set);
436 spin_lock_irqsave(&d->lock, flags);
437 d->flags &= ~DEVFL_GD_NOW;
438 queue_work(aoe_wq, &d->work);
439 spin_unlock_irqrestore(&d->lock, flags);
445 debugfs_remove_recursive(aoe_debugfs_dir);
446 aoe_debugfs_dir = NULL;
447 kmem_cache_destroy(buf_pool_cache);
453 buf_pool_cache = kmem_cache_create("aoe_bufs",
456 if (buf_pool_cache == NULL)
458 aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);