/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

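/*
 * Illustrative sketch, not from the original file: every dax operation runs
 * inside an SRCU read-side section so that kill_dax() can wait for it to
 * finish before tearing the device down; dax_dev and do_dax_op() are
 * hypothetical:
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		do_dax_op(dax_dev);
 *	dax_read_unlock(id);
 */
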
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

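/*
 * Illustrative sketch, not from the original file: a filesystem that knows
 * the partition-relative sector of a block can derive the pgoff to hand to
 * dax_direct_access(); bdev and sector are hypothetical here:
 *
 *	pgoff_t pgoff;
 *
 *	if (bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff))
 *		return -EINVAL;	(partition start or size not page-aligned)
 */
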
#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	struct request_queue *q;
	pgoff_t pgoff;
	int err, id;
	void *kaddr;
	pfn_t pfn;
	long len;
	char buf[BDEVNAME_SIZE];

	if (blocksize != PAGE_SIZE) {
		pr_debug("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	put_dax(dax_dev);

	if (len < 1) {
		pr_debug("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif /* CONFIG_BLOCK */

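/*
 * Illustrative sketch, not from the original file: a filesystem mount path
 * typically gates its "-o dax" option on this check via the
 * bdev_dax_supported() wrapper; sbi and MY_MOUNT_DAX are hypothetical:
 *
 *	if (sbi->options & MY_MOUNT_DAX &&
 *	    !bdev_dax_supported(sb->s_bdev, sb->s_blocksize))
 *		return -EINVAL;
 */
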
enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
};

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
				&dax_dev->flags));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else if (write_cache)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;
	/* drop the reference taken by the liveness lookup above */
	put_dax(dax_dev);

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

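/*
 * Illustrative sketch, not from the original file: a dax-capable block
 * driver exposes the write_cache knob by adding this exported group to the
 * attribute groups of the disk device it registers; the array name is
 * hypothetical:
 *
 *	static const struct attribute_group *my_disk_attribute_groups[] = {
 *		&dax_attribute_group,
 *		NULL,
 *	};
 */
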
/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	/*
	 * The device driver is allowed to sleep, in order to make the
	 * memory directly accessible.
	 */
	might_sleep();

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);

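/*
 * Illustrative sketch, not from the original file: a caller that wants a
 * kernel mapping for one page at pgoff resolves it under the dax read lock
 * and only dereferences kaddr while the lock is held; src is hypothetical:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long nr;
 *	int id;
 *
 *	id = dax_read_lock();
 *	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	if (nr >= 1)
 *		memcpy(kaddr, src, PAGE_SIZE);
 *	dax_read_unlock(id);
 */
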
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_alive(dax_dev)))
		return;

	if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);

	dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);

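/*
 * Illustrative sketch, not from the original file: the expected driver
 * shutdown order is kill_dax() to fail new operations and drain in-flight
 * ones, then put_dax() to drop the final reference:
 *
 *	kill_dax(dax_dev);	(no new ops; existing ops have completed)
 *	put_dax(dax_dev);	(final iput() frees via the rcu callback)
 */
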
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);
	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);

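/*
 * Illustrative sketch, not from the original file: a driver publishing a
 * dax_device for the memory range it manages; my_dax_ops, my_direct_access,
 * my_copy_from_iter and my_private are hypothetical, and a NULL host is
 * valid for drivers that never need lookup by name:
 *
 *	static const struct dax_operations my_dax_ops = {
 *		.direct_access = my_direct_access,
 *		.copy_from_iter = my_copy_from_iter,
 *	};
 *
 *	dax_dev = alloc_dax(my_private, disk->disk_name, &my_dax_ops);
 *	if (!dax_dev)
 *		return -ENOMEM;
 */
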
void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

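/*
 * Illustrative sketch, not from the original file: the lookup takes a
 * reference via igrab(), so every successful dax_get_by_host() must be
 * paired with a put_dax() when the caller is done with the device:
 *
 *	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 *	if (!dax_dev)
 *		return -ENODEV;
 *	(use dax_dev under dax_read_lock(), then)
 *	put_dax(dax_dev);
 */
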
/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int __dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_fs_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_fs_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void __dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_fs_type);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_fs_init(void)
{
	int rc;

	rc = __dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		__dax_fs_exit();
	return rc;
}

static void __exit dax_fs_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	__dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_fs_init);
module_exit(dax_fs_exit);