1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
4 * Copyright (C) 2014 Red Hat, Inc.
5 * Copyright (C) 2015 Arrikto, Inc.
6 * Copyright (C) 2017 Chinamobile, Inc.
9 #include <linux/spinlock.h>
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/kernel.h>
13 #include <linux/timer.h>
14 #include <linux/parser.h>
15 #include <linux/vmalloc.h>
16 #include <linux/uio_driver.h>
17 #include <linux/radix-tree.h>
18 #include <linux/stringify.h>
19 #include <linux/bitops.h>
20 #include <linux/highmem.h>
21 #include <linux/configfs.h>
22 #include <linux/mutex.h>
23 #include <linux/workqueue.h>
24 #include <net/genetlink.h>
25 #include <scsi/scsi_common.h>
26 #include <scsi/scsi_proto.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/target_core_backend.h>
31 #include <linux/target_core_user.h>
38 * Define a shared-memory interface for LIO to pass SCSI commands and
39 * data to userspace for processing. This allows backends that are
40 * too complex for in-kernel support to be implemented in userspace.
42 * It uses the UIO framework to do a lot of the device-creation and
43 * introspection work for us.
45 * See the .h file for how the ring is laid out. Note that while the
46 * command ring is defined, the particulars of the data area are
47 * not. Offset values in the command entry point to other locations
48 * internal to the mmap-ed area. There is separate space outside the
49 * command ring for data buffers. This leaves maximum flexibility for
50 * moving buffer allocations, or even page flipping or other
51 * allocation techniques, without altering the command ring layout.
54 * The user process must be assumed to be malicious. There's no way to
55 * prevent it breaking the command ring protocol if it wants, but in
56 * order to prevent other issues we must only ever read *data* from
57 * the shared memory area, not offsets or sizes. This applies to
58 * command ring entries as well as the mailbox. Extra code needed for
59 * this may have a 'UAM' comment.
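 *
 * Illustrative sketch (not part of this driver) of the loop a userspace
 * handler is expected to run against the mmap()ed region; dev_fd, map_len
 * and handle_one_cmd() are hypothetical names:
 *
 *	struct tcmu_mailbox *mb = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
 *				       MAP_SHARED, dev_fd, 0);
 *	uint32_t event;
 *
 *	while (read(dev_fd, &event, sizeof(event)) == sizeof(event)) {
 *		while (mb->cmd_tail != mb->cmd_head) {
 *			struct tcmu_cmd_entry *ent =
 *				(void *)mb + mb->cmdr_off + mb->cmd_tail;
 *
 *			handle_one_cmd(ent);	// fill rsp, data via req.iov[]
 *			mb->cmd_tail = (mb->cmd_tail +
 *					tcmu_hdr_get_len(ent->hdr.len_op)) %
 *				       mb->cmdr_size;
 *		}
 *		write(dev_fd, &event, sizeof(event)); // kicks tcmu_irqcontrol()
 *	}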
62 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
64 /* For the cmd area, the size is fixed at 8MB */
65 #define CMDR_SIZE (8 * 1024 * 1024)
68 * For the data area, the block size is PAGE_SIZE and
69 * the total size is 256K * PAGE_SIZE.
71 #define DATA_BLOCK_SIZE PAGE_SIZE
72 #define DATA_BLOCK_SHIFT PAGE_SHIFT
73 #define DATA_BLOCK_BITS_DEF (256 * 1024)
75 #define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
76 #define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
79 * Default number of global data blocks (512K * PAGE_SIZE)
80 * above which the unmap thread will be started.
82 #define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
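/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE): DATA_BLOCK_SHIFT
 * is then 12, so TCMU_MBS_TO_BLOCKS(1) == 1 << (20 - 12) == 256 blocks per MB.
 * DATA_BLOCK_BITS_DEF (256K blocks) therefore corresponds to a 1 GB per-device
 * data area, and TCMU_GLOBAL_MAX_BLOCKS_DEF (512K blocks) to a 2 GB cap
 * shared across all devices.
 */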
84 static u8 tcmu_kern_cmd_reply_supported;
85 static u8 tcmu_netlink_blocked;
87 static struct device *tcmu_root_device;
93 #define TCMU_CONFIG_LEN 256
95 static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
96 static LIST_HEAD(tcmu_nl_cmd_list);
101 /* wake up thread waiting for reply */
102 struct completion complete;
103 struct list_head nl_list;
104 struct tcmu_dev *udev;
110 struct list_head node;
113 struct se_device se_dev;
118 #define TCMU_DEV_BIT_OPEN 0
119 #define TCMU_DEV_BIT_BROKEN 1
120 #define TCMU_DEV_BIT_BLOCKED 2
123 struct uio_info uio_info;
127 struct tcmu_mailbox *mb_addr;
130 u32 cmdr_last_cleaned;
131 /* Offset of data area from start of mb */
132 /* Must add data_off and mb_addr to get the address */
138 struct mutex cmdr_lock;
139 struct list_head qfull_queue;
143 unsigned long *data_bitmap;
144 struct radix_tree_root data_blocks;
148 struct timer_list cmd_timer;
149 unsigned int cmd_time_out;
150 struct list_head inflight_queue;
152 struct timer_list qfull_timer;
155 struct list_head timedout_entry;
157 struct tcmu_nl_cmd curr_nl_cmd;
159 char dev_config[TCMU_CONFIG_LEN];
161 int nl_reply_supported;
164 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
166 #define CMDR_OFF sizeof(struct tcmu_mailbox)
169 struct se_cmd *se_cmd;
170 struct tcmu_dev *tcmu_dev;
171 struct list_head queue_entry;
175 /* Can't use se_cmd when cleaning up expired cmds, because if
176 cmd has been completed then accessing se_cmd is off limits */
181 unsigned long deadline;
183 #define TCMU_CMD_BIT_EXPIRED 0
184 #define TCMU_CMD_BIT_INFLIGHT 1
188 * To avoid deadlock, the mutex lock order should always be:
190 * mutex_lock(&root_udev_mutex);
192 * mutex_lock(&tcmu_dev->cmdr_lock);
193 * mutex_unlock(&tcmu_dev->cmdr_lock);
195 * mutex_unlock(&root_udev_mutex);
197 static DEFINE_MUTEX(root_udev_mutex);
198 static LIST_HEAD(root_udev);
200 static DEFINE_SPINLOCK(timed_out_udevs_lock);
201 static LIST_HEAD(timed_out_udevs);
203 static struct kmem_cache *tcmu_cmd_cache;
205 static atomic_t global_db_count = ATOMIC_INIT(0);
206 static struct delayed_work tcmu_unmap_work;
207 static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
209 static int tcmu_set_global_max_data_area(const char *str,
210 const struct kernel_param *kp)
212 int ret, max_area_mb;
214 ret = kstrtoint(str, 10, &max_area_mb);
218 if (max_area_mb <= 0) {
219 pr_err("global_max_data_area must be larger than 0.\n");
223 tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
224 if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
225 schedule_delayed_work(&tcmu_unmap_work, 0);
227 cancel_delayed_work_sync(&tcmu_unmap_work);
232 static int tcmu_get_global_max_data_area(char *buffer,
233 const struct kernel_param *kp)
235 return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
238 static const struct kernel_param_ops tcmu_global_max_data_area_op = {
239 .set = tcmu_set_global_max_data_area,
240 .get = tcmu_get_global_max_data_area,
243 module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
245 MODULE_PARM_DESC(global_max_data_area_mb,
246 "Max MBs allowed to be allocated to all the tcmu device's "
249 static int tcmu_get_block_netlink(char *buffer,
250 const struct kernel_param *kp)
252 return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
253 "blocked" : "unblocked");
256 static int tcmu_set_block_netlink(const char *str,
257 const struct kernel_param *kp)
262 ret = kstrtou8(str, 0, &val);
267 pr_err("Invalid block netlink value %u\n", val);
271 tcmu_netlink_blocked = val;
275 static const struct kernel_param_ops tcmu_block_netlink_op = {
276 .set = tcmu_set_block_netlink,
277 .get = tcmu_get_block_netlink,
280 module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
281 MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
283 static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
285 struct tcmu_dev *udev = nl_cmd->udev;
287 if (!tcmu_netlink_blocked) {
288 pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
292 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
293 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
294 nl_cmd->status = -EINTR;
295 list_del(&nl_cmd->nl_list);
296 complete(&nl_cmd->complete);
301 static int tcmu_set_reset_netlink(const char *str,
302 const struct kernel_param *kp)
304 struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
308 ret = kstrtou8(str, 0, &val);
313 pr_err("Invalid reset netlink value %u\n", val);
317 mutex_lock(&tcmu_nl_cmd_mutex);
318 list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
319 ret = tcmu_fail_netlink_cmd(nl_cmd);
323 mutex_unlock(&tcmu_nl_cmd_mutex);
328 static const struct kernel_param_ops tcmu_reset_netlink_op = {
329 .set = tcmu_set_reset_netlink,
332 module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
333 MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
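/*
 * Illustrative usage of the two module parameters above (paths assume the
 * module is loaded under its default name):
 *
 *	echo 1 > /sys/module/target_core_user/parameters/block_netlink
 *	echo 1 > /sys/module/target_core_user/parameters/reset_netlink
 *
 * The first blocks new netlink commands; the second then aborts any commands
 * still waiting for a userspace reply, so that hung device add/remove/
 * reconfigure operations can return.
 */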
335 /* multicast group */
336 enum tcmu_multicast_groups {
340 static const struct genl_multicast_group tcmu_mcgrps[] = {
341 [TCMU_MCGRP_CONFIG] = { .name = "config", },
344 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
345 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
346 [TCMU_ATTR_MINOR] = { .type = NLA_U32 },
347 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
348 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
349 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
352 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
354 struct tcmu_dev *udev = NULL;
355 struct tcmu_nl_cmd *nl_cmd;
356 int dev_id, rc, ret = 0;
358 if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
359 !info->attrs[TCMU_ATTR_DEVICE_ID]) {
360 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
364 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
365 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
367 mutex_lock(&tcmu_nl_cmd_mutex);
368 list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
369 if (nl_cmd->udev->se_dev.dev_index == dev_id) {
376 pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
377 completed_cmd, rc, dev_id);
381 list_del(&nl_cmd->nl_list);
383 pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
384 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
387 if (nl_cmd->cmd != completed_cmd) {
388 pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
389 udev->name, completed_cmd, nl_cmd->cmd);
395 complete(&nl_cmd->complete);
397 mutex_unlock(&tcmu_nl_cmd_mutex);
401 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
403 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
406 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
408 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
411 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
412 struct genl_info *info)
414 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
417 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
419 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
420 tcmu_kern_cmd_reply_supported =
421 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
422 printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
423 tcmu_kern_cmd_reply_supported);
429 static const struct genl_ops tcmu_genl_ops[] = {
431 .cmd = TCMU_CMD_SET_FEATURES,
432 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
433 .flags = GENL_ADMIN_PERM,
434 .doit = tcmu_genl_set_features,
437 .cmd = TCMU_CMD_ADDED_DEVICE_DONE,
438 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
439 .flags = GENL_ADMIN_PERM,
440 .doit = tcmu_genl_add_dev_done,
443 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
444 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
445 .flags = GENL_ADMIN_PERM,
446 .doit = tcmu_genl_rm_dev_done,
449 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
450 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
451 .flags = GENL_ADMIN_PERM,
452 .doit = tcmu_genl_reconfig_dev_done,
456 /* Our generic netlink family */
457 static struct genl_family tcmu_genl_family __ro_after_init = {
458 .module = THIS_MODULE,
462 .maxattr = TCMU_ATTR_MAX,
463 .policy = tcmu_attr_policy,
464 .mcgrps = tcmu_mcgrps,
465 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
467 .ops = tcmu_genl_ops,
468 .n_ops = ARRAY_SIZE(tcmu_genl_ops),
471 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
472 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
473 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
474 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
476 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
478 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
481 for (i = 0; i < len; i++)
482 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
485 static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
486 struct tcmu_cmd *tcmu_cmd)
491 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
492 if (dbi == udev->dbi_thresh)
495 page = radix_tree_lookup(&udev->data_blocks, dbi);
497 if (atomic_add_return(1, &global_db_count) >
498 tcmu_global_max_blocks)
499 schedule_delayed_work(&tcmu_unmap_work, 0);
501 /* try to get a new page from the mm */
502 page = alloc_page(GFP_KERNEL);
506 ret = radix_tree_insert(&udev->data_blocks, dbi, page);
511 if (dbi > udev->dbi_max)
514 set_bit(dbi, udev->data_bitmap);
515 tcmu_cmd_set_dbi(tcmu_cmd, dbi);
521 atomic_dec(&global_db_count);
525 static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
526 struct tcmu_cmd *tcmu_cmd)
530 for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
531 if (!tcmu_get_empty_block(udev, tcmu_cmd))
537 static inline struct page *
538 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
540 return radix_tree_lookup(&udev->data_blocks, dbi);
543 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
545 kfree(tcmu_cmd->dbi);
546 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
549 static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
551 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
552 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
554 if (se_cmd->se_cmd_flags & SCF_BIDI) {
555 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
556 data_length += round_up(se_cmd->t_bidi_data_sg->length,
563 static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
565 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
567 return data_length / DATA_BLOCK_SIZE;
570 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
572 struct se_device *se_dev = se_cmd->se_dev;
573 struct tcmu_dev *udev = TCMU_DEV(se_dev);
574 struct tcmu_cmd *tcmu_cmd;
576 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
580 INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
581 tcmu_cmd->se_cmd = se_cmd;
582 tcmu_cmd->tcmu_dev = udev;
584 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
585 tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
586 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
588 if (!tcmu_cmd->dbi) {
589 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
596 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
598 unsigned long offset = offset_in_page(vaddr);
599 void *start = vaddr - offset;
601 size = round_up(size+offset, PAGE_SIZE);
604 flush_dcache_page(vmalloc_to_page(start));
611 * Some ring helper functions. We don't assume size is a power of 2 so
612 * we can't use circ_buf.h.
614 static inline size_t spc_used(size_t head, size_t tail, size_t size)
616 int diff = head - tail;
624 static inline size_t spc_free(size_t head, size_t tail, size_t size)
626 /* Keep 1 byte unused or we can't tell full from empty */
627 return (size - spc_used(head, tail, size) - 1);
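/*
 * Worked example (illustrative): with size == 8, head == 2 and tail == 5 the
 * ring has wrapped, so spc_used() == 8 + (2 - 5) == 5 and spc_free() ==
 * 8 - 5 - 1 == 2. Reserving one byte keeps head == tail unambiguous: it
 * always means "empty", never "full".
 */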
630 static inline size_t head_to_end(size_t head, size_t size)
635 static inline void new_iov(struct iovec **iov, int *iov_cnt)
644 memset(iovec, 0, sizeof(struct iovec));
647 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
649 /* offset is relative to mb_addr */
650 static inline size_t get_block_offset_user(struct tcmu_dev *dev,
651 int dbi, int remaining)
653 return dev->data_off + dbi * DATA_BLOCK_SIZE +
654 DATA_BLOCK_SIZE - remaining;
657 static inline size_t iov_tail(struct iovec *iov)
659 return (size_t)iov->iov_base + iov->iov_len;
662 static void scatter_data_area(struct tcmu_dev *udev,
663 struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
664 unsigned int data_nents, struct iovec **iov,
665 int *iov_cnt, bool copy_data)
668 int block_remaining = 0;
669 void *from, *to = NULL;
670 size_t copy_bytes, to_offset, offset;
671 struct scatterlist *sg;
672 struct page *page = NULL;
674 for_each_sg(data_sg, sg, data_nents, i) {
675 int sg_remaining = sg->length;
676 from = kmap_atomic(sg_page(sg)) + sg->offset;
677 while (sg_remaining > 0) {
678 if (block_remaining == 0) {
680 flush_dcache_page(page);
684 block_remaining = DATA_BLOCK_SIZE;
685 dbi = tcmu_cmd_get_dbi(tcmu_cmd);
686 page = tcmu_get_block_page(udev, dbi);
687 to = kmap_atomic(page);
691 * Convert to the virtual offset of the ring data area.
693 to_offset = get_block_offset_user(udev, dbi,
697 * The following code will gather and map the blocks
698 * to the same iovec when the blocks are all next to
701 copy_bytes = min_t(size_t, sg_remaining,
704 to_offset == iov_tail(*iov)) {
706 * Will append to the current iovec, because
707 * the current block page is next to the
710 (*iov)->iov_len += copy_bytes;
713 * Will allocate a new iovec because this is the
714 * first time here or the current block page
715 * is not next to the previous one.
717 new_iov(iov, iov_cnt);
718 (*iov)->iov_base = (void __user *)to_offset;
719 (*iov)->iov_len = copy_bytes;
723 offset = DATA_BLOCK_SIZE - block_remaining;
725 from + sg->length - sg_remaining,
729 sg_remaining -= copy_bytes;
730 block_remaining -= copy_bytes;
732 kunmap_atomic(from - sg->offset);
736 flush_dcache_page(page);
741 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
742 bool bidi, uint32_t read_len)
744 struct se_cmd *se_cmd = cmd->se_cmd;
746 int block_remaining = 0;
747 void *from = NULL, *to;
748 size_t copy_bytes, offset;
749 struct scatterlist *sg, *data_sg;
751 unsigned int data_nents;
755 data_sg = se_cmd->t_data_sg;
756 data_nents = se_cmd->t_data_nents;
760 * For the bidi case, the first count blocks are for Data-Out
761 * buffer blocks, and before gathering the Data-In buffer
762 * the Data-Out buffer blocks should be discarded.
764 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
766 data_sg = se_cmd->t_bidi_data_sg;
767 data_nents = se_cmd->t_bidi_data_nents;
770 tcmu_cmd_set_dbi_cur(cmd, count);
772 for_each_sg(data_sg, sg, data_nents, i) {
773 int sg_remaining = sg->length;
774 to = kmap_atomic(sg_page(sg)) + sg->offset;
775 while (sg_remaining > 0 && read_len > 0) {
776 if (block_remaining == 0) {
780 block_remaining = DATA_BLOCK_SIZE;
781 dbi = tcmu_cmd_get_dbi(cmd);
782 page = tcmu_get_block_page(udev, dbi);
783 from = kmap_atomic(page);
784 flush_dcache_page(page);
786 copy_bytes = min_t(size_t, sg_remaining,
788 if (read_len < copy_bytes)
789 copy_bytes = read_len;
790 offset = DATA_BLOCK_SIZE - block_remaining;
791 memcpy(to + sg->length - sg_remaining, from + offset,
794 sg_remaining -= copy_bytes;
795 block_remaining -= copy_bytes;
796 read_len -= copy_bytes;
798 kunmap_atomic(to - sg->offset);
806 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
808 return thresh - bitmap_weight(bitmap, thresh);
812 * We can't queue a command until we have space available on the cmd ring *and*
813 * space available on the data area.
815 * Called with ring lock held.
817 static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
818 size_t cmd_size, size_t data_needed)
820 struct tcmu_mailbox *mb = udev->mb_addr;
821 uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
823 size_t space, cmd_needed;
826 tcmu_flush_dcache_range(mb, sizeof(*mb));
828 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
831 * If cmd end-of-ring space is too small then we need space for a NOP plus
832 * original cmd - cmds are internally contiguous.
834 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
835 cmd_needed = cmd_size;
837 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
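/*
 * Worked example (illustrative): with cmdr_size == 1024, cmd_head == 1000 and
 * cmd_size == 64, only 24 bytes remain before the end of the ring, so
 * cmd_needed == 64 + 24 == 88: a PAD entry to burn the tail of the ring plus
 * the command itself starting again at offset 0.
 */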
839 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
840 if (space < cmd_needed) {
841 pr_debug("no cmd space: %u %u %u\n", cmd_head,
842 udev->cmdr_last_cleaned, udev->cmdr_size);
846 /* try to check for and reserve the data blocks as needed */
847 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
848 if ((space * DATA_BLOCK_SIZE) < data_needed) {
849 unsigned long blocks_left =
850 (udev->max_blocks - udev->dbi_thresh) + space;
852 if (blocks_left < blocks_needed) {
853 pr_debug("no data space: only %lu available, but ask for %zu\n",
854 blocks_left * DATA_BLOCK_SIZE,
859 udev->dbi_thresh += blocks_needed;
860 if (udev->dbi_thresh > udev->max_blocks)
861 udev->dbi_thresh = udev->max_blocks;
864 return tcmu_get_empty_blocks(udev, cmd);
867 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
869 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
870 sizeof(struct tcmu_cmd_entry));
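/*
 * Note (illustrative): struct tcmu_cmd_entry is a header plus a union of the
 * request (ending in a flexible iov[] array) and the response (which carries
 * the sense buffer). For small iov counts offsetof(req.iov[iov_cnt]) can be
 * smaller than sizeof(struct tcmu_cmd_entry), so the max() keeps the entry
 * large enough for the response; for large iov counts the iov[] array
 * dominates and the entry grows with it.
 */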
873 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
874 size_t base_command_size)
876 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
879 command_size = base_command_size +
880 round_up(scsi_command_size(se_cmd->t_task_cdb),
883 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
888 static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
889 struct timer_list *timer)
894 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
895 if (!timer_pending(timer))
896 mod_timer(timer, tcmu_cmd->deadline);
898 pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
899 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
902 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
904 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
908 * For backwards compat, if qfull_time_out is not set, use
909 * cmd_time_out; if that's not set, use the default timeout.
911 if (!udev->qfull_time_out)
913 else if (udev->qfull_time_out > 0)
914 tmo = udev->qfull_time_out;
915 else if (udev->cmd_time_out)
916 tmo = udev->cmd_time_out;
920 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
922 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
923 pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
924 tcmu_cmd, udev->name);
929 * queue_cmd_ring - queue cmd to ring or internally
930 * @tcmu_cmd: cmd to queue
931 * @scsi_err: TCM error code, set if failure (-1) is returned.
934 * -1 we cannot queue internally or to the ring.
936 * 1 internally queued to wait for ring memory to free.
938 static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
940 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
941 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
942 size_t base_command_size, command_size;
943 struct tcmu_mailbox *mb;
944 struct tcmu_cmd_entry *entry;
949 bool copy_to_data_area;
950 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
952 *scsi_err = TCM_NO_SENSE;
954 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
955 *scsi_err = TCM_LUN_BUSY;
959 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
960 *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
965 * Must be a certain minimum size for response sense info, but
966 * also may be larger if the iov array is large.
968 * We prepare as many iovs as possible for potential uses here,
969 * because it's expensive to tell how many regions are freed in
970 * the bitmap & global data pool, as the size calculated here
971 * will only be used to do the checks.
973 * The size will be recalculated later as actually needed to save
976 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
977 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
979 if (!list_empty(&udev->qfull_queue))
983 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
984 if ((command_size > (udev->cmdr_size / 2)) ||
985 data_length > udev->data_size) {
986 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
987 "cmd ring/data area\n", command_size, data_length,
988 udev->cmdr_size, udev->data_size);
989 *scsi_err = TCM_INVALID_CDB_FIELD;
993 if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
995 * Don't leave commands partially set up because the unmap
996 * thread might need the blocks to make forward progress.
998 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
999 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1003 /* Insert a PAD if end-of-ring space is too small */
1004 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
1005 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
1007 entry = (void *) mb + CMDR_OFF + cmd_head;
1008 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
1009 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
1010 entry->hdr.cmd_id = 0; /* not used for PAD */
1011 entry->hdr.kflags = 0;
1012 entry->hdr.uflags = 0;
1013 tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
1015 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
1016 tcmu_flush_dcache_range(mb, sizeof(*mb));
1018 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
1019 WARN_ON(cmd_head != 0);
1022 entry = (void *) mb + CMDR_OFF + cmd_head;
1023 memset(entry, 0, command_size);
1024 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
1026 /* Handle allocating space from the data area */
1027 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1028 iov = &entry->req.iov[0];
1030 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
1031 || se_cmd->se_cmd_flags & SCF_BIDI);
1032 scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
1033 se_cmd->t_data_nents, &iov, &iov_cnt,
1035 entry->req.iov_cnt = iov_cnt;
1037 /* Handle BIDI commands */
1039 if (se_cmd->se_cmd_flags & SCF_BIDI) {
1041 scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
1042 se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
1045 entry->req.iov_bidi_cnt = iov_cnt;
1047 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
1049 pr_err("tcmu: Could not allocate cmd id.\n");
1051 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
1052 *scsi_err = TCM_OUT_OF_RESOURCES;
1055 tcmu_cmd->cmd_id = cmd_id;
1057 pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
1058 tcmu_cmd, udev->name);
1060 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
1062 entry->hdr.cmd_id = tcmu_cmd->cmd_id;
1065 * Recalculate the command's base size and size according
1066 * to the actual needs
1068 base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
1069 entry->req.iov_bidi_cnt);
1070 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1072 tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
1074 /* All offsets relative to mb_addr, not start of entry! */
1075 cdb_off = CMDR_OFF + cmd_head + base_command_size;
1076 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
1077 entry->req.cdb_off = cdb_off;
1078 tcmu_flush_dcache_range(entry, command_size);
1080 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1081 tcmu_flush_dcache_range(mb, sizeof(*mb));
1083 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1084 set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
1086 /* TODO: only if FLUSH and FUA? */
1087 uio_event_notify(&udev->uio_info);
1092 if (add_to_qfull_queue(tcmu_cmd)) {
1093 *scsi_err = TCM_OUT_OF_RESOURCES;
1100 static sense_reason_t
1101 tcmu_queue_cmd(struct se_cmd *se_cmd)
1103 struct se_device *se_dev = se_cmd->se_dev;
1104 struct tcmu_dev *udev = TCMU_DEV(se_dev);
1105 struct tcmu_cmd *tcmu_cmd;
1106 sense_reason_t scsi_ret;
1109 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
1111 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1113 mutex_lock(&udev->cmdr_lock);
1114 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1115 mutex_unlock(&udev->cmdr_lock);
1117 tcmu_free_cmd(tcmu_cmd);
1121 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
1123 struct se_cmd *se_cmd = cmd->se_cmd;
1124 struct tcmu_dev *udev = cmd->tcmu_dev;
1125 bool read_len_valid = false;
1129 * cmd has been completed already from timeout, just reclaim
1130 * data area space and free cmd
1132 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1133 WARN_ON_ONCE(se_cmd);
1137 list_del_init(&cmd->queue_entry);
1139 tcmu_cmd_reset_dbi_cur(cmd);
1141 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
1142 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1144 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1148 read_len = se_cmd->data_length;
1149 if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1150 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1151 read_len_valid = true;
1152 if (entry->rsp.read_len < read_len)
1153 read_len = entry->rsp.read_len;
1156 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1157 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1158 if (!read_len_valid)
1161 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1163 if (se_cmd->se_cmd_flags & SCF_BIDI) {
1164 /* Get Data-In buffer before clean up */
1165 gather_data_area(udev, cmd, true, read_len);
1166 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1167 gather_data_area(udev, cmd, false, read_len);
1168 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1170 } else if (se_cmd->data_direction != DMA_NONE) {
1171 pr_warn("TCMU: data direction was %d!\n",
1172 se_cmd->data_direction);
1176 if (read_len_valid) {
1177 pr_debug("read_len = %d\n", read_len);
1178 target_complete_cmd_with_length(cmd->se_cmd,
1179 entry->rsp.scsi_status, read_len);
1181 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1185 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1189 static void tcmu_set_next_deadline(struct list_head *queue,
1190 struct timer_list *timer)
1192 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1193 unsigned long deadline = 0;
1195 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
1196 if (!time_after(jiffies, tcmu_cmd->deadline)) {
1197 deadline = tcmu_cmd->deadline;
1203 mod_timer(timer, deadline);
1208 static bool tcmu_handle_completions(struct tcmu_dev *udev)
1210 struct tcmu_mailbox *mb;
1211 struct tcmu_cmd *cmd;
1214 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1215 pr_err("ring broken, not handling completions\n");
1220 tcmu_flush_dcache_range(mb, sizeof(*mb));
1222 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1224 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1227 * Flush at most up to the end of the cmd ring, since the current
1228 * entry might be a padding that is shorter than sizeof(*entry)
1230 size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
1232 tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
1233 ring_left : sizeof(*entry));
1235 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
1236 UPDATE_HEAD(udev->cmdr_last_cleaned,
1237 tcmu_hdr_get_len(entry->hdr.len_op),
1241 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
1243 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
1245 pr_err("cmd_id %u not found, ring is broken\n",
1247 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1251 tcmu_handle_completion(cmd, entry);
1253 UPDATE_HEAD(udev->cmdr_last_cleaned,
1254 tcmu_hdr_get_len(entry->hdr.len_op),
1260 if (mb->cmd_tail == mb->cmd_head) {
1261 /* no more pending commands */
1262 del_timer(&udev->cmd_timer);
1264 if (list_empty(&udev->qfull_queue)) {
1266 * no more pending or waiting commands so try to
1267 * reclaim blocks if needed.
1269 if (atomic_read(&global_db_count) >
1270 tcmu_global_max_blocks)
1271 schedule_delayed_work(&tcmu_unmap_work, 0);
1273 } else if (udev->cmd_time_out) {
1274 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1280 static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
1282 struct se_cmd *se_cmd;
1284 if (!time_after(jiffies, cmd->deadline))
1287 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1288 list_del_init(&cmd->queue_entry);
1289 se_cmd = cmd->se_cmd;
1292 pr_debug("Timing out inflight cmd %u on dev %s.\n",
1293 cmd->cmd_id, cmd->tcmu_dev->name);
1295 target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
1298 static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
1300 struct se_cmd *se_cmd;
1302 if (!time_after(jiffies, cmd->deadline))
1305 pr_debug("Timing out queued cmd %p on dev %s.\n",
1306 cmd, cmd->tcmu_dev->name);
1308 list_del_init(&cmd->queue_entry);
1309 se_cmd = cmd->se_cmd;
1312 target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
1315 static void tcmu_device_timedout(struct tcmu_dev *udev)
1317 spin_lock(&timed_out_udevs_lock);
1318 if (list_empty(&udev->timedout_entry))
1319 list_add_tail(&udev->timedout_entry, &timed_out_udevs);
1320 spin_unlock(&timed_out_udevs_lock);
1322 schedule_delayed_work(&tcmu_unmap_work, 0);
1325 static void tcmu_cmd_timedout(struct timer_list *t)
1327 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
1329 pr_debug("%s cmd timeout has expired\n", udev->name);
1330 tcmu_device_timedout(udev);
1333 static void tcmu_qfull_timedout(struct timer_list *t)
1335 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
1337 pr_debug("%s qfull timeout has expired\n", udev->name);
1338 tcmu_device_timedout(udev);
1341 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
1343 struct tcmu_hba *tcmu_hba;
1345 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
1349 tcmu_hba->host_id = host_id;
1350 hba->hba_ptr = tcmu_hba;
1355 static void tcmu_detach_hba(struct se_hba *hba)
1357 kfree(hba->hba_ptr);
1358 hba->hba_ptr = NULL;
1361 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1363 struct tcmu_dev *udev;
1365 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
1368 kref_init(&udev->kref);
1370 udev->name = kstrdup(name, GFP_KERNEL);
1377 udev->cmd_time_out = TCMU_TIME_OUT;
1378 udev->qfull_time_out = -1;
1380 udev->max_blocks = DATA_BLOCK_BITS_DEF;
1381 mutex_init(&udev->cmdr_lock);
1383 INIT_LIST_HEAD(&udev->node);
1384 INIT_LIST_HEAD(&udev->timedout_entry);
1385 INIT_LIST_HEAD(&udev->qfull_queue);
1386 INIT_LIST_HEAD(&udev->inflight_queue);
1387 idr_init(&udev->commands);
1389 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
1390 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
1392 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1394 return &udev->se_dev;
1397 static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
1399 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1401 sense_reason_t scsi_ret;
1404 if (list_empty(&udev->qfull_queue))
1407 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1409 list_splice_init(&udev->qfull_queue, &cmds);
1411 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1412 list_del_init(&tcmu_cmd->queue_entry);
1414 pr_debug("removing cmd %p on dev %s from queue\n",
1415 tcmu_cmd, udev->name);
1419 * We were not able to even start the command, so
1420 * fail with busy to allow a retry in case the runner
1421 * was only temporarily down. If the device is being
1422 * removed then LIO core will do the right thing and
1425 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1426 tcmu_free_cmd(tcmu_cmd);
1430 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1432 pr_debug("cmd %p on dev %s failed with %u\n",
1433 tcmu_cmd, udev->name, scsi_ret);
1435 * Ignore scsi_ret for now. target_complete_cmd
1438 target_complete_cmd(tcmu_cmd->se_cmd,
1439 SAM_STAT_CHECK_CONDITION);
1440 tcmu_free_cmd(tcmu_cmd);
1441 } else if (ret > 0) {
1442 pr_debug("ran out of space during cmdr queue run\n");
1444 * cmd was requeued, so just put all cmds back in
1447 list_splice_tail(&cmds, &udev->qfull_queue);
1452 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1455 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1457 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1459 mutex_lock(&udev->cmdr_lock);
1460 tcmu_handle_completions(udev);
1461 run_qfull_queue(udev, false);
1462 mutex_unlock(&udev->cmdr_lock);
1468 * mmap code from uio.c. Copied here because we want to hook mmap()
1469 * and this stuff must come along.
1471 static int tcmu_find_mem_index(struct vm_area_struct *vma)
1473 struct tcmu_dev *udev = vma->vm_private_data;
1474 struct uio_info *info = &udev->uio_info;
1476 if (vma->vm_pgoff < MAX_UIO_MAPS) {
1477 if (info->mem[vma->vm_pgoff].size == 0)
1479 return (int)vma->vm_pgoff;
1484 static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
1488 mutex_lock(&udev->cmdr_lock);
1489 page = tcmu_get_block_page(udev, dbi);
1492 mutex_unlock(&udev->cmdr_lock);
1497 * Userspace messed up and passed in an address not in the
1498 * data iov passed to it.
1500 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
1503 mutex_unlock(&udev->cmdr_lock);
1508 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1510 struct tcmu_dev *udev = vmf->vma->vm_private_data;
1511 struct uio_info *info = &udev->uio_info;
1513 unsigned long offset;
1516 int mi = tcmu_find_mem_index(vmf->vma);
1518 return VM_FAULT_SIGBUS;
1521 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1524 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1526 if (offset < udev->data_off) {
1527 /* For the vmalloc()ed cmd area pages */
1528 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1529 page = vmalloc_to_page(addr);
1534 /* For the dynamically growing data area pages */
1535 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1536 page = tcmu_try_get_block_page(udev, dbi);
1538 return VM_FAULT_SIGBUS;
1545 static const struct vm_operations_struct tcmu_vm_ops = {
1546 .fault = tcmu_vma_fault,
1549 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1551 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1553 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1554 vma->vm_ops = &tcmu_vm_ops;
1556 vma->vm_private_data = udev;
1558 /* Ensure the mmap is exactly the right size */
1559 if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
1565 static int tcmu_open(struct uio_info *info, struct inode *inode)
1567 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1569 /* O_EXCL not supported for char devs, so fake it? */
1570 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1573 udev->inode = inode;
1574 kref_get(&udev->kref);
1581 static void tcmu_dev_call_rcu(struct rcu_head *p)
1583 struct se_device *dev = container_of(p, struct se_device, rcu_head);
1584 struct tcmu_dev *udev = TCMU_DEV(dev);
1586 kfree(udev->uio_info.name);
1591 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1593 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1594 kmem_cache_free(tcmu_cmd_cache, cmd);
1600 static void tcmu_blocks_release(struct radix_tree_root *blocks,
1606 for (i = start; i < end; i++) {
1607 page = radix_tree_delete(blocks, i);
1610 atomic_dec(&global_db_count);
1615 static void tcmu_dev_kref_release(struct kref *kref)
1617 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1618 struct se_device *dev = &udev->se_dev;
1619 struct tcmu_cmd *cmd;
1620 bool all_expired = true;
1623 vfree(udev->mb_addr);
1624 udev->mb_addr = NULL;
1626 spin_lock_bh(&timed_out_udevs_lock);
1627 if (!list_empty(&udev->timedout_entry))
1628 list_del(&udev->timedout_entry);
1629 spin_unlock_bh(&timed_out_udevs_lock);
1631 /* Upper layer should drain all requests before calling this */
1632 mutex_lock(&udev->cmdr_lock);
1633 idr_for_each_entry(&udev->commands, cmd, i) {
1634 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1635 all_expired = false;
1637 if (!list_empty(&udev->qfull_queue))
1638 all_expired = false;
1639 idr_destroy(&udev->commands);
1640 WARN_ON(!all_expired);
1642 tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
1643 bitmap_free(udev->data_bitmap);
1644 mutex_unlock(&udev->cmdr_lock);
1646 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1649 static int tcmu_release(struct uio_info *info, struct inode *inode)
1651 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1653 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1655 pr_debug("close\n");
1656 /* release ref from open */
1657 kref_put(&udev->kref, tcmu_dev_kref_release);
1661 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1663 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1665 if (!tcmu_kern_cmd_reply_supported)
1668 if (udev->nl_reply_supported <= 0)
1671 mutex_lock(&tcmu_nl_cmd_mutex);
1673 if (tcmu_netlink_blocked) {
1674 mutex_unlock(&tcmu_nl_cmd_mutex);
1675 pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
1680 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1681 mutex_unlock(&tcmu_nl_cmd_mutex);
1682 pr_warn("netlink cmd %d already executing on %s\n",
1683 nl_cmd->cmd, udev->name);
1687 memset(nl_cmd, 0, sizeof(*nl_cmd));
1689 nl_cmd->udev = udev;
1690 init_completion(&nl_cmd->complete);
1691 INIT_LIST_HEAD(&nl_cmd->nl_list);
1693 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
1695 mutex_unlock(&tcmu_nl_cmd_mutex);
1699 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
1701 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1703 if (!tcmu_kern_cmd_reply_supported)
1706 if (udev->nl_reply_supported <= 0)
1709 mutex_lock(&tcmu_nl_cmd_mutex);
1711 list_del(&nl_cmd->nl_list);
1712 memset(nl_cmd, 0, sizeof(*nl_cmd));
1714 mutex_unlock(&tcmu_nl_cmd_mutex);
1717 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1719 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1722 if (!tcmu_kern_cmd_reply_supported)
1725 if (udev->nl_reply_supported <= 0)
1728 pr_debug("sleeping for nl reply\n");
1729 wait_for_completion(&nl_cmd->complete);
1731 mutex_lock(&tcmu_nl_cmd_mutex);
1732 nl_cmd->cmd = TCMU_CMD_UNSPEC;
1733 ret = nl_cmd->status;
1734 mutex_unlock(&tcmu_nl_cmd_mutex);
1739 static int tcmu_netlink_event_init(struct tcmu_dev *udev,
1740 enum tcmu_genl_cmd cmd,
1741 struct sk_buff **buf, void **hdr)
1743 struct sk_buff *skb;
1747 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1751 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1755 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1759 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1763 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1776 static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1777 enum tcmu_genl_cmd cmd,
1778 struct sk_buff *skb, void *msg_header)
1782 genlmsg_end(skb, msg_header);
1784 ret = tcmu_init_genl_cmd_reply(udev, cmd);
1790 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1791 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1793 /* Wait during an add as the listener may not be up yet */
1795 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
1796 return tcmu_wait_genl_cmd_reply(udev);
1798 tcmu_destroy_genl_cmd_reply(udev);
1803 static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
1805 struct sk_buff *skb = NULL;
1806 void *msg_header = NULL;
1809 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
1813 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
1817 static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
1819 struct sk_buff *skb = NULL;
1820 void *msg_header = NULL;
1823 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
1827 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
1831 static int tcmu_update_uio_info(struct tcmu_dev *udev)
1833 struct tcmu_hba *hba = udev->hba->hba_ptr;
1834 struct uio_info *info;
1837 info = &udev->uio_info;
1839 if (udev->dev_config[0])
1840 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
1841 udev->name, udev->dev_config);
1843 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
1848 /* If the old string exists, free it */
1855 static int tcmu_configure_device(struct se_device *dev)
1857 struct tcmu_dev *udev = TCMU_DEV(dev);
1858 struct uio_info *info;
1859 struct tcmu_mailbox *mb;
1862 ret = tcmu_update_uio_info(udev);
1866 info = &udev->uio_info;
1868 mutex_lock(&udev->cmdr_lock);
1869 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
1870 mutex_unlock(&udev->cmdr_lock);
1871 if (!udev->data_bitmap) {
1873 goto err_bitmap_alloc;
1876 udev->mb_addr = vzalloc(CMDR_SIZE);
1877 if (!udev->mb_addr) {
1882 /* mailbox fits in first part of CMDR space */
1883 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
1884 udev->data_off = CMDR_SIZE;
1885 udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
1886 udev->dbi_thresh = 0; /* Default in Idle state */
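/*
 * Resulting mmap layout (illustrative, using the values set up above):
 *
 *	offset 0          CMDR_OFF                 CMDR_SIZE (== data_off)
 *	+-----------------+------------------------+----------------------+
 *	| tcmu_mailbox    | command ring           | data area            |
 *	|                 | (cmdr_size bytes)      | (data_size bytes)    |
 *	+-----------------+------------------------+----------------------+
 *
 * so ring_size below is data_size + CMDR_SIZE, and everything userspace
 * needs is reachable from the single UIO mapping.
 */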
1888 /* Initialise the mailbox of the ring buffer */
1890 mb->version = TCMU_MAILBOX_VERSION;
1891 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
1892 mb->cmdr_off = CMDR_OFF;
1893 mb->cmdr_size = udev->cmdr_size;
1895 WARN_ON(!PAGE_ALIGNED(udev->data_off));
1896 WARN_ON(udev->data_size % PAGE_SIZE);
1897 WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1899 info->version = __stringify(TCMU_MAILBOX_VERSION);
1901 info->mem[0].name = "tcm-user command & data buffer";
1902 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
1903 info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
1904 info->mem[0].memtype = UIO_MEM_NONE;
1906 info->irqcontrol = tcmu_irqcontrol;
1907 info->irq = UIO_IRQ_CUSTOM;
1909 info->mmap = tcmu_mmap;
1910 info->open = tcmu_open;
1911 info->release = tcmu_release;
1913 ret = uio_register_device(tcmu_root_device, info);
1917 /* User can set hw_block_size before enabling the device */
1918 if (dev->dev_attrib.hw_block_size == 0)
1919 dev->dev_attrib.hw_block_size = 512;
1920 /* Other attributes can be configured in userspace */
1921 if (!dev->dev_attrib.hw_max_sectors)
1922 dev->dev_attrib.hw_max_sectors = 128;
1923 if (!dev->dev_attrib.emulate_write_cache)
1924 dev->dev_attrib.emulate_write_cache = 0;
1925 dev->dev_attrib.hw_queue_depth = 128;
1927 /* If user didn't explicitly disable netlink reply support, use
1928 * module scope setting.
1930 if (udev->nl_reply_supported >= 0)
1931 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
1934 * Get a ref in case userspace does a close on the uio device before
1935 * LIO has initiated tcmu_free_device.
1937 kref_get(&udev->kref);
1939 ret = tcmu_send_dev_add_event(udev);
1943 mutex_lock(&root_udev_mutex);
1944 list_add(&udev->node, &root_udev);
1945 mutex_unlock(&root_udev_mutex);
1950 kref_put(&udev->kref, tcmu_dev_kref_release);
1951 uio_unregister_device(&udev->uio_info);
1953 vfree(udev->mb_addr);
1954 udev->mb_addr = NULL;
1956 bitmap_free(udev->data_bitmap);
1957 udev->data_bitmap = NULL;
1965 static void tcmu_free_device(struct se_device *dev)
1967 struct tcmu_dev *udev = TCMU_DEV(dev);
1969 /* release ref from init */
1970 kref_put(&udev->kref, tcmu_dev_kref_release);
1973 static void tcmu_destroy_device(struct se_device *dev)
1975 struct tcmu_dev *udev = TCMU_DEV(dev);
1977 del_timer_sync(&udev->cmd_timer);
1978 del_timer_sync(&udev->qfull_timer);
1980 mutex_lock(&root_udev_mutex);
1981 list_del(&udev->node);
1982 mutex_unlock(&root_udev_mutex);
1984 tcmu_send_dev_remove_event(udev);
1986 uio_unregister_device(&udev->uio_info);
1988 /* release ref from configure */
1989 kref_put(&udev->kref, tcmu_dev_kref_release);
1992 static void tcmu_unblock_dev(struct tcmu_dev *udev)
1994 mutex_lock(&udev->cmdr_lock);
1995 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
1996 mutex_unlock(&udev->cmdr_lock);
1999 static void tcmu_block_dev(struct tcmu_dev *udev)
2001 mutex_lock(&udev->cmdr_lock);
2003 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2006 /* complete IO that has executed successfully */
2007 tcmu_handle_completions(udev);
2008 /* fail IO waiting to be queued */
2009 run_qfull_queue(udev, true);
2012 mutex_unlock(&udev->cmdr_lock);
2015 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2017 struct tcmu_mailbox *mb;
2018 struct tcmu_cmd *cmd;
2021 mutex_lock(&udev->cmdr_lock);
2023 idr_for_each_entry(&udev->commands, cmd, i) {
2024 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
2025 cmd->cmd_id, udev->name,
2026 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
2028 idr_remove(&udev->commands, i);
2029 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2030 WARN_ON(!cmd->se_cmd);
2031 list_del_init(&cmd->queue_entry);
2032 if (err_level == 1) {
2034 * Userspace was not able to start the
2035 * command or it is retryable.
2037 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2040 target_complete_cmd(cmd->se_cmd,
2041 SAM_STAT_CHECK_CONDITION);
2044 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2049 tcmu_flush_dcache_range(mb, sizeof(*mb));
2050 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2051 mb->cmd_tail, mb->cmd_head);
2053 udev->cmdr_last_cleaned = 0;
2056 tcmu_flush_dcache_range(mb, sizeof(*mb));
2057 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
2059 del_timer(&udev->cmd_timer);
2061 run_qfull_queue(udev, false);
2063 mutex_unlock(&udev->cmdr_lock);
2067 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2068 Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
2071 static match_table_t tokens = {
2072 {Opt_dev_config, "dev_config=%s"},
2073 {Opt_dev_size, "dev_size=%s"},
2074 {Opt_hw_block_size, "hw_block_size=%d"},
2075 {Opt_hw_max_sectors, "hw_max_sectors=%d"},
2076 {Opt_nl_reply_supported, "nl_reply_supported=%d"},
2077 {Opt_max_data_area_mb, "max_data_area_mb=%d"},
2081 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2085 ret = match_int(arg, &val);
2087 pr_err("match_int() failed for dev attrib. Error %d.\n",
2093 pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2101 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2105 ret = match_int(arg, &val);
2107 pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2113 pr_err("Invalid max_data_area %d.\n", val);
2117 mutex_lock(&udev->cmdr_lock);
2118 if (udev->data_bitmap) {
2119 pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
2124 udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
2125 if (udev->max_blocks > tcmu_global_max_blocks) {
2126 pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2127 val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
2128 udev->max_blocks = tcmu_global_max_blocks;
2132 mutex_unlock(&udev->cmdr_lock);
2136 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2137 const char *page, ssize_t count)
2139 struct tcmu_dev *udev = TCMU_DEV(dev);
2140 char *orig, *ptr, *opts;
2141 substring_t args[MAX_OPT_ARGS];
2144 opts = kstrdup(page, GFP_KERNEL);
2150 while ((ptr = strsep(&opts, ",\n")) != NULL) {
2154 token = match_token(ptr, tokens, args);
2156 case Opt_dev_config:
2157 if (match_strlcpy(udev->dev_config, &args[0],
2158 TCMU_CONFIG_LEN) == 0) {
2162 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2165 ret = match_u64(&args[0], &udev->dev_size);
2167 pr_err("match_u64() failed for dev_size=. Error %d.\n",
2170 case Opt_hw_block_size:
2171 ret = tcmu_set_dev_attrib(&args[0],
2172 &(dev->dev_attrib.hw_block_size));
2174 case Opt_hw_max_sectors:
2175 ret = tcmu_set_dev_attrib(&args[0],
2176 &(dev->dev_attrib.hw_max_sectors));
2178 case Opt_nl_reply_supported:
2179 ret = match_int(&args[0], &udev->nl_reply_supported);
2181 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2184 case Opt_max_data_area_mb:
2185 ret = tcmu_set_max_blocks_param(udev, &args[0]);
2196 return (!ret) ? count : ret;
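/*
 * Illustrative example (hypothetical values) of a control string parsed by
 * tcmu_set_configfs_dev_params() above, e.g. written to the device's configfs
 * "control" attribute; the dev_config payload is opaque to the kernel and
 * only meaningful to the userspace handler:
 *
 *	dev_config=foo/bar,dev_size=1073741824,max_data_area_mb=64
 */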
2199 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2201 struct tcmu_dev *udev = TCMU_DEV(dev);
2204 bl = sprintf(b + bl, "Config: %s ",
2205 udev->dev_config[0] ? udev->dev_config : "NULL");
2206 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
2207 bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
2208 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2213 static sector_t tcmu_get_blocks(struct se_device *dev)
2215 struct tcmu_dev *udev = TCMU_DEV(dev);
2217 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2218 dev->dev_attrib.block_size);
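/*
 * Worked example (illustrative): with dev_size == 1 GiB (1073741824) and a
 * 512-byte block_size, the calculation above yields (1073741824 - 512) / 512
 * == 2097151, i.e. the index of the last addressable LBA rather than the
 * total block count, which is what READ CAPACITY-style callers expect.
 */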
2221 static sense_reason_t
2222 tcmu_parse_cdb(struct se_cmd *cmd)
2224 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2227 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2229 struct se_dev_attrib *da = container_of(to_config_group(item),
2230 struct se_dev_attrib, da_group);
2231 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2233 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2236 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2239 struct se_dev_attrib *da = container_of(to_config_group(item),
2240 struct se_dev_attrib, da_group);
2241 struct tcmu_dev *udev = container_of(da->da_dev,
2242 struct tcmu_dev, se_dev);
2246 if (da->da_dev->export_count) {
2247 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2251 ret = kstrtou32(page, 0, &val);
2255 udev->cmd_time_out = val * MSEC_PER_SEC;
2258 CONFIGFS_ATTR(tcmu_, cmd_time_out);
2260 static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2262 struct se_dev_attrib *da = container_of(to_config_group(item),
2263 struct se_dev_attrib, da_group);
2264 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2266 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2267 udev->qfull_time_out :
2268 udev->qfull_time_out / MSEC_PER_SEC);
2271 static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2272 const char *page, size_t count)
2274 struct se_dev_attrib *da = container_of(to_config_group(item),
2275 struct se_dev_attrib, da_group);
2276 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2280 ret = kstrtos32(page, 0, &val);
2285 udev->qfull_time_out = val * MSEC_PER_SEC;
2286 } else if (val == -1) {
2287 udev->qfull_time_out = val;
2289 printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
2294 CONFIGFS_ATTR(tcmu_, qfull_time_out);
2296 static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2298 struct se_dev_attrib *da = container_of(to_config_group(item),
2299 struct se_dev_attrib, da_group);
2300 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2302 return snprintf(page, PAGE_SIZE, "%u\n",
2303 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2305 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2307 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2309 struct se_dev_attrib *da = container_of(to_config_group(item),
2310 struct se_dev_attrib, da_group);
2311 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2313 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2316 static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2317 const char *reconfig_data)
2319 struct sk_buff *skb = NULL;
2320 void *msg_header = NULL;
2323 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2327 ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2332 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2337 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2340 struct se_dev_attrib *da = container_of(to_config_group(item),
2341 struct se_dev_attrib, da_group);
2342 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2346 if (!len || len > TCMU_CONFIG_LEN - 1)
2349 /* Check if device has been configured before */
2350 if (target_dev_configured(&udev->se_dev)) {
2351 ret = tcmu_send_dev_config_event(udev, page);
2353 pr_err("Unable to reconfigure device\n");
2356 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2358 ret = tcmu_update_uio_info(udev);
2363 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2367 CONFIGFS_ATTR(tcmu_, dev_config);
2369 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2371 struct se_dev_attrib *da = container_of(to_config_group(item),
2372 struct se_dev_attrib, da_group);
2373 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2375 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
2378 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2380 struct sk_buff *skb = NULL;
2381 void *msg_header = NULL;
2384 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2388 ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2389 size, TCMU_ATTR_PAD);
2394 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2398 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2401 struct se_dev_attrib *da = container_of(to_config_group(item),
2402 struct se_dev_attrib, da_group);
2403 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2407 ret = kstrtou64(page, 0, &val);
2411 /* Check if device has been configured before */
2412 if (target_dev_configured(&udev->se_dev)) {
2413 ret = tcmu_send_dev_size_event(udev, val);
2415 pr_err("Unable to reconfigure device\n");
2419 udev->dev_size = val;
2422 CONFIGFS_ATTR(tcmu_, dev_size);
2424 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2427 struct se_dev_attrib *da = container_of(to_config_group(item),
2428 struct se_dev_attrib, da_group);
2429 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2431 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2434 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2435 const char *page, size_t count)
2437 struct se_dev_attrib *da = container_of(to_config_group(item),
2438 struct se_dev_attrib, da_group);
2439 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2443 ret = kstrtos8(page, 0, &val);
2447 udev->nl_reply_supported = val;
2450 CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2452 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2455 struct se_dev_attrib *da = container_of(to_config_group(item),
2456 struct se_dev_attrib, da_group);
2458 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_emulate_write_cache(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");

	return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
}
static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
				    size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block value %d\n", val);
		return -EINVAL;
	}

	if (!val)
		tcmu_unblock_dev(udev);
	else
		tcmu_block_dev(udev);
	return count;
}
CONFIGFS_ATTR(tcmu_, block_dev);
static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1 && val != 2) {
		pr_err("Invalid reset ring value %d\n", val);
		return -EINVAL;
	}

	tcmu_reset_ring(udev, val);
	return count;
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);
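
/*
 * Illustrative usage only (paths depend on the HBA/device names chosen when
 * the backstore was created; "user_0" and "mydev" are example names):
 * block_dev and reset_ring are exposed through the device's configfs action
 * group, e.g.
 *
 *   echo 1 > /sys/kernel/config/target/core/user_0/mydev/action/block_dev
 *   echo 1 > /sys/kernel/config/target/core/user_0/mydev/action/reset_ring
 *   echo 0 > /sys/kernel/config/target/core/user_0/mydev/action/block_dev
 *
 * This is the usual "fence, reset the ring, unfence" sequence used to
 * recover a device whose userspace handler has died or must be restarted.
 */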
static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_qfull_time_out,
	&tcmu_attr_max_data_area_mb,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;
static struct configfs_attribute *tcmu_action_attrs[] = {
	&tcmu_attr_block_dev,
	&tcmu_attr_reset_ring,
	NULL,
};
static struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.owner			= THIS_MODULE,
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.destroy_device		= tcmu_destroy_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_action_attrs	= tcmu_action_attrs,
};
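
/*
 * Note: tb_dev_attrib_attrs is deliberately left unset here. tcmu_module_init()
 * below builds the attribute list at load time by concatenating the generic
 * passthrough_attrib_attrs with tcmu_attrib_attrs and assigns the merged,
 * NULL-terminated array to tcmu_ops.tb_dev_attrib_attrs.
 */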
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	u32 start, end, block, total_freed = 0;

	if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
		return;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		if (!target_dev_configured(&udev->se_dev)) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		/* Try to complete the finished commands first */
		tcmu_handle_completions(udev);

		/* Skip udevs that are idle */
		if (!udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so it is not possible to
			 * reclaim any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* This udev will go idle */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Truncate the mapped data area from offset 'off' onward */
		off = udev->data_off + start * DATA_BLOCK_SIZE;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		tcmu_blocks_release(&udev->data_blocks, start, end);
		mutex_unlock(&udev->cmdr_lock);

		total_freed += end - start;
		pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
			 total_freed, udev->name);
	}
	mutex_unlock(&root_udev_mutex);

	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}
static void check_timedout_devices(void)
{
	struct tcmu_dev *udev, *tmp_dev;
	struct tcmu_cmd *cmd, *tmp_cmd;
	LIST_HEAD(devs);

	spin_lock_bh(&timed_out_udevs_lock);
	list_splice_init(&timed_out_udevs, &devs);

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		list_del_init(&udev->timedout_entry);
		/* Drop the spinlock before taking the sleeping cmdr_lock */
		spin_unlock_bh(&timed_out_udevs_lock);

		mutex_lock(&udev->cmdr_lock);

		/*
		 * If cmd_time_out is disabled but qfull is set, the deadline
		 * will only reflect the qfull timeout. Ignore it.
		 */
		if (udev->cmd_time_out) {
			list_for_each_entry_safe(cmd, tmp_cmd,
						 &udev->inflight_queue,
						 queue_entry) {
				tcmu_check_expired_ring_cmd(cmd);
			}
			tcmu_set_next_deadline(&udev->inflight_queue,
					       &udev->cmd_timer);
		}
		list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
					 queue_entry) {
			tcmu_check_expired_queue_cmd(cmd);
		}
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

		mutex_unlock(&udev->cmdr_lock);

		spin_lock_bh(&timed_out_udevs_lock);
	}

	spin_unlock_bh(&timed_out_udevs_lock);
}
static void tcmu_unmap_work_fn(struct work_struct *work)
{
	check_timedout_devices();
	find_free_blocks();
}
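
/*
 * Both housekeeping jobs share the single tcmu_unmap_work delayed work item:
 * it is scheduled from the per-device timeout path (see the timed_out_udevs
 * handling earlier in this file) and re-armed by find_free_blocks() every
 * 5 seconds while the global data-block pool stays above
 * tcmu_global_max_blocks.
 */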
static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0) {
		goto out_unreg_device;
	}

	/* Merge the generic passthrough attributes with the tcmu ones. */
	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	len += sizeof(struct configfs_attribute *); /* NULL terminator */

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	}
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
		tcmu_attrs[i] = tcmu_attrib_attrs[k];
		i++;
	}
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	return 0;

out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}
static void __exit tcmu_module_exit(void)
{
	cancel_delayed_work_sync(&tcmu_unmap_work);
	target_backend_unregister(&tcmu_ops);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}
MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);