2 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
3 * Copyright (C) 2014 Red Hat, Inc.
4 * Copyright (C) 2015 Arrikto, Inc.
5 * Copyright (C) 2017 Chinamobile, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 #include <linux/spinlock.h>
22 #include <linux/module.h>
23 #include <linux/idr.h>
24 #include <linux/kernel.h>
25 #include <linux/timer.h>
26 #include <linux/parser.h>
27 #include <linux/vmalloc.h>
28 #include <linux/uio_driver.h>
29 #include <linux/radix-tree.h>
30 #include <linux/stringify.h>
31 #include <linux/bitops.h>
32 #include <linux/highmem.h>
33 #include <linux/configfs.h>
34 #include <linux/mutex.h>
35 #include <linux/workqueue.h>
36 #include <net/genetlink.h>
37 #include <scsi/scsi_common.h>
38 #include <scsi/scsi_proto.h>
39 #include <target/target_core_base.h>
40 #include <target/target_core_fabric.h>
41 #include <target/target_core_backend.h>
43 #include <linux/target_core_user.h>
50 * Define a shared-memory interface for LIO to pass SCSI commands and
51 * data to userspace for processing. This allows backends that are
52 * too complex for in-kernel support to be implemented in userspace.
54 * It uses the UIO framework to do a lot of the device-creation and
55 * introspection work for us.
57 * See the .h file for how the ring is laid out. Note that while the
58 * command ring is defined, the particulars of the data area are
59 * not. Offset values in the command entry point to other locations
60 * internal to the mmap-ed area. There is separate space outside the
61 * command ring for data buffers. This leaves maximum flexibility for
62 * moving buffer allocations, or even page flipping or other
63 * allocation techniques, without altering the command ring layout.
66 * The user process must be assumed to be malicious. There's no way to
67 * prevent it breaking the command ring protocol if it wants, but in
68 * order to prevent other issues we must only ever read *data* from
69 * the shared memory area, not offsets or sizes. This applies to
70 * command ring entries as well as the mailbox. Extra code needed for
71 * this may have a 'UAM' comment.
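 *
 * For example, with the defaults set up below, the mmap-ed region is laid
 * out as [ mailbox | command ring (CMDR_SIZE - sizeof(struct tcmu_mailbox))
 * | data area starting at data_off == CMDR_SIZE ], and the 'UAM' rule shows
 * up as copying shared values into locals and bounding them before use,
 * e.g. cmd_head = mb->cmd_head % udev->cmdr_size.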
74 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
76 /* For cmd area, the size is fixed at 8MB */
77 #define CMDR_SIZE (8 * 1024 * 1024)
80 * For the data area, the block size is PAGE_SIZE and
81 * the default total size is 256K * PAGE_SIZE.
83 #define DATA_BLOCK_SIZE PAGE_SIZE
84 #define DATA_BLOCK_SHIFT PAGE_SHIFT
85 #define DATA_BLOCK_BITS_DEF (256 * 1024)
86 #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
88 #define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
89 #define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
91 /* The total size of the ring is 8M + 256K * PAGE_SIZE */
92 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
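/*
 * For example, with 4 KiB pages (DATA_BLOCK_SHIFT == 12),
 * TCMU_MBS_TO_BLOCKS(1024) == 1024 << 8 == 256K blocks, which matches
 * DATA_BLOCK_BITS_DEF, i.e. a 1 GiB default data area per device on top
 * of the fixed 8 MiB command area.
 */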
95 * Default number of global data blocks (512K * PAGE_SIZE); when this
96 * limit is exceeded the unmap thread is started.
98 #define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
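/*
 * With 4 KiB pages this default works out to 512K * 4 KiB == 2 GiB of
 * data blocks shared across all devices before the unmap worker is woken
 * to reclaim space.
 */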
100 static u8 tcmu_kern_cmd_reply_supported;
102 static struct device *tcmu_root_device;
108 #define TCMU_CONFIG_LEN 256
111 /* wake up thread waiting for reply */
112 struct completion complete;
118 struct list_head node;
121 struct se_device se_dev;
126 #define TCMU_DEV_BIT_OPEN 0
127 #define TCMU_DEV_BIT_BROKEN 1
128 #define TCMU_DEV_BIT_BLOCKED 2
131 struct uio_info uio_info;
135 struct tcmu_mailbox *mb_addr;
138 u32 cmdr_last_cleaned;
139 /* Offset of data area from start of mb */
140 /* Must add data_off and mb_addr to get the address */
146 struct mutex cmdr_lock;
147 struct list_head cmdr_queue;
151 unsigned long *data_bitmap;
152 struct radix_tree_root data_blocks;
156 struct timer_list cmd_timer;
157 unsigned int cmd_time_out;
159 struct timer_list qfull_timer;
162 struct list_head timedout_entry;
164 spinlock_t nl_cmd_lock;
165 struct tcmu_nl_cmd curr_nl_cmd;
166 /* wake up threads waiting on curr_nl_cmd */
167 wait_queue_head_t nl_cmd_wq;
169 char dev_config[TCMU_CONFIG_LEN];
171 int nl_reply_supported;
174 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
176 #define CMDR_OFF sizeof(struct tcmu_mailbox)
179 struct se_cmd *se_cmd;
180 struct tcmu_dev *tcmu_dev;
181 struct list_head cmdr_queue_entry;
185 /* Can't use se_cmd when cleaning up expired cmds, because if
186 cmd has been completed then accessing se_cmd is off limits */
191 unsigned long deadline;
193 #define TCMU_CMD_BIT_EXPIRED 0
197 * To avoid deadlock, the mutex lock order should always be:
199 * mutex_lock(&root_udev_mutex);
201 * mutex_lock(&tcmu_dev->cmdr_lock);
202 * mutex_unlock(&tcmu_dev->cmdr_lock);
204 * mutex_unlock(&root_udev_mutex);
206 static DEFINE_MUTEX(root_udev_mutex);
207 static LIST_HEAD(root_udev);
209 static DEFINE_SPINLOCK(timed_out_udevs_lock);
210 static LIST_HEAD(timed_out_udevs);
212 static struct kmem_cache *tcmu_cmd_cache;
214 static atomic_t global_db_count = ATOMIC_INIT(0);
215 static struct delayed_work tcmu_unmap_work;
216 static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
218 static int tcmu_set_global_max_data_area(const char *str,
219 const struct kernel_param *kp)
221 int ret, max_area_mb;
223 ret = kstrtoint(str, 10, &max_area_mb);
227 if (max_area_mb <= 0) {
228 pr_err("global_max_data_area must be larger than 0.\n");
232 tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
233 if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
234 schedule_delayed_work(&tcmu_unmap_work, 0);
236 cancel_delayed_work_sync(&tcmu_unmap_work);
241 static int tcmu_get_global_max_data_area(char *buffer,
242 const struct kernel_param *kp)
244 return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
247 static const struct kernel_param_ops tcmu_global_max_data_area_op = {
248 .set = tcmu_set_global_max_data_area,
249 .get = tcmu_get_global_max_data_area,
252 module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
254 MODULE_PARM_DESC(global_max_data_area_mb,
255 "Max MBs allowed to be allocated to all the tcmu device's "
258 /* multicast group */
259 enum tcmu_multicast_groups {
263 static const struct genl_multicast_group tcmu_mcgrps[] = {
264 [TCMU_MCGRP_CONFIG] = { .name = "config", },
267 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
268 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
269 [TCMU_ATTR_MINOR] = { .type = NLA_U32 },
270 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
271 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
272 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
275 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
277 struct se_device *dev;
278 struct tcmu_dev *udev;
279 struct tcmu_nl_cmd *nl_cmd;
280 int dev_id, rc, ret = 0;
281 bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
283 if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
284 !info->attrs[TCMU_ATTR_DEVICE_ID]) {
285 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
289 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
290 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
292 dev = target_find_device(dev_id, !is_removed);
294 printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
295 completed_cmd, rc, dev_id);
298 udev = TCMU_DEV(dev);
300 spin_lock(&udev->nl_cmd_lock);
301 nl_cmd = &udev->curr_nl_cmd;
303 pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
304 nl_cmd->cmd, completed_cmd, rc);
306 if (nl_cmd->cmd != completed_cmd) {
307 printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
308 completed_cmd, nl_cmd->cmd);
314 spin_unlock(&udev->nl_cmd_lock);
316 target_undepend_item(&dev->dev_group.cg_item);
318 complete(&nl_cmd->complete);
322 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
324 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
327 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
329 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
332 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
333 struct genl_info *info)
335 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
338 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
340 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
341 tcmu_kern_cmd_reply_supported =
342 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
343 printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
344 tcmu_kern_cmd_reply_supported);
350 static const struct genl_ops tcmu_genl_ops[] = {
352 .cmd = TCMU_CMD_SET_FEATURES,
353 .flags = GENL_ADMIN_PERM,
354 .policy = tcmu_attr_policy,
355 .doit = tcmu_genl_set_features,
358 .cmd = TCMU_CMD_ADDED_DEVICE_DONE,
359 .flags = GENL_ADMIN_PERM,
360 .policy = tcmu_attr_policy,
361 .doit = tcmu_genl_add_dev_done,
364 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
365 .flags = GENL_ADMIN_PERM,
366 .policy = tcmu_attr_policy,
367 .doit = tcmu_genl_rm_dev_done,
370 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
371 .flags = GENL_ADMIN_PERM,
372 .policy = tcmu_attr_policy,
373 .doit = tcmu_genl_reconfig_dev_done,
377 /* Our generic netlink family */
378 static struct genl_family tcmu_genl_family __ro_after_init = {
379 .module = THIS_MODULE,
383 .maxattr = TCMU_ATTR_MAX,
384 .mcgrps = tcmu_mcgrps,
385 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
387 .ops = tcmu_genl_ops,
388 .n_ops = ARRAY_SIZE(tcmu_genl_ops),
391 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
392 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
393 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
394 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
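/*
 * The dbi[] array is walked with a cursor: tcmu_cmd_set_dbi() records each
 * data block index handed to a command, and a later
 * tcmu_cmd_reset_dbi_cur() + tcmu_cmd_get_dbi() pass replays the same
 * indexes when scattering/gathering data or freeing the blocks.
 */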
396 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
398 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
401 for (i = 0; i < len; i++)
402 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
405 static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
406 struct tcmu_cmd *tcmu_cmd)
411 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
412 if (dbi == udev->dbi_thresh)
415 page = radix_tree_lookup(&udev->data_blocks, dbi);
417 if (atomic_add_return(1, &global_db_count) >
418 tcmu_global_max_blocks)
419 schedule_delayed_work(&tcmu_unmap_work, 0);
421 /* try to get a new page from the mm */
422 page = alloc_page(GFP_KERNEL);
426 ret = radix_tree_insert(&udev->data_blocks, dbi, page);
431 if (dbi > udev->dbi_max)
434 set_bit(dbi, udev->data_bitmap);
435 tcmu_cmd_set_dbi(tcmu_cmd, dbi);
441 atomic_dec(&global_db_count);
445 static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
446 struct tcmu_cmd *tcmu_cmd)
450 for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
451 if (!tcmu_get_empty_block(udev, tcmu_cmd))
457 static inline struct page *
458 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
460 return radix_tree_lookup(&udev->data_blocks, dbi);
463 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
465 kfree(tcmu_cmd->dbi);
466 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
469 static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
471 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
472 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
474 if (se_cmd->se_cmd_flags & SCF_BIDI) {
475 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
476 data_length += round_up(se_cmd->t_bidi_data_sg->length,
483 static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
485 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
487 return data_length / DATA_BLOCK_SIZE;
490 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
492 struct se_device *se_dev = se_cmd->se_dev;
493 struct tcmu_dev *udev = TCMU_DEV(se_dev);
494 struct tcmu_cmd *tcmu_cmd;
496 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
500 INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
501 tcmu_cmd->se_cmd = se_cmd;
502 tcmu_cmd->tcmu_dev = udev;
504 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
505 tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
506 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
508 if (!tcmu_cmd->dbi) {
509 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
516 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
518 unsigned long offset = offset_in_page(vaddr);
519 void *start = vaddr - offset;
521 size = round_up(size+offset, PAGE_SIZE);
524 flush_dcache_page(virt_to_page(start));
531 * Some ring helper functions. We don't assume size is a power of 2 so
532 * we can't use circ_buf.h.
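 *
 * For example, with size == 16, head == 2 and tail == 10 the ring has
 * wrapped: spc_used() returns 16 + (2 - 10) == 8 bytes in use, and
 * spc_free() returns 16 - 8 - 1 == 7, since one byte is always kept
 * unused to tell a full ring from an empty one.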
534 static inline size_t spc_used(size_t head, size_t tail, size_t size)
536 int diff = head - tail;
544 static inline size_t spc_free(size_t head, size_t tail, size_t size)
546 /* Keep 1 byte unused or we can't tell full from empty */
547 return (size - spc_used(head, tail, size) - 1);
550 static inline size_t head_to_end(size_t head, size_t size)
555 static inline void new_iov(struct iovec **iov, int *iov_cnt)
564 memset(iovec, 0, sizeof(struct iovec));
567 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
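/*
 * For example, UPDATE_HEAD(head, 24, 1024) with head == 1000 stores
 * (1000 + 24) % 1024 == 0, wrapping back to the start of the ring; the
 * smp_store_release() orders the entry contents written earlier before
 * the new head value becomes visible to the reader on the other side.
 */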
569 /* offset is relative to mb_addr */
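/*
 * For example, with 4 KiB blocks, dbi == 3 and remaining == 1024 this is
 * data_off + 3 * 4096 + 3072, i.e. 3 KiB into the fourth data block,
 * still measured from the start of the mailbox.
 */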
570 static inline size_t get_block_offset_user(struct tcmu_dev *dev,
571 int dbi, int remaining)
573 return dev->data_off + dbi * DATA_BLOCK_SIZE +
574 DATA_BLOCK_SIZE - remaining;
577 static inline size_t iov_tail(struct iovec *iov)
579 return (size_t)iov->iov_base + iov->iov_len;
582 static void scatter_data_area(struct tcmu_dev *udev,
583 struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
584 unsigned int data_nents, struct iovec **iov,
585 int *iov_cnt, bool copy_data)
588 int block_remaining = 0;
589 void *from, *to = NULL;
590 size_t copy_bytes, to_offset, offset;
591 struct scatterlist *sg;
594 for_each_sg(data_sg, sg, data_nents, i) {
595 int sg_remaining = sg->length;
596 from = kmap_atomic(sg_page(sg)) + sg->offset;
597 while (sg_remaining > 0) {
598 if (block_remaining == 0) {
602 block_remaining = DATA_BLOCK_SIZE;
603 dbi = tcmu_cmd_get_dbi(tcmu_cmd);
604 page = tcmu_get_block_page(udev, dbi);
605 to = kmap_atomic(page);
609 * Convert to the virtual offset of the ring data area.
611 to_offset = get_block_offset_user(udev, dbi,
615 * The following code will gather and map the blocks
616 * to the same iovec when the blocks are all next to
619 copy_bytes = min_t(size_t, sg_remaining,
622 to_offset == iov_tail(*iov)) {
624 * Will append to the current iovec, because
625 * the current block page is next to the
628 (*iov)->iov_len += copy_bytes;
631 * Will allocate a new iovec because this is
632 * the first time here or the current block page
633 * is not next to the previous one.
635 new_iov(iov, iov_cnt);
636 (*iov)->iov_base = (void __user *)to_offset;
637 (*iov)->iov_len = copy_bytes;
641 offset = DATA_BLOCK_SIZE - block_remaining;
643 from + sg->length - sg_remaining,
645 tcmu_flush_dcache_range(to, copy_bytes);
648 sg_remaining -= copy_bytes;
649 block_remaining -= copy_bytes;
651 kunmap_atomic(from - sg->offset);
658 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
661 struct se_cmd *se_cmd = cmd->se_cmd;
663 int block_remaining = 0;
664 void *from = NULL, *to;
665 size_t copy_bytes, offset;
666 struct scatterlist *sg, *data_sg;
668 unsigned int data_nents;
672 data_sg = se_cmd->t_data_sg;
673 data_nents = se_cmd->t_data_nents;
677 * For the bidi case, the first count blocks are for the Data-Out
678 * buffer blocks, and before gathering the Data-In buffer
679 * the Data-Out buffer blocks should be discarded.
681 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
683 data_sg = se_cmd->t_bidi_data_sg;
684 data_nents = se_cmd->t_bidi_data_nents;
687 tcmu_cmd_set_dbi_cur(cmd, count);
689 for_each_sg(data_sg, sg, data_nents, i) {
690 int sg_remaining = sg->length;
691 to = kmap_atomic(sg_page(sg)) + sg->offset;
692 while (sg_remaining > 0) {
693 if (block_remaining == 0) {
697 block_remaining = DATA_BLOCK_SIZE;
698 dbi = tcmu_cmd_get_dbi(cmd);
699 page = tcmu_get_block_page(udev, dbi);
700 from = kmap_atomic(page);
702 copy_bytes = min_t(size_t, sg_remaining,
704 offset = DATA_BLOCK_SIZE - block_remaining;
705 tcmu_flush_dcache_range(from, copy_bytes);
706 memcpy(to + sg->length - sg_remaining, from + offset,
709 sg_remaining -= copy_bytes;
710 block_remaining -= copy_bytes;
712 kunmap_atomic(to - sg->offset);
718 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
720 return thresh - bitmap_weight(bitmap, thresh);
724 * We can't queue a command until we have space available on the cmd ring *and*
725 * space available on the data area.
727 * Called with ring lock held.
729 static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
730 size_t cmd_size, size_t data_needed)
732 struct tcmu_mailbox *mb = udev->mb_addr;
733 uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
735 size_t space, cmd_needed;
738 tcmu_flush_dcache_range(mb, sizeof(*mb));
740 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
743 * If cmd end-of-ring space is too small then we need space for a PAD
744 * entry plus the original cmd - cmds are internally contiguous.
746 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
747 cmd_needed = cmd_size;
749 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
751 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
752 if (space < cmd_needed) {
753 pr_debug("no cmd space: %u %u %u\n", cmd_head,
754 udev->cmdr_last_cleaned, udev->cmdr_size);
758 /* check for and try to acquire the data blocks needed */
759 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
760 if ((space * DATA_BLOCK_SIZE) < data_needed) {
761 unsigned long blocks_left =
762 (udev->max_blocks - udev->dbi_thresh) + space;
764 if (blocks_left < blocks_needed) {
765 pr_debug("no data space: only %lu available, but ask for %zu\n",
766 blocks_left * DATA_BLOCK_SIZE,
771 udev->dbi_thresh += blocks_needed;
772 if (udev->dbi_thresh > udev->max_blocks)
773 udev->dbi_thresh = udev->max_blocks;
776 return tcmu_get_empty_blocks(udev, cmd);
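/*
 * The base command size is the entry header plus the iov array, clamped
 * to at least sizeof(struct tcmu_cmd_entry), since the response view of
 * the entry (which carries the sense buffer copied out in
 * tcmu_handle_completion) can be larger than a header with only a few iovs.
 */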
779 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
781 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
782 sizeof(struct tcmu_cmd_entry));
785 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
786 size_t base_command_size)
788 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
791 command_size = base_command_size +
792 round_up(scsi_command_size(se_cmd->t_task_cdb),
795 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
800 static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
801 struct timer_list *timer)
803 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
806 if (tcmu_cmd->cmd_id)
809 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
811 pr_err("tcmu: Could not allocate cmd id.\n");
814 tcmu_cmd->cmd_id = cmd_id;
816 pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
817 udev->name, tmo / MSEC_PER_SEC);
823 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
824 mod_timer(timer, tcmu_cmd->deadline);
828 static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
830 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
835 * For backwards compat, if qfull_time_out is not set, use
836 * cmd_time_out, and if that's not set use the default timeout.
838 if (!udev->qfull_time_out)
840 else if (udev->qfull_time_out > 0)
841 tmo = udev->qfull_time_out;
842 else if (udev->cmd_time_out)
843 tmo = udev->cmd_time_out;
847 ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
851 list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
852 pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
853 tcmu_cmd->cmd_id, udev->name);
858 * queue_cmd_ring - queue cmd to ring or internally
859 * @tcmu_cmd: cmd to queue
860 * @scsi_err: TCM error code if failure (-1) returned.
863 * -1 we cannot queue internally or to the ring.
865 * 1 internally queued to wait for ring memory to free.
867 static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
869 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
870 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
871 size_t base_command_size, command_size;
872 struct tcmu_mailbox *mb;
873 struct tcmu_cmd_entry *entry;
878 bool copy_to_data_area;
879 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
881 *scsi_err = TCM_NO_SENSE;
883 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
884 *scsi_err = TCM_LUN_BUSY;
888 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
889 *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
894 * Must be a certain minimum size for response sense info, but
895 * also may be larger if the iov array is large.
897 * We prepare as many iovs as possible for potential uses here,
898 * because it's expensive to tell how many regions are freed in
899 * the bitmap & global data pool, as the size calculated here
900 * will only be used to do the checks.
902 * The size will be recalculated later as actually needed to save
905 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
906 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
908 if (!list_empty(&udev->cmdr_queue))
912 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
913 if ((command_size > (udev->cmdr_size / 2)) ||
914 data_length > udev->data_size) {
915 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
916 "cmd ring/data area\n", command_size, data_length,
917 udev->cmdr_size, udev->data_size);
918 *scsi_err = TCM_INVALID_CDB_FIELD;
922 if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
924 * Don't leave commands partially set up because the unmap
925 * thread might need the blocks to make forward progress.
927 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
928 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
932 /* Insert a PAD if end-of-ring space is too small */
933 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
934 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
936 entry = (void *) mb + CMDR_OFF + cmd_head;
937 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
938 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
939 entry->hdr.cmd_id = 0; /* not used for PAD */
940 entry->hdr.kflags = 0;
941 entry->hdr.uflags = 0;
942 tcmu_flush_dcache_range(entry, sizeof(*entry));
944 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
945 tcmu_flush_dcache_range(mb, sizeof(*mb));
947 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
948 WARN_ON(cmd_head != 0);
951 entry = (void *) mb + CMDR_OFF + cmd_head;
952 memset(entry, 0, command_size);
953 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
955 /* Handle allocating space from the data area */
956 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
957 iov = &entry->req.iov[0];
959 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
960 || se_cmd->se_cmd_flags & SCF_BIDI);
961 scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
962 se_cmd->t_data_nents, &iov, &iov_cnt,
964 entry->req.iov_cnt = iov_cnt;
966 /* Handle BIDI commands */
968 if (se_cmd->se_cmd_flags & SCF_BIDI) {
970 scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
971 se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
974 entry->req.iov_bidi_cnt = iov_cnt;
976 ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
979 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
980 mutex_unlock(&udev->cmdr_lock);
982 *scsi_err = TCM_OUT_OF_RESOURCES;
985 entry->hdr.cmd_id = tcmu_cmd->cmd_id;
988 * Recalculate the command's base size and size according
989 * to the actual needs
991 base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
992 entry->req.iov_bidi_cnt);
993 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
995 tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
997 /* All offsets relative to mb_addr, not start of entry! */
998 cdb_off = CMDR_OFF + cmd_head + base_command_size;
999 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
1000 entry->req.cdb_off = cdb_off;
1001 tcmu_flush_dcache_range(entry, sizeof(*entry));
1003 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1004 tcmu_flush_dcache_range(mb, sizeof(*mb));
1006 /* TODO: only if FLUSH and FUA? */
1007 uio_event_notify(&udev->uio_info);
1012 if (add_to_cmdr_queue(tcmu_cmd)) {
1013 *scsi_err = TCM_OUT_OF_RESOURCES;
1020 static sense_reason_t
1021 tcmu_queue_cmd(struct se_cmd *se_cmd)
1023 struct se_device *se_dev = se_cmd->se_dev;
1024 struct tcmu_dev *udev = TCMU_DEV(se_dev);
1025 struct tcmu_cmd *tcmu_cmd;
1026 sense_reason_t scsi_ret;
1029 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
1031 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1033 mutex_lock(&udev->cmdr_lock);
1034 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1035 mutex_unlock(&udev->cmdr_lock);
1037 tcmu_free_cmd(tcmu_cmd);
1041 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
1043 struct se_cmd *se_cmd = cmd->se_cmd;
1044 struct tcmu_dev *udev = cmd->tcmu_dev;
1047 * cmd has been completed already from timeout, just reclaim
1048 * data area space and free cmd
1050 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
1053 tcmu_cmd_reset_dbi_cur(cmd);
1055 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
1056 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1058 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1059 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1060 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1061 } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
1062 /* Get Data-In buffer before clean up */
1063 gather_data_area(udev, cmd, true);
1064 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1065 gather_data_area(udev, cmd, false);
1066 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1068 } else if (se_cmd->data_direction != DMA_NONE) {
1069 pr_warn("TCMU: data direction was %d!\n",
1070 se_cmd->data_direction);
1073 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1077 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1081 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1083 struct tcmu_mailbox *mb;
1086 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1087 pr_err("ring broken, not handling completions\n");
1092 tcmu_flush_dcache_range(mb, sizeof(*mb));
1094 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1096 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1097 struct tcmu_cmd *cmd;
1099 tcmu_flush_dcache_range(entry, sizeof(*entry));
1101 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
1102 UPDATE_HEAD(udev->cmdr_last_cleaned,
1103 tcmu_hdr_get_len(entry->hdr.len_op),
1107 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
1109 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
1111 pr_err("cmd_id %u not found, ring is broken\n",
1113 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1117 tcmu_handle_completion(cmd, entry);
1119 UPDATE_HEAD(udev->cmdr_last_cleaned,
1120 tcmu_hdr_get_len(entry->hdr.len_op),
1126 if (mb->cmd_tail == mb->cmd_head) {
1127 /* no more pending commands */
1128 del_timer(&udev->cmd_timer);
1130 if (list_empty(&udev->cmdr_queue)) {
1132 * no more pending or waiting commands so try to
1133 * reclaim blocks if needed.
1135 if (atomic_read(&global_db_count) >
1136 tcmu_global_max_blocks)
1137 schedule_delayed_work(&tcmu_unmap_work, 0);
1144 static int tcmu_check_expired_cmd(int id, void *p, void *data)
1146 struct tcmu_cmd *cmd = p;
1147 struct tcmu_dev *udev = cmd->tcmu_dev;
1149 struct se_cmd *se_cmd;
1152 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
1155 if (!time_after(jiffies, cmd->deadline))
1158 is_running = list_empty(&cmd->cmdr_queue_entry);
1159 se_cmd = cmd->se_cmd;
1163 * If cmd_time_out is disabled but qfull is set, the deadline
1164 * will only reflect the qfull timeout. Ignore it.
1166 if (!udev->cmd_time_out)
1169 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1171 * target_complete_cmd will translate this to LUN COMM FAILURE
1173 scsi_status = SAM_STAT_CHECK_CONDITION;
1175 list_del_init(&cmd->cmdr_queue_entry);
1177 idr_remove(&udev->commands, id);
1179 scsi_status = SAM_STAT_TASK_SET_FULL;
1182 pr_debug("Timing out cmd %u on dev %s that is %s.\n",
1183 id, udev->name, is_running ? "inflight" : "queued");
1185 target_complete_cmd(se_cmd, scsi_status);
1189 static void tcmu_device_timedout(struct tcmu_dev *udev)
1191 spin_lock(&timed_out_udevs_lock);
1192 if (list_empty(&udev->timedout_entry))
1193 list_add_tail(&udev->timedout_entry, &timed_out_udevs);
1194 spin_unlock(&timed_out_udevs_lock);
1196 schedule_delayed_work(&tcmu_unmap_work, 0);
1199 static void tcmu_cmd_timedout(struct timer_list *t)
1201 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
1203 pr_debug("%s cmd timeout has expired\n", udev->name);
1204 tcmu_device_timedout(udev);
1207 static void tcmu_qfull_timedout(struct timer_list *t)
1209 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
1211 pr_debug("%s qfull timeout has expired\n", udev->name);
1212 tcmu_device_timedout(udev);
1215 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
1217 struct tcmu_hba *tcmu_hba;
1219 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
1223 tcmu_hba->host_id = host_id;
1224 hba->hba_ptr = tcmu_hba;
1229 static void tcmu_detach_hba(struct se_hba *hba)
1231 kfree(hba->hba_ptr);
1232 hba->hba_ptr = NULL;
1235 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1237 struct tcmu_dev *udev;
1239 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
1242 kref_init(&udev->kref);
1244 udev->name = kstrdup(name, GFP_KERNEL);
1251 udev->cmd_time_out = TCMU_TIME_OUT;
1252 udev->qfull_time_out = -1;
1254 udev->max_blocks = DATA_BLOCK_BITS_DEF;
1255 mutex_init(&udev->cmdr_lock);
1257 INIT_LIST_HEAD(&udev->timedout_entry);
1258 INIT_LIST_HEAD(&udev->cmdr_queue);
1259 idr_init(&udev->commands);
1261 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
1262 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
1264 init_waitqueue_head(&udev->nl_cmd_wq);
1265 spin_lock_init(&udev->nl_cmd_lock);
1267 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1269 return &udev->se_dev;
1272 static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
1274 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1276 bool drained = true;
1277 sense_reason_t scsi_ret;
1280 if (list_empty(&udev->cmdr_queue))
1283 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1285 list_splice_init(&udev->cmdr_queue, &cmds);
1287 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
1288 list_del_init(&tcmu_cmd->cmdr_queue_entry);
1290 pr_debug("removing cmd %u on dev %s from queue\n",
1291 tcmu_cmd->cmd_id, udev->name);
1294 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
1296 * We were not able to even start the command, so
1297 * fail with busy to allow a retry in case runner
1298 * was only temporarily down. If the device is being
1299 * removed then LIO core will do the right thing and
1302 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1303 tcmu_free_cmd(tcmu_cmd);
1307 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1309 pr_debug("cmd %u on dev %s failed with %u\n",
1310 tcmu_cmd->cmd_id, udev->name, scsi_ret);
1312 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
1314 * Ignore scsi_ret for now. target_complete_cmd
1317 target_complete_cmd(tcmu_cmd->se_cmd,
1318 SAM_STAT_CHECK_CONDITION);
1319 tcmu_free_cmd(tcmu_cmd);
1320 } else if (ret > 0) {
1321 pr_debug("ran out of space during cmdr queue run\n");
1323 * cmd was requeued, so just put all cmds back in
1326 list_splice_tail(&cmds, &udev->cmdr_queue);
1331 if (list_empty(&udev->cmdr_queue))
1332 del_timer(&udev->qfull_timer);
1337 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1339 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1341 mutex_lock(&udev->cmdr_lock);
1342 tcmu_handle_completions(udev);
1343 run_cmdr_queue(udev, false);
1344 mutex_unlock(&udev->cmdr_lock);
1350 * mmap code from uio.c. Copied here because we want to hook mmap()
1351 * and this stuff must come along.
1353 static int tcmu_find_mem_index(struct vm_area_struct *vma)
1355 struct tcmu_dev *udev = vma->vm_private_data;
1356 struct uio_info *info = &udev->uio_info;
1358 if (vma->vm_pgoff < MAX_UIO_MAPS) {
1359 if (info->mem[vma->vm_pgoff].size == 0)
1361 return (int)vma->vm_pgoff;
1366 static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
1370 mutex_lock(&udev->cmdr_lock);
1371 page = tcmu_get_block_page(udev, dbi);
1373 mutex_unlock(&udev->cmdr_lock);
1378 * Userspace messed up and passed in an address not in the
1379 * data iov passed to it.
1381 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
1384 mutex_unlock(&udev->cmdr_lock);
1389 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1391 struct tcmu_dev *udev = vmf->vma->vm_private_data;
1392 struct uio_info *info = &udev->uio_info;
1394 unsigned long offset;
1397 int mi = tcmu_find_mem_index(vmf->vma);
1399 return VM_FAULT_SIGBUS;
1402 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1405 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
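/*
 * For example, an offset below data_off falls in the vmalloc'ed
 * mailbox/command ring pages, while anything at or beyond data_off
 * selects a dynamically allocated data block by index.
 */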
1407 if (offset < udev->data_off) {
1408 /* For the vmalloc()ed cmd area pages */
1409 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1410 page = vmalloc_to_page(addr);
1414 /* For the dynamically growing data area pages */
1415 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1416 page = tcmu_try_get_block_page(udev, dbi);
1418 return VM_FAULT_SIGBUS;
1426 static const struct vm_operations_struct tcmu_vm_ops = {
1427 .fault = tcmu_vma_fault,
1430 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1432 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1434 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1435 vma->vm_ops = &tcmu_vm_ops;
1437 vma->vm_private_data = udev;
1439 /* Ensure the mmap is exactly the right size */
1440 if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
1446 static int tcmu_open(struct uio_info *info, struct inode *inode)
1448 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1450 /* O_EXCL not supported for char devs, so fake it? */
1451 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1454 udev->inode = inode;
1455 kref_get(&udev->kref);
1462 static void tcmu_dev_call_rcu(struct rcu_head *p)
1464 struct se_device *dev = container_of(p, struct se_device, rcu_head);
1465 struct tcmu_dev *udev = TCMU_DEV(dev);
1467 kfree(udev->uio_info.name);
1472 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1474 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1475 kmem_cache_free(tcmu_cmd_cache, cmd);
1481 static void tcmu_blocks_release(struct radix_tree_root *blocks,
1487 for (i = start; i < end; i++) {
1488 page = radix_tree_delete(blocks, i);
1491 atomic_dec(&global_db_count);
1496 static void tcmu_dev_kref_release(struct kref *kref)
1498 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1499 struct se_device *dev = &udev->se_dev;
1500 struct tcmu_cmd *cmd;
1501 bool all_expired = true;
1504 vfree(udev->mb_addr);
1505 udev->mb_addr = NULL;
1507 spin_lock_bh(&timed_out_udevs_lock);
1508 if (!list_empty(&udev->timedout_entry))
1509 list_del(&udev->timedout_entry);
1510 spin_unlock_bh(&timed_out_udevs_lock);
1512 /* Upper layer should drain all requests before calling this */
1513 mutex_lock(&udev->cmdr_lock);
1514 idr_for_each_entry(&udev->commands, cmd, i) {
1515 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1516 all_expired = false;
1518 idr_destroy(&udev->commands);
1519 WARN_ON(!all_expired);
1521 tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
1522 kfree(udev->data_bitmap);
1523 mutex_unlock(&udev->cmdr_lock);
1525 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1528 static int tcmu_release(struct uio_info *info, struct inode *inode)
1530 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1532 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1534 pr_debug("close\n");
1535 /* release ref from open */
1536 kref_put(&udev->kref, tcmu_dev_kref_release);
1540 static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1542 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1544 if (!tcmu_kern_cmd_reply_supported)
1547 if (udev->nl_reply_supported <= 0)
1551 spin_lock(&udev->nl_cmd_lock);
1553 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1554 spin_unlock(&udev->nl_cmd_lock);
1555 pr_debug("sleeping for open nl cmd\n");
1556 wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
1560 memset(nl_cmd, 0, sizeof(*nl_cmd));
1562 init_completion(&nl_cmd->complete);
1564 spin_unlock(&udev->nl_cmd_lock);
1567 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1569 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1571 DEFINE_WAIT(__wait);
1573 if (!tcmu_kern_cmd_reply_supported)
1576 if (udev->nl_reply_supported <= 0)
1579 pr_debug("sleeping for nl reply\n");
1580 wait_for_completion(&nl_cmd->complete);
1582 spin_lock(&udev->nl_cmd_lock);
1583 nl_cmd->cmd = TCMU_CMD_UNSPEC;
1584 ret = nl_cmd->status;
1586 spin_unlock(&udev->nl_cmd_lock);
1588 wake_up_all(&udev->nl_cmd_wq);
1593 static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
1594 int reconfig_attr, const void *reconfig_data)
1596 struct sk_buff *skb;
1600 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1604 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1608 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1612 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1616 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1620 if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
1621 switch (reconfig_attr) {
1622 case TCMU_ATTR_DEV_CFG:
1623 ret = nla_put_string(skb, reconfig_attr, reconfig_data);
1625 case TCMU_ATTR_DEV_SIZE:
1626 ret = nla_put_u64_64bit(skb, reconfig_attr,
1627 *((u64 *)reconfig_data),
1630 case TCMU_ATTR_WRITECACHE:
1631 ret = nla_put_u8(skb, reconfig_attr,
1632 *((u8 *)reconfig_data));
1642 genlmsg_end(skb, msg_header);
1644 tcmu_init_genl_cmd_reply(udev, cmd);
1646 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1647 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1648 /* We don't care if no one is listening */
1652 ret = tcmu_wait_genl_cmd_reply(udev);
1660 static int tcmu_netlink_event_init(struct tcmu_dev *udev,
1661 enum tcmu_genl_cmd cmd,
1662 struct sk_buff **buf, void **hdr)
1664 struct sk_buff *skb;
1668 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1672 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1676 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1680 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1684 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1697 static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1698 enum tcmu_genl_cmd cmd,
1699 struct sk_buff **buf, void **hdr)
1702 struct sk_buff *skb = *buf;
1703 void *msg_header = *hdr;
1705 genlmsg_end(skb, msg_header);
1707 tcmu_init_genl_cmd_reply(udev, cmd);
1709 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1710 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1711 /* We don't care if no one is listening */
1715 ret = tcmu_wait_genl_cmd_reply(udev);
1719 static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
1721 struct sk_buff *skb = NULL;
1722 void *msg_header = NULL;
1725 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
1729 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
1734 static int tcmu_update_uio_info(struct tcmu_dev *udev)
1736 struct tcmu_hba *hba = udev->hba->hba_ptr;
1737 struct uio_info *info;
1741 info = &udev->uio_info;
1742 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
1744 size += 1; /* for \0 */
1745 str = kmalloc(size, GFP_KERNEL);
1749 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
1750 if (udev->dev_config[0])
1751 snprintf(str + used, size - used, "/%s", udev->dev_config);
1753 /* If the old string exists, free it */
1760 static int tcmu_configure_device(struct se_device *dev)
1762 struct tcmu_dev *udev = TCMU_DEV(dev);
1763 struct uio_info *info;
1764 struct tcmu_mailbox *mb;
1767 ret = tcmu_update_uio_info(udev);
1771 info = &udev->uio_info;
1773 udev->data_bitmap = kzalloc(BITS_TO_LONGS(udev->max_blocks) *
1774 sizeof(unsigned long), GFP_KERNEL);
1775 if (!udev->data_bitmap) {
1777 goto err_bitmap_alloc;
1780 udev->mb_addr = vzalloc(CMDR_SIZE);
1781 if (!udev->mb_addr) {
1786 /* mailbox fits in first part of CMDR space */
1787 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
1788 udev->data_off = CMDR_SIZE;
1789 udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
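/*
 * With the default of 256K blocks of PAGE_SIZE (4 KiB pages) this is
 * 1 GiB, so the UIO map registered below covers CMDR_SIZE + 1 GiB.
 */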
1790 udev->dbi_thresh = 0; /* Default in Idle state */
1792 /* Initialise the mailbox of the ring buffer */
1794 mb->version = TCMU_MAILBOX_VERSION;
1795 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
1796 mb->cmdr_off = CMDR_OFF;
1797 mb->cmdr_size = udev->cmdr_size;
1799 WARN_ON(!PAGE_ALIGNED(udev->data_off));
1800 WARN_ON(udev->data_size % PAGE_SIZE);
1801 WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1803 info->version = __stringify(TCMU_MAILBOX_VERSION);
1805 info->mem[0].name = "tcm-user command & data buffer";
1806 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
1807 info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
1808 info->mem[0].memtype = UIO_MEM_NONE;
1810 info->irqcontrol = tcmu_irqcontrol;
1811 info->irq = UIO_IRQ_CUSTOM;
1813 info->mmap = tcmu_mmap;
1814 info->open = tcmu_open;
1815 info->release = tcmu_release;
1817 ret = uio_register_device(tcmu_root_device, info);
1821 /* User can set hw_block_size before enabling the device */
1822 if (dev->dev_attrib.hw_block_size == 0)
1823 dev->dev_attrib.hw_block_size = 512;
1824 /* Other attributes can be configured in userspace */
1825 if (!dev->dev_attrib.hw_max_sectors)
1826 dev->dev_attrib.hw_max_sectors = 128;
1827 if (!dev->dev_attrib.emulate_write_cache)
1828 dev->dev_attrib.emulate_write_cache = 0;
1829 dev->dev_attrib.hw_queue_depth = 128;
1831 /* If user didn't explicitly disable netlink reply support, use
1832 * module scope setting.
1834 if (udev->nl_reply_supported >= 0)
1835 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
1838 * Get a ref in case userspace does a close on the uio device before
1839 * LIO has initiated tcmu_free_device.
1841 kref_get(&udev->kref);
1843 ret = tcmu_send_dev_add_event(udev);
1847 mutex_lock(&root_udev_mutex);
1848 list_add(&udev->node, &root_udev);
1849 mutex_unlock(&root_udev_mutex);
1854 kref_put(&udev->kref, tcmu_dev_kref_release);
1855 uio_unregister_device(&udev->uio_info);
1857 vfree(udev->mb_addr);
1858 udev->mb_addr = NULL;
1860 kfree(udev->data_bitmap);
1861 udev->data_bitmap = NULL;
1869 static bool tcmu_dev_configured(struct tcmu_dev *udev)
1871 return udev->uio_info.uio_dev ? true : false;
1874 static void tcmu_free_device(struct se_device *dev)
1876 struct tcmu_dev *udev = TCMU_DEV(dev);
1878 /* release ref from init */
1879 kref_put(&udev->kref, tcmu_dev_kref_release);
1882 static void tcmu_destroy_device(struct se_device *dev)
1884 struct tcmu_dev *udev = TCMU_DEV(dev);
1886 del_timer_sync(&udev->cmd_timer);
1887 del_timer_sync(&udev->qfull_timer);
1889 mutex_lock(&root_udev_mutex);
1890 list_del(&udev->node);
1891 mutex_unlock(&root_udev_mutex);
1893 tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
1895 uio_unregister_device(&udev->uio_info);
1897 /* release ref from configure */
1898 kref_put(&udev->kref, tcmu_dev_kref_release);
1901 static void tcmu_unblock_dev(struct tcmu_dev *udev)
1903 mutex_lock(&udev->cmdr_lock);
1904 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
1905 mutex_unlock(&udev->cmdr_lock);
1908 static void tcmu_block_dev(struct tcmu_dev *udev)
1910 mutex_lock(&udev->cmdr_lock);
1912 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
1915 /* complete IO that has executed successfully */
1916 tcmu_handle_completions(udev);
1917 /* fail IO waiting to be queued */
1918 run_cmdr_queue(udev, true);
1921 mutex_unlock(&udev->cmdr_lock);
1924 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
1926 struct tcmu_mailbox *mb;
1927 struct tcmu_cmd *cmd;
1930 mutex_lock(&udev->cmdr_lock);
1932 idr_for_each_entry(&udev->commands, cmd, i) {
1933 if (!list_empty(&cmd->cmdr_queue_entry))
1936 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
1937 cmd->cmd_id, udev->name,
1938 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
1940 idr_remove(&udev->commands, i);
1941 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1942 if (err_level == 1) {
1944 * Userspace was not able to start the
1945 * command or it is retryable.
1947 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
1950 target_complete_cmd(cmd->se_cmd,
1951 SAM_STAT_CHECK_CONDITION);
1954 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1959 tcmu_flush_dcache_range(mb, sizeof(*mb));
1960 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
1961 mb->cmd_tail, mb->cmd_head);
1963 udev->cmdr_last_cleaned = 0;
1966 tcmu_flush_dcache_range(mb, sizeof(*mb));
1968 del_timer(&udev->cmd_timer);
1970 mutex_unlock(&udev->cmdr_lock);
1974 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
1975 Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
1978 static match_table_t tokens = {
1979 {Opt_dev_config, "dev_config=%s"},
1980 {Opt_dev_size, "dev_size=%u"},
1981 {Opt_hw_block_size, "hw_block_size=%u"},
1982 {Opt_hw_max_sectors, "hw_max_sectors=%u"},
1983 {Opt_nl_reply_supported, "nl_reply_supported=%d"},
1984 {Opt_max_data_area_mb, "max_data_area_mb=%u"},
1988 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
1990 unsigned long tmp_ul;
1994 arg_p = match_strdup(arg);
1998 ret = kstrtoul(arg_p, 0, &tmp_ul);
2001 pr_err("kstrtoul() failed for dev attrib\n");
2005 pr_err("dev attrib must be nonzero\n");
2008 *dev_attrib = tmp_ul;
2012 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2013 const char *page, ssize_t count)
2015 struct tcmu_dev *udev = TCMU_DEV(dev);
2016 char *orig, *ptr, *opts, *arg_p;
2017 substring_t args[MAX_OPT_ARGS];
2018 int ret = 0, token, tmpval;
2020 opts = kstrdup(page, GFP_KERNEL);
2026 while ((ptr = strsep(&opts, ",\n")) != NULL) {
2030 token = match_token(ptr, tokens, args);
2032 case Opt_dev_config:
2033 if (match_strlcpy(udev->dev_config, &args[0],
2034 TCMU_CONFIG_LEN) == 0) {
2038 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2041 arg_p = match_strdup(&args[0]);
2046 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
2049 pr_err("kstrtoul() failed for dev_size=\n");
2051 case Opt_hw_block_size:
2052 ret = tcmu_set_dev_attrib(&args[0],
2053 &(dev->dev_attrib.hw_block_size));
2055 case Opt_hw_max_sectors:
2056 ret = tcmu_set_dev_attrib(&args[0],
2057 &(dev->dev_attrib.hw_max_sectors));
2059 case Opt_nl_reply_supported:
2060 arg_p = match_strdup(&args[0]);
2065 ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
2068 pr_err("kstrtoint() failed for nl_reply_supported=\n");
2070 case Opt_max_data_area_mb:
2071 if (dev->export_count) {
2072 pr_err("Unable to set max_data_area_mb while exports exist\n");
2077 arg_p = match_strdup(&args[0]);
2082 ret = kstrtoint(arg_p, 0, &tmpval);
2085 pr_err("kstrtoint() failed for max_data_area_mb=\n");
2090 pr_err("Invalid max_data_area %d\n", tmpval);
2095 udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
2096 if (udev->max_blocks > tcmu_global_max_blocks) {
2097 pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2099 TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
2100 udev->max_blocks = tcmu_global_max_blocks;
2112 return (!ret) ? count : ret;
2115 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2117 struct tcmu_dev *udev = TCMU_DEV(dev);
2120 bl = sprintf(b + bl, "Config: %s ",
2121 udev->dev_config[0] ? udev->dev_config : "NULL");
2122 bl += sprintf(b + bl, "Size: %zu ", udev->dev_size);
2123 bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
2124 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2129 static sector_t tcmu_get_blocks(struct se_device *dev)
2131 struct tcmu_dev *udev = TCMU_DEV(dev);
2133 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2134 dev->dev_attrib.block_size);
2137 static sense_reason_t
2138 tcmu_parse_cdb(struct se_cmd *cmd)
2140 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2143 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2145 struct se_dev_attrib *da = container_of(to_config_group(item),
2146 struct se_dev_attrib, da_group);
2147 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2149 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2152 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2155 struct se_dev_attrib *da = container_of(to_config_group(item),
2156 struct se_dev_attrib, da_group);
2157 struct tcmu_dev *udev = container_of(da->da_dev,
2158 struct tcmu_dev, se_dev);
2162 if (da->da_dev->export_count) {
2163 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2167 ret = kstrtou32(page, 0, &val);
2171 udev->cmd_time_out = val * MSEC_PER_SEC;
2174 CONFIGFS_ATTR(tcmu_, cmd_time_out);
2176 static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2178 struct se_dev_attrib *da = container_of(to_config_group(item),
2179 struct se_dev_attrib, da_group);
2180 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2182 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2183 udev->qfull_time_out :
2184 udev->qfull_time_out / MSEC_PER_SEC);
2187 static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2188 const char *page, size_t count)
2190 struct se_dev_attrib *da = container_of(to_config_group(item),
2191 struct se_dev_attrib, da_group);
2192 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2196 ret = kstrtos32(page, 0, &val);
2201 udev->qfull_time_out = val * MSEC_PER_SEC;
2203 printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
2208 CONFIGFS_ATTR(tcmu_, qfull_time_out);
2210 static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2212 struct se_dev_attrib *da = container_of(to_config_group(item),
2213 struct se_dev_attrib, da_group);
2214 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2216 return snprintf(page, PAGE_SIZE, "%u\n",
2217 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2219 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2221 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2223 struct se_dev_attrib *da = container_of(to_config_group(item),
2224 struct se_dev_attrib, da_group);
2225 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2227 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2230 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2233 struct se_dev_attrib *da = container_of(to_config_group(item),
2234 struct se_dev_attrib, da_group);
2235 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2239 if (!len || len > TCMU_CONFIG_LEN - 1)
2242 /* Check if device has been configured before */
2243 if (tcmu_dev_configured(udev)) {
2244 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
2245 TCMU_ATTR_DEV_CFG, page);
2247 pr_err("Unable to reconfigure device\n");
2250 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2252 ret = tcmu_update_uio_info(udev);
2257 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2261 CONFIGFS_ATTR(tcmu_, dev_config);
2263 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2265 struct se_dev_attrib *da = container_of(to_config_group(item),
2266 struct se_dev_attrib, da_group);
2267 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2269 return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
2272 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2275 struct se_dev_attrib *da = container_of(to_config_group(item),
2276 struct se_dev_attrib, da_group);
2277 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2281 ret = kstrtou64(page, 0, &val);
2285 /* Check if device has been configured before */
2286 if (tcmu_dev_configured(udev)) {
2287 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
2288 TCMU_ATTR_DEV_SIZE, &val);
2290 pr_err("Unable to reconfigure device\n");
2294 udev->dev_size = val;
2297 CONFIGFS_ATTR(tcmu_, dev_size);
2299 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2302 struct se_dev_attrib *da = container_of(to_config_group(item),
2303 struct se_dev_attrib, da_group);
2304 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2306 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2309 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2310 const char *page, size_t count)
2312 struct se_dev_attrib *da = container_of(to_config_group(item),
2313 struct se_dev_attrib, da_group);
2314 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2318 ret = kstrtos8(page, 0, &val);
2322 udev->nl_reply_supported = val;
2325 CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2327 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2330 struct se_dev_attrib *da = container_of(to_config_group(item),
2331 struct se_dev_attrib, da_group);
2333 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2336 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2337 const char *page, size_t count)
2339 struct se_dev_attrib *da = container_of(to_config_group(item),
2340 struct se_dev_attrib, da_group);
2341 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2345 ret = kstrtou8(page, 0, &val);
2349 /* Check if device has been configured before */
2350 if (tcmu_dev_configured(udev)) {
2351 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
2352 TCMU_ATTR_WRITECACHE, &val);
2354 pr_err("Unable to reconfigure device\n");
2359 da->emulate_write_cache = val;
2362 CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2364 static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
2366 struct se_device *se_dev = container_of(to_config_group(item),
2369 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2371 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2372 return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
2374 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
2377 static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
2380 struct se_device *se_dev = container_of(to_config_group(item),
2383 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2387 ret = kstrtou8(page, 0, &val);
2392 pr_err("Invalid block value %d\n", val);
2397 tcmu_unblock_dev(udev);
2399 tcmu_block_dev(udev);
2402 CONFIGFS_ATTR(tcmu_, block_dev);
2404 static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
2407 struct se_device *se_dev = container_of(to_config_group(item),
2410 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2414 ret = kstrtou8(page, 0, &val);
2418 if (val != 1 && val != 2) {
2419 pr_err("Invalid reset ring value %d\n", val);
2423 tcmu_reset_ring(udev, val);
2426 CONFIGFS_ATTR_WO(tcmu_, reset_ring);
2428 static struct configfs_attribute *tcmu_attrib_attrs[] = {
2429 &tcmu_attr_cmd_time_out,
2430 &tcmu_attr_qfull_time_out,
2431 &tcmu_attr_max_data_area_mb,
2432 &tcmu_attr_dev_config,
2433 &tcmu_attr_dev_size,
2434 &tcmu_attr_emulate_write_cache,
2435 &tcmu_attr_nl_reply_supported,
2439 static struct configfs_attribute **tcmu_attrs;
2441 static struct configfs_attribute *tcmu_action_attrs[] = {
2442 &tcmu_attr_block_dev,
2443 &tcmu_attr_reset_ring,
2447 static struct target_backend_ops tcmu_ops = {
2449 .owner = THIS_MODULE,
2450 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
2451 .attach_hba = tcmu_attach_hba,
2452 .detach_hba = tcmu_detach_hba,
2453 .alloc_device = tcmu_alloc_device,
2454 .configure_device = tcmu_configure_device,
2455 .destroy_device = tcmu_destroy_device,
2456 .free_device = tcmu_free_device,
2457 .parse_cdb = tcmu_parse_cdb,
2458 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
2459 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
2460 .get_device_type = sbc_get_device_type,
2461 .get_blocks = tcmu_get_blocks,
2462 .tb_dev_action_attrs = tcmu_action_attrs,
2465 static void find_free_blocks(void)
2467 struct tcmu_dev *udev;
2469 u32 start, end, block, total_freed = 0;
2471 if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
2474 mutex_lock(&root_udev_mutex);
2475 list_for_each_entry(udev, &root_udev, node) {
2476 mutex_lock(&udev->cmdr_lock);
2478 /* Try to complete the finished commands first */
2479 tcmu_handle_completions(udev);
2481 /* Skip the udevs in idle */
2482 if (!udev->dbi_thresh) {
2483 mutex_unlock(&udev->cmdr_lock);
2487 end = udev->dbi_max + 1;
2488 block = find_last_bit(udev->data_bitmap, end);
2489 if (block == udev->dbi_max) {
2491 * The last bit is dbi_max, so it is not possible to
2492 * reclaim any blocks.
2494 mutex_unlock(&udev->cmdr_lock);
2496 } else if (block == end) {
2497 /* The current udev will go to the idle state */
2498 udev->dbi_thresh = start = 0;
2501 udev->dbi_thresh = start = block + 1;
2502 udev->dbi_max = block;
2505 /* Unmap the data area pages from off to the end */
2506 off = udev->data_off + start * DATA_BLOCK_SIZE;
2507 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
2509 /* Release the block pages */
2510 tcmu_blocks_release(&udev->data_blocks, start, end);
2511 mutex_unlock(&udev->cmdr_lock);
2513 total_freed += end - start;
2514 pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
2515 total_freed, udev->name);
2517 mutex_unlock(&root_udev_mutex);
2519 if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
2520 schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
2523 static void check_timedout_devices(void)
2525 struct tcmu_dev *udev, *tmp_dev;
2528 spin_lock_bh(&timed_out_udevs_lock);
2529 list_splice_init(&timed_out_udevs, &devs);
2531 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
2532 list_del_init(&udev->timedout_entry);
2533 spin_unlock_bh(&timed_out_udevs_lock);
2535 mutex_lock(&udev->cmdr_lock);
2536 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
2537 mutex_unlock(&udev->cmdr_lock);
2539 spin_lock_bh(&timed_out_udevs_lock);
2542 spin_unlock_bh(&timed_out_udevs_lock);
2545 static void tcmu_unmap_work_fn(struct work_struct *work)
2547 check_timedout_devices();
2551 static int __init tcmu_module_init(void)
2553 int ret, i, k, len = 0;
2555 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
2557 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
2559 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
2560 sizeof(struct tcmu_cmd),
2561 __alignof__(struct tcmu_cmd),
2563 if (!tcmu_cmd_cache)
2566 tcmu_root_device = root_device_register("tcm_user");
2567 if (IS_ERR(tcmu_root_device)) {
2568 ret = PTR_ERR(tcmu_root_device);
2569 goto out_free_cache;
2572 ret = genl_register_family(&tcmu_genl_family);
2574 goto out_unreg_device;
2577 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
2578 len += sizeof(struct configfs_attribute *);
2580 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
2581 len += sizeof(struct configfs_attribute *);
2583 len += sizeof(struct configfs_attribute *);
2585 tcmu_attrs = kzalloc(len, GFP_KERNEL);
2588 goto out_unreg_genl;
2591 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
2592 tcmu_attrs[i] = passthrough_attrib_attrs[i];
2594 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
2595 tcmu_attrs[i] = tcmu_attrib_attrs[k];
2598 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
2600 ret = transport_backend_register(&tcmu_ops);
2609 genl_unregister_family(&tcmu_genl_family);
2611 root_device_unregister(tcmu_root_device);
2613 kmem_cache_destroy(tcmu_cmd_cache);
2618 static void __exit tcmu_module_exit(void)
2620 cancel_delayed_work_sync(&tcmu_unmap_work);
2621 target_backend_unregister(&tcmu_ops);
2623 genl_unregister_family(&tcmu_genl_family);
2624 root_device_unregister(tcmu_root_device);
2625 kmem_cache_destroy(tcmu_cmd_cache);
2628 MODULE_DESCRIPTION("TCM USER subsystem plugin");
2629 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
2630 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
2631 MODULE_LICENSE("GPL");
2633 module_init(tcmu_module_init);
2634 module_exit(tcmu_module_exit);