// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT	(30 * MSEC_PER_SEC)

/* For mailbox plus cmd ring, the size is fixed 8MB */
#define MB_CMDR_SIZE	(8 * 1024 * 1024)
/* Offset of cmd ring is size of mailbox */
#define CMDR_OFF	sizeof(struct tcmu_mailbox)
#define CMDR_SIZE	(MB_CMDR_SIZE - CMDR_OFF)

/*
 * For data area, the default block size is PAGE_SIZE and
 * the default total size is 256K * PAGE_SIZE.
 */
#define DATA_PAGES_PER_BLK_DEF	1
#define DATA_AREA_PAGES_DEF	(256 * 1024)

#define TCMU_MBS_TO_PAGES(_mbs)	((size_t)_mbs << (20 - PAGE_SHIFT))
#define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
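
/*
 * A worked example of the two conversion macros above, assuming 4 KiB
 * pages (PAGE_SHIFT == 12): TCMU_MBS_TO_PAGES(1) == 1 << (20 - 12) ==
 * 256 pages per MiB, and TCMU_PAGES_TO_MBS(256) == 1. With those
 * defaults, DATA_AREA_PAGES_DEF (256 * 1024 pages) describes a 1 GiB
 * data area built from single-page blocks.
 */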

/*
 * Default number of global data pages (512K * PAGE_SIZE)
 * above which the unmap thread is started.
 */
#define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)

static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);

struct tcmu_dev;

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	struct list_head nl_list;
	struct tcmu_dev *udev;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;
	struct se_dev_plug se_plug;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
#define TCM_DEV_BIT_PLUGGED 4
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	uint64_t dev_size;

	struct tcmu_mailbox *mb_addr;
	void *cmdr;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	int data_area_mb;
	uint32_t max_blocks;
	size_t mmap_pages;

	struct mutex cmdr_lock;
	struct list_head qfull_queue;
	struct list_head tmr_queue;

	uint32_t dbi_max;
	uint32_t dbi_thresh;
	unsigned long *data_bitmap;
	struct xarray data_pages;
	uint32_t data_pages_per_blk;
	uint32_t data_blk_size;

	struct xarray commands;

	struct timer_list cmd_timer;
	unsigned int cmd_time_out;
	struct list_head inflight_queue;

	struct timer_list qfull_timer;
	int qfull_time_out;

	struct list_head timedout_entry;

	struct tcmu_nl_cmd curr_nl_cmd;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;
	struct list_head queue_entry;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_bidi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	uint32_t data_len_bidi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

struct tcmu_tmr {
	struct list_head queue_entry;

	uint8_t tmr_type;
	uint32_t tmr_cmd_cnt;
	int16_t tmr_cmd_ids[];
};

/*
 * To avoid deadlock, the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_page_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;

static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
{
	int ret, max_area_mb;

	ret = kstrtoint(str, 10, &max_area_mb);
	if (ret)
		return -EINVAL;

	if (max_area_mb <= 0) {
		pr_err("global_max_data_area must be larger than 0.\n");
		return -EINVAL;
	}

	tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
		schedule_delayed_work(&tcmu_unmap_work, 0);
	else
		cancel_delayed_work_sync(&tcmu_unmap_work);

	return 0;
}

static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
{
	return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
}

static const struct kernel_param_ops tcmu_global_max_data_area_op = {
	.set = tcmu_set_global_max_data_area,
	.get = tcmu_get_global_max_data_area,
};

module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
		S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
		 "Max MBs allowed to be allocated to all the tcmu devices' "
		 "data areas.");

static int tcmu_get_block_netlink(char *buffer,
				  const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
		       "blocked" : "unblocked");
}

static int tcmu_set_block_netlink(const char *str,
				  const struct kernel_param *kp)
{
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block netlink value %u\n", val);
		return -EINVAL;
	}

	tcmu_netlink_blocked = val;
	return 0;
}

static const struct kernel_param_ops tcmu_block_netlink_op = {
	.set = tcmu_set_block_netlink,
	.get = tcmu_get_block_netlink,
};

module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");

static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{
	struct tcmu_dev *udev = nl_cmd->udev;

	if (!tcmu_netlink_blocked) {
		pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
		return -EBUSY;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
		nl_cmd->status = -EINTR;
		list_del(&nl_cmd->nl_list);
		complete(&nl_cmd->complete);
	}
	return 0;
}

static int tcmu_set_reset_netlink(const char *str,
				  const struct kernel_param *kp)
{
	struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1) {
		pr_err("Invalid reset netlink value %u\n", val);
		return -EINVAL;
	}

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
		ret = tcmu_fail_netlink_cmd(nl_cmd);
		if (ret)
			break;
	}
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static const struct kernel_param_ops tcmu_reset_netlink_op = {
	.set = tcmu_set_reset_netlink,
};

module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct tcmu_dev *udev = NULL;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
		if (nl_cmd->udev->se_dev.dev_index == dev_id) {
			udev = nl_cmd->udev;
			break;
		}
	}

	if (!udev) {
		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		ret = -ENODEV;
		goto unlock;
	}
	list_del(&nl_cmd->nl_list);

	pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
		 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
		 nl_cmd->status);

	if (nl_cmd->cmd != completed_cmd) {
		pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
		       udev->name, completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
		goto unlock;
	}

	nl_cmd->status = rc;
	complete(&nl_cmd->complete);
unlock:
	mutex_unlock(&tcmu_nl_cmd_mutex);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_small_ops tcmu_genl_ops[] = {
	{
		.cmd	= TCMU_CMD_SET_FEATURES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags	= GENL_ADMIN_PERM,
		.doit	= tcmu_genl_set_features,
	},
	{
		.cmd	= TCMU_CMD_ADDED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags	= GENL_ADMIN_PERM,
		.doit	= tcmu_genl_add_dev_done,
	},
	{
		.cmd	= TCMU_CMD_REMOVED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags	= GENL_ADMIN_PERM,
		.doit	= tcmu_genl_rm_dev_done,
	},
	{
		.cmd	= TCMU_CMD_RECONFIG_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags	= GENL_ADMIN_PERM,
		.doit	= tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.policy = tcmu_attr_policy,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.small_ops = tcmu_genl_ops,
	.n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
};

#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
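
/*
 * Illustration of the dbi (data block index) bookkeeping above:
 * tcmu_cmd->dbi[] records the index of every data block a command
 * occupies, and dbi_cur is a cursor into that array. Setup code appends
 * entries with tcmu_cmd_set_dbi(); the completion and copy paths rewind
 * with tcmu_cmd_reset_dbi_cur() and then walk the same blocks again via
 * tcmu_cmd_get_dbi().
 */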

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
				       struct tcmu_cmd *tcmu_cmd,
				       int prev_dbi, int length, int *iov_cnt)
{
	XA_STATE(xas, &udev->data_pages, 0);
	struct page *page;
	int i, cnt, dbi, dpi;
	int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return -1;

	dpi = dbi * udev->data_pages_per_blk;
	/* Count the number of already allocated pages */
	xas_set(&xas, dpi);
	for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
		cnt++;

	for (i = cnt; i < page_cnt; i++) {
		/* try to get new page from the mm */
		page = alloc_page(GFP_NOIO);
		if (!page)
			break;

		if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
			__free_page(page);
			break;
		}
	}
	if (atomic_add_return(i - cnt, &global_page_count) >
			      tcmu_global_max_pages)
		schedule_delayed_work(&tcmu_unmap_work, 0);

	if (i && dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	if (dbi != prev_dbi + 1)
		*iov_cnt += 1;

	return i == page_cnt ? dbi : -1;
}

static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
				 struct tcmu_cmd *tcmu_cmd, int length)
{
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;
	int blk_data_len, iov_cnt = 0;
	uint32_t blk_size = udev->data_blk_size;

	for (; length > 0; length -= blk_size) {
		blk_data_len = min_t(uint32_t, length, blk_size);
		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
					   &iov_cnt);
		if (dbi < 0)
			return -1;
	}
	return iov_cnt;
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
	int i, len;
	struct se_cmd *se_cmd = cmd->se_cmd;
	uint32_t blk_size = cmd->tcmu_dev->data_blk_size;

	cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
			len += se_cmd->t_bidi_data_sg[i].length;
		cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
		cmd->dbi_cnt += cmd->dbi_bidi_cnt;
		cmd->data_len_bidi = len;
	}
}

static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			    struct iovec **iov, int prev_dbi, int len)
{
	/* Get the next dbi */
	int dbi = tcmu_cmd_get_dbi(cmd);

	/* Do not add more than udev->data_blk_size to iov */
	len = min_t(int, len, udev->data_blk_size);

	/*
	 * The following code will gather and map the blocks to the same iovec
	 * when the blocks are all next to each other.
	 */
	if (dbi != prev_dbi + 1) {
		/* dbi is not next to previous dbi, so start new iov */
		if (prev_dbi >= 0)
			(*iov)++;
		/* write offset relative to mb_addr */
		(*iov)->iov_base = (void __user *)
				   (udev->data_off + dbi * udev->data_blk_size);
	}
	(*iov)->iov_len += len;

	return dbi;
}

static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			    struct iovec **iov, int data_length)
{
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;

	/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
	for (; data_length > 0; data_length -= udev->data_blk_size)
		dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}
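
/*
 * Example of the iovec coalescing done by new_block_to_iov(), assuming
 * the default data_pages_per_blk of 1 and 4 KiB pages (4 KiB blocks):
 * a 16 KiB transfer that got blocks 2, 3, 4 and 9 is described by two
 * iovecs, one covering 12 KiB at data_off + 2 * blk_size and one
 * covering 4 KiB at data_off + 9 * blk_size, because only adjacent
 * dbis share an iovec.
 */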

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
	if (!tcmu_cmd)
		return NULL;

	INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_set_block_cnts(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_NOIO);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size + offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(vmalloc_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
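
/*
 * Worked example for the helpers above with size = 100, head = 70 and
 * tail = 30: spc_used() = 40, spc_free() = 100 - 40 - 1 = 59 (one byte
 * is always kept unused so that head == tail can only mean "empty"),
 * and head_to_end() = 30 bytes remain before the ring wraps.
 * UPDATE_HEAD() advances head modulo size with release semantics, so a
 * reader never observes a head that runs ahead of the entry data
 * written before it.
 */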

#define TCMU_SG_TO_DATA_AREA 1
#define TCMU_DATA_AREA_TO_SG 2

static inline void tcmu_copy_data(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd, uint32_t direction,
				  struct scatterlist *sg, unsigned int sg_nents,
				  struct iovec **iov, size_t data_len)
{
	XA_STATE(xas, &udev->data_pages, 0);
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;
	size_t page_remaining, cp_len;
	int page_cnt, page_inx;
	struct sg_mapping_iter sg_iter;
	unsigned int sg_flags;
	struct page *page;
	void *data_page_start, *data_addr;

	if (direction == TCMU_SG_TO_DATA_AREA)
		sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
	else
		sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
	sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);

	while (data_len) {
		if (direction == TCMU_SG_TO_DATA_AREA)
			dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
					       data_len);
		else
			dbi = tcmu_cmd_get_dbi(tcmu_cmd);

		page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
		if (page_cnt > udev->data_pages_per_blk)
			page_cnt = udev->data_pages_per_blk;

		xas_set(&xas, dbi * udev->data_pages_per_blk);
		for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
			page = xas_next(&xas);

			if (direction == TCMU_DATA_AREA_TO_SG)
				flush_dcache_page(page);
			data_page_start = kmap_atomic(page);
			page_remaining = PAGE_SIZE;

			while (page_remaining && data_len) {
				if (!sg_miter_next(&sg_iter)) {
					/* set length to 0 to abort outer loop */
					data_len = 0;
					pr_debug("%s: aborting data copy due to exhausted sg_list\n",
						 __func__);
					break;
				}
				cp_len = min3(sg_iter.length, page_remaining,
					      data_len);

				data_addr = data_page_start +
					    PAGE_SIZE - page_remaining;
				if (direction == TCMU_SG_TO_DATA_AREA)
					memcpy(data_addr, sg_iter.addr, cp_len);
				else
					memcpy(sg_iter.addr, data_addr, cp_len);

				data_len -= cp_len;
				page_remaining -= cp_len;
				sg_iter.consumed = cp_len;
			}
			sg_miter_stop(&sg_iter);

			kunmap_atomic(data_page_start);
			if (direction == TCMU_SG_TO_DATA_AREA)
				flush_dcache_page(page);
		}
	}
}

static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
			      struct iovec **iov)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;

	tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
		       se_cmd->t_data_nents, iov, se_cmd->data_length);
}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
			     bool bidi, uint32_t read_len)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	struct scatterlist *data_sg;
	unsigned int data_nents;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {
		/*
		 * For bidi case, the first count blocks are for Data-Out
		 * buffer blocks, and before gathering the Data-In buffer
		 * the Data-Out buffer blocks should be skipped.
		 */
		tcmu_cmd_set_dbi_cur(tcmu_cmd,
				     tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
		       data_nents, NULL, read_len);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return thresh - bitmap_weight(bitmap, thresh);
}

/*
 * We can't queue a command until we have space available on the cmd ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}
	return true;
}
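
/*
 * Example: with cmdr_size = 4096, cmd_head = 4032 and cmd_size = 96,
 * head_to_end() is only 64 bytes, so the check above demands
 * 96 + 64 = 160 free bytes: enough for a PAD entry filling the tail of
 * the ring plus the real command placed at offset 0 after the wrap.
 */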

/*
 * We have to allocate data buffers before we can queue a command.
 * Returns -1 on error (not enough space) or number of needed iovs on success
 *
 * Called with ring lock held.
 */
static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
				 int *iov_bidi_cnt)
{
	int space, iov_cnt = 0, ret = 0;

	if (!cmd->dbi_cnt)
		goto wr_iov_cnts;

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if (space < cmd->dbi_cnt) {
		unsigned long blocks_left =
				(udev->max_blocks - udev->dbi_thresh) + space;

		if (blocks_left < cmd->dbi_cnt) {
			pr_debug("no data space: only %lu available, but ask for %u\n",
					blocks_left * udev->data_blk_size,
					cmd->dbi_cnt * udev->data_blk_size);
			return -1;
		}

		udev->dbi_thresh += cmd->dbi_cnt;
		if (udev->dbi_thresh > udev->max_blocks)
			udev->dbi_thresh = udev->max_blocks;
	}

	iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
	if (iov_cnt < 0)
		return -1;

	if (cmd->dbi_bidi_cnt) {
		ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
		if (ret < 0)
			return -1;
	}
wr_iov_cnts:
	*iov_bidi_cnt = ret;
	return iov_cnt + ret;
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
			sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
				TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}
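
/*
 * The total ring footprint of a command is therefore base_command_size
 * (the entry header plus the iov[] array, but never less than
 * sizeof(struct tcmu_cmd_entry)) plus the CDB rounded up to
 * TCMU_OP_ALIGN_SIZE, so the CDB always starts at an aligned offset
 * directly behind the iovecs.
 */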

static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
				 struct timer_list *timer)
{
	if (!tmo)
		return;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	if (!timer_pending(timer))
		mod_timer(timer, tcmu_cmd->deadline);

	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}

static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned int tmo;

	/*
	 * For backwards compat, if qfull_time_out is not set use
	 * cmd_time_out, and if that's not set use the default time out.
	 */
	if (!udev->qfull_time_out)
		return -ETIMEDOUT;
	else if (udev->qfull_time_out > 0)
		tmo = udev->qfull_time_out;
	else if (udev->cmd_time_out)
		tmo = udev->cmd_time_out;
	else
		tmo = TCMU_TIME_OUT;

	tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);

	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
	pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
		 tcmu_cmd, udev->name);
	return 0;
}

static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
{
	struct tcmu_cmd_entry_hdr *hdr;
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		hdr = udev->cmdr + cmd_head;
		tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&hdr->len_op, pad_size);
		hdr->cmd_id = 0; /* not used for PAD */
		hdr->kflags = 0;
		hdr->uflags = 0;
		tcmu_flush_dcache_range(hdr, sizeof(*hdr));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	return cmd_head;
}
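
/*
 * Example: if only 32 bytes remain before the end of the ring and the
 * next entry needs 64, a 32-byte PAD entry is written first; after
 * UPDATE_HEAD() the head has wrapped to 0 (hence the WARN_ON above)
 * and the caller stores the real entry at the start of the ring.
 */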

static void tcmu_unplug_device(struct se_dev_plug *se_plug)
{
	struct se_device *se_dev = se_plug->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags);
	uio_event_notify(&udev->uio_info);
}

static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
{
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
		return &udev->se_plug;

	return NULL;
}

/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code if failure (-1) returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb = udev->mb_addr;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, iov_bidi_cnt;
	uint32_t cmd_id, cmd_head;
	uint64_t cdb_off;
	uint32_t blk_size = udev->data_blk_size;
	/* size of data buffer needed */
	size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;

	*scsi_err = TCM_NO_SENSE;

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
		*scsi_err = TCM_LUN_BUSY;
		return -1;
	}

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -1;
	}

	if (!list_empty(&udev->qfull_queue))
		goto queue;

	if (data_length > (size_t)udev->max_blocks * blk_size) {
		pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
			data_length, (size_t)udev->max_blocks * blk_size);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
	if (iov_cnt < 0)
		goto free_and_queue;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	if (command_size > (udev->cmdr_size / 2)) {
		pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
			command_size, udev->cmdr_size);
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	if (!is_ring_space_avail(udev, command_size))
		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		goto free_and_queue;

	if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
		     GFP_NOWAIT) < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");

		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}
	tcmu_cmd->cmd_id = cmd_id;

	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
		 tcmu_cmd, udev->name);

	cmd_head = ring_insert_padding(udev, command_size);

	entry = udev->cmdr + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* prepare iov list and copy data to data area if necessary */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];

	if (se_cmd->data_direction == DMA_TO_DEVICE ||
	    se_cmd->se_cmd_flags & SCF_BIDI)
		scatter_data_area(udev, tcmu_cmd, &iov);
	else
		tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);

	entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;

	/* Handle BIDI commands */
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
		entry->req.iov_bidi_cnt = iov_bidi_cnt;
	}

	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);

	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, command_size);

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);

	if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
		uio_event_notify(&udev->uio_info);

	return 0;

free_and_queue:
	tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);

queue:
	if (add_to_qfull_queue(tcmu_cmd)) {
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}

	return 1;
}
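
/*
 * On-ring layout of a command queued above (offsets within the cmd
 * ring): the entry header and request sit at cmd_head, the iov[]
 * array (plus the bidi iovecs for SCF_BIDI) lives inside the request,
 * and the CDB follows at cmd_head + base_command_size, aligned to
 * TCMU_OP_ALIGN_SIZE. entry->req.cdb_off is mailbox-relative
 * (CMDR_OFF + cmd_head + base_command_size), so userspace locates the
 * CDB without doing any pointer arithmetic of its own.
 */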

/**
 * queue_tmr_ring - queue tmr info to ring or internally
 * @udev: related tcmu_dev
 * @tmr: tcmu_tmr containing tmr info to queue
 *
 * Returns:
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int
queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
{
	struct tcmu_tmr_entry *entry;
	int cmd_size;
	int id_list_sz;
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t cmd_head;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		goto out_free;

	id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
	cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);

	if (!list_empty(&udev->tmr_queue) ||
	    !is_ring_space_avail(udev, cmd_size)) {
		list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
		pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
			 tmr, udev->name);
		return 1;
	}

	cmd_head = ring_insert_padding(udev, cmd_size);

	entry = udev->cmdr + cmd_head;
	memset(entry, 0, cmd_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
	tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
	entry->tmr_type = tmr->tmr_type;
	entry->cmd_cnt = tmr->tmr_cmd_cnt;
	memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
	tcmu_flush_dcache_range(entry, cmd_size);

	UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	uio_event_notify(&udev->uio_info);

out_free:
	kfree(tmr);

	return 0;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
	int ret = -1;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	mutex_lock(&udev->cmdr_lock);
	if (!(se_cmd->transport_state & CMD_T_ABORTED))
		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
	if (ret < 0)
		tcmu_free_cmd(tcmu_cmd);
	else
		se_cmd->priv = tcmu_cmd;
	mutex_unlock(&udev->cmdr_lock);
	return scsi_ret;
}

static void tcmu_set_next_deadline(struct list_head *queue,
				   struct timer_list *timer)
{
	struct tcmu_cmd *cmd;

	if (!list_empty(queue)) {
		cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
		mod_timer(timer, cmd->deadline);
	} else
		del_timer(timer);
}

static int
tcmu_tmr_type(enum tcm_tmreq_table tmf)
{
	switch (tmf) {
	case TMR_ABORT_TASK:		return TCMU_TMR_ABORT_TASK;
	case TMR_ABORT_TASK_SET:	return TCMU_TMR_ABORT_TASK_SET;
	case TMR_CLEAR_ACA:		return TCMU_TMR_CLEAR_ACA;
	case TMR_CLEAR_TASK_SET:	return TCMU_TMR_CLEAR_TASK_SET;
	case TMR_LUN_RESET:		return TCMU_TMR_LUN_RESET;
	case TMR_TARGET_WARM_RESET:	return TCMU_TMR_TARGET_WARM_RESET;
	case TMR_TARGET_COLD_RESET:	return TCMU_TMR_TARGET_COLD_RESET;
	case TMR_LUN_RESET_PRO:		return TCMU_TMR_LUN_RESET_PRO;
	default:			return TCMU_TMR_UNKNOWN;
	}
}

static void
tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
		struct list_head *cmd_list)
{
	int i = 0, cmd_cnt = 0;
	bool unqueued = false;
	uint16_t *cmd_ids = NULL;
	struct tcmu_cmd *cmd;
	struct se_cmd *se_cmd;
	struct tcmu_tmr *tmr;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	mutex_lock(&udev->cmdr_lock);

	/* First we check for aborted commands in qfull_queue */
	list_for_each_entry(se_cmd, cmd_list, state_list) {
		i++;
		if (!se_cmd->priv)
			continue;
		cmd = se_cmd->priv;
		/* Commands on qfull queue have no id yet */
		if (cmd->cmd_id) {
			cmd_cnt++;
			continue;
		}
		pr_debug("Removing aborted command %p from queue on dev %s.\n",
			 cmd, udev->name);

		list_del_init(&cmd->queue_entry);
		tcmu_free_cmd(cmd);
		se_cmd->priv = NULL;
		target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
		unqueued = true;
	}
	if (unqueued)
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

	if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
		goto unlock;

	pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
		 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);

	tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
	if (!tmr)
		goto unlock;

	tmr->tmr_type = tcmu_tmr_type(tmf);
	tmr->tmr_cmd_cnt = cmd_cnt;

	if (cmd_cnt != 0) {
		cmd_cnt = 0;
		list_for_each_entry(se_cmd, cmd_list, state_list) {
			if (!se_cmd->priv)
				continue;
			cmd = se_cmd->priv;
			if (cmd->cmd_id)
				tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
		}
	}

	queue_tmr_ring(udev, tmr);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;
	bool read_len_valid = false;
	uint32_t read_len;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		WARN_ON_ONCE(se_cmd);
		goto out;
	}

	list_del_init(&cmd->queue_entry);

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
		goto done;
	}

	read_len = se_cmd->data_length;
	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
		read_len_valid = true;
		if (entry->rsp.read_len < read_len)
			read_len = entry->rsp.read_len;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
		if (!read_len_valid)
			goto done;
		else
			se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
	}
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true, read_len);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false, read_len);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

done:
	se_cmd->priv = NULL;
	if (read_len_valid) {
		pr_debug("read_len = %d\n", read_len);
		target_complete_cmd_with_length(cmd->se_cmd,
					entry->rsp.scsi_status, read_len);
	} else
		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}

static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
{
	struct tcmu_tmr *tmr, *tmp;
	LIST_HEAD(tmrs);

	if (list_empty(&udev->tmr_queue))
		return 1;

	pr_debug("running %s's tmr queue\n", udev->name);

	list_splice_init(&udev->tmr_queue, &tmrs);

	list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
		list_del_init(&tmr->queue_entry);

		pr_debug("removing tmr %p on dev %s from queue\n",
			 tmr, udev->name);

		if (queue_tmr_ring(udev, tmr)) {
			pr_debug("ran out of space during tmr queue run\n");
			/*
			 * tmr was requeued, so just put all tmrs back in
			 * the queue
			 */
			list_splice_tail(&tmrs, &udev->tmr_queue);
			return 0;
		}
	}
	return 1;
}

static bool tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	bool free_space = false;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return false;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;

		/*
		 * Flush max. up to end of cmd ring since current entry might
		 * be a padding that is shorter than sizeof(*entry)
		 */
		size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
					       udev->cmdr_size);
		tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
					ring_left : sizeof(*entry));

		free_space = true;

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
		    tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			return false;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);
	}
	if (free_space)
		free_space = tcmu_run_tmr_queue(udev);

	if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
	    xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
		/*
		 * Allocated blocks exceeded global block limit, currently no
		 * more pending or waiting commands so try to reclaim blocks.
		 */
		schedule_delayed_work(&tcmu_unmap_work, 0);
	}
	if (udev->cmd_time_out)
		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);

	return free_space;
}
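
/*
 * Ring ownership in the loop above: the kernel is the producer and
 * only ever advances mb->cmd_head, userspace is the consumer and only
 * advances mb->cmd_tail as it completes entries; cmdr_last_cleaned is
 * the kernel-private mark of how far completed entries have been
 * reclaimed, chasing cmd_tail.
 */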

static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after_eq(jiffies, cmd->deadline))
		return;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	se_cmd->priv = NULL;
	cmd->se_cmd = NULL;

	pr_debug("Timing out inflight cmd %u on dev %s.\n",
		 cmd->cmd_id, cmd->tcmu_dev->name);

	target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
}

static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after_eq(jiffies, cmd->deadline))
		return;

	pr_debug("Timing out queued cmd %p on dev %s.\n",
		 cmd, cmd->tcmu_dev->name);

	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	tcmu_free_cmd(cmd);

	se_cmd->priv = NULL;
	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}

static void tcmu_device_timedout(struct tcmu_dev *udev)
{
	spin_lock(&timed_out_udevs_lock);
	if (list_empty(&udev->timedout_entry))
		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
	spin_unlock(&timed_out_udevs_lock);

	schedule_delayed_work(&tcmu_unmap_work, 0);
}

static void tcmu_cmd_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);

	pr_debug("%s cmd timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static void tcmu_qfull_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);

	pr_debug("%s qfull timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;
	udev->qfull_time_out = -1;

	udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
	udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
	udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);

	mutex_init(&udev->cmdr_lock);

	INIT_LIST_HEAD(&udev->node);
	INIT_LIST_HEAD(&udev->timedout_entry);
	INIT_LIST_HEAD(&udev->qfull_queue);
	INIT_LIST_HEAD(&udev->tmr_queue);
	INIT_LIST_HEAD(&udev->inflight_queue);
	xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);

	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);

	xa_init(&udev->data_pages);

	return &udev->se_dev;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
			       unsigned long last)
{
	XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk);
	struct page *page;
	u32 pages_freed = 0;

	xas_lock(&xas);
	xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) {
		xas_store(&xas, NULL);
		__free_page(page);
		pages_freed++;
	}
	xas_unlock(&xas);

	atomic_sub(pages_freed, &global_page_count);

	return pages_freed;
}

static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
{
	struct tcmu_tmr *tmr, *tmp;

	list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
		list_del_init(&tmr->queue_entry);
		kfree(tmr);
	}
}

static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	unsigned long i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	spin_lock_bh(&timed_out_udevs_lock);
	if (!list_empty(&udev->timedout_entry))
		list_del(&udev->timedout_entry);
	spin_unlock_bh(&timed_out_udevs_lock);

	/* Upper layer should drain all requests before calling this */
	mutex_lock(&udev->cmdr_lock);
	xa_for_each(&udev->commands, i, cmd) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	/* There can be left over TMR cmds. Remove them. */
	tcmu_remove_all_queued_tmr(udev);
	if (!list_empty(&udev->qfull_queue))
		all_expired = false;
	xa_destroy(&udev->commands);
	WARN_ON(!all_expired);

	tcmu_blocks_release(udev, 0, udev->dbi_max);
	bitmap_free(udev->data_bitmap);
	mutex_unlock(&udev->cmdr_lock);

	pr_debug("dev_kref_release\n");

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
	LIST_HEAD(cmds);
	sense_reason_t scsi_ret;
	int ret;

	if (list_empty(&udev->qfull_queue))
		return;

	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);

	list_splice_init(&udev->qfull_queue, &cmds);

	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
		list_del_init(&tcmu_cmd->queue_entry);

		pr_debug("removing cmd %p on dev %s from queue\n",
			 tcmu_cmd, udev->name);

		if (fail) {
			/*
			 * We were not able to even start the command, so
			 * fail with busy to allow a retry in case runner
			 * was only temporarily down. If the device is being
			 * removed then LIO core will do the right thing and
			 * fail the retry.
			 */
			tcmu_cmd->se_cmd->priv = NULL;
			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
			tcmu_free_cmd(tcmu_cmd);
			continue;
		}

		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
		if (ret < 0) {
			pr_debug("cmd %p on dev %s failed with %u\n",
				 tcmu_cmd, udev->name, scsi_ret);
			/*
			 * Ignore scsi_ret for now. target_complete_cmd
			 * drops it.
			 */
			tcmu_cmd->se_cmd->priv = NULL;
			target_complete_cmd(tcmu_cmd->se_cmd,
					    SAM_STAT_CHECK_CONDITION);
			tcmu_free_cmd(tcmu_cmd);
		} else if (ret > 0) {
			pr_debug("ran out of space during cmdr queue run\n");
			/*
			 * cmd was requeued, so just put all cmds back in
			 * the queue
			 */
			list_splice_tail(&cmds, &udev->qfull_queue);
			break;
		}
	}

	tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	mutex_lock(&udev->cmdr_lock);
	if (tcmu_handle_completions(udev))
		run_qfull_queue(udev, false);
	mutex_unlock(&udev->cmdr_lock);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
{
	struct page *page;

	mutex_lock(&udev->cmdr_lock);
	page = xa_load(&udev->data_pages, dpi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Userspace messed up and passed in an address not in the
	 * data iov passed to it.
	 */
	pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
	       dpi, udev->name);
	mutex_unlock(&udev->cmdr_lock);

	return NULL;
}

static void tcmu_vma_open(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;

	pr_debug("vma_open\n");

	kref_get(&udev->kref);
}

static void tcmu_vma_close(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;

	pr_debug("vma_close\n");

	/* release ref from tcmu_vma_open */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (offset < udev->data_off) {
		/* For the vmalloc()ed cmd area pages */
		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
		page = vmalloc_to_page(addr);
	} else {
		uint32_t dpi;

		/* For the dynamically growing data area pages */
		dpi = (offset - udev->data_off) / PAGE_SIZE;
		page = tcmu_try_get_data_page(udev, dpi);
		if (!page)
			return VM_FAULT_SIGBUS;
	}

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.open = tcmu_vma_open,
	.close = tcmu_vma_close,
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != udev->mmap_pages)
		return -EINVAL;

	tcmu_vma_open(vma);

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	udev->inode = inode;

	pr_debug("open\n");

	return 0;
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");

	return 0;
}

static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	mutex_lock(&tcmu_nl_cmd_mutex);

	if (tcmu_netlink_blocked) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
			udev->name);
		return -EAGAIN;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("netlink cmd %d already executing on %s\n",
			nl_cmd->cmd, udev->name);
		return -EBUSY;
	}

	memset(nl_cmd, 0, sizeof(*nl_cmd));
	nl_cmd->cmd = cmd;
	nl_cmd->udev = udev;
	init_completion(&nl_cmd->complete);
	INIT_LIST_HEAD(&nl_cmd->nl_list);

	list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);

	mutex_unlock(&tcmu_nl_cmd_mutex);
	return 0;
}

static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return;

	if (udev->nl_reply_supported <= 0)
		return;

	mutex_lock(&tcmu_nl_cmd_mutex);

	list_del(&nl_cmd->nl_list);
	memset(nl_cmd, 0, sizeof(*nl_cmd));

	mutex_unlock(&tcmu_nl_cmd_mutex);
}

static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
	int ret;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	pr_debug("sleeping for nl reply\n");
	wait_for_completion(&nl_cmd->complete);

	mutex_lock(&tcmu_nl_cmd_mutex);
	nl_cmd->cmd = TCMU_CMD_UNSPEC;
	ret = nl_cmd->status;
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static int tcmu_netlink_event_init(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff **buf, void **hdr)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
	if (ret < 0)
		goto free_skb;

	*buf = skb;
	*hdr = msg_header;
	return ret;

free_skb:
	nlmsg_free(skb);
	return ret;
}

static int tcmu_netlink_event_send(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff *skb, void *msg_header)
{
	int ret;

	genlmsg_end(skb, msg_header);

	ret = tcmu_init_genl_cmd_reply(udev, cmd);
	if (ret) {
		nlmsg_free(skb);
		return ret;
	}

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				      TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* Wait during an add as the listener may not be up yet */
	if (ret == 0 ||
	    (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
		return tcmu_wait_genl_cmd_reply(udev);
	else
		tcmu_destroy_genl_cmd_reply(udev);

	return ret;
}

static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
				      &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
				       msg_header);
}

static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
				       skb, msg_header);
}

static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	char *str;

	info = &udev->uio_info;

	if (udev->dev_config[0])
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
				udev->name, udev->dev_config);
	else
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
				udev->name);
	if (!str)
		return -ENOMEM;

	/* If the old string exists, free it */
	kfree(info->name);
	info->name = str;

	return 0;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	size_t data_size;
	int ret = 0;

	ret = tcmu_update_uio_info(udev);
	if (ret)
		return ret;

	info = &udev->uio_info;

	mutex_lock(&udev->cmdr_lock);
	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
	mutex_unlock(&udev->cmdr_lock);
	if (!udev->data_bitmap) {
		ret = -ENOMEM;
		goto err_bitmap_alloc;
	}

	mb = vzalloc(MB_CMDR_SIZE);
	if (!mb) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->mb_addr = mb;
	udev->cmdr = (void *)mb + CMDR_OFF;
	udev->cmdr_size = CMDR_SIZE;
	udev->data_off = MB_CMDR_SIZE;
	data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
	udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
	udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
	udev->dbi_thresh = 0; /* Default in Idle state */

	/* Initialise the mailbox of the ring buffer */
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
		    TCMU_MAILBOX_FLAG_CAP_TMR;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(data_size % PAGE_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = data_size + MB_CMDR_SIZE;
	info->mem[0].memtype = UIO_MEM_NONE;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enabling the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	if (!dev->dev_attrib.emulate_write_cache)
		dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref in case userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
	 */
	kref_get(&udev->kref);

	ret = tcmu_send_dev_add_event(udev);
	if (ret)
		goto err_netlink;

	mutex_lock(&root_udev_mutex);
	list_add(&udev->node, &root_udev);
	mutex_unlock(&root_udev_mutex);

	return 0;

err_netlink:
	kref_put(&udev->kref, tcmu_dev_kref_release);
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	bitmap_free(udev->data_bitmap);
	udev->data_bitmap = NULL;
err_bitmap_alloc:
	kfree(info->name);
	info->name = NULL;

	return ret;
}
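
/*
 * Resulting mmap layout seen by userspace (assuming 4 KiB pages and the
 * default 1024 MiB data area): the mailbox occupies [0, CMDR_OFF), the
 * command ring [CMDR_OFF, 8 MiB), and the data area
 * [8 MiB, 8 MiB + data_size), faulted in page by page.
 * udev->mmap_pages covers the whole span, which tcmu_mmap() enforces.
 */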

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	/* release ref from init */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	del_timer_sync(&udev->cmd_timer);
	del_timer_sync(&udev->qfull_timer);

	mutex_lock(&root_udev_mutex);
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	tcmu_send_dev_remove_event(udev);

	uio_unregister_device(&udev->uio_info);

	/* release ref from configure */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_unblock_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);
	clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_block_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);

	if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		goto unlock;

	/* complete IO that has executed successfully */
	tcmu_handle_completions(udev);
	/* fail IO waiting to be queued */
	run_qfull_queue(udev, true);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	unsigned long i;

	mutex_lock(&udev->cmdr_lock);

	xa_for_each(&udev->commands, i, cmd) {
		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
			 cmd->cmd_id, udev->name,
			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));

		xa_erase(&udev->commands, i);
		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
			WARN_ON(!cmd->se_cmd);
			list_del_init(&cmd->queue_entry);
			cmd->se_cmd->priv = NULL;
			if (err_level == 1) {
				/*
				 * Userspace was not able to start the
				 * command or it is retryable.
				 */
				target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
			} else {
				/* hard failure */
				target_complete_cmd(cmd->se_cmd,
						    SAM_STAT_CHECK_CONDITION);
			}
		}
		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
		tcmu_free_cmd(cmd);
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
		 mb->cmd_tail, mb->cmd_head);

	udev->cmdr_last_cleaned = 0;
	mb->cmd_tail = 0;
	mb->cmd_head = 0;
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);

	del_timer(&udev->cmd_timer);

	/*
	 * The ring is empty and the qfull queue never contains aborted
	 * commands, so TMRs in the tmr queue do not contain relevant
	 * cmd_ids. After a ring reset userspace should do a fresh start,
	 * so even a LUN RESET message is no longer relevant.
	 * Therefore remove all TMRs from the tmr queue.
	 */
	tcmu_remove_all_queued_tmr(udev);

	run_qfull_queue(udev, false);

	mutex_unlock(&udev->cmdr_lock);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%s"},
	{Opt_hw_block_size, "hw_block_size=%d"},
	{Opt_hw_max_sectors, "hw_max_sectors=%d"},
	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
	{Opt_max_data_area_mb, "max_data_area_mb=%d"},
	{Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
	{Opt_err, NULL}
};
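
/*
 * These options arrive via configfs control writes. For example, a
 * hypothetical option string of
 *
 *   dev_config=foo/bar,max_data_area_mb=1024,data_pages_per_blk=4
 *
 * is split on ',' and '\n' by tcmu_set_configfs_dev_params() below and
 * each token is matched against this table.
 */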

static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for dev attrib. Error %d.\n",
		       ret);
		return ret;
	}

	if (val <= 0) {
		pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
		       val);
		return -EINVAL;
	}

	*dev_attrib = val;
	return 0;
}

static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{
	int val, ret;
	uint32_t pages_per_blk = udev->data_pages_per_blk;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
		       ret);
		return ret;
	}
	if (val <= 0) {
		pr_err("Invalid max_data_area %d.\n", val);
		return -EINVAL;
	}
	if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
		       val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
		val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
	}
	if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
		pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
		       val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
		return -EINVAL;
	}

	mutex_lock(&udev->cmdr_lock);
	if (udev->data_bitmap) {
		pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
		ret = -EINVAL;
		goto unlock;
	}

	udev->data_area_mb = val;
	udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;

unlock:
	mutex_unlock(&udev->cmdr_lock);
	return ret;
}
static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
		       ret);
		return ret;
	}

	if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
		pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d (%zd pages).\n",
		       val, udev->data_area_mb,
		       TCMU_MBS_TO_PAGES(udev->data_area_mb));
		return -EINVAL;
	}

	mutex_lock(&udev->cmdr_lock);
	if (udev->data_bitmap) {
		pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
		ret = -EINVAL;
		goto unlock;
	}

	udev->data_pages_per_blk = val;
	udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;

unlock:
	mutex_unlock(&udev->cmdr_lock);
	return ret;
}
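/*
 * Worked example for the two setters above, assuming 4K pages
 * (PAGE_SHIFT == 12): max_data_area_mb=8 gives TCMU_MBS_TO_PAGES(8) ==
 * 2048 pages; with data_pages_per_blk=2, max_blocks becomes
 * 2048 / 2 == 1024 data blocks of 8K each.
 */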
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			ret = match_u64(&args[0], &udev->dev_size);
			if (ret < 0)
				pr_err("match_u64() failed for dev_size=. Error %d.\n",
				       ret);
			break;
		case Opt_hw_block_size:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_block_size));
			break;
		case Opt_hw_max_sectors:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_max_sectors));
			break;
		case Opt_nl_reply_supported:
			ret = match_int(&args[0], &udev->nl_reply_supported);
			if (ret < 0)
				pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
				       ret);
			break;
		case Opt_max_data_area_mb:
			ret = tcmu_set_max_blocks_param(udev, &args[0]);
			break;
		case Opt_data_pages_per_blk:
			ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
			break;
		default:
			break;
		}

		if (ret)
			break;
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
	bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
	bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk);

	return bl;
}
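/*
 * Worked example for tcmu_get_blocks() below (illustrative numbers):
 * with dev_size = 1073741824 (1 GiB) and block_size = 512 it returns
 * (1073741824 - 512) / 512 = 2097151, i.e. the index of the last
 * addressable block rather than the total block count.
 */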
static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}
static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
			udev->qfull_time_out :
			udev->qfull_time_out / MSEC_PER_SEC);
}
static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
					 const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s32 val;
	int ret;

	ret = kstrtos32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0) {
		udev->qfull_time_out = val * MSEC_PER_SEC;
	} else if (val == -1) {
		udev->qfull_time_out = val;
	} else {
		printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
		return -EINVAL;
	}
	return count;
}
CONFIGFS_ATTR(tcmu_, qfull_time_out);
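/*
 * Example (illustrative path): values >= 0 are taken as seconds and
 * scaled to milliseconds by the store handler above, while -1 is passed
 * through unscaled; the show handler prints non-positive values
 * verbatim:
 *
 *   echo 30 > /sys/kernel/config/target/core/user_1/udev0/attrib/qfull_time_out
 */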
static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
}
CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}
static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
				      const char *reconfig_data)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}
static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	int ret, len;

	len = strlen(page);
	if (!len || len > TCMU_CONFIG_LEN - 1)
		return -EINVAL;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_dev_config_event(udev, page);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

		ret = tcmu_update_uio_info(udev);
		if (ret)
			return ret;
		return count;
	}
	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

	return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);
static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
				size, TCMU_ATTR_PAD);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u64 val;
	int ret;

	ret = kstrtou64(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_dev_size_event(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}
	udev->dev_size = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);
static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}
static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);

	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}
static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_emulate_write_cache(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%i\n",
			test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
}
static ssize_t tcmu_tmr_notification_store(struct config_item *item,
					   const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;
	if (val > 1)
		return -EINVAL;

	if (val)
		set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
	else
		clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
	return count;
}
CONFIGFS_ATTR(tcmu_, tmr_notification);
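/*
 * Example (illustrative path): enabling TMR notification sets
 * TCMU_DEV_BIT_TMR_NOTIFY, which gates whether task management requests
 * are passed up to userspace:
 *
 *   echo 1 > /sys/kernel/config/target/core/user_1/udev0/attrib/tmr_notification
 */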
static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");

	return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
}
static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
				    size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block value %d\n", val);
		return -EINVAL;
	}

	if (!val)
		tcmu_unblock_dev(udev);
	else
		tcmu_block_dev(udev);
	return count;
}
CONFIGFS_ATTR(tcmu_, block_dev);
static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1 && val != 2) {
		pr_err("Invalid reset ring value %d\n", val);
		return -EINVAL;
	}

	tcmu_reset_ring(udev, val);
	return count;
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);
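/*
 * Example (illustrative paths) for the two action attributes above. A
 * reset_ring value of 1 completes inflight commands with BUSY
 * (retryable), 2 with CHECK CONDITION (see tcmu_reset_ring()):
 *
 *   echo 1 > /sys/kernel/config/target/core/user_1/udev0/action/block_dev
 *   echo 1 > /sys/kernel/config/target/core/user_1/udev0/action/reset_ring
 *   echo 0 > /sys/kernel/config/target/core/user_1/udev0/action/block_dev
 */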
static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_qfull_time_out,
	&tcmu_attr_max_data_area_mb,
	&tcmu_attr_data_pages_per_blk,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_tmr_notification,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;
static struct configfs_attribute *tcmu_action_attrs[] = {
	&tcmu_attr_block_dev,
	&tcmu_attr_reset_ring,
	NULL,
};
static struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.owner			= THIS_MODULE,
	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
				      TRANSPORT_FLAG_PASSTHROUGH_ALUA,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.destroy_device		= tcmu_destroy_device,
	.free_device		= tcmu_free_device,
	.unplug_device		= tcmu_unplug_device,
	.plug_device		= tcmu_plug_device,
	.parse_cdb		= tcmu_parse_cdb,
	.tmr_notify		= tcmu_tmr_notify,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_action_attrs	= tcmu_action_attrs,
};
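/*
 * Worked example for the reclaim logic in find_free_blocks() below
 * (illustrative numbers): with dbi_max = 7 and the highest set bit in
 * data_bitmap at index 3, start becomes 4, dbi_max drops to 3, and the
 * pages backing blocks 4..7 are unmapped and released.
 */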
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	u32 pages_freed, total_pages_freed = 0;
	u32 start, end, block, total_blocks_freed = 0;

	if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
		return;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		if (!target_dev_configured(&udev->se_dev)) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		/* Try to complete the finished commands first */
		if (tcmu_handle_completions(udev))
			run_qfull_queue(udev, false);

		/* Skip the udevs in idle */
		if (!udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so it is not possible
			 * to reclaim any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* The current udev will go to idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Here will truncate the data area from off */
		off = udev->data_off + (loff_t)start * udev->data_blk_size;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		pages_freed = tcmu_blocks_release(udev, start, end - 1);
		mutex_unlock(&udev->cmdr_lock);

		total_pages_freed += pages_freed;
		total_blocks_freed += end - start;
		pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
			 pages_freed, total_pages_freed, end - start,
			 total_blocks_freed, udev->name);
	}
	mutex_unlock(&root_udev_mutex);

	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}
static void check_timedout_devices(void)
{
	struct tcmu_dev *udev, *tmp_dev;
	struct tcmu_cmd *cmd, *tmp_cmd;
	LIST_HEAD(devs);

	spin_lock_bh(&timed_out_udevs_lock);
	list_splice_init(&timed_out_udevs, &devs);

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		list_del_init(&udev->timedout_entry);
		spin_unlock_bh(&timed_out_udevs_lock);

		mutex_lock(&udev->cmdr_lock);

		/*
		 * If cmd_time_out is disabled but qfull is set deadline
		 * will only reflect the qfull timeout. Ignore it.
		 */
		if (udev->cmd_time_out) {
			list_for_each_entry_safe(cmd, tmp_cmd,
						 &udev->inflight_queue,
						 queue_entry)
				tcmu_check_expired_ring_cmd(cmd);
			tcmu_set_next_deadline(&udev->inflight_queue,
					       &udev->cmd_timer);
		}
		list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
					 queue_entry)
			tcmu_check_expired_queue_cmd(cmd);
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

		mutex_unlock(&udev->cmdr_lock);

		spin_lock_bh(&timed_out_udevs_lock);
	}

	spin_unlock_bh(&timed_out_udevs_lock);
}
static void tcmu_unmap_work_fn(struct work_struct *work)
{
	check_timedout_devices();
	find_free_blocks();
}
static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0) {
		goto out_unreg_device;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	/* One extra slot for the terminating NULL, left zeroed by kzalloc(). */
	len += sizeof(struct configfs_attribute *);

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
		tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
		tcmu_attrs[i++] = tcmu_attrib_attrs[k];
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	return 0;

out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}
static void __exit tcmu_module_exit(void)
{
	cancel_delayed_work_sync(&tcmu_unmap_work);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}
MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);