/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>
/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */
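/*
 * Purely illustrative and not part of this driver: a minimal sketch of
 * the loop a userspace handler might run after mmap()ing the UIO
 * device, using only definitions from the uapi header. handle_cmd() is
 * a hypothetical helper; error handling, the data area, and the
 * doorbell (reading/writing the uio fd) are omitted.
 *
 *	struct tcmu_mailbox *mb = map;
 *	struct tcmu_cmd_entry *ent;
 *
 *	while (mb->cmd_tail != mb->cmd_head) {
 *		ent = map + mb->cmdr_off + mb->cmd_tail;
 *		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
 *			handle_cmd(ent);
 *			ent->rsp.scsi_status = SAM_STAT_GOOD;
 *		}
 *		mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *			       mb->cmdr_size;
 *	}
 */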
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

#define DATA_BLOCK_BITS 256
#define DATA_BLOCK_SIZE 4096

#define CMDR_SIZE (16 * 4096)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)

#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
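/*
 * With the values above the layout is concrete: the command ring gets
 * 16 * 4096 = 64KiB (minus the mailbox at its head, see CMDR_OFF
 * below), the data area gets 256 * 4096 = 1MiB, and the whole
 * mmap()ed region is therefore 64KiB + 1MiB = 1088KiB.
 */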
static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256
struct tcmu_dev {
	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	struct uio_info uio_info;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;

	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);

	wait_queue_head_t wait_cmdr;
	/* TODO should this be a mutex? */
	spinlock_t cmdr_lock;

	struct idr commands;
	spinlock_t commands_lock;

	struct timer_list timeout;
	unsigned int cmd_time_out;

	char dev_config[TCMU_CONFIG_LEN];
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};
static struct kmem_cache *tcmu_cmd_cache;

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};
/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 1,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
};
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int cmd_id;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;
	if (udev->cmd_time_out)
		tcmu_cmd->deadline = jiffies +
					msecs_to_jiffies(udev->cmd_time_out);

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&udev->commands_lock);
	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
		USHRT_MAX, GFP_NOWAIT);
	spin_unlock_irq(&udev->commands_lock);
	idr_preload_end();

	if (cmd_id < 0) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}
	tcmu_cmd->cmd_id = cmd_id;

	return tcmu_cmd;
}
static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);

	size = round_up(size+offset, PAGE_SIZE);
	vaddr -= offset;

	while (size) {
		flush_dcache_page(virt_to_page(vaddr));
		size -= PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}
/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}
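/*
 * A worked example: with size = 8, head = 2 and tail = 6, the head has
 * wrapped, so spc_used() returns 8 + (2 - 6) = 4 and spc_free()
 * returns 8 - 4 - 1 = 3. The reserved byte is what keeps head == tail
 * unambiguous: it can only mean "empty", never "full".
 */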
static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt,
			struct tcmu_dev *udev)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
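/*
 * E.g. with size = 1000, head = 950 and used = 80, UPDATE_HEAD stores
 * (950 + 80) % 1000 = 30: the ring wraps without requiring size to be
 * a power of 2. smp_store_release() orders the store so that a reader
 * only observes the new head after the entry contents it covers.
 */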
/* offset is relative to mb_addr */
static inline size_t get_block_offset(struct tcmu_dev *dev,
		int block, int remaining)
{
	return dev->data_off + block * DATA_BLOCK_SIZE +
		DATA_BLOCK_SIZE - remaining;
}
static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}
static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
	struct scatterlist *data_sg, unsigned int data_nents,
	struct iovec **iov, int *iov_cnt, bool copy_data)
{
	int i, block;
	int block_remaining = 0;
	void *from, *to;
	size_t copy_bytes, to_offset;
	struct scatterlist *sg;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				block = find_first_zero_bit(udev->data_bitmap,
						DATA_BLOCK_BITS);
				block_remaining = DATA_BLOCK_SIZE;
				set_bit(block, udev->data_bitmap);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			to_offset = get_block_offset(udev, block,
					block_remaining);
			to = (void *)udev->mb_addr + to_offset;
			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(udev, *iov)) {
				(*iov)->iov_len += copy_bytes;
			} else {
				new_iov(iov, iov_cnt, udev);
				(*iov)->iov_base = (void __user *) to_offset;
				(*iov)->iov_len = copy_bytes;
			}
			if (copy_data) {
				memcpy(to, from + sg->length - sg_remaining,
					copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}
			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}
}
static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
{
	bitmap_xor(udev->data_bitmap, udev->data_bitmap, cmd->data_bitmap,
			DATA_BLOCK_BITS);
}
static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
		struct scatterlist *data_sg, unsigned int data_nents)
{
	int i, block;
	int block_remaining = 0;
	void *from, *to;
	size_t copy_bytes, from_offset;
	struct scatterlist *sg;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		to = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				block = find_first_bit(cmd_bitmap,
						DATA_BLOCK_BITS);
				block_remaining = DATA_BLOCK_SIZE;
				clear_bit(block, cmd_bitmap);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			from_offset = get_block_offset(udev, block,
					block_remaining);
			from = (void *) udev->mb_addr + from_offset;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to + sg->length - sg_remaining, from,
					copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(to - sg->offset);
	}
}
static inline size_t spc_bitmap_free(unsigned long *bitmap)
{
	return DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
			bitmap_weight(bitmap, DATA_BLOCK_BITS));
}
/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	space = spc_bitmap_free(udev->data_bitmap);
	if (space < data_needed) {
		pr_debug("no data space: only %zu available, but ask for %zu\n",
				space, data_needed);
		return false;
	}

	return true;
}
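/*
 * A worked example of the end-of-ring case above: if cmd_head sits 100
 * bytes short of the ring's end and the entry needs 200 bytes, the
 * entry cannot be split, so we must have room for a 100-byte PAD entry
 * plus the 200-byte entry placed at offset 0, i.e. cmd_needed = 300
 * bytes of free space.
 */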
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length;
	DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare way too many iovs for potential uses here, because it's
	 * expensive to tell how many regions are freed in the bitmap
	 */
	base_command_size = max(offsetof(struct tcmu_cmd_entry,
				req.iov[se_cmd->t_bidi_data_nents +
					se_cmd->t_data_nents]),
				sizeof(struct tcmu_cmd_entry));
	command_size = base_command_size
		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	spin_lock_irq(&udev->cmdr_lock);

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	data_length = se_cmd->data_length;
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += se_cmd->t_bidi_data_sg->length;
	}
	if ((command_size > (udev->cmdr_size / 2)) ||
	    data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
			"cmd ring/data area\n", command_size, data_length,
			udev->cmdr_size, udev->data_size);
		spin_unlock_irq(&udev->cmdr_lock);
		return TCM_INVALID_CDB_FIELD;
	}

	while (!is_ring_space_avail(udev, command_size, data_length)) {
		int ret;
		DEFINE_WAIT(__wait);

		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

		pr_debug("sleeping for ring space\n");
		spin_unlock_irq(&udev->cmdr_lock);
		if (udev->cmd_time_out)
			ret = schedule_timeout(
					msecs_to_jiffies(udev->cmd_time_out));
		else
			ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
		finish_wait(&udev->wait_cmdr, &__wait);
		if (!ret) {
			pr_warn("tcmu: command timed out\n");
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		spin_lock_irq(&udev->cmdr_lock);

		/* We dropped cmdr_lock, cmd_head is stale */
		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_flush_dcache_range(entry, sizeof(*entry));
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	tcmu_flush_dcache_range(entry, sizeof(*entry));
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
	entry->hdr.kflags = 0;
	entry->hdr.uflags = 0;

	bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);

	/* Handle allocating space from the data area */
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	alloc_and_scatter_data_area(udev, se_cmd->t_data_sg,
		se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area);
	entry->req.iov_cnt = iov_cnt;
	entry->req.iov_dif_cnt = 0;

	/* Handle BIDI commands */
	iov_cnt = 0;
	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
	entry->req.iov_bidi_cnt = iov_cnt;

	/* cmd's data_bitmap is what changed in process */
	bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
			DATA_BLOCK_BITS);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	spin_unlock_irq(&udev->cmdr_lock);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	if (udev->cmd_time_out)
		mod_timer(&udev->timeout, round_jiffies_up(jiffies +
			  msecs_to_jiffies(udev->cmd_time_out)));

	return TCM_NO_SENSE;
}
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret != TCM_NO_SENSE) {
		pr_err("TCMU: Could not queue command\n");
		spin_lock_irq(&udev->commands_lock);
		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
		spin_unlock_irq(&udev->commands_lock);

		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
	}

	return ret;
}
static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		/*
		 * cmd has been completed already from timeout, just reclaim
		 * data area space and free cmd
		 */
		free_data_area(udev, cmd);

		kmem_cache_free(tcmu_cmd_cache, cmd);
		return;
	}

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		free_data_area(udev, cmd);
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
			       se_cmd->scsi_sense_length);
		free_data_area(udev, cmd);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);

		/* Get Data-In buffer before clean up */
		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
		gather_data_area(udev, bitmap,
			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
		free_data_area(udev, cmd);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);

		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
		gather_data_area(udev, bitmap,
			se_cmd->t_data_sg, se_cmd->t_data_nents);
		free_data_area(udev, cmd);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		free_data_area(udev, cmd);
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);
}
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	unsigned long flags;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	spin_lock_irqsave(&udev->cmdr_lock, flags);

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		spin_lock(&udev->commands_lock);
		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		spin_unlock(&udev->commands_lock);

		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	spin_unlock_irqrestore(&udev->cmdr_lock, flags);

	wake_up(&udev->wait_cmdr);

	return handled;
}
static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	if (!time_after(jiffies, cmd->deadline))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	return 0;
}
static void tcmu_device_timedout(unsigned long data)
{
	struct tcmu_dev *udev = (struct tcmu_dev *)data;
	unsigned long flags;
	int handled;

	handled = tcmu_handle_completions(udev);

	pr_warn("%d completions handled from timeout\n", handled);

	spin_lock_irqsave(&udev->commands_lock, flags);
	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
	spin_unlock_irqrestore(&udev->commands_lock, flags);

	/*
	 * We don't need to wakeup threads on wait_cmdr since they have their
	 * own timeout.
	 */
}
static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}
static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}
static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;

	init_waitqueue_head(&udev->wait_cmdr);
	spin_lock_init(&udev->cmdr_lock);

	idr_init(&udev->commands);
	spin_lock_init(&udev->commands_lock);

	setup_timer(&udev->timeout, tcmu_device_timedout,
		(unsigned long)udev);

	return &udev->se_dev;
}
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	tcmu_handle_completions(tcmu_dev);

	return 0;
}
/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}
static int tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)info->mem[mi].addr + offset;
	if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;
	return 0;
}
static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};
static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}
static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	pr_debug("open\n");

	return 0;
}
static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");

	return 0;
}
static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
	if (ret < 0)
		goto free_skb;

	genlmsg_end(skb, msg_header);

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}
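/*
 * For illustration only: a userspace daemon would subscribe to these
 * events roughly as follows (a sketch using libnl-genl; error handling
 * and the receive callback that parses TCMU_ATTR_DEVICE and
 * TCMU_ATTR_MINOR are omitted):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	int grp;
 *
 *	genl_connect(sk);
 *	grp = genl_ctrl_resolve_grp(sk, "TCM-USER", "config");
 *	nl_socket_add_membership(sk, grp);
 *	nl_recvmsgs_default(sk);
 */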
static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	size_t size;
	size_t used;
	int ret = 0;
	char *str;

	info = &udev->uio_info;

	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);

	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

	info->name = str;

	udev->mb_addr = vzalloc(TCMU_RING_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;

	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);
	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_VIRTUAL;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enable the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	dev->dev_attrib.hw_queue_depth = 128;

	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
				 udev->uio_info.uio_dev->minor);
	if (ret)
		goto err_netlink;

	return 0;

err_netlink:
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
err_vzalloc:
	kfree(info->name);

	return ret;
}
static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}
static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev);
}
static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
	return udev->uio_info.uio_dev ? true : false;
}
static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	del_timer_sync(&udev->timeout);

	vfree(udev->mb_addr);

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(!all_expired);

	if (tcmu_dev_configured(udev)) {
		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
				   udev->uio_info.uio_dev->minor);

		uio_unregister_device(&udev->uio_info);
		kfree(udev->uio_info.name);
		kfree(udev->name);
	}
	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_hw_max_sectors, "hw_max_sectors=%u"},
	{Opt_err, NULL}
};
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
	unsigned long tmp_ul;
	char *arg_p;
	int ret;

	arg_p = match_strdup(arg);
	if (!arg_p)
		return -ENOMEM;

	ret = kstrtoul(arg_p, 0, &tmp_ul);
	kfree(arg_p);
	if (ret < 0) {
		pr_err("kstrtoul() failed for dev attrib\n");
		return ret;
	}
	if (!tmp_ul) {
		pr_err("dev attrib must be nonzero\n");
		return -EINVAL;
	}
	*dev_attrib = tmp_ul;
	return 0;
}
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_hw_block_size:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_block_size));
			break;
		case Opt_hw_max_sectors:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_max_sectors));
			break;
		default:
			break;
		}

		if (ret)
			break;
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
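/*
 * For example (illustrative; the backstore and device names are chosen
 * arbitrarily), after creating a tcmu backstore a user can write:
 *
 *	echo "dev_config=foo/bar,dev_size=2147483648" > \
 *		/sys/kernel/config/target/core/user_0/mydev/control
 *
 * which strsep()s into two tokens and takes the Opt_dev_config and
 * Opt_dev_size branches above.
 */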
static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

	return bl;
}
static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}
static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (!val) {
		pr_err("Illegal value for cmd_time_out\n");
		return -EINVAL;
	}

	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
static struct configfs_attribute **tcmu_attrs;

static struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.owner			= THIS_MODULE,
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_attrib_attrs	= NULL,
};
static int __init tcmu_module_init(void)
{
	int ret, i, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0) {
		goto out_unreg_device;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	len += sizeof(struct configfs_attribute *) * 2;

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	}
	tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	return 0;

out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}
static void __exit tcmu_module_exit(void)
{
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}
MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);