1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
4  * Copyright (C) 2014 Red Hat, Inc.
5  * Copyright (C) 2015 Arrikto, Inc.
6  * Copyright (C) 2017 Chinamobile, Inc.
7  */
8
9 #include <linux/spinlock.h>
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/timer.h>
13 #include <linux/parser.h>
14 #include <linux/vmalloc.h>
15 #include <linux/uio_driver.h>
16 #include <linux/xarray.h>
17 #include <linux/stringify.h>
18 #include <linux/bitops.h>
19 #include <linux/highmem.h>
20 #include <linux/configfs.h>
21 #include <linux/mutex.h>
22 #include <linux/workqueue.h>
23 #include <net/genetlink.h>
24 #include <scsi/scsi_common.h>
25 #include <scsi/scsi_proto.h>
26 #include <target/target_core_base.h>
27 #include <target/target_core_fabric.h>
28 #include <target/target_core_backend.h>
29
30 #include <linux/target_core_user.h>
31
32 /**
33  * DOC: Userspace I/O
34  * Userspace I/O
35  * -------------
36  *
37  * Define a shared-memory interface for LIO to pass SCSI commands and
38  * data to userspace for processing. This allows backends that are
39  * too complex for in-kernel support to be implemented in userspace.
40  *
41  * It uses the UIO framework to do a lot of the device-creation and
42  * introspection work for us.
43  *
44  * See the .h file for how the ring is laid out. Note that while the
45  * command ring is defined, the particulars of the data area are
46  * not. Offset values in the command entry point to other locations
47  * internal to the mmap-ed area. There is separate space outside the
48  * command ring for data buffers. This leaves maximum flexibility for
49  * moving buffer allocations, or even page flipping or other
50  * allocation techniques, without altering the command ring layout.
51  *
52  * SECURITY:
53  * The user process must be assumed to be malicious. There's no way to
54  * prevent it breaking the command ring protocol if it wants, but in
55  * order to prevent other issues we must only ever read *data* from
56  * the shared memory area, not offsets or sizes. This applies to
57  * command ring entries as well as the mailbox. Extra code needed for
58  * this may have a 'UAM' comment.
59  */
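
/*
 * Illustrative sketch (not part of this driver): a minimal userspace
 * consumer would mmap the UIO device and drain the command ring roughly
 * as below. "fd", "mmap_len" and "handle_scsi_cmd" are assumptions made
 * for the example; the authoritative layout and accessors are in
 * <linux/target_core_user.h> and Documentation/target/tcmu-design.rst.
 *
 *	struct tcmu_mailbox *mb = mmap(NULL, mmap_len,
 *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	while (mb->cmd_tail != mb->cmd_head) {
 *		struct tcmu_cmd_entry *ent =
 *			(void *)mb + mb->cmdr_off + mb->cmd_tail;
 *
 *		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD)
 *			handle_scsi_cmd(ent);
 *
 *		mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *			       mb->cmdr_size;
 *	}
 *
 * A subsequent 4-byte write() to the uio fd then tells the kernel to
 * reap the completed entries.
 */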
60
61 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
62
63 /* For mailbox plus cmd ring, the size is fixed 8MB */
64 #define MB_CMDR_SIZE (8 * 1024 * 1024)
65 /* Offset of cmd ring is size of mailbox */
66 #define CMDR_OFF sizeof(struct tcmu_mailbox)
67 #define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF)
68
69 /*
70  * For the data area, the default block size is PAGE_SIZE and
71  * the default total size is 256K * PAGE_SIZE.
72  */
73 #define DATA_PAGES_PER_BLK_DEF 1
74 #define DATA_AREA_PAGES_DEF (256 * 1024)
75
76 #define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
77 #define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
78
79 /*
80  * Default global maximum number of data pages (512K * PAGE_SIZE)
81  * above which the unmap (reclaim) thread will be started.
82  */
83 #define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)
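
/*
 * Worked example, assuming 4K pages: the default per-device data area is
 * 256K pages = 1 GiB (TCMU_MBS_TO_PAGES(1024) == 256 * 1024), and global
 * reclaim kicks in once all devices together exceed 512K pages = 2 GiB.
 */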
84
85 static u8 tcmu_kern_cmd_reply_supported;
86 static u8 tcmu_netlink_blocked;
87
88 static struct device *tcmu_root_device;
89
90 struct tcmu_hba {
91         u32 host_id;
92 };
93
94 #define TCMU_CONFIG_LEN 256
95
96 static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
97 static LIST_HEAD(tcmu_nl_cmd_list);
98
99 struct tcmu_dev;
100
101 struct tcmu_nl_cmd {
102         /* wake up thread waiting for reply */
103         struct completion complete;
104         struct list_head nl_list;
105         struct tcmu_dev *udev;
106         int cmd;
107         int status;
108 };
109
110 struct tcmu_dev {
111         struct list_head node;
112         struct kref kref;
113
114         struct se_device se_dev;
115         struct se_dev_plug se_plug;
116
117         char *name;
118         struct se_hba *hba;
119
120 #define TCMU_DEV_BIT_OPEN 0
121 #define TCMU_DEV_BIT_BROKEN 1
122 #define TCMU_DEV_BIT_BLOCKED 2
123 #define TCMU_DEV_BIT_TMR_NOTIFY 3
124 #define TCMU_DEV_BIT_PLUGGED 4
125         unsigned long flags;
126
127         struct uio_info uio_info;
128
129         struct inode *inode;
130
131         uint64_t dev_size;
132
133         struct tcmu_mailbox *mb_addr;
134         void *cmdr;
135         u32 cmdr_size;
136         u32 cmdr_last_cleaned;
137         /* Offset of data area from start of mb */
138         /* Must add data_off and mb_addr to get the address */
139         size_t data_off;
140         int data_area_mb;
141         uint32_t max_blocks;
142         size_t mmap_pages;
143
144         struct mutex cmdr_lock;
145         struct list_head qfull_queue;
146         struct list_head tmr_queue;
147
148         uint32_t dbi_max;
149         uint32_t dbi_thresh;
150         unsigned long *data_bitmap;
151         struct xarray data_pages;
152         uint32_t data_pages_per_blk;
153         uint32_t data_blk_size;
154
155         struct xarray commands;
156
157         struct timer_list cmd_timer;
158         unsigned int cmd_time_out;
159         struct list_head inflight_queue;
160
161         struct timer_list qfull_timer;
162         int qfull_time_out;
163
164         struct list_head timedout_entry;
165
166         struct tcmu_nl_cmd curr_nl_cmd;
167
168         char dev_config[TCMU_CONFIG_LEN];
169
170         int nl_reply_supported;
171 };
172
173 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
174
175 struct tcmu_cmd {
176         struct se_cmd *se_cmd;
177         struct tcmu_dev *tcmu_dev;
178         struct list_head queue_entry;
179
180         uint16_t cmd_id;
181
182         /* Can't use se_cmd when cleaning up expired cmds, because if
183          * cmd has been completed then accessing se_cmd is off limits */
184         uint32_t dbi_cnt;
185         uint32_t dbi_bidi_cnt;
186         uint32_t dbi_cur;
187         uint32_t *dbi;
188
189         uint32_t data_len_bidi;
190
191         unsigned long deadline;
192
193 #define TCMU_CMD_BIT_EXPIRED 0
194 #define TCMU_CMD_BIT_KEEP_BUF 1
195         unsigned long flags;
196 };
197
198 struct tcmu_tmr {
199         struct list_head queue_entry;
200
201         uint8_t tmr_type;
202         uint32_t tmr_cmd_cnt;
203         int16_t tmr_cmd_ids[];
204 };
205
206 /*
207  * To avoid deadlocks, the mutex lock order should always be:
208  *
209  * mutex_lock(&root_udev_mutex);
210  * ...
211  * mutex_lock(&tcmu_dev->cmdr_lock);
212  * mutex_unlock(&tcmu_dev->cmdr_lock);
213  * ...
214  * mutex_unlock(&root_udev_mutex);
215  */
216 static DEFINE_MUTEX(root_udev_mutex);
217 static LIST_HEAD(root_udev);
218
219 static DEFINE_SPINLOCK(timed_out_udevs_lock);
220 static LIST_HEAD(timed_out_udevs);
221
222 static struct kmem_cache *tcmu_cmd_cache;
223
224 static atomic_t global_page_count = ATOMIC_INIT(0);
225 static struct delayed_work tcmu_unmap_work;
226 static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;
227
228 static int tcmu_set_global_max_data_area(const char *str,
229                                          const struct kernel_param *kp)
230 {
231         int ret, max_area_mb;
232
233         ret = kstrtoint(str, 10, &max_area_mb);
234         if (ret)
235                 return -EINVAL;
236
237         if (max_area_mb <= 0) {
238                 pr_err("global_max_data_area must be larger than 0.\n");
239                 return -EINVAL;
240         }
241
242         tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
243         if (atomic_read(&global_page_count) > tcmu_global_max_pages)
244                 schedule_delayed_work(&tcmu_unmap_work, 0);
245         else
246                 cancel_delayed_work_sync(&tcmu_unmap_work);
247
248         return 0;
249 }
250
251 static int tcmu_get_global_max_data_area(char *buffer,
252                                          const struct kernel_param *kp)
253 {
254         return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
255 }
256
257 static const struct kernel_param_ops tcmu_global_max_data_area_op = {
258         .set = tcmu_set_global_max_data_area,
259         .get = tcmu_get_global_max_data_area,
260 };
261
262 module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
263                 S_IWUSR | S_IRUGO);
264 MODULE_PARM_DESC(global_max_data_area_mb,
265                  "Max MBs allowed to be allocated to all tcmu devices' "
266                  "data areas.");
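
/*
 * Example: the limit can be read or tuned at runtime through the standard
 * module parameter path, e.g.:
 *
 *	echo 4096 > /sys/module/target_core_user/parameters/global_max_data_area_mb
 */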
267
268 static int tcmu_get_block_netlink(char *buffer,
269                                   const struct kernel_param *kp)
270 {
271         return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
272                        "blocked" : "unblocked");
273 }
274
275 static int tcmu_set_block_netlink(const char *str,
276                                   const struct kernel_param *kp)
277 {
278         int ret;
279         u8 val;
280
281         ret = kstrtou8(str, 0, &val);
282         if (ret < 0)
283                 return ret;
284
285         if (val > 1) {
286                 pr_err("Invalid block netlink value %u\n", val);
287                 return -EINVAL;
288         }
289
290         tcmu_netlink_blocked = val;
291         return 0;
292 }
293
294 static const struct kernel_param_ops tcmu_block_netlink_op = {
295         .set = tcmu_set_block_netlink,
296         .get = tcmu_get_block_netlink,
297 };
298
299 module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
300 MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
301
302 static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
303 {
304         struct tcmu_dev *udev = nl_cmd->udev;
305
306         if (!tcmu_netlink_blocked) {
307                 pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
308                 return -EBUSY;
309         }
310
311         if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
312                 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
313                 nl_cmd->status = -EINTR;
314                 list_del(&nl_cmd->nl_list);
315                 complete(&nl_cmd->complete);
316         }
317         return 0;
318 }
319
320 static int tcmu_set_reset_netlink(const char *str,
321                                   const struct kernel_param *kp)
322 {
323         struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
324         int ret;
325         u8 val;
326
327         ret = kstrtou8(str, 0, &val);
328         if (ret < 0)
329                 return ret;
330
331         if (val != 1) {
332                 pr_err("Invalid reset netlink value %u\n", val);
333                 return -EINVAL;
334         }
335
336         mutex_lock(&tcmu_nl_cmd_mutex);
337         list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
338                 ret = tcmu_fail_netlink_cmd(nl_cmd);
339                 if (ret)
340                         break;
341         }
342         mutex_unlock(&tcmu_nl_cmd_mutex);
343
344         return ret;
345 }
346
347 static const struct kernel_param_ops tcmu_reset_netlink_op = {
348         .set = tcmu_set_reset_netlink,
349 };
350
351 module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
352 MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
353
354 /* multicast group */
355 enum tcmu_multicast_groups {
356         TCMU_MCGRP_CONFIG,
357 };
358
359 static const struct genl_multicast_group tcmu_mcgrps[] = {
360         [TCMU_MCGRP_CONFIG] = { .name = "config", },
361 };
362
363 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
364         [TCMU_ATTR_DEVICE]      = { .type = NLA_STRING },
365         [TCMU_ATTR_MINOR]       = { .type = NLA_U32 },
366         [TCMU_ATTR_CMD_STATUS]  = { .type = NLA_S32 },
367         [TCMU_ATTR_DEVICE_ID]   = { .type = NLA_U32 },
368         [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
369 };
370
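/*
 * Userspace acknowledges a kernel-initiated netlink command by sending a
 * *_DONE message carrying TCMU_ATTR_DEVICE_ID and TCMU_ATTR_CMD_STATUS;
 * find the matching pending tcmu_nl_cmd and wake up the waiter.
 */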
371 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
372 {
373         struct tcmu_dev *udev = NULL;
374         struct tcmu_nl_cmd *nl_cmd;
375         int dev_id, rc, ret = 0;
376
377         if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
378             !info->attrs[TCMU_ATTR_DEVICE_ID]) {
379                 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
380                 return -EINVAL;
381         }
382
383         dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
384         rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
385
386         mutex_lock(&tcmu_nl_cmd_mutex);
387         list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
388                 if (nl_cmd->udev->se_dev.dev_index == dev_id) {
389                         udev = nl_cmd->udev;
390                         break;
391                 }
392         }
393
394         if (!udev) {
395                 pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
396                        completed_cmd, rc, dev_id);
397                 ret = -ENODEV;
398                 goto unlock;
399         }
400         list_del(&nl_cmd->nl_list);
401
402         pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
403                  udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
404                  nl_cmd->status);
405
406         if (nl_cmd->cmd != completed_cmd) {
407                 pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
408                        udev->name, completed_cmd, nl_cmd->cmd);
409                 ret = -EINVAL;
410                 goto unlock;
411         }
412
413         nl_cmd->status = rc;
414         complete(&nl_cmd->complete);
415 unlock:
416         mutex_unlock(&tcmu_nl_cmd_mutex);
417         return ret;
418 }
419
420 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
421 {
422         return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
423 }
424
425 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
426 {
427         return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
428 }
429
430 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
431                                        struct genl_info *info)
432 {
433         return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
434 }
435
436 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
437 {
438         if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
439                 tcmu_kern_cmd_reply_supported =
440                         nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
441                 printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
442                        tcmu_kern_cmd_reply_supported);
443         }
444
445         return 0;
446 }
447
448 static const struct genl_small_ops tcmu_genl_ops[] = {
449         {
450                 .cmd    = TCMU_CMD_SET_FEATURES,
451                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
452                 .flags  = GENL_ADMIN_PERM,
453                 .doit   = tcmu_genl_set_features,
454         },
455         {
456                 .cmd    = TCMU_CMD_ADDED_DEVICE_DONE,
457                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
458                 .flags  = GENL_ADMIN_PERM,
459                 .doit   = tcmu_genl_add_dev_done,
460         },
461         {
462                 .cmd    = TCMU_CMD_REMOVED_DEVICE_DONE,
463                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
464                 .flags  = GENL_ADMIN_PERM,
465                 .doit   = tcmu_genl_rm_dev_done,
466         },
467         {
468                 .cmd    = TCMU_CMD_RECONFIG_DEVICE_DONE,
469                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
470                 .flags  = GENL_ADMIN_PERM,
471                 .doit   = tcmu_genl_reconfig_dev_done,
472         },
473 };
474
475 /* Our generic netlink family */
476 static struct genl_family tcmu_genl_family __ro_after_init = {
477         .module = THIS_MODULE,
478         .hdrsize = 0,
479         .name = "TCM-USER",
480         .version = 2,
481         .maxattr = TCMU_ATTR_MAX,
482         .policy = tcmu_attr_policy,
483         .mcgrps = tcmu_mcgrps,
484         .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
485         .netnsok = true,
486         .small_ops = tcmu_genl_ops,
487         .n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
488 };
489
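/*
 * Per-command data block index (dbi) bookkeeping: tcmu_cmd->dbi[] records
 * which data blocks a command owns, and dbi_cur is the cursor used while
 * filling or walking that array.
 */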
490 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
491 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
492 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
493 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
494
495 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
496 {
497         struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
498         uint32_t i;
499
500         for (i = 0; i < len; i++)
501                 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
502 }
503
504 static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
505                                        struct tcmu_cmd *tcmu_cmd,
506                                        int prev_dbi, int length, int *iov_cnt)
507 {
508         XA_STATE(xas, &udev->data_pages, 0);
509         struct page *page;
510         int i, cnt, dbi, dpi;
511         int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
512
513         dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
514         if (dbi == udev->dbi_thresh)
515                 return -1;
516
517         dpi = dbi * udev->data_pages_per_blk;
518         /* Count the number of already allocated pages */
519         xas_set(&xas, dpi);
520         rcu_read_lock();
521         for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
522                 cnt++;
523         rcu_read_unlock();
524
525         for (i = cnt; i < page_cnt; i++) {
526                 /* try to get a new zeroed page from the mm */
527                 page = alloc_page(GFP_NOIO | __GFP_ZERO);
528                 if (!page)
529                         break;
530
531                 if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
532                         __free_page(page);
533                         break;
534                 }
535         }
536         if (atomic_add_return(i - cnt, &global_page_count) >
537                               tcmu_global_max_pages)
538                 schedule_delayed_work(&tcmu_unmap_work, 0);
539
540         if (i && dbi > udev->dbi_max)
541                 udev->dbi_max = dbi;
542
543         set_bit(dbi, udev->data_bitmap);
544         tcmu_cmd_set_dbi(tcmu_cmd, dbi);
545
546         if (dbi != prev_dbi + 1)
547                 *iov_cnt += 1;
548
549         return i == page_cnt ? dbi : -1;
550 }
551
552 static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
553                                  struct tcmu_cmd *tcmu_cmd, int length)
554 {
555         /* start value of dbi + 1 must not be a valid dbi */
556         int dbi = -2;
557         int blk_data_len, iov_cnt = 0;
558         uint32_t blk_size = udev->data_blk_size;
559
560         for (; length > 0; length -= blk_size) {
561                 blk_data_len = min_t(uint32_t, length, blk_size);
562                 dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
563                                            &iov_cnt);
564                 if (dbi < 0)
565                         return -1;
566         }
567         return iov_cnt;
568 }
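
/*
 * Worked example for tcmu_get_empty_blocks() above: with a 4K
 * data_blk_size, a 10K request takes three blocks (4K + 4K + 2K);
 * iov_cnt only grows when a newly allocated block is not adjacent
 * to the previous one.
 */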
569
570 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
571 {
572         kfree(tcmu_cmd->dbi);
573         kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
574 }
575
576 static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
577 {
578         int i, len;
579         struct se_cmd *se_cmd = cmd->se_cmd;
580         uint32_t blk_size = cmd->tcmu_dev->data_blk_size;
581
582         cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);
583
584         if (se_cmd->se_cmd_flags & SCF_BIDI) {
585                 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
586                 for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
587                         len += se_cmd->t_bidi_data_sg[i].length;
588                 cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
589                 cmd->dbi_cnt += cmd->dbi_bidi_cnt;
590                 cmd->data_len_bidi = len;
591         }
592 }
593
594 static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
595                             struct iovec **iov, int prev_dbi, int len)
596 {
597         /* Get the next dbi */
598         int dbi = tcmu_cmd_get_dbi(cmd);
599
600         /* Do not add more than udev->data_blk_size to iov */
601         len = min_t(int, len, udev->data_blk_size);
602
603         /*
604          * The following code will gather and map the blocks to the same iovec
605          * when the blocks are all next to each other.
606          */
607         if (dbi != prev_dbi + 1) {
608                 /* dbi is not next to previous dbi, so start new iov */
609                 if (prev_dbi >= 0)
610                         (*iov)++;
611                 /* write offset relative to mb_addr */
612                 (*iov)->iov_base = (void __user *)
613                                    (udev->data_off + dbi * udev->data_blk_size);
614         }
615         (*iov)->iov_len += len;
616
617         return dbi;
618 }
619
620 static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
621                             struct iovec **iov, int data_length)
622 {
623         /* start value of dbi + 1 must not be a valid dbi */
624         int dbi = -2;
625
626         /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
627         for (; data_length > 0; data_length -= udev->data_blk_size)
628                 dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
629 }
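
/*
 * Example of the iovec merging done by new_block_to_iov() above: if a
 * command owns data blocks 5, 6 and 9, its data is described by two
 * iovecs, one covering blocks 5-6 and one covering block 9.
 */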
630
631 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
632 {
633         struct se_device *se_dev = se_cmd->se_dev;
634         struct tcmu_dev *udev = TCMU_DEV(se_dev);
635         struct tcmu_cmd *tcmu_cmd;
636
637         tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
638         if (!tcmu_cmd)
639                 return NULL;
640
641         INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
642         tcmu_cmd->se_cmd = se_cmd;
643         tcmu_cmd->tcmu_dev = udev;
644
645         tcmu_cmd_set_block_cnts(tcmu_cmd);
646         tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
647                                 GFP_NOIO);
648         if (!tcmu_cmd->dbi) {
649                 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
650                 return NULL;
651         }
652
653         return tcmu_cmd;
654 }
655
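/*
 * Flush whole pages covering [vaddr, vaddr + size), since
 * flush_dcache_page() works at page granularity only.
 */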
656 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
657 {
658         unsigned long offset = offset_in_page(vaddr);
659         void *start = vaddr - offset;
660
661         size = round_up(size+offset, PAGE_SIZE);
662
663         while (size) {
664                 flush_dcache_page(vmalloc_to_page(start));
665                 start += PAGE_SIZE;
666                 size -= PAGE_SIZE;
667         }
668 }
669
670 /*
671  * Some ring helper functions. We don't assume size is a power of 2 so
672  * we can't use circ_buf.h.
673  */
674 static inline size_t spc_used(size_t head, size_t tail, size_t size)
675 {
676         int diff = head - tail;
677
678         if (diff >= 0)
679                 return diff;
680         else
681                 return size + diff;
682 }
683
684 static inline size_t spc_free(size_t head, size_t tail, size_t size)
685 {
686         /* Keep 1 byte unused or we can't tell full from empty */
687         return (size - spc_used(head, tail, size) - 1);
688 }
689
690 static inline size_t head_to_end(size_t head, size_t size)
691 {
692         return size - head;
693 }
694
695 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
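
/*
 * Worked example: with size = 10, head = 2 and tail = 8, spc_used() is
 * 2 - 8 + 10 = 4 and spc_free() is 10 - 4 - 1 = 5; the byte held back
 * keeps a full ring distinguishable from an empty one.
 */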
696
697 #define TCMU_SG_TO_DATA_AREA 1
698 #define TCMU_DATA_AREA_TO_SG 2
699
700 static inline void tcmu_copy_data(struct tcmu_dev *udev,
701                                   struct tcmu_cmd *tcmu_cmd, uint32_t direction,
702                                   struct scatterlist *sg, unsigned int sg_nents,
703                                   struct iovec **iov, size_t data_len)
704 {
705         /* start value of dbi + 1 must not be a valid dbi */
706         int dbi = -2;
707         size_t page_remaining, cp_len;
708         int page_cnt, page_inx, dpi;
709         struct sg_mapping_iter sg_iter;
710         unsigned int sg_flags;
711         struct page *page;
712         void *data_page_start, *data_addr;
713
714         if (direction == TCMU_SG_TO_DATA_AREA)
715                 sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
716         else
717                 sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
718         sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);
719
720         while (data_len) {
721                 if (direction == TCMU_SG_TO_DATA_AREA)
722                         dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
723                                                data_len);
724                 else
725                         dbi = tcmu_cmd_get_dbi(tcmu_cmd);
726
727                 page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
728                 if (page_cnt > udev->data_pages_per_blk)
729                         page_cnt = udev->data_pages_per_blk;
730
731                 dpi = dbi * udev->data_pages_per_blk;
732                 for (page_inx = 0; page_inx < page_cnt && data_len;
733                      page_inx++, dpi++) {
734                         page = xa_load(&udev->data_pages, dpi);
735
736                         if (direction == TCMU_DATA_AREA_TO_SG)
737                                 flush_dcache_page(page);
738                         data_page_start = kmap_atomic(page);
739                         page_remaining = PAGE_SIZE;
740
741                         while (page_remaining && data_len) {
742                                 if (!sg_miter_next(&sg_iter)) {
743                                         /* set length to 0 to abort outer loop */
744                                         data_len = 0;
745                                         pr_debug("%s: aborting data copy due to exhausted sg_list\n",
746                                                  __func__);
747                                         break;
748                                 }
749                                 cp_len = min3(sg_iter.length, page_remaining,
750                                               data_len);
751
752                                 data_addr = data_page_start +
753                                             PAGE_SIZE - page_remaining;
754                                 if (direction == TCMU_SG_TO_DATA_AREA)
755                                         memcpy(data_addr, sg_iter.addr, cp_len);
756                                 else
757                                         memcpy(sg_iter.addr, data_addr, cp_len);
758
759                                 data_len -= cp_len;
760                                 page_remaining -= cp_len;
761                                 sg_iter.consumed = cp_len;
762                         }
763                         sg_miter_stop(&sg_iter);
764
765                         kunmap_atomic(data_page_start);
766                         if (direction == TCMU_SG_TO_DATA_AREA)
767                                 flush_dcache_page(page);
768                 }
769         }
770 }
771
772 static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
773                               struct iovec **iov)
774 {
775         struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
776
777         tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
778                        se_cmd->t_data_nents, iov, se_cmd->data_length);
779 }
780
781 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
782                              bool bidi, uint32_t read_len)
783 {
784         struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
785         struct scatterlist *data_sg;
786         unsigned int data_nents;
787
788         if (!bidi) {
789                 data_sg = se_cmd->t_data_sg;
790                 data_nents = se_cmd->t_data_nents;
791         } else {
792                 /*
793                  * For the bidi case, the first (dbi_cnt - dbi_bidi_cnt) blocks
794                  * hold the Data-Out buffer, so skip over them before
795                  * gathering the Data-In buffer.
796                  */
797                 tcmu_cmd_set_dbi_cur(tcmu_cmd,
798                                      tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);
799
800                 data_sg = se_cmd->t_bidi_data_sg;
801                 data_nents = se_cmd->t_bidi_data_nents;
802         }
803
804         tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
805                        data_nents, NULL, read_len);
806 }
807
808 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
809 {
810         return thresh - bitmap_weight(bitmap, thresh);
811 }
812
813 /*
814  * We can't queue a command until we have space available on the cmd ring.
815  *
816  * Called with ring lock held.
817  */
818 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
819 {
820         struct tcmu_mailbox *mb = udev->mb_addr;
821         size_t space, cmd_needed;
822         u32 cmd_head;
823
824         tcmu_flush_dcache_range(mb, sizeof(*mb));
825
826         cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
827
828         /*
829          * If cmd end-of-ring space is too small then we need space for a PAD
830          * entry plus the original cmd - cmds are internally contiguous.
831          */
832         if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
833                 cmd_needed = cmd_size;
834         else
835                 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
836
837         space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
838         if (space < cmd_needed) {
839                 pr_debug("no cmd space: %u %u %u\n", cmd_head,
840                        udev->cmdr_last_cleaned, udev->cmdr_size);
841                 return false;
842         }
843         return true;
844 }
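
/*
 * Worked example: if head_to_end() is 512 and the command needs 1K,
 * the command cannot sit at the ring end, so cmd_needed becomes
 * 1K + 512: 512 bytes for a PAD entry filling the tail of the ring
 * plus 1K for the command, which then starts at offset 0.
 */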
845
846 /*
847  * We have to allocate data buffers before we can queue a command.
848  * Returns -1 on error (not enough space) or the number of needed iovs on success.
849  *
850  * Called with ring lock held.
851  */
852 static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
853                                   int *iov_bidi_cnt)
854 {
855         int space, iov_cnt = 0, ret = 0;
856
857         if (!cmd->dbi_cnt)
858                 goto wr_iov_cnts;
859
860         /* try to get the needed data blocks */
861         space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
862         if (space < cmd->dbi_cnt) {
863                 unsigned long blocks_left =
864                                 (udev->max_blocks - udev->dbi_thresh) + space;
865
866                 if (blocks_left < cmd->dbi_cnt) {
867                         pr_debug("no data space: only %lu available, but asked for %u\n",
868                                         blocks_left * udev->data_blk_size,
869                                         cmd->dbi_cnt * udev->data_blk_size);
870                         return -1;
871                 }
872
873                 udev->dbi_thresh += cmd->dbi_cnt;
874                 if (udev->dbi_thresh > udev->max_blocks)
875                         udev->dbi_thresh = udev->max_blocks;
876         }
877
878         iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
879         if (iov_cnt < 0)
880                 return -1;
881
882         if (cmd->dbi_bidi_cnt) {
883                 ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
884                 if (ret < 0)
885                         return -1;
886         }
887 wr_iov_cnts:
888         *iov_bidi_cnt = ret;
889         return iov_cnt + ret;
890 }
891
892 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
893 {
894         return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
895                         sizeof(struct tcmu_cmd_entry));
896 }
897
898 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
899                                            size_t base_command_size)
900 {
901         struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
902         size_t command_size;
903
904         command_size = base_command_size +
905                 round_up(scsi_command_size(se_cmd->t_task_cdb),
906                                 TCMU_OP_ALIGN_SIZE);
907
908         WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
909
910         return command_size;
911 }
912
913 static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
914                                  struct timer_list *timer)
915 {
916         if (!tmo)
917                 return;
918
919         tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
920         if (!timer_pending(timer))
921                 mod_timer(timer, tcmu_cmd->deadline);
922
923         pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
924                  tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
925 }
926
927 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
928 {
929         struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
930         unsigned int tmo;
931
932         /*
933          * For backwards compat: a zero qfull_time_out means don't wait at
934          * all; if it is negative, use cmd_time_out or the default timeout.
935          */
936         if (!udev->qfull_time_out)
937                 return -ETIMEDOUT;
938         else if (udev->qfull_time_out > 0)
939                 tmo = udev->qfull_time_out;
940         else if (udev->cmd_time_out)
941                 tmo = udev->cmd_time_out;
942         else
943                 tmo = TCMU_TIME_OUT;
944
945         tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
946
947         list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
948         pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
949                  tcmu_cmd, udev->name);
950         return 0;
951 }
952
953 static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
954 {
955         struct tcmu_cmd_entry_hdr *hdr;
956         struct tcmu_mailbox *mb = udev->mb_addr;
957         uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
958
959         /* Insert a PAD if end-of-ring space is too small */
960         if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
961                 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
962
963                 hdr = udev->cmdr + cmd_head;
964                 tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
965                 tcmu_hdr_set_len(&hdr->len_op, pad_size);
966                 hdr->cmd_id = 0; /* not used for PAD */
967                 hdr->kflags = 0;
968                 hdr->uflags = 0;
969                 tcmu_flush_dcache_range(hdr, sizeof(*hdr));
970
971                 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
972                 tcmu_flush_dcache_range(mb, sizeof(*mb));
973
974                 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
975                 WARN_ON(cmd_head != 0);
976         }
977
978         return cmd_head;
979 }
980
981 static void tcmu_unplug_device(struct se_dev_plug *se_plug)
982 {
983         struct se_device *se_dev = se_plug->se_dev;
984         struct tcmu_dev *udev = TCMU_DEV(se_dev);
985
986         clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags);
987         uio_event_notify(&udev->uio_info);
988 }
989
990 static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
991 {
992         struct tcmu_dev *udev = TCMU_DEV(se_dev);
993
994         if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
995                 return &udev->se_plug;
996
997         return NULL;
998 }
999
1000 /**
1001  * queue_cmd_ring - queue cmd to ring or internally
1002  * @tcmu_cmd: cmd to queue
1003  * @scsi_err: TCM error code, set when -1 (failure) is returned.
1004  *
1005  * Returns:
1006  * -1 we cannot queue internally or to the ring.
1007  *  0 success
1008  *  1 internally queued to wait for ring memory to free.
1009  */
1010 static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
1011 {
1012         struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
1013         struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
1014         size_t base_command_size, command_size;
1015         struct tcmu_mailbox *mb = udev->mb_addr;
1016         struct tcmu_cmd_entry *entry;
1017         struct iovec *iov;
1018         int iov_cnt, iov_bidi_cnt;
1019         uint32_t cmd_id, cmd_head;
1020         uint64_t cdb_off;
1021         uint32_t blk_size = udev->data_blk_size;
1022         /* size of data buffer needed */
1023         size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;
1024
1025         *scsi_err = TCM_NO_SENSE;
1026
1027         if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
1028                 *scsi_err = TCM_LUN_BUSY;
1029                 return -1;
1030         }
1031
1032         if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1033                 *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1034                 return -1;
1035         }
1036
1037         if (!list_empty(&udev->qfull_queue))
1038                 goto queue;
1039
1040         if (data_length > (size_t)udev->max_blocks * blk_size) {
1041                 pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
1042                         data_length, (size_t)udev->max_blocks * blk_size);
1043                 *scsi_err = TCM_INVALID_CDB_FIELD;
1044                 return -1;
1045         }
1046
1047         iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
1048         if (iov_cnt < 0)
1049                 goto free_and_queue;
1050
1051         /*
1052          * Must be a certain minimum size for response sense info, but
1053          * also may be larger if the iov array is large.
1054          */
1055         base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
1056         command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1057
1058         if (command_size > (udev->cmdr_size / 2)) {
1059                 pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
1060                         command_size, udev->cmdr_size);
1061                 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
1062                 *scsi_err = TCM_INVALID_CDB_FIELD;
1063                 return -1;
1064         }
1065
1066         if (!is_ring_space_avail(udev, command_size))
1067                 /*
1068                  * Don't leave commands partially setup because the unmap
1069                  * thread might need the blocks to make forward progress.
1070                  */
1071                 goto free_and_queue;
1072
1073         if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
1074                      GFP_NOWAIT) < 0) {
1075                 pr_err("tcmu: Could not allocate cmd id.\n");
1076
1077                 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
1078                 *scsi_err = TCM_OUT_OF_RESOURCES;
1079                 return -1;
1080         }
1081         tcmu_cmd->cmd_id = cmd_id;
1082
1083         pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
1084                  tcmu_cmd, udev->name);
1085
1086         cmd_head = ring_insert_padding(udev, command_size);
1087
1088         entry = udev->cmdr + cmd_head;
1089         memset(entry, 0, command_size);
1090         tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
1091
1092         /* prepare iov list and copy data to data area if necessary */
1093         tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1094         iov = &entry->req.iov[0];
1095
1096         if (se_cmd->data_direction == DMA_TO_DEVICE ||
1097             se_cmd->se_cmd_flags & SCF_BIDI)
1098                 scatter_data_area(udev, tcmu_cmd, &iov);
1099         else
1100                 tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
1101
1102         entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
1103
1104         /* Handle BIDI commands */
1105         if (se_cmd->se_cmd_flags & SCF_BIDI) {
1106                 iov++;
1107                 tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
1108                 entry->req.iov_bidi_cnt = iov_bidi_cnt;
1109         }
1110
1111         tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
1112
1113         entry->hdr.cmd_id = tcmu_cmd->cmd_id;
1114
1115         tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
1116
1117         /* All offsets relative to mb_addr, not start of entry! */
1118         cdb_off = CMDR_OFF + cmd_head + base_command_size;
1119         memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
1120         entry->req.cdb_off = cdb_off;
1121         tcmu_flush_dcache_range(entry, command_size);
1122
1123         UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1124         tcmu_flush_dcache_range(mb, sizeof(*mb));
1125
1126         list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1127
1128         if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
1129                 uio_event_notify(&udev->uio_info);
1130
1131         return 0;
1132
1133 free_and_queue:
1134         tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
1135         tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1136
1137 queue:
1138         if (add_to_qfull_queue(tcmu_cmd)) {
1139                 *scsi_err = TCM_OUT_OF_RESOURCES;
1140                 return -1;
1141         }
1142
1143         return 1;
1144 }
1145
1146 /**
1147  * queue_tmr_ring - queue tmr info to ring or internally
1148  * @udev: related tcmu_dev
1149  * @tmr: tcmu_tmr containing tmr info to queue
1150  *
1151  * Returns:
1152  *  0 success
1153  *  1 internally queued to wait for ring memory to free.
1154  */
1155 static int
1156 queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
1157 {
1158         struct tcmu_tmr_entry *entry;
1159         int cmd_size;
1160         int id_list_sz;
1161         struct tcmu_mailbox *mb = udev->mb_addr;
1162         uint32_t cmd_head;
1163
1164         if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
1165                 goto out_free;
1166
1167         id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
1168         cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
1169
1170         if (!list_empty(&udev->tmr_queue) ||
1171             !is_ring_space_avail(udev, cmd_size)) {
1172                 list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
1173                 pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
1174                          tmr, udev->name);
1175                 return 1;
1176         }
1177
1178         cmd_head = ring_insert_padding(udev, cmd_size);
1179
1180         entry = udev->cmdr + cmd_head;
1181         memset(entry, 0, cmd_size);
1182         tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
1183         tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
1184         entry->tmr_type = tmr->tmr_type;
1185         entry->cmd_cnt = tmr->tmr_cmd_cnt;
1186         memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
1187         tcmu_flush_dcache_range(entry, cmd_size);
1188
1189         UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
1190         tcmu_flush_dcache_range(mb, sizeof(*mb));
1191
1192         uio_event_notify(&udev->uio_info);
1193
1194 out_free:
1195         kfree(tmr);
1196
1197         return 0;
1198 }
1199
1200 static sense_reason_t
1201 tcmu_queue_cmd(struct se_cmd *se_cmd)
1202 {
1203         struct se_device *se_dev = se_cmd->se_dev;
1204         struct tcmu_dev *udev = TCMU_DEV(se_dev);
1205         struct tcmu_cmd *tcmu_cmd;
1206         sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
1207         int ret = -1;
1208
1209         tcmu_cmd = tcmu_alloc_cmd(se_cmd);
1210         if (!tcmu_cmd)
1211                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1212
1213         mutex_lock(&udev->cmdr_lock);
1214         if (!(se_cmd->transport_state & CMD_T_ABORTED))
1215                 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1216         if (ret < 0)
1217                 tcmu_free_cmd(tcmu_cmd);
1218         else
1219                 se_cmd->priv = tcmu_cmd;
1220         mutex_unlock(&udev->cmdr_lock);
1221         return scsi_ret;
1222 }
1223
1224 static void tcmu_set_next_deadline(struct list_head *queue,
1225                                    struct timer_list *timer)
1226 {
1227         struct tcmu_cmd *cmd;
1228
1229         if (!list_empty(queue)) {
1230                 cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
1231                 mod_timer(timer, cmd->deadline);
1232         } else
1233                 del_timer(timer);
1234 }
1235
1236 static int
1237 tcmu_tmr_type(enum tcm_tmreq_table tmf)
1238 {
1239         switch (tmf) {
1240         case TMR_ABORT_TASK:            return TCMU_TMR_ABORT_TASK;
1241         case TMR_ABORT_TASK_SET:        return TCMU_TMR_ABORT_TASK_SET;
1242         case TMR_CLEAR_ACA:             return TCMU_TMR_CLEAR_ACA;
1243         case TMR_CLEAR_TASK_SET:        return TCMU_TMR_CLEAR_TASK_SET;
1244         case TMR_LUN_RESET:             return TCMU_TMR_LUN_RESET;
1245         case TMR_TARGET_WARM_RESET:     return TCMU_TMR_TARGET_WARM_RESET;
1246         case TMR_TARGET_COLD_RESET:     return TCMU_TMR_TARGET_COLD_RESET;
1247         case TMR_LUN_RESET_PRO:         return TCMU_TMR_LUN_RESET_PRO;
1248         default:                        return TCMU_TMR_UNKNOWN;
1249         }
1250 }
1251
1252 static void
1253 tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
1254                 struct list_head *cmd_list)
1255 {
1256         int i = 0, cmd_cnt = 0;
1257         bool unqueued = false;
1258         struct tcmu_cmd *cmd;
1259         struct se_cmd *se_cmd;
1260         struct tcmu_tmr *tmr;
1261         struct tcmu_dev *udev = TCMU_DEV(se_dev);
1262
1263         mutex_lock(&udev->cmdr_lock);
1264
1265         /* First we check for aborted commands in qfull_queue */
1266         list_for_each_entry(se_cmd, cmd_list, state_list) {
1267                 i++;
1268                 if (!se_cmd->priv)
1269                         continue;
1270                 cmd = se_cmd->priv;
1271                 /* Commands on qfull queue have no id yet */
1272                 if (cmd->cmd_id) {
1273                         cmd_cnt++;
1274                         continue;
1275                 }
1276                 pr_debug("Removing aborted command %p from queue on dev %s.\n",
1277                          cmd, udev->name);
1278
1279                 list_del_init(&cmd->queue_entry);
1280                 tcmu_free_cmd(cmd);
1281                 se_cmd->priv = NULL;
1282                 target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
1283                 unqueued = true;
1284         }
1285         if (unqueued)
1286                 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1287
1288         if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
1289                 goto unlock;
1290
1291         pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
1292                  tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
1293
1294         tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO);
1295         if (!tmr)
1296                 goto unlock;
1297
1298         tmr->tmr_type = tcmu_tmr_type(tmf);
1299         tmr->tmr_cmd_cnt = cmd_cnt;
1300
1301         if (cmd_cnt != 0) {
1302                 cmd_cnt = 0;
1303                 list_for_each_entry(se_cmd, cmd_list, state_list) {
1304                         if (!se_cmd->priv)
1305                                 continue;
1306                         cmd = se_cmd->priv;
1307                         if (cmd->cmd_id)
1308                                 tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
1309                 }
1310         }
1311
1312         queue_tmr_ring(udev, tmr);
1313
1314 unlock:
1315         mutex_unlock(&udev->cmdr_lock);
1316 }
1317
1318 static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
1319                                    struct tcmu_cmd_entry *entry, bool keep_buf)
1320 {
1321         struct se_cmd *se_cmd = cmd->se_cmd;
1322         struct tcmu_dev *udev = cmd->tcmu_dev;
1323         bool read_len_valid = false;
1324         bool ret = true;
1325         uint32_t read_len;
1326
1327         /*
1328          * cmd has been completed already from timeout, just reclaim
1329          * data area space and free cmd
1330          */
1331         if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1332                 WARN_ON_ONCE(se_cmd);
1333                 goto out;
1334         }
1335         if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
1336                 pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
1337                        entry->hdr.cmd_id);
1338                 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1339                 ret = false;
1340                 goto out;
1341         }
1342
1343         list_del_init(&cmd->queue_entry);
1344
1345         tcmu_cmd_reset_dbi_cur(cmd);
1346
1347         if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
1348                 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1349                         cmd->se_cmd);
1350                 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1351                 goto done;
1352         }
1353
1354         read_len = se_cmd->data_length;
1355         if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1356             (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1357                 read_len_valid = true;
1358                 if (entry->rsp.read_len < read_len)
1359                         read_len = entry->rsp.read_len;
1360         }
1361
1362         if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1363                 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1364                 if (!read_len_valid)
1365                         goto done;
1366                 else
1367                         se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1368         }
1369         if (se_cmd->se_cmd_flags & SCF_BIDI) {
1370                 /* Get Data-In buffer before clean up */
1371                 gather_data_area(udev, cmd, true, read_len);
1372         } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1373                 gather_data_area(udev, cmd, false, read_len);
1374         } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1375                 /* TODO: */
1376         } else if (se_cmd->data_direction != DMA_NONE) {
1377                 pr_warn("TCMU: data direction was %d!\n",
1378                         se_cmd->data_direction);
1379         }
1380
1381 done:
1382         se_cmd->priv = NULL;
1383         if (read_len_valid) {
1384                 pr_debug("read_len = %d\n", read_len);
1385                 target_complete_cmd_with_length(cmd->se_cmd,
1386                                         entry->rsp.scsi_status, read_len);
1387         } else
1388                 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1389
1390 out:
1391         if (!keep_buf) {
1392                 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1393                 tcmu_free_cmd(cmd);
1394         } else {
1395                 /*
1396                  * Keep this command after completion, since userspace still
1397                  * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
1398                  * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept
1399                  * a second completion later.
1400                  * Userspace can free the buffer later by writing the cmd_id
1401                  * to new action attribute free_kept_buf.
1402                  */
1403                 clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1404                 set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
1405         }
1406         return ret;
1407 }
1408
1409 static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
1410 {
1411         struct tcmu_tmr *tmr, *tmp;
1412         LIST_HEAD(tmrs);
1413
1414         if (list_empty(&udev->tmr_queue))
1415                 return 1;
1416
1417         pr_debug("running %s's tmr queue\n", udev->name);
1418
1419         list_splice_init(&udev->tmr_queue, &tmrs);
1420
1421         list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
1422                 list_del_init(&tmr->queue_entry);
1423
1424                 pr_debug("removing tmr %p on dev %s from queue\n",
1425                          tmr, udev->name);
1426
1427                 if (queue_tmr_ring(udev, tmr)) {
1428                         pr_debug("ran out of space during tmr queue run\n");
1429                         /*
1430                          * tmr was requeued, so just put all tmrs back in
1431                          * the queue
1432                          */
1433                         list_splice_tail(&tmrs, &udev->tmr_queue);
1434                         return 0;
1435                 }
1436         }
1437
1438         return 1;
1439 }
1440
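/*
 * Walk the cmd ring from cmdr_last_cleaned up to the cmd_tail that
 * userspace has published, completing each finished command. Returns
 * true if ring space was freed and the TMR queue could be drained.
 */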
1441 static bool tcmu_handle_completions(struct tcmu_dev *udev)
1442 {
1443         struct tcmu_mailbox *mb;
1444         struct tcmu_cmd *cmd;
1445         bool free_space = false;
1446
1447         if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1448                 pr_err("ring broken, not handling completions\n");
1449                 return false;
1450         }
1451
1452         mb = udev->mb_addr;
1453         tcmu_flush_dcache_range(mb, sizeof(*mb));
1454
1455         while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1456
1457                 struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
1458                 bool keep_buf;
1459
1460                 /*
1461                  * Flush at most up to the end of the cmd ring, since the current
1462                  * entry might be a padding entry shorter than sizeof(*entry).
1463                  */
1464                 size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
1465                                                udev->cmdr_size);
1466                 tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
1467                                         ring_left : sizeof(*entry));
1468
1469                 free_space = true;
1470
1471                 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
1472                     tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
1473                         UPDATE_HEAD(udev->cmdr_last_cleaned,
1474                                     tcmu_hdr_get_len(entry->hdr.len_op),
1475                                     udev->cmdr_size);
1476                         continue;
1477                 }
1478                 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
1479
1480                 keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
1481                 if (keep_buf)
1482                         cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
1483                 else
1484                         cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
1485                 if (!cmd) {
1486                         pr_err("cmd_id %u not found, ring is broken\n",
1487                                entry->hdr.cmd_id);
1488                         set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1489                         return false;
1490                 }
1491
1492                 if (!tcmu_handle_completion(cmd, entry, keep_buf))
1493                         break;
1494
1495                 UPDATE_HEAD(udev->cmdr_last_cleaned,
1496                             tcmu_hdr_get_len(entry->hdr.len_op),
1497                             udev->cmdr_size);
1498         }
1499         if (free_space)
1500                 free_space = tcmu_run_tmr_queue(udev);
1501
1502         if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
1503             xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
1504                 /*
1505                  * Allocated blocks exceed the global limit and currently no
1506                  * commands are pending or waiting, so try to reclaim blocks.
1507                  */
1508                 schedule_delayed_work(&tcmu_unmap_work, 0);
1509         }
1510         if (udev->cmd_time_out)
1511                 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1512
1513         return free_space;
1514 }
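
/*
 * Matching userspace completion path (a minimal sketch following the
 * uapi ring protocol; sense data and error handling omitted): the
 * handler writes the SCSI status into the entry, may set
 * TCMU_UFLAG_KEEP_BUF to keep the data buffer around until
 * free_kept_buf is written, then publishes the new cmd_tail and kicks
 * the kernel through the uio fd (see tcmu_irqcontrol() below):
 *
 *	ent->rsp.scsi_status = SAM_STAT_GOOD;
 *	ent->hdr.uflags |= TCMU_UFLAG_KEEP_BUF;	// optional
 *	__sync_synchronize();	// make the entry visible before cmd_tail
 *	mb->cmd_tail = (mb->cmd_tail +
 *			tcmu_hdr_get_len(ent->hdr.len_op)) % mb->cmdr_size;
 */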
1515
1516 static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
1517 {
1518         struct se_cmd *se_cmd;
1519
1520         if (!time_after_eq(jiffies, cmd->deadline))
1521                 return;
1522
1523         set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1524         list_del_init(&cmd->queue_entry);
1525         se_cmd = cmd->se_cmd;
1526         se_cmd->priv = NULL;
1527         cmd->se_cmd = NULL;
1528
1529         pr_debug("Timing out inflight cmd %u on dev %s.\n",
1530                  cmd->cmd_id, cmd->tcmu_dev->name);
1531
1532         target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
1533 }
1534
1535 static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
1536 {
1537         struct se_cmd *se_cmd;
1538
1539         if (!time_after_eq(jiffies, cmd->deadline))
1540                 return;
1541
1542         pr_debug("Timing out queued cmd %p on dev %s.\n",
1543                   cmd, cmd->tcmu_dev->name);
1544
1545         list_del_init(&cmd->queue_entry);
1546         se_cmd = cmd->se_cmd;
1547         tcmu_free_cmd(cmd);
1548
1549         se_cmd->priv = NULL;
1550         target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
1551 }
1552
1553 static void tcmu_device_timedout(struct tcmu_dev *udev)
1554 {
1555         spin_lock(&timed_out_udevs_lock);
1556         if (list_empty(&udev->timedout_entry))
1557                 list_add_tail(&udev->timedout_entry, &timed_out_udevs);
1558         spin_unlock(&timed_out_udevs_lock);
1559
1560         schedule_delayed_work(&tcmu_unmap_work, 0);
1561 }
1562
1563 static void tcmu_cmd_timedout(struct timer_list *t)
1564 {
1565         struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
1566
1567         pr_debug("%s cmd timeout has expired\n", udev->name);
1568         tcmu_device_timedout(udev);
1569 }
1570
1571 static void tcmu_qfull_timedout(struct timer_list *t)
1572 {
1573         struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
1574
1575         pr_debug("%s qfull timeout has expired\n", udev->name);
1576         tcmu_device_timedout(udev);
1577 }
1578
1579 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
1580 {
1581         struct tcmu_hba *tcmu_hba;
1582
1583         tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
1584         if (!tcmu_hba)
1585                 return -ENOMEM;
1586
1587         tcmu_hba->host_id = host_id;
1588         hba->hba_ptr = tcmu_hba;
1589
1590         return 0;
1591 }
1592
1593 static void tcmu_detach_hba(struct se_hba *hba)
1594 {
1595         kfree(hba->hba_ptr);
1596         hba->hba_ptr = NULL;
1597 }
1598
1599 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1600 {
1601         struct tcmu_dev *udev;
1602
1603         udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
1604         if (!udev)
1605                 return NULL;
1606         kref_init(&udev->kref);
1607
1608         udev->name = kstrdup(name, GFP_KERNEL);
1609         if (!udev->name) {
1610                 kfree(udev);
1611                 return NULL;
1612         }
1613
1614         udev->hba = hba;
1615         udev->cmd_time_out = TCMU_TIME_OUT;
1616         udev->qfull_time_out = -1;
1617
1618         udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
1619         udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
1620         udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
1621
1622         mutex_init(&udev->cmdr_lock);
1623
1624         INIT_LIST_HEAD(&udev->node);
1625         INIT_LIST_HEAD(&udev->timedout_entry);
1626         INIT_LIST_HEAD(&udev->qfull_queue);
1627         INIT_LIST_HEAD(&udev->tmr_queue);
1628         INIT_LIST_HEAD(&udev->inflight_queue);
1629         xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
1630
1631         timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
1632         timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
1633
1634         xa_init(&udev->data_pages);
1635
1636         return &udev->se_dev;
1637 }
1638
1639 static void tcmu_dev_call_rcu(struct rcu_head *p)
1640 {
1641         struct se_device *dev = container_of(p, struct se_device, rcu_head);
1642         struct tcmu_dev *udev = TCMU_DEV(dev);
1643
1644         kfree(udev->uio_info.name);
1645         kfree(udev->name);
1646         kfree(udev);
1647 }
1648
1649 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1650 {
1651         if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
1652             test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
1653                 kmem_cache_free(tcmu_cmd_cache, cmd);
1654                 return 0;
1655         }
1656         return -EINVAL;
1657 }
1658
1659 static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
1660                                 unsigned long last)
1661 {
1662         XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk);
1663         struct page *page;
1664         u32 pages_freed = 0;
1665
1666         xas_lock(&xas);
1667         xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) {
1668                 xas_store(&xas, NULL);
1669                 __free_page(page);
1670                 pages_freed++;
1671         }
1672         xas_unlock(&xas);
1673
1674         atomic_sub(pages_freed, &global_page_count);
1675
1676         return pages_freed;
1677 }
1678
1679 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
1680 {
1681         struct tcmu_tmr *tmr, *tmp;
1682
1683         list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
1684                 list_del_init(&tmr->queue_entry);
1685                 kfree(tmr);
1686         }
1687 }
1688
1689 static void tcmu_dev_kref_release(struct kref *kref)
1690 {
1691         struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1692         struct se_device *dev = &udev->se_dev;
1693         struct tcmu_cmd *cmd;
1694         bool all_expired = true;
1695         unsigned long i;
1696
1697         vfree(udev->mb_addr);
1698         udev->mb_addr = NULL;
1699
1700         spin_lock_bh(&timed_out_udevs_lock);
1701         if (!list_empty(&udev->timedout_entry))
1702                 list_del(&udev->timedout_entry);
1703         spin_unlock_bh(&timed_out_udevs_lock);
1704
1705         /* Upper layer should drain all requests before calling this */
1706         mutex_lock(&udev->cmdr_lock);
1707         xa_for_each(&udev->commands, i, cmd) {
1708                 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1709                         all_expired = false;
1710         }
1711         /* There can be left over TMR cmds. Remove them. */
1712         tcmu_remove_all_queued_tmr(udev);
1713         if (!list_empty(&udev->qfull_queue))
1714                 all_expired = false;
1715         xa_destroy(&udev->commands);
1716         WARN_ON(!all_expired);
1717
1718         tcmu_blocks_release(udev, 0, udev->dbi_max);
1719         bitmap_free(udev->data_bitmap);
1720         mutex_unlock(&udev->cmdr_lock);
1721
1722         pr_debug("dev_kref_release\n");
1723
1724         call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1725 }
1726
1727 static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
1728 {
1729         struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1730         LIST_HEAD(cmds);
1731         sense_reason_t scsi_ret;
1732         int ret;
1733
1734         if (list_empty(&udev->qfull_queue))
1735                 return;
1736
1737         pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1738
1739         list_splice_init(&udev->qfull_queue, &cmds);
1740
1741         list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1742                 list_del_init(&tcmu_cmd->queue_entry);
1743
1744                 pr_debug("removing cmd %p on dev %s from queue\n",
1745                          tcmu_cmd, udev->name);
1746
1747                 if (fail) {
1748                         /*
1749                          * We were not able to even start the command, so
1750                          * fail with busy to allow a retry in case runner
1751                          * was only temporarily down. If the device is being
1752                          * removed then LIO core will do the right thing and
1753                          * fail the retry.
1754                          */
1755                         tcmu_cmd->se_cmd->priv = NULL;
1756                         target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1757                         tcmu_free_cmd(tcmu_cmd);
1758                         continue;
1759                 }
1760
1761                 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1762                 if (ret < 0) {
1763                         pr_debug("cmd %p on dev %s failed with %u\n",
1764                                  tcmu_cmd, udev->name, scsi_ret);
1765                         /*
1766                          * Ignore scsi_ret for now. target_complete_cmd
1767                          * drops it.
1768                          */
1769                         tcmu_cmd->se_cmd->priv = NULL;
1770                         target_complete_cmd(tcmu_cmd->se_cmd,
1771                                             SAM_STAT_CHECK_CONDITION);
1772                         tcmu_free_cmd(tcmu_cmd);
1773                 } else if (ret > 0) {
1774                         pr_debug("ran out of space during cmdr queue run\n");
1775                         /*
1776                          * cmd was requeued, so just put all cmds back in
1777                          * the queue
1778                          */
1779                         list_splice_tail(&cmds, &udev->qfull_queue);
1780                         break;
1781                 }
1782         }
1783
1784         tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1785 }
1786
1787 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1788 {
1789         struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1790
1791         mutex_lock(&udev->cmdr_lock);
1792         if (tcmu_handle_completions(udev))
1793                 run_qfull_queue(udev, false);
1794         mutex_unlock(&udev->cmdr_lock);
1795
1796         return 0;
1797 }
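
/*
 * The UIO core routes a 4-byte write() on the /dev/uioN node here,
 * and a blocking read() returns once the kernel signals new work via
 * uio_event_notify() (illustrative userspace usage; uio_fd is assumed
 * to be the open device node):
 *
 *	uint32_t kick = 1, events;
 *	write(uio_fd, &kick, sizeof(kick));	// -> tcmu_irqcontrol()
 *	read(uio_fd, &events, sizeof(events));	// wait for new commands
 */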
1798
1799 /*
1800  * mmap code from uio.c. Copied here because we want to hook mmap()
1801  * and this stuff must come along.
1802  */
1803 static int tcmu_find_mem_index(struct vm_area_struct *vma)
1804 {
1805         struct tcmu_dev *udev = vma->vm_private_data;
1806         struct uio_info *info = &udev->uio_info;
1807
1808         if (vma->vm_pgoff < MAX_UIO_MAPS) {
1809                 if (info->mem[vma->vm_pgoff].size == 0)
1810                         return -1;
1811                 return (int)vma->vm_pgoff;
1812         }
1813         return -1;
1814 }
1815
1816 static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
1817 {
1818         struct page *page;
1819
1820         mutex_lock(&udev->cmdr_lock);
1821         page = xa_load(&udev->data_pages, dpi);
1822         if (likely(page)) {
1823                 mutex_unlock(&udev->cmdr_lock);
1824                 return page;
1825         }
1826
1827         /*
1828          * Userspace messed up and passed in an address that is not in
1829          * the data iov it was given.
1830          */
1831         pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
1832                dpi, udev->name);
1833         mutex_unlock(&udev->cmdr_lock);
1834
1835         return NULL;
1836 }
1837
1838 static void tcmu_vma_open(struct vm_area_struct *vma)
1839 {
1840         struct tcmu_dev *udev = vma->vm_private_data;
1841
1842         pr_debug("vma_open\n");
1843
1844         kref_get(&udev->kref);
1845 }
1846
1847 static void tcmu_vma_close(struct vm_area_struct *vma)
1848 {
1849         struct tcmu_dev *udev = vma->vm_private_data;
1850
1851         pr_debug("vma_close\n");
1852
1853         /* release ref from tcmu_vma_open */
1854         kref_put(&udev->kref, tcmu_dev_kref_release);
1855 }
1856
1857 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1858 {
1859         struct tcmu_dev *udev = vmf->vma->vm_private_data;
1860         struct uio_info *info = &udev->uio_info;
1861         struct page *page;
1862         unsigned long offset;
1863         void *addr;
1864
1865         int mi = tcmu_find_mem_index(vmf->vma);
1866         if (mi < 0)
1867                 return VM_FAULT_SIGBUS;
1868
1869         /*
1870          * We need to subtract mi because userspace uses
1871          * offset = N * PAGE_SIZE to select mem[N].
1872          */
1873         offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1874
1875         if (offset < udev->data_off) {
1876                 /* For the vmalloc()ed cmd area pages */
1877                 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1878                 page = vmalloc_to_page(addr);
1879         } else {
1880                 uint32_t dpi;
1881
1882                 /* For the dynamically growing data area pages */
1883                 dpi = (offset - udev->data_off) / PAGE_SIZE;
1884                 page = tcmu_try_get_data_page(udev, dpi);
1885                 if (!page)
1886                         return VM_FAULT_SIGBUS;
1887         }
1888
1889         get_page(page);
1890         vmf->page = page;
1891         return 0;
1892 }
1893
1894 static const struct vm_operations_struct tcmu_vm_ops = {
1895         .open = tcmu_vma_open,
1896         .close = tcmu_vma_close,
1897         .fault = tcmu_vma_fault,
1898 };
1899
1900 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1901 {
1902         struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1903
1904         vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1905         vma->vm_ops = &tcmu_vm_ops;
1906
1907         vma->vm_private_data = udev;
1908
1909         /* Ensure the mmap is exactly the right size */
1910         if (vma_pages(vma) != udev->mmap_pages)
1911                 return -EINVAL;
1912
1913         tcmu_vma_open(vma);
1914
1915         return 0;
1916 }
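
/*
 * Matching userspace mapping (illustrative; the length should be read
 * from /sys/class/uio/uioN/maps/map0/size rather than hard-coded):
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	void *base = mmap(NULL, map0_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct tcmu_mailbox *mb = base;
 *
 * A mapping that is not exactly mmap_pages long is rejected with
 * -EINVAL above.
 */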
1917
1918 static int tcmu_open(struct uio_info *info, struct inode *inode)
1919 {
1920         struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1921
1922         /* O_EXCL not supported for char devs, so fake it? */
1923         if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1924                 return -EBUSY;
1925
1926         udev->inode = inode;
1927
1928         pr_debug("open\n");
1929
1930         return 0;
1931 }
1932
1933 static int tcmu_release(struct uio_info *info, struct inode *inode)
1934 {
1935         struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1936         struct tcmu_cmd *cmd;
1937         unsigned long i;
1938         bool freed = false;
1939
1940         mutex_lock(&udev->cmdr_lock);
1941
1942         xa_for_each(&udev->commands, i, cmd) {
1943         /* Cmds with KEEP_BUF set are no longer on the ring, but
1944          * userspace still holds the data buffer. If userspace closes,
1945          * we implicitly free these cmds and buffers, since after a new
1946          * open the (possibly new) userspace cannot find the cmd in the
1947          * ring and thus will never release the buffer by writing its
1948          * cmd_id to the free_kept_buf action attribute.
1949          */
1950                 if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
1951                         continue;
1952                 pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
1953                          cmd->cmd_id, udev->name);
1954                 freed = true;
1955
1956                 xa_erase(&udev->commands, i);
1957                 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1958                 tcmu_free_cmd(cmd);
1959         }
1960         /*
1961          * We only freed data space, not ring space. Therefore we don't
1962          * call run_tmr_queue, but run_qfull_queue if the tmr queue is empty.
1963          */
1964         if (freed && list_empty(&udev->tmr_queue))
1965                 run_qfull_queue(udev, false);
1966
1967         mutex_unlock(&udev->cmdr_lock);
1968
1969         clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1970
1971         pr_debug("close\n");
1972
1973         return 0;
1974 }
1975
1976 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1977 {
1978         struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1979
1980         if (!tcmu_kern_cmd_reply_supported)
1981                 return 0;
1982
1983         if (udev->nl_reply_supported <= 0)
1984                 return 0;
1985
1986         mutex_lock(&tcmu_nl_cmd_mutex);
1987
1988         if (tcmu_netlink_blocked) {
1989                 mutex_unlock(&tcmu_nl_cmd_mutex);
1990                 pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
1991                         udev->name);
1992                 return -EAGAIN;
1993         }
1994
1995         if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1996                 mutex_unlock(&tcmu_nl_cmd_mutex);
1997                 pr_warn("netlink cmd %d already executing on %s\n",
1998                          nl_cmd->cmd, udev->name);
1999                 return -EBUSY;
2000         }
2001
2002         memset(nl_cmd, 0, sizeof(*nl_cmd));
2003         nl_cmd->cmd = cmd;
2004         nl_cmd->udev = udev;
2005         init_completion(&nl_cmd->complete);
2006         INIT_LIST_HEAD(&nl_cmd->nl_list);
2007
2008         list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
2009
2010         mutex_unlock(&tcmu_nl_cmd_mutex);
2011         return 0;
2012 }
2013
2014 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
2015 {
2016         struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
2017
2018         if (!tcmu_kern_cmd_reply_supported)
2019                 return;
2020
2021         if (udev->nl_reply_supported <= 0)
2022                 return;
2023
2024         mutex_lock(&tcmu_nl_cmd_mutex);
2025
2026         list_del(&nl_cmd->nl_list);
2027         memset(nl_cmd, 0, sizeof(*nl_cmd));
2028
2029         mutex_unlock(&tcmu_nl_cmd_mutex);
2030 }
2031
2032 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
2033 {
2034         struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
2035         int ret;
2036
2037         if (!tcmu_kern_cmd_reply_supported)
2038                 return 0;
2039
2040         if (udev->nl_reply_supported <= 0)
2041                 return 0;
2042
2043         pr_debug("sleeping for nl reply\n");
2044         wait_for_completion(&nl_cmd->complete);
2045
2046         mutex_lock(&tcmu_nl_cmd_mutex);
2047         nl_cmd->cmd = TCMU_CMD_UNSPEC;
2048         ret = nl_cmd->status;
2049         mutex_unlock(&tcmu_nl_cmd_mutex);
2050
2051         return ret;
2052 }
2053
2054 static int tcmu_netlink_event_init(struct tcmu_dev *udev,
2055                                    enum tcmu_genl_cmd cmd,
2056                                    struct sk_buff **buf, void **hdr)
2057 {
2058         struct sk_buff *skb;
2059         void *msg_header;
2060         int ret = -ENOMEM;
2061
2062         skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2063         if (!skb)
2064                 return ret;
2065
2066         msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
2067         if (!msg_header)
2068                 goto free_skb;
2069
2070         ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
2071         if (ret < 0)
2072                 goto free_skb;
2073
2074         ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
2075         if (ret < 0)
2076                 goto free_skb;
2077
2078         ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
2079         if (ret < 0)
2080                 goto free_skb;
2081
2082         *buf = skb;
2083         *hdr = msg_header;
2084         return ret;
2085
2086 free_skb:
2087         nlmsg_free(skb);
2088         return ret;
2089 }
2090
2091 static int tcmu_netlink_event_send(struct tcmu_dev *udev,
2092                                    enum tcmu_genl_cmd cmd,
2093                                    struct sk_buff *skb, void *msg_header)
2094 {
2095         int ret;
2096
2097         genlmsg_end(skb, msg_header);
2098
2099         ret = tcmu_init_genl_cmd_reply(udev, cmd);
2100         if (ret) {
2101                 nlmsg_free(skb);
2102                 return ret;
2103         }
2104
2105         ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
2106                                       TCMU_MCGRP_CONFIG, GFP_KERNEL);
2107
2108         /* Wait during an add as the listener may not be up yet */
2109         if (ret == 0 ||
2110            (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
2111                 return tcmu_wait_genl_cmd_reply(udev);
2112         else
2113                 tcmu_destroy_genl_cmd_reply(udev);
2114
2115         return ret;
2116 }
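
/*
 * The reply path (assuming the conventions of this genl family's uapi
 * header): a handler that registered TCMU_ATTR_SUPP_KERN_CMD_REPLY
 * answers e.g. TCMU_CMD_ADDED_DEVICE with TCMU_CMD_ADDED_DEVICE_DONE,
 * echoing TCMU_ATTR_DEVICE_ID and carrying the result in
 * TCMU_ATTR_CMD_STATUS; that status is what tcmu_wait_genl_cmd_reply()
 * above hands back to the configuration path.
 */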
2117
2118 static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
2119 {
2120         struct sk_buff *skb = NULL;
2121         void *msg_header = NULL;
2122         int ret = 0;
2123
2124         ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
2125                                       &msg_header);
2126         if (ret < 0)
2127                 return ret;
2128         return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
2129                                        msg_header);
2130 }
2131
2132 static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
2133 {
2134         struct sk_buff *skb = NULL;
2135         void *msg_header = NULL;
2136         int ret = 0;
2137
2138         ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
2139                                       &skb, &msg_header);
2140         if (ret < 0)
2141                 return ret;
2142         return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
2143                                        skb, msg_header);
2144 }
2145
2146 static int tcmu_update_uio_info(struct tcmu_dev *udev)
2147 {
2148         struct tcmu_hba *hba = udev->hba->hba_ptr;
2149         struct uio_info *info;
2150         char *str;
2151
2152         info = &udev->uio_info;
2153
2154         if (udev->dev_config[0])
2155                 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
2156                                 udev->name, udev->dev_config);
2157         else
2158                 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
2159                                 udev->name);
2160         if (!str)
2161                 return -ENOMEM;
2162
2163         /* If the old string exists, free it */
2164         kfree(info->name);
2165         info->name = str;
2166
2167         return 0;
2168 }
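
/*
 * Handlers typically find the backing uio node by matching this name
 * (a userspace convention, not enforced here; device name and config
 * suffix below are examples):
 *
 *	$ cat /sys/class/uio/uio0/name
 *	tcm-user/1/myDev/id=42		# host_id/name/dev_config
 */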
2169
2170 static int tcmu_configure_device(struct se_device *dev)
2171 {
2172         struct tcmu_dev *udev = TCMU_DEV(dev);
2173         struct uio_info *info;
2174         struct tcmu_mailbox *mb;
2175         size_t data_size;
2176         int ret = 0;
2177
2178         ret = tcmu_update_uio_info(udev);
2179         if (ret)
2180                 return ret;
2181
2182         info = &udev->uio_info;
2183
2184         mutex_lock(&udev->cmdr_lock);
2185         udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
2186         mutex_unlock(&udev->cmdr_lock);
2187         if (!udev->data_bitmap) {
2188                 ret = -ENOMEM;
2189                 goto err_bitmap_alloc;
2190         }
2191
2192         mb = vzalloc(MB_CMDR_SIZE);
2193         if (!mb) {
2194                 ret = -ENOMEM;
2195                 goto err_vzalloc;
2196         }
2197
2198         /* mailbox fits in first part of CMDR space */
2199         udev->mb_addr = mb;
2200         udev->cmdr = (void *)mb + CMDR_OFF;
2201         udev->cmdr_size = CMDR_SIZE;
2202         udev->data_off = MB_CMDR_SIZE;
2203         data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
2204         udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
2205         udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
2206         udev->dbi_thresh = 0; /* Default in Idle state */
2207
2208         /* Initialise the mailbox of the ring buffer */
2209         mb->version = TCMU_MAILBOX_VERSION;
2210         mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
2211                     TCMU_MAILBOX_FLAG_CAP_READ_LEN |
2212                     TCMU_MAILBOX_FLAG_CAP_TMR |
2213                     TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
2214         mb->cmdr_off = CMDR_OFF;
2215         mb->cmdr_size = udev->cmdr_size;
2216
2217         WARN_ON(!PAGE_ALIGNED(udev->data_off));
2218         WARN_ON(data_size % PAGE_SIZE);
2219
2220         info->version = __stringify(TCMU_MAILBOX_VERSION);
2221
2222         info->mem[0].name = "tcm-user command & data buffer";
2223         info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
2224         info->mem[0].size = data_size + MB_CMDR_SIZE;
2225         info->mem[0].memtype = UIO_MEM_NONE;
2226
2227         info->irqcontrol = tcmu_irqcontrol;
2228         info->irq = UIO_IRQ_CUSTOM;
2229
2230         info->mmap = tcmu_mmap;
2231         info->open = tcmu_open;
2232         info->release = tcmu_release;
2233
2234         ret = uio_register_device(tcmu_root_device, info);
2235         if (ret)
2236                 goto err_register;
2237
2238         /* User can set hw_block_size before enabling the device */
2239         if (dev->dev_attrib.hw_block_size == 0)
2240                 dev->dev_attrib.hw_block_size = 512;
2241         /* Other attributes can be configured in userspace */
2242         if (!dev->dev_attrib.hw_max_sectors)
2243                 dev->dev_attrib.hw_max_sectors = 128;
2244         if (!dev->dev_attrib.emulate_write_cache)
2245                 dev->dev_attrib.emulate_write_cache = 0;
2246         dev->dev_attrib.hw_queue_depth = 128;
2247
2248         /* If the user didn't explicitly disable netlink reply support,
2249          * use the module-scope setting.
2250          */
2251         if (udev->nl_reply_supported >= 0)
2252                 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
2253
2254         /*
2255          * Get a ref in case userspace does a close on the uio device before
2256          * LIO has initiated tcmu_free_device.
2257          */
2258         kref_get(&udev->kref);
2259
2260         ret = tcmu_send_dev_add_event(udev);
2261         if (ret)
2262                 goto err_netlink;
2263
2264         mutex_lock(&root_udev_mutex);
2265         list_add(&udev->node, &root_udev);
2266         mutex_unlock(&root_udev_mutex);
2267
2268         return 0;
2269
2270 err_netlink:
2271         kref_put(&udev->kref, tcmu_dev_kref_release);
2272         uio_unregister_device(&udev->uio_info);
2273 err_register:
2274         vfree(udev->mb_addr);
2275         udev->mb_addr = NULL;
2276 err_vzalloc:
2277         bitmap_free(udev->data_bitmap);
2278         udev->data_bitmap = NULL;
2279 err_bitmap_alloc:
2280         kfree(info->name);
2281         info->name = NULL;
2282
2283         return ret;
2284 }
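
/*
 * Resulting layout of map0 as userspace sees it (offsets follow from
 * the constants used above):
 *
 *	[0, CMDR_OFF)			struct tcmu_mailbox
 *	[CMDR_OFF, MB_CMDR_SIZE)	command ring, mb->cmdr_size bytes
 *	[MB_CMDR_SIZE, end)		data area; dpi N lives at offset
 *					MB_CMDR_SIZE + N * PAGE_SIZE
 */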
2285
2286 static void tcmu_free_device(struct se_device *dev)
2287 {
2288         struct tcmu_dev *udev = TCMU_DEV(dev);
2289
2290         /* release ref from init */
2291         kref_put(&udev->kref, tcmu_dev_kref_release);
2292 }
2293
2294 static void tcmu_destroy_device(struct se_device *dev)
2295 {
2296         struct tcmu_dev *udev = TCMU_DEV(dev);
2297
2298         del_timer_sync(&udev->cmd_timer);
2299         del_timer_sync(&udev->qfull_timer);
2300
2301         mutex_lock(&root_udev_mutex);
2302         list_del(&udev->node);
2303         mutex_unlock(&root_udev_mutex);
2304
2305         tcmu_send_dev_remove_event(udev);
2306
2307         uio_unregister_device(&udev->uio_info);
2308
2309         /* release ref from configure */
2310         kref_put(&udev->kref, tcmu_dev_kref_release);
2311 }
2312
2313 static void tcmu_unblock_dev(struct tcmu_dev *udev)
2314 {
2315         mutex_lock(&udev->cmdr_lock);
2316         clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
2317         mutex_unlock(&udev->cmdr_lock);
2318 }
2319
2320 static void tcmu_block_dev(struct tcmu_dev *udev)
2321 {
2322         mutex_lock(&udev->cmdr_lock);
2323
2324         if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2325                 goto unlock;
2326
2327         /* complete IO that has executed successfully */
2328         tcmu_handle_completions(udev);
2329         /* fail IO waiting to be queued */
2330         run_qfull_queue(udev, true);
2331
2332 unlock:
2333         mutex_unlock(&udev->cmdr_lock);
2334 }
2335
2336 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2337 {
2338         struct tcmu_mailbox *mb;
2339         struct tcmu_cmd *cmd;
2340         unsigned long i;
2341
2342         mutex_lock(&udev->cmdr_lock);
2343
2344         xa_for_each(&udev->commands, i, cmd) {
2345                 pr_debug("removing cmd %u on dev %s from ring %s\n",
2346                          cmd->cmd_id, udev->name,
2347                          test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
2348                          "(is expired)" :
2349                          (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
2350                          "(is keep buffer)" : ""));
2351
2352                 xa_erase(&udev->commands, i);
2353                 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
2354                     !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
2355                         WARN_ON(!cmd->se_cmd);
2356                         list_del_init(&cmd->queue_entry);
2357                         cmd->se_cmd->priv = NULL;
2358                         if (err_level == 1) {
2359                                 /*
2360                                  * Userspace was not able to start the
2361                                  * command or it is retryable.
2362                                  */
2363                                 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2364                         } else {
2365                                 /* hard failure */
2366                                 target_complete_cmd(cmd->se_cmd,
2367                                                     SAM_STAT_CHECK_CONDITION);
2368                         }
2369                 }
2370                 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2371                 tcmu_free_cmd(cmd);
2372         }
2373
2374         mb = udev->mb_addr;
2375         tcmu_flush_dcache_range(mb, sizeof(*mb));
2376         pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2377                  mb->cmd_tail, mb->cmd_head);
2378
2379         udev->cmdr_last_cleaned = 0;
2380         mb->cmd_tail = 0;
2381         mb->cmd_head = 0;
2382         tcmu_flush_dcache_range(mb, sizeof(*mb));
2383         clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
2384
2385         del_timer(&udev->cmd_timer);
2386
2387         /*
2388          * The ring is now empty and the qfull queue never contains
2389          * aborted commands, so TMRs in the tmr queue no longer refer
2390          * to relevant cmd_ids. After a ring reset userspace should do
2391          * a fresh start, and even a LUN RESET message is no longer
2392          * relevant. Therefore remove all TMRs from the tmr queue.
2393          */
2394         tcmu_remove_all_queued_tmr(udev);
2395
2396         run_qfull_queue(udev, false);
2397
2398         mutex_unlock(&udev->cmdr_lock);
2399 }
2400
2401 enum {
2402         Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2403         Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
2404         Opt_err,
2405 };
2406
2407 static match_table_t tokens = {
2408         {Opt_dev_config, "dev_config=%s"},
2409         {Opt_dev_size, "dev_size=%s"},
2410         {Opt_hw_block_size, "hw_block_size=%d"},
2411         {Opt_hw_max_sectors, "hw_max_sectors=%d"},
2412         {Opt_nl_reply_supported, "nl_reply_supported=%d"},
2413         {Opt_max_data_area_mb, "max_data_area_mb=%d"},
2414         {Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
2415         {Opt_err, NULL}
2416 };
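
/*
 * These options arrive through the device's configfs control
 * attribute; an illustrative invocation (standard LIO configfs layout
 * assumed, device and config names are examples):
 *
 *	echo "dev_config=foo,max_data_area_mb=256,data_pages_per_blk=4" > \
 *		/sys/kernel/config/target/core/user_0/myDev/control
 */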
2417
2418 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2419 {
2420         int val, ret;
2421
2422         ret = match_int(arg, &val);
2423         if (ret < 0) {
2424                 pr_err("match_int() failed for dev attrib. Error %d.\n",
2425                        ret);
2426                 return ret;
2427         }
2428
2429         if (val <= 0) {
2430                 pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2431                        val);
2432                 return -EINVAL;
2433         }
2434         *dev_attrib = val;
2435         return 0;
2436 }
2437
2438 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2439 {
2440         int val, ret;
2441         uint32_t pages_per_blk = udev->data_pages_per_blk;
2442
2443         ret = match_int(arg, &val);
2444         if (ret < 0) {
2445                 pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2446                        ret);
2447                 return ret;
2448         }
2449         if (val <= 0) {
2450                 pr_err("Invalid max_data_area %d.\n", val);
2451                 return -EINVAL;
2452         }
2453         if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
2454                 pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2455                        val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
2456                 val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
2457         }
2458         if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
2459                 pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
2460                        val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
2461                 return -EINVAL;
2462         }
2463
2464         mutex_lock(&udev->cmdr_lock);
2465         if (udev->data_bitmap) {
2466                 pr_err("Cannot set max_data_area_mb after the device has been enabled.\n");
2467                 ret = -EINVAL;
2468                 goto unlock;
2469         }
2470
2471         udev->data_area_mb = val;
2472         udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
2473
2474 unlock:
2475         mutex_unlock(&udev->cmdr_lock);
2476         return ret;
2477 }
2478
2479 static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
2480 {
2481         int val, ret;
2482
2483         ret = match_int(arg, &val);
2484         if (ret < 0) {
2485                 pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
2486                        ret);
2487                 return ret;
2488         }
2489
2490         if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
2491                 pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d (%zd pages).\n",
2492                        val, udev->data_area_mb,
2493                        TCMU_MBS_TO_PAGES(udev->data_area_mb));
2494                 return -EINVAL;
2495         }
2496
2497         mutex_lock(&udev->cmdr_lock);
2498         if (udev->data_bitmap) {
2499                 pr_err("Cannot set data_pages_per_blk after the device has been enabled.\n");
2500                 ret = -EINVAL;
2501                 goto unlock;
2502         }
2503
2504         udev->data_pages_per_blk = val;
2505         udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;
2506
2507 unlock:
2508         mutex_unlock(&udev->cmdr_lock);
2509         return ret;
2510 }
2511
2512 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2513                 const char *page, ssize_t count)
2514 {
2515         struct tcmu_dev *udev = TCMU_DEV(dev);
2516         char *orig, *ptr, *opts;
2517         substring_t args[MAX_OPT_ARGS];
2518         int ret = 0, token;
2519
2520         opts = kstrdup(page, GFP_KERNEL);
2521         if (!opts)
2522                 return -ENOMEM;
2523
2524         orig = opts;
2525
2526         while ((ptr = strsep(&opts, ",\n")) != NULL) {
2527                 if (!*ptr)
2528                         continue;
2529
2530                 token = match_token(ptr, tokens, args);
2531                 switch (token) {
2532                 case Opt_dev_config:
2533                         if (match_strlcpy(udev->dev_config, &args[0],
2534                                           TCMU_CONFIG_LEN) == 0) {
2535                                 ret = -EINVAL;
2536                                 break;
2537                         }
2538                         pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2539                         break;
2540                 case Opt_dev_size:
2541                         ret = match_u64(&args[0], &udev->dev_size);
2542                         if (ret < 0)
2543                                 pr_err("match_u64() failed for dev_size=. Error %d.\n",
2544                                        ret);
2545                         break;
2546                 case Opt_hw_block_size:
2547                         ret = tcmu_set_dev_attrib(&args[0],
2548                                         &(dev->dev_attrib.hw_block_size));
2549                         break;
2550                 case Opt_hw_max_sectors:
2551                         ret = tcmu_set_dev_attrib(&args[0],
2552                                         &(dev->dev_attrib.hw_max_sectors));
2553                         break;
2554                 case Opt_nl_reply_supported:
2555                         ret = match_int(&args[0], &udev->nl_reply_supported);
2556                         if (ret < 0)
2557                                 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2558                                        ret);
2559                         break;
2560                 case Opt_max_data_area_mb:
2561                         ret = tcmu_set_max_blocks_param(udev, &args[0]);
2562                         break;
2563                 case Opt_data_pages_per_blk:
2564                         ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
2565                         break;
2566                 default:
2567                         break;
2568                 }
2569
2570                 if (ret)
2571                         break;
2572         }
2573
2574         kfree(orig);
2575         return (!ret) ? count : ret;
2576 }
2577
2578 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2579 {
2580         struct tcmu_dev *udev = TCMU_DEV(dev);
2581         ssize_t bl = 0;
2582
2583         bl = sprintf(b + bl, "Config: %s ",
2584                      udev->dev_config[0] ? udev->dev_config : "NULL");
2585         bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
2586         bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
2587         bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk);
2588
2589         return bl;
2590 }
2591
2592 static sector_t tcmu_get_blocks(struct se_device *dev)
2593 {
2594         struct tcmu_dev *udev = TCMU_DEV(dev);
2595
2596         return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2597                        dev->dev_attrib.block_size);
2598 }
2599
2600 static sense_reason_t
2601 tcmu_parse_cdb(struct se_cmd *cmd)
2602 {
2603         return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2604 }
2605
2606 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2607 {
2608         struct se_dev_attrib *da = container_of(to_config_group(item),
2609                                         struct se_dev_attrib, da_group);
2610         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2611
2612         return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2613 }
2614
2615 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2616                                        size_t count)
2617 {
2618         struct se_dev_attrib *da = container_of(to_config_group(item),
2619                                         struct se_dev_attrib, da_group);
2620         struct tcmu_dev *udev = container_of(da->da_dev,
2621                                         struct tcmu_dev, se_dev);
2622         u32 val;
2623         int ret;
2624
2625         if (da->da_dev->export_count) {
2626                 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2627                 return -EINVAL;
2628         }
2629
2630         ret = kstrtou32(page, 0, &val);
2631         if (ret < 0)
2632                 return ret;
2633
2634         udev->cmd_time_out = val * MSEC_PER_SEC;
2635         return count;
2636 }
2637 CONFIGFS_ATTR(tcmu_, cmd_time_out);
2638
2639 static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2640 {
2641         struct se_dev_attrib *da = container_of(to_config_group(item),
2642                                                 struct se_dev_attrib, da_group);
2643         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2644
2645         return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2646                         udev->qfull_time_out :
2647                         udev->qfull_time_out / MSEC_PER_SEC);
2648 }
2649
2650 static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2651                                          const char *page, size_t count)
2652 {
2653         struct se_dev_attrib *da = container_of(to_config_group(item),
2654                                         struct se_dev_attrib, da_group);
2655         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2656         s32 val;
2657         int ret;
2658
2659         ret = kstrtos32(page, 0, &val);
2660         if (ret < 0)
2661                 return ret;
2662
2663         if (val >= 0) {
2664                 udev->qfull_time_out = val * MSEC_PER_SEC;
2665         } else if (val == -1) {
2666                 udev->qfull_time_out = val;
2667         } else {
2668                 pr_err("Invalid qfull timeout value %d\n", val);
2669                 return -EINVAL;
2670         }
2671         return count;
2672 }
2673 CONFIGFS_ATTR(tcmu_, qfull_time_out);
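
/*
 * Both timeouts are set in seconds through the device's attrib group
 * (paths illustrative); -1 is the special qfull_time_out value
 * accepted above:
 *
 *	echo 30 > .../core/user_0/myDev/attrib/cmd_time_out
 *	echo -1 > .../core/user_0/myDev/attrib/qfull_time_out
 */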
2674
2675 static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2676 {
2677         struct se_dev_attrib *da = container_of(to_config_group(item),
2678                                                 struct se_dev_attrib, da_group);
2679         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2680
2681         return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
2682 }
2683 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2684
2685 static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
2686                                             char *page)
2687 {
2688         struct se_dev_attrib *da = container_of(to_config_group(item),
2689                                                 struct se_dev_attrib, da_group);
2690         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2691
2692         return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
2693 }
2694 CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
2695
2696 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2697 {
2698         struct se_dev_attrib *da = container_of(to_config_group(item),
2699                                                 struct se_dev_attrib, da_group);
2700         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2701
2702         return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2703 }
2704
2705 static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2706                                       const char *reconfig_data)
2707 {
2708         struct sk_buff *skb = NULL;
2709         void *msg_header = NULL;
2710         int ret = 0;
2711
2712         ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2713                                       &skb, &msg_header);
2714         if (ret < 0)
2715                 return ret;
2716         ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2717         if (ret < 0) {
2718                 nlmsg_free(skb);
2719                 return ret;
2720         }
2721         return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2722                                        skb, msg_header);
2723 }
2724
2726 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2727                                      size_t count)
2728 {
2729         struct se_dev_attrib *da = container_of(to_config_group(item),
2730                                                 struct se_dev_attrib, da_group);
2731         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2732         int ret, len;
2733
2734         len = strlen(page);
2735         if (!len || len > TCMU_CONFIG_LEN - 1)
2736                 return -EINVAL;
2737
2738         /* Check if device has been configured before */
2739         if (target_dev_configured(&udev->se_dev)) {
2740                 ret = tcmu_send_dev_config_event(udev, page);
2741                 if (ret) {
2742                         pr_err("Unable to reconfigure device\n");
2743                         return ret;
2744                 }
2745                 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2746
2747                 ret = tcmu_update_uio_info(udev);
2748                 if (ret)
2749                         return ret;
2750                 return count;
2751         }
2752         strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2753
2754         return count;
2755 }
2756 CONFIGFS_ATTR(tcmu_, dev_config);
2757
2758 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2759 {
2760         struct se_dev_attrib *da = container_of(to_config_group(item),
2761                                                 struct se_dev_attrib, da_group);
2762         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2763
2764         return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
2765 }
2766
2767 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2768 {
2769         struct sk_buff *skb = NULL;
2770         void *msg_header = NULL;
2771         int ret = 0;
2772
2773         ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2774                                       &skb, &msg_header);
2775         if (ret < 0)
2776                 return ret;
2777         ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2778                                 size, TCMU_ATTR_PAD);
2779         if (ret < 0) {
2780                 nlmsg_free(skb);
2781                 return ret;
2782         }
2783         return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2784                                        skb, msg_header);
2785 }
2786
2787 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2788                                    size_t count)
2789 {
2790         struct se_dev_attrib *da = container_of(to_config_group(item),
2791                                                 struct se_dev_attrib, da_group);
2792         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2793         u64 val;
2794         int ret;
2795
2796         ret = kstrtou64(page, 0, &val);
2797         if (ret < 0)
2798                 return ret;
2799
2800         /* Check if device has been configured before */
2801         if (target_dev_configured(&udev->se_dev)) {
2802                 ret = tcmu_send_dev_size_event(udev, val);
2803                 if (ret) {
2804                         pr_err("Unable to reconfigure device\n");
2805                         return ret;
2806                 }
2807         }
2808         udev->dev_size = val;
2809         return count;
2810 }
2811 CONFIGFS_ATTR(tcmu_, dev_size);
2812
2813 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2814                 char *page)
2815 {
2816         struct se_dev_attrib *da = container_of(to_config_group(item),
2817                                                 struct se_dev_attrib, da_group);
2818         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2819
2820         return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2821 }
2822
2823 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2824                 const char *page, size_t count)
2825 {
2826         struct se_dev_attrib *da = container_of(to_config_group(item),
2827                                                 struct se_dev_attrib, da_group);
2828         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2829         s8 val;
2830         int ret;
2831
2832         ret = kstrtos8(page, 0, &val);
2833         if (ret < 0)
2834                 return ret;
2835
2836         udev->nl_reply_supported = val;
2837         return count;
2838 }
2839 CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2840
2841 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2842                                              char *page)
2843 {
2844         struct se_dev_attrib *da = container_of(to_config_group(item),
2845                                         struct se_dev_attrib, da_group);
2846
2847         return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2848 }
2849
2850 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
2851 {
2852         struct sk_buff *skb = NULL;
2853         void *msg_header = NULL;
2854         int ret = 0;
2855
2856         ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2857                                       &skb, &msg_header);
2858         if (ret < 0)
2859                 return ret;
2860         ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
2861         if (ret < 0) {
2862                 nlmsg_free(skb);
2863                 return ret;
2864         }
2865         return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2866                                        skb, msg_header);
2867 }
2868
2869 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2870                                               const char *page, size_t count)
2871 {
2872         struct se_dev_attrib *da = container_of(to_config_group(item),
2873                                         struct se_dev_attrib, da_group);
2874         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2875         u8 val;
2876         int ret;
2877
2878         ret = kstrtou8(page, 0, &val);
2879         if (ret < 0)
2880                 return ret;
2881
2882         /* Check if device has been configured before */
2883         if (target_dev_configured(&udev->se_dev)) {
2884                 ret = tcmu_send_emulate_write_cache(udev, val);
2885                 if (ret) {
2886                         pr_err("Unable to reconfigure device\n");
2887                         return ret;
2888                 }
2889         }
2890
2891         da->emulate_write_cache = val;
2892         return count;
2893 }
2894 CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2895
2896 static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
2897 {
2898         struct se_dev_attrib *da = container_of(to_config_group(item),
2899                                         struct se_dev_attrib, da_group);
2900         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2901
2902         return snprintf(page, PAGE_SIZE, "%i\n",
2903                         test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
2904 }
2905
2906 static ssize_t tcmu_tmr_notification_store(struct config_item *item,
2907                                            const char *page, size_t count)
2908 {
2909         struct se_dev_attrib *da = container_of(to_config_group(item),
2910                                         struct se_dev_attrib, da_group);
2911         struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2912         u8 val;
2913         int ret;
2914
2915         ret = kstrtou8(page, 0, &val);
2916         if (ret < 0)
2917                 return ret;
2918         if (val > 1)
2919                 return -EINVAL;
2920
2921         if (val)
2922                 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
2923         else
2924                 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
2925         return count;
2926 }
2927 CONFIGFS_ATTR(tcmu_, tmr_notification);
2928
2929 static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
2930 {
2931         struct se_device *se_dev = container_of(to_config_group(item),
2932                                                 struct se_device,
2933                                                 dev_action_group);
2934         struct tcmu_dev *udev = TCMU_DEV(se_dev);
2935
2936         if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2937                 return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
2938         else
2939                 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
2940 }
2941
2942 static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
2943                                     size_t count)
2944 {
2945         struct se_device *se_dev = container_of(to_config_group(item),
2946                                                 struct se_device,
2947                                                 dev_action_group);
2948         struct tcmu_dev *udev = TCMU_DEV(se_dev);
2949         u8 val;
2950         int ret;
2951
2952         if (!target_dev_configured(&udev->se_dev)) {
2953                 pr_err("Device is not configured.\n");
2954                 return -EINVAL;
2955         }
2956
2957         ret = kstrtou8(page, 0, &val);
2958         if (ret < 0)
2959                 return ret;
2960
2961         if (val > 1) {
2962                 pr_err("Invalid block value %d\n", val);
2963                 return -EINVAL;
2964         }
2965
2966         if (!val)
2967                 tcmu_unblock_dev(udev);
2968         else
2969                 tcmu_block_dev(udev);
2970         return count;
2971 }
2972 CONFIGFS_ATTR(tcmu_, block_dev);
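
/*
 * Illustrative action usage: blocking first reaps IO the ring already
 * completed, then fails queued IO with BUSY, making it a safe prelude
 * to reset_ring below:
 *
 *	echo 1 > .../core/user_0/myDev/action/block_dev
 *	echo 0 > .../core/user_0/myDev/action/block_dev
 */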
2973
2974 static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
2975                                      size_t count)
2976 {
2977         struct se_device *se_dev = container_of(to_config_group(item),
2978                                                 struct se_device,
2979                                                 dev_action_group);
2980         struct tcmu_dev *udev = TCMU_DEV(se_dev);
2981         u8 val;
2982         int ret;
2983
2984         if (!target_dev_configured(&udev->se_dev)) {
2985                 pr_err("Device is not configured.\n");
2986                 return -EINVAL;
2987         }
2988
2989         ret = kstrtou8(page, 0, &val);
2990         if (ret < 0)
2991                 return ret;
2992
2993         if (val != 1 && val != 2) {
2994                 pr_err("Invalid reset ring value %d\n", val);
2995                 return -EINVAL;
2996         }
2997
2998         tcmu_reset_ring(udev, val);
2999         return count;
3000 }
3001 CONFIGFS_ATTR_WO(tcmu_, reset_ring);
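
/*
 * Illustrative usage: err_level 1 completes lost commands with BUSY
 * (retryable), 2 with CHECK CONDITION (hard failure):
 *
 *	echo 1 > .../core/user_0/myDev/action/reset_ring
 */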
3002
3003 static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
3004                                         size_t count)
3005 {
3006         struct se_device *se_dev = container_of(to_config_group(item),
3007                                                 struct se_device,
3008                                                 dev_action_group);
3009         struct tcmu_dev *udev = TCMU_DEV(se_dev);
3010         struct tcmu_cmd *cmd;
3011         u16 cmd_id;
3012         int ret;
3013
3014         if (!target_dev_configured(&udev->se_dev)) {
3015                 pr_err("Device is not configured.\n");
3016                 return -EINVAL;
3017         }
3018
3019         ret = kstrtou16(page, 0, &cmd_id);
3020         if (ret < 0)
3021                 return ret;
3022
3023         mutex_lock(&udev->cmdr_lock);
3024
	{
		XA_STATE(xas, &udev->commands, cmd_id);

		xas_lock(&xas);
		cmd = xas_load(&xas);
		if (!cmd) {
			pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
			count = -EINVAL;
			xas_unlock(&xas);
			goto out_unlock;
		}
		if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
			pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
			       cmd_id);
			count = -EINVAL;
			xas_unlock(&xas);
			goto out_unlock;
		}
		xas_store(&xas, NULL);
		xas_unlock(&xas);
	}

	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
	/*
	 * We only freed data space, not ring space. Therefore we don't call
	 * run_tmr_queue, but call run_qfull_queue if tmr_queue is empty.
	 */
	if (list_empty(&udev->tmr_queue))
		run_qfull_queue(udev, false);

out_unlock:
	mutex_unlock(&udev->cmdr_lock);
	return count;
}
CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);

static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_qfull_time_out,
	&tcmu_attr_max_data_area_mb,
	&tcmu_attr_data_pages_per_blk,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_tmr_notification,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;

static struct configfs_attribute *tcmu_action_attrs[] = {
	&tcmu_attr_block_dev,
	&tcmu_attr_reset_ring,
	&tcmu_attr_free_kept_buf,
	NULL,
};

static struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.owner			= THIS_MODULE,
	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
				      TRANSPORT_FLAG_PASSTHROUGH_ALUA,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.destroy_device		= tcmu_destroy_device,
	.free_device		= tcmu_free_device,
	.unplug_device		= tcmu_unplug_device,
	.plug_device		= tcmu_plug_device,
	.parse_cdb		= tcmu_parse_cdb,
	.tmr_notify		= tcmu_tmr_notify,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_action_attrs	= tcmu_action_attrs,
};

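/*
 * Data-area shrinker: once global_page_count exceeds tcmu_global_max_pages,
 * walk every configured, non-idle udev, pull dbi_thresh back to just past
 * the last data block still in use, zap the corresponding part of the mmap
 * and release the backing pages. Re-arms itself while still over the limit.
 */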
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	u32 pages_freed, total_pages_freed = 0;
	u32 start, end, block, total_blocks_freed = 0;

	if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
		return;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		if (!target_dev_configured(&udev->se_dev)) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		/* Try to complete the finished commands first */
		if (tcmu_handle_completions(udev))
			run_qfull_queue(udev, false);

		/* Skip udevs that are idle */
		if (!udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so it is not possible
			 * to reclaim any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* No bits set: the current udev will go to the idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Unmap the data area from offset off to the end */
		off = udev->data_off + (loff_t)start * udev->data_blk_size;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		pages_freed = tcmu_blocks_release(udev, start, end - 1);
		mutex_unlock(&udev->cmdr_lock);

		total_pages_freed += pages_freed;
		total_blocks_freed += end - start;
		pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
			 pages_freed, total_pages_freed, end - start,
			 total_blocks_freed, udev->name);
	}
	mutex_unlock(&root_udev_mutex);

	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}

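/*
 * Expire timed-out commands on every device whose timer fired and re-arm
 * the per-device timers. timed_out_udevs_lock is dropped around each
 * device so that cmdr_lock, a mutex, can be taken.
 */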
static void check_timedout_devices(void)
{
	struct tcmu_dev *udev, *tmp_dev;
	struct tcmu_cmd *cmd, *tmp_cmd;
	LIST_HEAD(devs);

	spin_lock_bh(&timed_out_udevs_lock);
	list_splice_init(&timed_out_udevs, &devs);

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		list_del_init(&udev->timedout_entry);
		spin_unlock_bh(&timed_out_udevs_lock);

		mutex_lock(&udev->cmdr_lock);

		/*
		 * If cmd_time_out is disabled but qfull is set, the deadline
		 * will only reflect the qfull timeout. Ignore it.
		 */
		if (udev->cmd_time_out) {
			list_for_each_entry_safe(cmd, tmp_cmd,
						 &udev->inflight_queue,
						 queue_entry) {
				tcmu_check_expired_ring_cmd(cmd);
			}
			tcmu_set_next_deadline(&udev->inflight_queue,
					       &udev->cmd_timer);
		}
		list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
					 queue_entry) {
			tcmu_check_expired_queue_cmd(cmd);
		}
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

		mutex_unlock(&udev->cmdr_lock);

		spin_lock_bh(&timed_out_udevs_lock);
	}

	spin_unlock_bh(&timed_out_udevs_lock);
}

static void tcmu_unmap_work_fn(struct work_struct *work)
{
	check_timedout_devices();
	find_free_blocks();
}

static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0)
		goto out_unreg_device;

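	/*
	 * Build a single NULL-terminated attribute array from the core
	 * passthrough attributes, the passthrough PR attributes and the
	 * tcmu-specific attributes; the extra pointer added to len below
	 * is for the terminating NULL.
	 */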
	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	len += sizeof(struct configfs_attribute *);

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
		tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
		tcmu_attrs[i++] = tcmu_attrib_attrs[k];
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	return 0;

out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	cancel_delayed_work_sync(&tcmu_unmap_work);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);