// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
        struct socket *sock;
        struct mutex tx_lock;
        struct request *pending;
        int sent;
        bool dead;
        int fallback_index;
        int cookie;
};

struct recv_thread_args {
        struct work_struct work;
        struct nbd_device *nbd;
        int index;
};

struct link_dead_args {
        struct work_struct work;
        int index;
};

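/*
 * Bits prefixed NBD_RT_ live in nbd_config->runtime_flags and only exist
 * while a configuration is active; the unprefixed NBD_ bits below live in
 * nbd_device->flags and persist across config teardown.
 */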
#define NBD_RT_TIMEDOUT                 0
#define NBD_RT_DISCONNECT_REQUESTED     1
#define NBD_RT_DISCONNECTED             2
#define NBD_RT_HAS_PID_FILE             3
#define NBD_RT_HAS_CONFIG_REF           4
#define NBD_RT_BOUND                    5
#define NBD_RT_DESTROY_ON_DISCONNECT    6
#define NBD_RT_DISCONNECT_ON_CLOSE      7

#define NBD_DESTROY_ON_DISCONNECT       0
#define NBD_DISCONNECT_REQUESTED        1

struct nbd_config {
        u32 flags;
        unsigned long runtime_flags;
        u64 dead_conn_timeout;

        struct nbd_sock **socks;
        int num_connections;
        atomic_t live_connections;
        wait_queue_head_t conn_wait;

        atomic_t recv_threads;
        wait_queue_head_t recv_wq;
        loff_t blksize;
        loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbg_dir;
#endif
};

struct nbd_device {
        struct blk_mq_tag_set tag_set;

        int index;
        refcount_t config_refs;
        refcount_t refs;
        struct nbd_config *config;
        struct mutex config_lock;
        struct gendisk *disk;
        struct workqueue_struct *recv_workq;

        struct list_head list;
        struct task_struct *task_recv;
        struct task_struct *task_setup;

        struct completion *destroy_complete;
        unsigned long flags;
};

#define NBD_CMD_REQUEUED        1

struct nbd_cmd {
        struct nbd_device *nbd;
        struct mutex lock;
        int index;
        int cookie;
        int retries;
        blk_status_t status;
        unsigned long flags;
        u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
        return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);

        if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
                blk_mq_requeue_request(req, true);
}

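/*
 * The 64-bit handle sent on the wire packs the per-command cookie into the
 * upper 32 bits and the blk-mq unique tag (hardware queue number in the
 * upper half, per-queue tag in the lower half) into the lower 32 bits.
 * cmd_cookie is bumped on every fresh send, so a reply that carries a stale
 * cookie can be detected in nbd_read_stat().
 */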
#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        u32 tag = blk_mq_unique_tag(req);
        u64 cookie = cmd->cmd_cookie;

        return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
        return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
        return (u32)(handle >> NBD_COOKIE_BITS);
}

static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case  NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case  NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case  NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}

static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

        return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = 0444},
        .show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
        struct gendisk *disk = nbd->disk;
        struct request_queue *q;

        if (disk) {
                q = disk->queue;
                del_gendisk(disk);
                blk_cleanup_queue(q);
                blk_mq_free_tag_set(&nbd->tag_set);
                disk->private_data = NULL;
                put_disk(disk);
        }

        /*
         * Do this last, just before the nbd is freed, to make sure that
         * the disk and its kobject are fully removed first and a
         * duplicate of the same device cannot be created.
         */
        if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
                complete(nbd->destroy_complete);

        kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
        if (refcount_dec_and_mutex_lock(&nbd->refs,
                                        &nbd_index_mutex)) {
                idr_remove(&nbd_index_idr, nbd->index);
                nbd_dev_remove(nbd);
                mutex_unlock(&nbd_index_mutex);
        }
}

static int nbd_disconnected(struct nbd_config *config)
{
        return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
                test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

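/*
 * Called with the socket's tx_lock held.  When @notify is set and the device
 * has not already been disconnected, a link_dead_args work item is queued so
 * nbd_dead_link_work() can handle the dead link asynchronously.  The last
 * live connection to die after a requested disconnect moves the device to
 * the DISCONNECTED state.
 */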
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
                                int notify)
{
        if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
                struct link_dead_args *args;
                args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
                if (args) {
                        INIT_WORK(&args->work, nbd_dead_link_work);
                        args->index = nbd->index;
                        queue_work(system_wq, &args->work);
                }
        }
        if (!nsock->dead) {
                kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
                if (atomic_dec_return(&nbd->config->live_connections) == 0) {
                        if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
                                               &nbd->config->runtime_flags)) {
                                set_bit(NBD_RT_DISCONNECTED,
                                        &nbd->config->runtime_flags);
                                dev_info(nbd_to_dev(nbd),
                                        "Disconnected due to user request.\n");
                        }
                }
        }
        nsock->dead = true;
        nsock->pending = NULL;
        nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
        if (nbd->config->bytesize) {
                set_capacity(nbd->disk, 0);
                kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
        }
}

static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
                loff_t blksize)
{
        if (!blksize)
                blksize = NBD_DEF_BLKSIZE;
        if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
                return -EINVAL;

        nbd->config->bytesize = bytesize;
        nbd->config->blksize = blksize;

        if (!nbd->task_recv)
                return 0;

        if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
                nbd->disk->queue->limits.discard_granularity = blksize;
                nbd->disk->queue->limits.discard_alignment = blksize;
                blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
        }
        blk_queue_logical_block_size(nbd->disk->queue, blksize);
        blk_queue_physical_block_size(nbd->disk->queue, blksize);

        if (max_part)
                set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
        if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
                kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
        return 0;
}

static void nbd_complete_rq(struct request *req)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

        dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
                cmd->status ? "failed" : "done");

        blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shut down the sockets, causing all in-flight listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
        struct nbd_config *config = nbd->config;
        int i;

        if (config->num_connections == 0)
                return;
        if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
                return;

        for (i = 0; i < config->num_connections; i++) {
                struct nbd_sock *nsock = config->socks[i];
                mutex_lock(&nsock->tx_lock);
                nbd_mark_nsock_dead(nbd, nsock, 0);
                mutex_unlock(&nsock->tx_lock);
        }
        dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static u32 req_to_nbd_cmd_type(struct request *req)
{
        switch (req_op(req)) {
        case REQ_OP_DISCARD:
                return NBD_CMD_TRIM;
        case REQ_OP_FLUSH:
                return NBD_CMD_FLUSH;
        case REQ_OP_WRITE:
                return NBD_CMD_WRITE;
        case REQ_OP_READ:
                return NBD_CMD_READ;
        default:
                return U32_MAX;
        }
}

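/*
 * blk-mq timeout handler.  Returning BLK_EH_RESET_TIMER re-arms the request
 * timer; BLK_EH_DONE means this handler has taken over completion of the
 * request, here either by requeueing it onto another connection or by
 * failing it and shutting the sockets down.
 */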
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                                 bool reserved)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_config *config;

        if (!mutex_trylock(&cmd->lock))
                return BLK_EH_RESET_TIMER;

        if (!refcount_inc_not_zero(&nbd->config_refs)) {
                cmd->status = BLK_STS_TIMEOUT;
                mutex_unlock(&cmd->lock);
                goto done;
        }
        config = nbd->config;

        if (config->num_connections > 1 ||
            (config->num_connections == 1 && nbd->tag_set.timeout)) {
                dev_err_ratelimited(nbd_to_dev(nbd),
                                    "Connection timed out, retrying (%d/%d alive)\n",
                                    atomic_read(&config->live_connections),
                                    config->num_connections);
                /*
                 * Hooray we have more connections, requeue this IO, the submit
                 * path will put it on a real connection. Or if only one
                 * connection is configured, the submit path will wait until
                 * a new connection is reconfigured or until the dead connection
                 * timeout expires.
                 */
                if (config->socks) {
                        if (cmd->index < config->num_connections) {
                                struct nbd_sock *nsock =
                                        config->socks[cmd->index];
                                mutex_lock(&nsock->tx_lock);
                                /* We can have multiple outstanding requests, so
                                 * we don't want to mark the nsock dead if we've
                                 * already reconnected with a new socket, so
                                 * only mark it dead if it's the same socket we
                                 * were sent out on.
                                 */
                                if (cmd->cookie == nsock->cookie)
                                        nbd_mark_nsock_dead(nbd, nsock, 1);
                                mutex_unlock(&nsock->tx_lock);
                        }
                        mutex_unlock(&cmd->lock);
                        nbd_requeue_cmd(cmd);
                        nbd_config_put(nbd);
                        return BLK_EH_DONE;
                }
        }

        if (!nbd->tag_set.timeout) {
                /*
                 * Userspace sets timeout=0 to disable socket disconnection,
                 * so just warn and reset the timer.
                 */
                struct nbd_sock *nsock = config->socks[cmd->index];
                cmd->retries++;
                dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
                        req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
                        (unsigned long long)blk_rq_pos(req) << 9,
                        blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

                mutex_lock(&nsock->tx_lock);
                if (cmd->cookie != nsock->cookie) {
                        nbd_requeue_cmd(cmd);
                        mutex_unlock(&nsock->tx_lock);
                        mutex_unlock(&cmd->lock);
                        nbd_config_put(nbd);
                        return BLK_EH_DONE;
                }
                mutex_unlock(&nsock->tx_lock);
                mutex_unlock(&cmd->lock);
                nbd_config_put(nbd);
                return BLK_EH_RESET_TIMER;
        }

        dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
        set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
        cmd->status = BLK_STS_IOERR;
        mutex_unlock(&cmd->lock);
        sock_shutdown(nbd);
        nbd_config_put(nbd);
done:
        blk_mq_complete_request(req);
        return BLK_EH_DONE;
}

/*
 *  Send or receive packet.
 */
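/*
 * This runs in the I/O path, potentially under memory pressure, so the
 * socket is switched to GFP_NOIO | __GFP_MEMALLOC allocations and the task
 * enters memalloc_noreclaim scope for the duration of the transfer to keep
 * memory reclaim from recursing back into this device.
 */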
static int sock_xmit(struct nbd_device *nbd, int index, int send,
                     struct iov_iter *iter, int msg_flags, int *sent)
{
        struct nbd_config *config = nbd->config;
        struct socket *sock = config->socks[index]->sock;
        int result;
        struct msghdr msg;
        unsigned int noreclaim_flag;

        if (unlikely(!sock)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Attempted %s on closed socket in sock_xmit\n",
                        (send ? "send" : "recv"));
                return -EINVAL;
        }

        msg.msg_iter = *iter;

        noreclaim_flag = memalloc_noreclaim_save();
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send)
                        result = sock_sendmsg(sock, &msg);
                else
                        result = sock_recvmsg(sock, &msg, msg.msg_flags);

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                if (sent)
                        *sent += result;
        } while (msg_data_left(&msg));

        memalloc_noreclaim_restore(noreclaim_flag);

        return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg: with a finite send
 * timeout a pending signal yields -EINTR, while the default blocking mode
 * yields -ERESTARTSYS (see sock_intr_errno()).
 */
static inline int was_interrupted(int result)
{
        return result == -ERESTARTSYS || result == -EINTR;
}

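/*
 * If a send is interrupted by a signal partway through a request, the
 * progress is recorded in nsock->pending and nsock->sent; the request is
 * requeued, and on the next dispatch nbd_send_cmd() resumes from the
 * recorded offset (the sent/skip logic below) instead of restarting.
 */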
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        struct nbd_config *config = nbd->config;
        struct nbd_sock *nsock = config->socks[index];
        int result;
        struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
        struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
        struct iov_iter from;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
        u64 handle;
        u32 type;
        u32 nbd_cmd_flags = 0;
        int sent = nsock->sent, skip = 0;

        iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

        type = req_to_nbd_cmd_type(req);
        if (type == U32_MAX)
                return -EIO;

        if (rq_data_dir(req) == WRITE &&
            (config->flags & NBD_FLAG_READ_ONLY)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Write on read-only\n");
                return -EIO;
        }

        if (req->cmd_flags & REQ_FUA)
                nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

        /* We did a partial send previously, and we at least sent the whole
         * request struct, so just go and send the rest of the pages in the
         * request.
         */
        if (sent) {
                if (sent >= sizeof(request)) {
                        skip = sent - sizeof(request);

                        /* initialize handle for tracing purposes */
                        handle = nbd_cmd_handle(cmd);

                        goto send_pages;
                }
                iov_iter_advance(&from, sent);
        } else {
                cmd->cmd_cookie++;
        }
        cmd->index = index;
        cmd->cookie = nsock->cookie;
        cmd->retries = 0;
        request.type = htonl(type | nbd_cmd_flags);
        if (type != NBD_CMD_FLUSH) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        handle = nbd_cmd_handle(cmd);
        memcpy(request.handle, &handle, sizeof(handle));

        trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                req, nbdcmd_to_ascii(type),
                (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
        result = sock_xmit(nbd, index, 1, &from,
                        (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
        trace_nbd_header_sent(req, handle);
        if (result <= 0) {
                if (was_interrupted(result)) {
                        /* If we haven't sent anything we can just return BUSY;
                         * however, if we have sent something we need to make
                         * sure only this req is sent on this socket until it
                         * is completely done.
                         */
                        if (sent) {
                                nsock->pending = req;
                                nsock->sent = sent;
                        }
                        set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                        return BLK_STS_RESOURCE;
                }
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
                return -EAGAIN;
        }
send_pages:
        if (type != NBD_CMD_WRITE)
                goto out;

        bio = req->bio;
        while (bio) {
                struct bio *next = bio->bi_next;
                struct bvec_iter iter;
                struct bio_vec bvec;

                bio_for_each_segment(bvec, bio, iter) {
                        bool is_last = !next && bio_iter_last(bvec, iter);
                        int flags = is_last ? 0 : MSG_MORE;

                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                req, bvec.bv_len);
                        iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
                        if (skip) {
                                if (skip >= iov_iter_count(&from)) {
                                        skip -= iov_iter_count(&from);
                                        continue;
                                }
                                iov_iter_advance(&from, skip);
                                skip = 0;
                        }
                        result = sock_xmit(nbd, index, 1, &from, flags, &sent);
                        if (result <= 0) {
                                if (was_interrupted(result)) {
                                        /* We've already sent the header, we
                                         * have no choice but to set pending and
                                         * return BUSY.
                                         */
                                        nsock->pending = req;
                                        nsock->sent = sent;
                                        set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                                        return BLK_STS_RESOURCE;
                                }
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                return -EAGAIN;
                        }
                        /*
                         * The completion might already have come in,
                         * so break for the last one instead of letting
                         * the iterator do it. This prevents use-after-free
                         * of the bio.
                         */
                        if (is_last)
                                break;
                }
                bio = next;
        }
out:
        trace_nbd_payload_sent(req, handle);
        nsock->pending = NULL;
        nsock->sent = 0;
        return 0;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
        struct nbd_config *config = nbd->config;
        int result;
        struct nbd_reply reply;
        struct nbd_cmd *cmd;
        struct request *req = NULL;
        u64 handle;
        u16 hwq;
        u32 tag;
        struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
        struct iov_iter to;
        int ret = 0;

        reply.magic = 0;
        iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
        result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
        if (result <= 0) {
                if (!nbd_disconnected(config))
                        dev_err(disk_to_dev(nbd->disk),
                                "Receive control failed (result %d)\n", result);
                return ERR_PTR(result);
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                                (unsigned long)ntohl(reply.magic));
                return ERR_PTR(-EPROTO);
        }

        memcpy(&handle, reply.handle, sizeof(handle));
        tag = nbd_handle_to_tag(handle);
        hwq = blk_mq_unique_tag_to_hwq(tag);
        if (hwq < nbd->tag_set.nr_hw_queues)
                req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
                                       blk_mq_unique_tag_to_tag(tag));
        if (!req || !blk_mq_request_started(req)) {
                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
                        tag, req);
                return ERR_PTR(-ENOENT);
        }
        trace_nbd_header_received(req, handle);
        cmd = blk_mq_rq_to_pdu(req);

        mutex_lock(&cmd->lock);
        if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
                dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
                        req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
                ret = -ENOENT;
                goto out;
        }
        if (cmd->status != BLK_STS_OK) {
                dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
                        req);
                ret = -ENOENT;
                goto out;
        }
        if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
                dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
                        req);
                ret = -ENOENT;
                goto out;
        }
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                cmd->status = BLK_STS_IOERR;
                goto out;
        }

        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
        if (rq_data_dir(req) != WRITE) {
                struct req_iterator iter;
                struct bio_vec bvec;

                rq_for_each_segment(bvec, req, iter) {
                        iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
                        result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                /*
                                 * If we've disconnected, we need to make sure we
                                 * complete this request, otherwise error out
                                 * and let the timeout stuff handle resubmitting
                                 * this request onto another connection.
                                 */
                                if (nbd_disconnected(config)) {
                                        cmd->status = BLK_STS_IOERR;
                                        goto out;
                                }
                                ret = -EIO;
                                goto out;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                req, bvec.bv_len);
                }
        }
out:
        trace_nbd_payload_received(req, handle);
        mutex_unlock(&cmd->lock);
        return ret ? ERR_PTR(ret) : cmd;
}

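/*
 * Per-connection receive worker, one instance per socket on nbd->recv_workq.
 * It loops reading replies and completing the matching requests until the
 * socket errors out, at which point the connection is marked dead.
 */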
static void recv_work(struct work_struct *work)
{
        struct recv_thread_args *args = container_of(work,
                                                     struct recv_thread_args,
                                                     work);
        struct nbd_device *nbd = args->nbd;
        struct nbd_config *config = nbd->config;
        struct nbd_cmd *cmd;
        struct request *rq;

        while (1) {
                cmd = nbd_read_stat(nbd, args->index);
                if (IS_ERR(cmd)) {
                        struct nbd_sock *nsock = config->socks[args->index];

                        mutex_lock(&nsock->tx_lock);
                        nbd_mark_nsock_dead(nbd, nsock, 1);
                        mutex_unlock(&nsock->tx_lock);
                        break;
                }

                rq = blk_mq_rq_from_pdu(cmd);
                if (likely(!blk_should_fake_timeout(rq->q)))
                        blk_mq_complete_request(rq);
        }
        nbd_config_put(nbd);
        atomic_dec(&config->recv_threads);
        wake_up(&config->recv_wq);
        kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

        mutex_lock(&cmd->lock);
        cmd->status = BLK_STS_IOERR;
        mutex_unlock(&cmd->lock);

        blk_mq_complete_request(req);
        return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
        blk_mq_quiesce_queue(nbd->disk->queue);
        blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
        blk_mq_unquiesce_queue(nbd->disk->queue);
        dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

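/*
 * Pick another live connection to resubmit a command on.  The chosen index
 * is cached in nsock->fallback_index so later requeues off the same dead
 * socket don't rescan the whole array.
 */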
static int find_fallback(struct nbd_device *nbd, int index)
{
        struct nbd_config *config = nbd->config;
        int new_index = -1;
        struct nbd_sock *nsock = config->socks[index];
        int fallback = nsock->fallback_index;

        if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
                return new_index;

        if (config->num_connections <= 1) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Dead connection, failed to find a fallback\n");
                return new_index;
        }

        if (fallback >= 0 && fallback < config->num_connections &&
            !config->socks[fallback]->dead)
                return fallback;

        if (nsock->fallback_index < 0 ||
            nsock->fallback_index >= config->num_connections ||
            config->socks[nsock->fallback_index]->dead) {
                int i;
                for (i = 0; i < config->num_connections; i++) {
                        if (i == index)
                                continue;
                        if (!config->socks[i]->dead) {
                                new_index = i;
                                break;
                        }
                }
                nsock->fallback_index = new_index;
                if (new_index < 0) {
                        dev_err_ratelimited(disk_to_dev(nbd->disk),
                                            "Dead connection, failed to find a fallback\n");
                        return new_index;
                }
        }
        new_index = nsock->fallback_index;
        return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
        struct nbd_config *config = nbd->config;
        if (!config->dead_conn_timeout)
                return 0;
        if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
                return 0;
        return wait_event_timeout(config->conn_wait,
                                  atomic_read(&config->live_connections) > 0,
                                  config->dead_conn_timeout) > 0;
}

static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_config *config;
        struct nbd_sock *nsock;
        int ret;

        if (!refcount_inc_not_zero(&nbd->config_refs)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Socks array is empty\n");
                blk_mq_start_request(req);
                return -EINVAL;
        }
        config = nbd->config;

        if (index >= config->num_connections) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on invalid socket\n");
                nbd_config_put(nbd);
                blk_mq_start_request(req);
                return -EINVAL;
        }
        cmd->status = BLK_STS_OK;
again:
        nsock = config->socks[index];
        mutex_lock(&nsock->tx_lock);
        if (nsock->dead) {
                int old_index = index;
                index = find_fallback(nbd, index);
                mutex_unlock(&nsock->tx_lock);
                if (index < 0) {
                        if (wait_for_reconnect(nbd)) {
                                index = old_index;
                                goto again;
                        }
                        /* All the sockets should already be down at this point,
                         * we just want to make sure that DISCONNECTED is set so
                         * any requests that come in that were queued waiting
                         * for the reconnect timer don't trigger the timer again
                         * and instead just error out.
                         */
                        sock_shutdown(nbd);
                        nbd_config_put(nbd);
                        blk_mq_start_request(req);
                        return -EIO;
                }
                goto again;
        }

        /* Handle the case where we have a pending request that was partially
         * transmitted and _has_ to be serviced first.  We need to call requeue
         * here so that it gets put _after_ the request that is already on the
         * dispatch list.
         */
        blk_mq_start_request(req);
        if (unlikely(nsock->pending && nsock->pending != req)) {
                nbd_requeue_cmd(cmd);
                ret = 0;
                goto out;
        }
        /*
         * Some failures are related to the link going down, so anything that
         * returns EAGAIN can be retried on a different socket.
         */
        ret = nbd_send_cmd(nbd, cmd, index);
        if (ret == -EAGAIN) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Request send failed, requeueing\n");
                nbd_mark_nsock_dead(nbd, nsock, 1);
                nbd_requeue_cmd(cmd);
                ret = 0;
        }
out:
        mutex_unlock(&nsock->tx_lock);
        nbd_config_put(nbd);
        return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
        int ret;

        /*
         * Since we look at the bio's to send the request over the network we
         * need to make sure the completion work doesn't mark this request done
         * before we are done doing our send.  This keeps us from dereferencing
         * freed data if we have particularly fast completions (ie we get the
         * completion before we exit sock_xmit on the last bvec) or in the case
         * that the server is misbehaving (or there was an error) before we're
         * done sending everything over the wire.
         */
        mutex_lock(&cmd->lock);
        clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

        /* We can be called directly from the user space process, which means we
         * could possibly have signals pending, so our sendmsg will fail.  In
         * that case we need to return that we are busy, otherwise error out as
         * appropriate.
         */
        ret = nbd_handle_cmd(cmd, hctx->queue_num);
        if (ret < 0)
                ret = BLK_STS_IOERR;
        else if (!ret)
                ret = BLK_STS_OK;
        mutex_unlock(&cmd->lock);

        return ret;
}

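/*
 * Look up a socket from a userspace fd and validate it.  Sockets lacking a
 * real ->shutdown implementation are rejected, since nbd relies on
 * kernel_sock_shutdown() to kick blocked receivers when a link is torn down.
 */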
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
                                     int *err)
{
        struct socket *sock;

        *err = 0;
        sock = sockfd_lookup(fd, err);
        if (!sock)
                return NULL;

        if (sock->ops->shutdown == sock_no_shutdown) {
                dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
                *err = -EINVAL;
                sockfd_put(sock);
                return NULL;
        }

        return sock;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
                          bool netlink)
{
        struct nbd_config *config = nbd->config;
        struct socket *sock;
        struct nbd_sock **socks;
        struct nbd_sock *nsock;
        int err;

        sock = nbd_get_socket(nbd, arg, &err);
        if (!sock)
                return err;

        if (!netlink && !nbd->task_setup &&
            !test_bit(NBD_RT_BOUND, &config->runtime_flags))
                nbd->task_setup = current;

        if (!netlink &&
            (nbd->task_setup != current ||
             test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
                dev_err(disk_to_dev(nbd->disk),
                        "Device being setup by another task");
                err = -EBUSY;
                goto put_socket;
        }

        nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
        if (!nsock) {
                err = -ENOMEM;
                goto put_socket;
        }

        socks = krealloc(config->socks, (config->num_connections + 1) *
                         sizeof(struct nbd_sock *), GFP_KERNEL);
        if (!socks) {
                kfree(nsock);
                err = -ENOMEM;
                goto put_socket;
        }

        config->socks = socks;

        nsock->fallback_index = -1;
        nsock->dead = false;
        mutex_init(&nsock->tx_lock);
        nsock->sock = sock;
        nsock->pending = NULL;
        nsock->sent = 0;
        nsock->cookie = 0;
        socks[config->num_connections++] = nsock;
        atomic_inc(&config->live_connections);

        return 0;

put_socket:
        sockfd_put(sock);
        return err;
}

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
        struct nbd_config *config = nbd->config;
        struct socket *sock, *old;
        struct recv_thread_args *args;
        int i;
        int err;

        sock = nbd_get_socket(nbd, arg, &err);
        if (!sock)
                return err;

        args = kzalloc(sizeof(*args), GFP_KERNEL);
        if (!args) {
                sockfd_put(sock);
                return -ENOMEM;
        }

        for (i = 0; i < config->num_connections; i++) {
                struct nbd_sock *nsock = config->socks[i];

                if (!nsock->dead)
                        continue;

                mutex_lock(&nsock->tx_lock);
                if (!nsock->dead) {
                        mutex_unlock(&nsock->tx_lock);
                        continue;
                }
                sk_set_memalloc(sock->sk);
                if (nbd->tag_set.timeout)
                        sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
                atomic_inc(&config->recv_threads);
                refcount_inc(&nbd->config_refs);
                old = nsock->sock;
                nsock->fallback_index = -1;
                nsock->sock = sock;
                nsock->dead = false;
                INIT_WORK(&args->work, recv_work);
                args->index = i;
                args->nbd = nbd;
                nsock->cookie++;
                mutex_unlock(&nsock->tx_lock);
                sockfd_put(old);

                clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

                /* We take the tx_lock in an error path in recv_work, so we
                 * need to queue_work outside of it.
                 */
                queue_work(nbd->recv_workq, &args->work);

                atomic_inc(&config->live_connections);
                wake_up(&config->conn_wait);
                return 0;
        }
        sockfd_put(sock);
        kfree(args);
        return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
        if (bdev->bd_openers > 1)
                return;
        set_capacity(bdev->bd_disk, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
        struct nbd_config *config = nbd->config;
        if (config->flags & NBD_FLAG_READ_ONLY)
                set_disk_ro(nbd->disk, true);
        else
                set_disk_ro(nbd->disk, false);
        if (config->flags & NBD_FLAG_SEND_TRIM)
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
        if (config->flags & NBD_FLAG_SEND_FLUSH) {
                if (config->flags & NBD_FLAG_SEND_FUA)
                        blk_queue_write_cache(nbd->disk->queue, true, true);
                else
                        blk_queue_write_cache(nbd->disk->queue, true, false);
        } else
                blk_queue_write_cache(nbd->disk->queue, false, false);
}

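/*
 * Send NBD_CMD_DISC on every connection.  Per the NBD protocol the server
 * is not expected to reply to a disconnect request, so nothing is read back.
 */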
static void send_disconnects(struct nbd_device *nbd)
{
        struct nbd_config *config = nbd->config;
        struct nbd_request request = {
                .magic = htonl(NBD_REQUEST_MAGIC),
                .type = htonl(NBD_CMD_DISC),
        };
        struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
        struct iov_iter from;
        int i, ret;

        for (i = 0; i < config->num_connections; i++) {
                struct nbd_sock *nsock = config->socks[i];

                iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
                mutex_lock(&nsock->tx_lock);
                ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
                if (ret <= 0)
                        dev_err(disk_to_dev(nbd->disk),
                                "Send disconnect failed %d\n", ret);
                mutex_unlock(&nsock->tx_lock);
        }
}

static int nbd_disconnect(struct nbd_device *nbd)
{
        struct nbd_config *config = nbd->config;

        dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
        set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
        set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
        send_disconnects(nbd);
        return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
        sock_shutdown(nbd);
        nbd_clear_que(nbd);
        nbd->task_setup = NULL;
}

static void nbd_config_put(struct nbd_device *nbd)
{
        if (refcount_dec_and_mutex_lock(&nbd->config_refs,
                                        &nbd->config_lock)) {
                struct nbd_config *config = nbd->config;
                nbd_dev_dbg_close(nbd);
                nbd_size_clear(nbd);
                if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
                                       &config->runtime_flags))
                        device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
                nbd->task_recv = NULL;
                nbd_clear_sock(nbd);
                if (config->num_connections) {
                        int i;
                        for (i = 0; i < config->num_connections; i++) {
                                sockfd_put(config->socks[i]->sock);
                                kfree(config->socks[i]);
                        }
                        kfree(config->socks);
                }
                kfree(nbd->config);
                nbd->config = NULL;

                if (nbd->recv_workq)
                        destroy_workqueue(nbd->recv_workq);
                nbd->recv_workq = NULL;

                nbd->tag_set.timeout = 0;
                nbd->disk->queue->limits.discard_granularity = 0;
                nbd->disk->queue->limits.discard_alignment = 0;
                blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
                blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

                mutex_unlock(&nbd->config_lock);
                nbd_put(nbd);
                module_put(THIS_MODULE);
        }
}

static int nbd_start_device(struct nbd_device *nbd)
{
        struct nbd_config *config = nbd->config;
        int num_connections = config->num_connections;
        int error = 0, i;

        if (nbd->task_recv)
                return -EBUSY;
        if (!config->socks)
                return -EINVAL;
        if (num_connections > 1 &&
            !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
                dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
                return -EINVAL;
        }

        nbd->recv_workq = alloc_workqueue("knbd%d-recv",
                                          WQ_MEM_RECLAIM | WQ_HIGHPRI |
                                          WQ_UNBOUND, 0, nbd->index);
        if (!nbd->recv_workq) {
                dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
                return -ENOMEM;
        }

        blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
        nbd->task_recv = current;

        nbd_parse_flags(nbd);

        error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (error) {
                dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
                return error;
        }
        set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

        nbd_dev_dbg_init(nbd);
        for (i = 0; i < num_connections; i++) {
                struct recv_thread_args *args;

                args = kzalloc(sizeof(*args), GFP_KERNEL);
                if (!args) {
                        sock_shutdown(nbd);
                        /*
                         * If num_connections is m (m > 2), and the first n
                         * (1 < n < m) kzallocs succeed but the (n + 1)th
                         * fails, we still have n recv threads.  So add
                         * flush_workqueue here to prevent those recv threads
                         * from dropping the last config_refs and trying to
                         * destroy the workqueue from inside the workqueue.
                         */
                        if (i)
                                flush_workqueue(nbd->recv_workq);
                        return -ENOMEM;
                }
                sk_set_memalloc(config->socks[i]->sock->sk);
                if (nbd->tag_set.timeout)
                        config->socks[i]->sock->sk->sk_sndtimeo =
                                nbd->tag_set.timeout;
                atomic_inc(&config->recv_threads);
                refcount_inc(&nbd->config_refs);
                INIT_WORK(&args->work, recv_work);
                args->nbd = nbd;
                args->index = i;
                queue_work(nbd->recv_workq, &args->work);
        }
        return nbd_set_size(nbd, config->bytesize, config->blksize);
}

static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
        struct nbd_config *config = nbd->config;
        int ret;

        ret = nbd_start_device(nbd);
        if (ret)
                return ret;

        if (max_part)
                set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
        mutex_unlock(&nbd->config_lock);
        ret = wait_event_interruptible(config->recv_wq,
                                         atomic_read(&config->recv_threads) == 0);
        if (ret)
                sock_shutdown(nbd);
        flush_workqueue(nbd->recv_workq);

        mutex_lock(&nbd->config_lock);
        nbd_bdev_reset(bdev);
        /* user requested, ignore socket errors */
        if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
                ret = 0;
        if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
                ret = -ETIMEDOUT;
        return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
                                 struct block_device *bdev)
{
        sock_shutdown(nbd);
        __invalidate_device(bdev, true);
        nbd_bdev_reset(bdev);
        if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
}

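/*
 * timeout == 0 disables forced disconnection, but the block layer still
 * needs a nonzero request timer, so fall back to 30 seconds; in that mode
 * nbd_xmit_timeout() only warns about the stuck request and re-arms the
 * timer instead of failing the request.
 */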
static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
        nbd->tag_set.timeout = timeout * HZ;
        if (timeout)
                blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
        else
                blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}

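/*
 * A minimal sketch of the legacy ioctl-based setup a userspace client such
 * as nbd-client performs, assuming sock_fd is an already-connected socket
 * to an nbd server and nbd_fd is an open /dev/nbdX (names illustrative):
 *
 *      ioctl(nbd_fd, NBD_SET_SOCK, sock_fd);
 *      ioctl(nbd_fd, NBD_SET_BLKSIZE, 4096);
 *      ioctl(nbd_fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *      ioctl(nbd_fd, NBD_DO_IT);       (blocks until disconnect)
 */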
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        struct nbd_config *config = nbd->config;

        switch (cmd) {
        case NBD_DISCONNECT:
                return nbd_disconnect(nbd);
        case NBD_CLEAR_SOCK:
                nbd_clear_sock_ioctl(nbd, bdev);
                return 0;
        case NBD_SET_SOCK:
                return nbd_add_socket(nbd, arg, false);
        case NBD_SET_BLKSIZE:
                return nbd_set_size(nbd, config->bytesize, arg);
        case NBD_SET_SIZE:
                return nbd_set_size(nbd, arg, config->blksize);
        case NBD_SET_SIZE_BLOCKS:
                return nbd_set_size(nbd, arg * config->blksize,
                                    config->blksize);
        case NBD_SET_TIMEOUT:
                nbd_set_cmd_timeout(nbd, arg);
                return 0;

        case NBD_SET_FLAGS:
                config->flags = arg;
                return 0;
        case NBD_DO_IT:
                return nbd_start_device_ioctl(nbd, bdev);
        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only.  The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                return 0;
        case NBD_PRINT_DEBUG:
                /*
                 * For compatibility only, we no longer keep a list of
                 * outstanding requests.
                 */
                return 0;
        }
        return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        struct nbd_config *config = nbd->config;
        int error = -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* The block layer will pass back some non-nbd ioctls in case we have
         * special handling for them, but we don't, so just return an error.
         */
        if (_IOC_TYPE(cmd) != 0xab)
                return -EINVAL;

        mutex_lock(&nbd->config_lock);

        /* Don't allow ioctl operations on an nbd device that was created with
         * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
         */
1423         if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1424             (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1425                 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1426         else
1427                 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1428         mutex_unlock(&nbd->config_lock);
1429         return error;
1430 }
1431
1432 static struct nbd_config *nbd_alloc_config(void)
1433 {
1434         struct nbd_config *config;
1435
1436         config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1437         if (!config)
1438                 return NULL;
1439         atomic_set(&config->recv_threads, 0);
1440         init_waitqueue_head(&config->recv_wq);
1441         init_waitqueue_head(&config->conn_wait);
1442         config->blksize = NBD_DEF_BLKSIZE;
1443         atomic_set(&config->live_connections, 0);
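	/* Pin the module while a config exists; the matching module_put()
	 * happens when the last config reference is dropped.
	 */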
1444         try_module_get(THIS_MODULE);
1445         return config;
1446 }
1447
1448 static int nbd_open(struct block_device *bdev, fmode_t mode)
1449 {
1450         struct nbd_device *nbd;
1451         int ret = 0;
1452
1453         mutex_lock(&nbd_index_mutex);
1454         nbd = bdev->bd_disk->private_data;
1455         if (!nbd) {
1456                 ret = -ENXIO;
1457                 goto out;
1458         }
1459         if (!refcount_inc_not_zero(&nbd->refs)) {
1460                 ret = -ENXIO;
1461                 goto out;
1462         }
1463         if (!refcount_inc_not_zero(&nbd->config_refs)) {
1464                 struct nbd_config *config;
1465
1466                 mutex_lock(&nbd->config_lock);
1467                 if (refcount_inc_not_zero(&nbd->config_refs)) {
1468                         mutex_unlock(&nbd->config_lock);
1469                         goto out;
1470                 }
1471                 config = nbd->config = nbd_alloc_config();
1472                 if (!config) {
1473                         ret = -ENOMEM;
1474                         mutex_unlock(&nbd->config_lock);
1475                         goto out;
1476                 }
1477                 refcount_set(&nbd->config_refs, 1);
1478                 refcount_inc(&nbd->refs);
1479                 mutex_unlock(&nbd->config_lock);
1480                 if (max_part)
1481                         set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1482         } else if (nbd_disconnected(nbd->config)) {
1483                 if (max_part)
1484                         set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1485         }
1486 out:
1487         mutex_unlock(&nbd_index_mutex);
1488         return ret;
1489 }
1490
1491 static void nbd_release(struct gendisk *disk, fmode_t mode)
1492 {
1493         struct nbd_device *nbd = disk->private_data;
1494
1495         if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1496                         disk->part0->bd_openers == 0)
1497                 nbd_disconnect_and_put(nbd);
1498
1499         nbd_config_put(nbd);
1500         nbd_put(nbd);
1501 }
1502
1503 static const struct block_device_operations nbd_fops =
1504 {
1505         .owner =        THIS_MODULE,
1506         .open =         nbd_open,
1507         .release =      nbd_release,
1508         .ioctl =        nbd_ioctl,
1509         .compat_ioctl = nbd_ioctl,
1510 };
1511
1512 #if IS_ENABLED(CONFIG_DEBUG_FS)
1513
1514 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1515 {
1516         struct nbd_device *nbd = s->private;
1517
1518         if (nbd->task_recv)
1519                 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
1520
1521         return 0;
1522 }
1523
1524 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
1525
1526 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1527 {
1528         struct nbd_device *nbd = s->private;
1529         u32 flags = nbd->config->flags;
1530
1531         seq_printf(s, "Hex: 0x%08x\n\n", flags);
1532
1533         seq_puts(s, "Known flags:\n");
1534
1535         if (flags & NBD_FLAG_HAS_FLAGS)
1536                 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1537         if (flags & NBD_FLAG_READ_ONLY)
1538                 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1539         if (flags & NBD_FLAG_SEND_FLUSH)
1540                 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1541         if (flags & NBD_FLAG_SEND_FUA)
1542                 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1543         if (flags & NBD_FLAG_SEND_TRIM)
1544                 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1545
1546         return 0;
1547 }
1548
1549 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
1550
1551 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1552 {
1553         struct dentry *dir;
1554         struct nbd_config *config = nbd->config;
1555
1556         if (!nbd_dbg_dir)
1557                 return -EIO;
1558
1559         dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1560         if (!dir) {
1561                 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1562                         nbd_name(nbd));
1563                 return -EIO;
1564         }
1565         config->dbg_dir = dir;
1566
1567         debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
1568         debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1569         debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1570         debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
1571         debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
1572
1573         return 0;
1574 }
1575
1576 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1577 {
1578         debugfs_remove_recursive(nbd->config->dbg_dir);
1579 }
1580
1581 static int nbd_dbg_init(void)
1582 {
1583         struct dentry *dbg_dir;
1584
1585         dbg_dir = debugfs_create_dir("nbd", NULL);
1586         if (!dbg_dir)
1587                 return -EIO;
1588
1589         nbd_dbg_dir = dbg_dir;
1590
1591         return 0;
1592 }
1593
1594 static void nbd_dbg_close(void)
1595 {
1596         debugfs_remove_recursive(nbd_dbg_dir);
1597 }
1598
1599 #else  /* IS_ENABLED(CONFIG_DEBUG_FS) */
1600
1601 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1602 {
1603         return 0;
1604 }
1605
1606 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1607 {
1608 }
1609
1610 static int nbd_dbg_init(void)
1611 {
1612         return 0;
1613 }
1614
1615 static void nbd_dbg_close(void)
1616 {
1617 }
1618
1619 #endif
1620
1621 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1622                             unsigned int hctx_idx, unsigned int numa_node)
1623 {
1624         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1625         cmd->nbd = set->driver_data;
1626         cmd->flags = 0;
1627         mutex_init(&cmd->lock);
1628         return 0;
1629 }
1630
1631 static const struct blk_mq_ops nbd_mq_ops = {
1632         .queue_rq       = nbd_queue_rq,
1633         .complete       = nbd_complete_rq,
1634         .init_request   = nbd_init_request,
1635         .timeout        = nbd_xmit_timeout,
1636 };
1637
1638 static int nbd_dev_add(int index)
1639 {
1640         struct nbd_device *nbd;
1641         struct gendisk *disk;
1642         struct request_queue *q;
1643         int err = -ENOMEM;
1644
1645         nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1646         if (!nbd)
1647                 goto out;
1648
1649         disk = alloc_disk(1 << part_shift);
1650         if (!disk)
1651                 goto out_free_nbd;
1652
1653         if (index >= 0) {
1654                 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1655                                 GFP_KERNEL);
1656                 if (err == -ENOSPC)
1657                         err = -EEXIST;
1658         } else {
1659                 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1660                 if (err >= 0)
1661                         index = err;
1662         }
1663         if (err < 0)
1664                 goto out_free_disk;
1665
1666         nbd->index = index;
1667         nbd->disk = disk;
1668         nbd->tag_set.ops = &nbd_mq_ops;
1669         nbd->tag_set.nr_hw_queues = 1;
1670         nbd->tag_set.queue_depth = 128;
1671         nbd->tag_set.numa_node = NUMA_NO_NODE;
1672         nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
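	/*
	 * nbd's queue_rq sends requests over a socket and may sleep, so
	 * the queue must be marked BLK_MQ_F_BLOCKING.
	 */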
1673         nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1674                 BLK_MQ_F_BLOCKING;
1675         nbd->tag_set.driver_data = nbd;
1676         nbd->destroy_complete = NULL;
1677
1678         err = blk_mq_alloc_tag_set(&nbd->tag_set);
1679         if (err)
1680                 goto out_free_idr;
1681
1682         q = blk_mq_init_queue(&nbd->tag_set);
1683         if (IS_ERR(q)) {
1684                 err = PTR_ERR(q);
1685                 goto out_free_tags;
1686         }
1687         disk->queue = q;
1688
1689         /*
1690          * Tell the block layer that we are not a rotational device
1691          */
1692         blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1693         blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
1694         disk->queue->limits.discard_granularity = 0;
1695         disk->queue->limits.discard_alignment = 0;
1696         blk_queue_max_discard_sectors(disk->queue, 0);
1697         blk_queue_max_segment_size(disk->queue, UINT_MAX);
1698         blk_queue_max_segments(disk->queue, USHRT_MAX);
1699         blk_queue_max_hw_sectors(disk->queue, 65536);
1700         disk->queue->limits.max_sectors = 256;
1701
1702         mutex_init(&nbd->config_lock);
1703         refcount_set(&nbd->config_refs, 0);
1704         refcount_set(&nbd->refs, 1);
1705         INIT_LIST_HEAD(&nbd->list);
1706         disk->major = NBD_MAJOR;
1707         disk->first_minor = index << part_shift;
1708         disk->fops = &nbd_fops;
1709         disk->private_data = nbd;
1710         sprintf(disk->disk_name, "nbd%d", index);
1711         add_disk(disk);
1712         nbd_total_devices++;
1713         return index;
1714
1715 out_free_tags:
1716         blk_mq_free_tag_set(&nbd->tag_set);
1717 out_free_idr:
1718         idr_remove(&nbd_index_idr, index);
1719 out_free_disk:
1720         put_disk(disk);
1721 out_free_nbd:
1722         kfree(nbd);
1723 out:
1724         return err;
1725 }
1726
1727 static int find_free_cb(int id, void *ptr, void *data)
1728 {
1729         struct nbd_device *nbd = ptr;
1730         struct nbd_device **found = data;
1731
1732         if (!refcount_read(&nbd->config_refs)) {
1733                 *found = nbd;
1734                 return 1;
1735         }
1736         return 0;
1737 }
1738
1739 /* Netlink interface. */
1740 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1741         [NBD_ATTR_INDEX]                =       { .type = NLA_U32 },
1742         [NBD_ATTR_SIZE_BYTES]           =       { .type = NLA_U64 },
1743         [NBD_ATTR_BLOCK_SIZE_BYTES]     =       { .type = NLA_U64 },
1744         [NBD_ATTR_TIMEOUT]              =       { .type = NLA_U64 },
1745         [NBD_ATTR_SERVER_FLAGS]         =       { .type = NLA_U64 },
1746         [NBD_ATTR_CLIENT_FLAGS]         =       { .type = NLA_U64 },
1747         [NBD_ATTR_SOCKETS]              =       { .type = NLA_NESTED},
1748         [NBD_ATTR_DEAD_CONN_TIMEOUT]    =       { .type = NLA_U64 },
1749         [NBD_ATTR_DEVICE_LIST]          =       { .type = NLA_NESTED},
1750 };
1751
1752 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1753         [NBD_SOCK_FD]                   =       { .type = NLA_U32 },
1754 };
1755
1756 /* We don't use this right now since we don't parse the incoming list, but we
1757  * still want it here so userspace knows what to expect.
1758  */
1759 static const struct nla_policy __attribute__((unused))
1760 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1761         [NBD_DEVICE_INDEX]              =       { .type = NLA_U32 },
1762         [NBD_DEVICE_CONNECTED]          =       { .type = NLA_U8 },
1763 };
1764
1765 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1766 {
1767         struct nbd_config *config = nbd->config;
1768         u64 bsize = config->blksize;
1769         u64 bytes = config->bytesize;
1770
1771         if (info->attrs[NBD_ATTR_SIZE_BYTES])
1772                 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1773
1774         if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
1775                 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1776
1777         if (bytes != config->bytesize || bsize != config->blksize)
1778                 return nbd_set_size(nbd, bytes, bsize);
1779         return 0;
1780 }
1781
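/*
 * Sketch of the attribute layout a CONNECT request is expected to carry,
 * as parsed below (TIMEOUT, DEAD_CONN_TIMEOUT, SERVER_FLAGS, CLIENT_FLAGS
 * and BLOCK_SIZE_BYTES are optional):
 *
 *	NBD_ATTR_INDEX		(u32, optional - first free device if absent)
 *	NBD_ATTR_SIZE_BYTES	(u64, required)
 *	NBD_ATTR_SOCKETS	(nested, required)
 *		NBD_SOCK_ITEM
 *			NBD_SOCK_FD	(u32, fd of a connected socket)
 */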
1782 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1783 {
1784         DECLARE_COMPLETION_ONSTACK(destroy_complete);
1785         struct nbd_device *nbd = NULL;
1786         struct nbd_config *config;
1787         int index = -1;
1788         int ret;
1789         bool put_dev = false;
1790
1791         if (!netlink_capable(skb, CAP_SYS_ADMIN))
1792                 return -EPERM;
1793
1794         if (info->attrs[NBD_ATTR_INDEX])
1795                 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1796         if (!info->attrs[NBD_ATTR_SOCKETS]) {
1797                 printk(KERN_ERR "nbd: must specify at least one socket\n");
1798                 return -EINVAL;
1799         }
1800         if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1801                 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1802                 return -EINVAL;
1803         }
1804 again:
1805         mutex_lock(&nbd_index_mutex);
1806         if (index == -1) {
1807                 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1808                 if (ret == 0) {
1809                         int new_index;
1810                         new_index = nbd_dev_add(-1);
1811                         if (new_index < 0) {
1812                                 mutex_unlock(&nbd_index_mutex);
1813                                 printk(KERN_ERR "nbd: failed to add new device\n");
1814                                 return new_index;
1815                         }
1816                         nbd = idr_find(&nbd_index_idr, new_index);
1817                 }
1818         } else {
1819                 nbd = idr_find(&nbd_index_idr, index);
1820                 if (!nbd) {
1821                         ret = nbd_dev_add(index);
1822                         if (ret < 0) {
1823                                 mutex_unlock(&nbd_index_mutex);
1824                                 printk(KERN_ERR "nbd: failed to add new device\n");
1825                                 return ret;
1826                         }
1827                         nbd = idr_find(&nbd_index_idr, index);
1828                 }
1829         }
1830         if (!nbd) {
1831                 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1832                        index);
1833                 mutex_unlock(&nbd_index_mutex);
1834                 return -EINVAL;
1835         }
1836
1837         if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
1838             test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
1839                 nbd->destroy_complete = &destroy_complete;
1840                 mutex_unlock(&nbd_index_mutex);
1841
1842                 /* Wait until the nbd device is completely destroyed */
1843                 wait_for_completion(&destroy_complete);
1844                 goto again;
1845         }
1846
1847         if (!refcount_inc_not_zero(&nbd->refs)) {
1848                 mutex_unlock(&nbd_index_mutex);
1849                 if (index == -1)
1850                         goto again;
1851                 printk(KERN_ERR "nbd: device at index %d is going down\n",
1852                        index);
1853                 return -EINVAL;
1854         }
1855         mutex_unlock(&nbd_index_mutex);
1856
1857         mutex_lock(&nbd->config_lock);
1858         if (refcount_read(&nbd->config_refs)) {
1859                 mutex_unlock(&nbd->config_lock);
1860                 nbd_put(nbd);
1861                 if (index == -1)
1862                         goto again;
1863                 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1864                 return -EBUSY;
1865         }
1866         if (WARN_ON(nbd->config)) {
1867                 mutex_unlock(&nbd->config_lock);
1868                 nbd_put(nbd);
1869                 return -EINVAL;
1870         }
1871         config = nbd->config = nbd_alloc_config();
1872         if (!nbd->config) {
1873                 mutex_unlock(&nbd->config_lock);
1874                 nbd_put(nbd);
1875                 printk(KERN_ERR "nbd: couldn't allocate config\n");
1876                 return -ENOMEM;
1877         }
1878         refcount_set(&nbd->config_refs, 1);
1879         set_bit(NBD_RT_BOUND, &config->runtime_flags);
1880
1881         ret = nbd_genl_size_set(info, nbd);
1882         if (ret)
1883                 goto out;
1884
1885         if (info->attrs[NBD_ATTR_TIMEOUT])
1886                 nbd_set_cmd_timeout(nbd,
1887                                     nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
1888         if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1889                 config->dead_conn_timeout =
1890                         nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1891                 config->dead_conn_timeout *= HZ;
1892         }
1893         if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1894                 config->flags =
1895                         nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
1896         if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1897                 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1898                 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1899                         set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
1900                                 &config->runtime_flags);
1901                         set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
1902                         put_dev = true;
1903                 } else {
1904                         clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
1905                 }
1906                 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
1907                         set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
1908                                 &config->runtime_flags);
1909                 }
1910         }
1911
1912         if (info->attrs[NBD_ATTR_SOCKETS]) {
1913                 struct nlattr *attr;
1914                 int rem, fd;
1915
1916                 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1917                                     rem) {
1918                         struct nlattr *socks[NBD_SOCK_MAX+1];
1919
1920                         if (nla_type(attr) != NBD_SOCK_ITEM) {
1921                                 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1922                                 ret = -EINVAL;
1923                                 goto out;
1924                         }
1925                         ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
1926                                                           attr,
1927                                                           nbd_sock_policy,
1928                                                           info->extack);
1929                         if (ret != 0) {
1930                                 printk(KERN_ERR "nbd: error processing sock list\n");
1931                                 ret = -EINVAL;
1932                                 goto out;
1933                         }
1934                         if (!socks[NBD_SOCK_FD])
1935                                 continue;
1936                         fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1937                         ret = nbd_add_socket(nbd, fd, true);
1938                         if (ret)
1939                                 goto out;
1940                 }
1941         }
1942         ret = nbd_start_device(nbd);
1943 out:
1944         mutex_unlock(&nbd->config_lock);
1945         if (!ret) {
1946                 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
1947                 refcount_inc(&nbd->config_refs);
1948                 nbd_connect_reply(info, nbd->index);
1949         }
1950         nbd_config_put(nbd);
1951         if (put_dev)
1952                 nbd_put(nbd);
1953         return ret;
1954 }
1955
1956 static void nbd_disconnect_and_put(struct nbd_device *nbd)
1957 {
1958         mutex_lock(&nbd->config_lock);
1959         nbd_disconnect(nbd);
1960         nbd_clear_sock(nbd);
1961         mutex_unlock(&nbd->config_lock);
1962         /*
1963          * Make sure the recv thread has finished, so it does not drop the
1964          * last config ref and try to destroy the workqueue from inside the
1965          * workqueue itself.
1966          */
1967         flush_workqueue(nbd->recv_workq);
1968         if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1969                                &nbd->config->runtime_flags))
1970                 nbd_config_put(nbd);
1971 }
1972
1973 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1974 {
1975         struct nbd_device *nbd;
1976         int index;
1977
1978         if (!netlink_capable(skb, CAP_SYS_ADMIN))
1979                 return -EPERM;
1980
1981         if (!info->attrs[NBD_ATTR_INDEX]) {
1982                 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
1983                 return -EINVAL;
1984         }
1985         index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1986         mutex_lock(&nbd_index_mutex);
1987         nbd = idr_find(&nbd_index_idr, index);
1988         if (!nbd) {
1989                 mutex_unlock(&nbd_index_mutex);
1990                 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1991                        index);
1992                 return -EINVAL;
1993         }
1994         if (!refcount_inc_not_zero(&nbd->refs)) {
1995                 mutex_unlock(&nbd_index_mutex);
1996                 printk(KERN_ERR "nbd: device at index %d is going down\n",
1997                        index);
1998                 return -EINVAL;
1999         }
2000         mutex_unlock(&nbd_index_mutex);
2001         if (!refcount_inc_not_zero(&nbd->config_refs)) {
2002                 nbd_put(nbd);
2003                 return 0;
2004         }
2005         nbd_disconnect_and_put(nbd);
2006         nbd_config_put(nbd);
2007         nbd_put(nbd);
2008         return 0;
2009 }
2010
2011 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2012 {
2013         struct nbd_device *nbd = NULL;
2014         struct nbd_config *config;
2015         int index;
2016         int ret = 0;
2017         bool put_dev = false;
2018
2019         if (!netlink_capable(skb, CAP_SYS_ADMIN))
2020                 return -EPERM;
2021
2022         if (!info->attrs[NBD_ATTR_INDEX]) {
2023                 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
2024                 return -EINVAL;
2025         }
2026         index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2027         mutex_lock(&nbd_index_mutex);
2028         nbd = idr_find(&nbd_index_idr, index);
2029         if (!nbd) {
2030                 mutex_unlock(&nbd_index_mutex);
2031                 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
2032                        index);
2033                 return -EINVAL;
2034         }
2035         if (!refcount_inc_not_zero(&nbd->refs)) {
2036                 mutex_unlock(&nbd_index_mutex);
2037                 printk(KERN_ERR "nbd: device at index %d is going down\n",
2038                        index);
2039                 return -EINVAL;
2040         }
2041         mutex_unlock(&nbd_index_mutex);
2042
2043         if (!refcount_inc_not_zero(&nbd->config_refs)) {
2044                 dev_err(nbd_to_dev(nbd),
2045                         "not configured, cannot reconfigure\n");
2046                 nbd_put(nbd);
2047                 return -EINVAL;
2048         }
2049
2050         mutex_lock(&nbd->config_lock);
2051         config = nbd->config;
2052         if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
2053             !nbd->task_recv) {
2054                 dev_err(nbd_to_dev(nbd),
2055                         "not configured, cannot reconfigure\n");
2056                 ret = -EINVAL;
2057                 goto out;
2058         }
2059
2060         ret = nbd_genl_size_set(info, nbd);
2061         if (ret)
2062                 goto out;
2063
2064         if (info->attrs[NBD_ATTR_TIMEOUT])
2065                 nbd_set_cmd_timeout(nbd,
2066                                     nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2067         if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2068                 config->dead_conn_timeout =
2069                         nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2070                 config->dead_conn_timeout *= HZ;
2071         }
2072         if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2073                 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2074                 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2075                         if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
2076                                               &config->runtime_flags))
2077                                 put_dev = true;
2078                         set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
2079                 } else {
2080                         if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
2081                                                &config->runtime_flags))
2082                                 refcount_inc(&nbd->refs);
2083                         clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
2084                 }
2085
2086                 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2087                         set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2088                                         &config->runtime_flags);
2089                 } else {
2090                         clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2091                                         &config->runtime_flags);
2092                 }
2093         }
2094
2095         if (info->attrs[NBD_ATTR_SOCKETS]) {
2096                 struct nlattr *attr;
2097                 int rem, fd;
2098
2099                 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2100                                     rem) {
2101                         struct nlattr *socks[NBD_SOCK_MAX+1];
2102
2103                         if (nla_type(attr) != NBD_SOCK_ITEM) {
2104                                 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2105                                 ret = -EINVAL;
2106                                 goto out;
2107                         }
2108                         ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2109                                                           attr,
2110                                                           nbd_sock_policy,
2111                                                           info->extack);
2112                         if (ret != 0) {
2113                                 printk(KERN_ERR "nbd: error processing sock list\n");
2114                                 ret = -EINVAL;
2115                                 goto out;
2116                         }
2117                         if (!socks[NBD_SOCK_FD])
2118                                 continue;
2119                         fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2120                         ret = nbd_reconnect_socket(nbd, fd);
2121                         if (ret) {
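				/* -ENOSPC just means there was no dead
				 * connection to replace, which is not an
				 * error for a reconfigure.
				 */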
2122                                 if (ret == -ENOSPC)
2123                                         ret = 0;
2124                                 goto out;
2125                         }
2126                         dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2127                 }
2128         }
2129 out:
2130         mutex_unlock(&nbd->config_lock);
2131         nbd_config_put(nbd);
2132         nbd_put(nbd);
2133         if (put_dev)
2134                 nbd_put(nbd);
2135         return ret;
2136 }
2137
2138 static const struct genl_small_ops nbd_connect_genl_ops[] = {
2139         {
2140                 .cmd    = NBD_CMD_CONNECT,
2141                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2142                 .doit   = nbd_genl_connect,
2143         },
2144         {
2145                 .cmd    = NBD_CMD_DISCONNECT,
2146                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2147                 .doit   = nbd_genl_disconnect,
2148         },
2149         {
2150                 .cmd    = NBD_CMD_RECONFIGURE,
2151                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2152                 .doit   = nbd_genl_reconfigure,
2153         },
2154         {
2155                 .cmd    = NBD_CMD_STATUS,
2156                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2157                 .doit   = nbd_genl_status,
2158         },
2159 };
2160
2161 static const struct genl_multicast_group nbd_mcast_grps[] = {
2162         { .name = NBD_GENL_MCAST_GROUP_NAME, },
2163 };
2164
2165 static struct genl_family nbd_genl_family __ro_after_init = {
2166         .hdrsize        = 0,
2167         .name           = NBD_GENL_FAMILY_NAME,
2168         .version        = NBD_GENL_VERSION,
2169         .module         = THIS_MODULE,
2170         .small_ops      = nbd_connect_genl_ops,
2171         .n_small_ops    = ARRAY_SIZE(nbd_connect_genl_ops),
2172         .maxattr        = NBD_ATTR_MAX,
2173         .policy = nbd_attr_policy,
2174         .mcgrps         = nbd_mcast_grps,
2175         .n_mcgrps       = ARRAY_SIZE(nbd_mcast_grps),
2176 };
2177
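/*
 * Each device appears in the STATUS reply nested as (sketch):
 *
 *	NBD_ATTR_DEVICE_LIST
 *		NBD_DEVICE_ITEM
 *			NBD_DEVICE_INDEX	(u32)
 *			NBD_DEVICE_CONNECTED	(u8)
 */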
2178 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2179 {
2180         struct nlattr *dev_opt;
2181         u8 connected = 0;
2182         int ret;
2183
2184         /* This is a little racy, but for status it's ok.  The
2185          * reason we don't take a ref here is that we can't take
2186          * a ref in the index == -1 case, as we would then need
2187          * to put the device under the nbd_index_mutex, which
2188          * could deadlock if we are configured to remove ourselves
2189          * once we're disconnected.
2190          */
2191         if (refcount_read(&nbd->config_refs))
2192                 connected = 1;
2193         dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
2194         if (!dev_opt)
2195                 return -EMSGSIZE;
2196         ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2197         if (ret)
2198                 return -EMSGSIZE;
2199         ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2200                          connected);
2201         if (ret)
2202                 return -EMSGSIZE;
2203         nla_nest_end(reply, dev_opt);
2204         return 0;
2205 }
2206
2207 static int status_cb(int id, void *ptr, void *data)
2208 {
2209         struct nbd_device *nbd = ptr;
2210         return populate_nbd_status(nbd, (struct sk_buff *)data);
2211 }
2212
2213 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2214 {
2215         struct nlattr *dev_list;
2216         struct sk_buff *reply;
2217         void *reply_head;
2218         size_t msg_size;
2219         int index = -1;
2220         int ret = -ENOMEM;
2221
2222         if (info->attrs[NBD_ATTR_INDEX])
2223                 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2224
2225         mutex_lock(&nbd_index_mutex);
2226
2227         msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2228                                   nla_attr_size(sizeof(u8)));
2229         msg_size *= (index == -1) ? nbd_total_devices : 1;
2230
2231         reply = genlmsg_new(msg_size, GFP_KERNEL);
2232         if (!reply)
2233                 goto out;
2234         reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2235                                        NBD_CMD_STATUS);
2236         if (!reply_head) {
2237                 nlmsg_free(reply);
2238                 goto out;
2239         }
2240
2241         dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
2242         if (index == -1) {
2243                 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2244                 if (ret) {
2245                         nlmsg_free(reply);
2246                         goto out;
2247                 }
2248         } else {
2249                 struct nbd_device *nbd;
2250                 nbd = idr_find(&nbd_index_idr, index);
2251                 if (nbd) {
2252                         ret = populate_nbd_status(nbd, reply);
2253                         if (ret) {
2254                                 nlmsg_free(reply);
2255                                 goto out;
2256                         }
2257                 }
2258         }
2259         nla_nest_end(reply, dev_list);
2260         genlmsg_end(reply, reply_head);
2261         ret = genlmsg_reply(reply, info);
2262 out:
2263         mutex_unlock(&nbd_index_mutex);
2264         return ret;
2265 }
2266
2267 static void nbd_connect_reply(struct genl_info *info, int index)
2268 {
2269         struct sk_buff *skb;
2270         void *msg_head;
2271         int ret;
2272
2273         skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2274         if (!skb)
2275                 return;
2276         msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2277                                      NBD_CMD_CONNECT);
2278         if (!msg_head) {
2279                 nlmsg_free(skb);
2280                 return;
2281         }
2282         ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2283         if (ret) {
2284                 nlmsg_free(skb);
2285                 return;
2286         }
2287         genlmsg_end(skb, msg_head);
2288         genlmsg_reply(skb, info);
2289 }
2290
2291 static void nbd_mcast_index(int index)
2292 {
2293         struct sk_buff *skb;
2294         void *msg_head;
2295         int ret;
2296
2297         skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2298         if (!skb)
2299                 return;
2300         msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2301                                      NBD_CMD_LINK_DEAD);
2302         if (!msg_head) {
2303                 nlmsg_free(skb);
2304                 return;
2305         }
2306         ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2307         if (ret) {
2308                 nlmsg_free(skb);
2309                 return;
2310         }
2311         genlmsg_end(skb, msg_head);
2312         genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2313 }
2314
2315 static void nbd_dead_link_work(struct work_struct *work)
2316 {
2317         struct link_dead_args *args = container_of(work, struct link_dead_args,
2318                                                    work);
2319         nbd_mcast_index(args->index);
2320         kfree(args);
2321 }
2322
2323 static int __init nbd_init(void)
2324 {
2325         int i;
2326
2327         BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
2328
2329         if (max_part < 0) {
2330                 printk(KERN_ERR "nbd: max_part must be >= 0\n");
2331                 return -EINVAL;
2332         }
2333
2334         part_shift = 0;
2335         if (max_part > 0) {
2336                 part_shift = fls(max_part);
2337
2338                 /*
2339                  * Adjust max_part according to part_shift as it is exported
2340                  * to user space so that users can know the maximum number
2341                  * of partitions the kernel should be able to manage.
2342                  *
2343                  * Note that -1 is required because partition 0 is reserved
2344                  * for the whole disk.
2345                  */
2346                 max_part = (1UL << part_shift) - 1;
2347         }
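	/*
	 * Example: max_part=15 gives part_shift=4, i.e. 16 minors per
	 * device - minor 0 for the whole disk plus partitions 1..15.
	 */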
2348
2349         if ((1UL << part_shift) > DISK_MAX_PARTS)
2350                 return -EINVAL;
2351
2352         if (nbds_max > 1UL << (MINORBITS - part_shift))
2353                 return -EINVAL;
2354
2355         if (register_blkdev(NBD_MAJOR, "nbd"))
2356                 return -EIO;
2357
2358         if (genl_register_family(&nbd_genl_family)) {
2359                 unregister_blkdev(NBD_MAJOR, "nbd");
2360                 return -EINVAL;
2361         }
2362         nbd_dbg_init();
2363
2364         mutex_lock(&nbd_index_mutex);
2365         for (i = 0; i < nbds_max; i++)
2366                 nbd_dev_add(i);
2367         mutex_unlock(&nbd_index_mutex);
2368         return 0;
2369 }
2370
2371 static int nbd_exit_cb(int id, void *ptr, void *data)
2372 {
2373         struct list_head *list = (struct list_head *)data;
2374         struct nbd_device *nbd = ptr;
2375
2376         list_add_tail(&nbd->list, list);
2377         return 0;
2378 }
2379
2380 static void __exit nbd_cleanup(void)
2381 {
2382         struct nbd_device *nbd;
2383         LIST_HEAD(del_list);
2384
2385         nbd_dbg_close();
2386
2387         mutex_lock(&nbd_index_mutex);
2388         idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2389         mutex_unlock(&nbd_index_mutex);
2390
2391         while (!list_empty(&del_list)) {
2392                 nbd = list_first_entry(&del_list, struct nbd_device, list);
2393                 list_del_init(&nbd->list);
2394                 if (refcount_read(&nbd->refs) != 1)
2395                         printk(KERN_ERR "nbd: possibly leaking a device\n");
2396                 nbd_put(nbd);
2397         }
2398
2399         idr_destroy(&nbd_index_idr);
2400         genl_unregister_family(&nbd_genl_family);
2401         unregister_blkdev(NBD_MAJOR, "nbd");
2402 }
2403
2404 module_init(nbd_init);
2405 module_exit(nbd_cleanup);
2406
2407 MODULE_DESCRIPTION("Network Block Device");
2408 MODULE_LICENSE("GPL");
2409
2410 module_param(nbds_max, int, 0444);
2411 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2412 module_param(max_part, int, 0444);
2413 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
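
/*
 * Example (sketch): "modprobe nbd nbds_max=4 max_part=7" creates
 * nbd0..nbd3 at load time, each supporting up to 7 partitions.
 */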