nbd: pass queue_limits to blk_mq_alloc_disk
drivers/block/nbd.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Network block device - make block devices work over TCP
4  *
5  * Note that you can not swap over this thing, yet. Seems to work but
6  * deadlocks sometimes - you can not swap over TCP in general.
7  * 
8  * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
9  * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
10  *
11  * (part of code stolen from loop.c)
12  */
13
14 #define pr_fmt(fmt) "nbd: " fmt
15
16 #include <linux/major.h>
17
18 #include <linux/blkdev.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/sched.h>
22 #include <linux/sched/mm.h>
23 #include <linux/fs.h>
24 #include <linux/bio.h>
25 #include <linux/stat.h>
26 #include <linux/errno.h>
27 #include <linux/file.h>
28 #include <linux/ioctl.h>
29 #include <linux/mutex.h>
30 #include <linux/compiler.h>
31 #include <linux/completion.h>
32 #include <linux/err.h>
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
35 #include <net/sock.h>
36 #include <linux/net.h>
37 #include <linux/kthread.h>
38 #include <linux/types.h>
39 #include <linux/debugfs.h>
40 #include <linux/blk-mq.h>
41
42 #include <linux/uaccess.h>
43 #include <asm/types.h>
44
45 #include <linux/nbd.h>
46 #include <linux/nbd-netlink.h>
47 #include <net/genetlink.h>
48
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/nbd.h>
51
52 static DEFINE_IDR(nbd_index_idr);
53 static DEFINE_MUTEX(nbd_index_mutex);
54 static struct workqueue_struct *nbd_del_wq;
55 static int nbd_total_devices = 0;
56
57 struct nbd_sock {
58         struct socket *sock;
59         struct mutex tx_lock;
60         struct request *pending;
61         int sent;
62         bool dead;
63         int fallback_index;
64         int cookie;
65 };
66
67 struct recv_thread_args {
68         struct work_struct work;
69         struct nbd_device *nbd;
70         struct nbd_sock *nsock;
71         int index;
72 };
73
74 struct link_dead_args {
75         struct work_struct work;
76         int index;
77 };
78
79 #define NBD_RT_TIMEDOUT                 0
80 #define NBD_RT_DISCONNECT_REQUESTED     1
81 #define NBD_RT_DISCONNECTED             2
82 #define NBD_RT_HAS_PID_FILE             3
83 #define NBD_RT_HAS_CONFIG_REF           4
84 #define NBD_RT_BOUND                    5
85 #define NBD_RT_DISCONNECT_ON_CLOSE      6
86 #define NBD_RT_HAS_BACKEND_FILE         7
87
88 #define NBD_DESTROY_ON_DISCONNECT       0
89 #define NBD_DISCONNECT_REQUESTED        1
90
91 struct nbd_config {
92         u32 flags;
93         unsigned long runtime_flags;
94         u64 dead_conn_timeout;
95
96         struct nbd_sock **socks;
97         int num_connections;
98         atomic_t live_connections;
99         wait_queue_head_t conn_wait;
100
101         atomic_t recv_threads;
102         wait_queue_head_t recv_wq;
103         unsigned int blksize_bits;
104         loff_t bytesize;
105 #if IS_ENABLED(CONFIG_DEBUG_FS)
106         struct dentry *dbg_dir;
107 #endif
108 };
109
110 static inline unsigned int nbd_blksize(struct nbd_config *config)
111 {
112         return 1u << config->blksize_bits;
113 }
114
115 struct nbd_device {
116         struct blk_mq_tag_set tag_set;
117
118         int index;
119         refcount_t config_refs;
120         refcount_t refs;
121         struct nbd_config *config;
122         struct mutex config_lock;
123         struct gendisk *disk;
124         struct workqueue_struct *recv_workq;
125         struct work_struct remove_work;
126
127         struct list_head list;
128         struct task_struct *task_setup;
129
130         unsigned long flags;
131         pid_t pid; /* pid of nbd-client, if attached */
132
133         char *backend;
134 };
135
136 #define NBD_CMD_REQUEUED        1
137 /*
138  * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
139  * cleared in completion. Both setting and clearing of the flag are protected
140  * by cmd->lock.
141  */
142 #define NBD_CMD_INFLIGHT        2
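/*
 * Lifecycle sketch (summarizing the code below, not a new rule): the flag
 * is set in nbd_handle_cmd(), with cmd->lock held by the caller
 * nbd_queue_rq(), once nbd_send_cmd() succeeds; it is cleared, again under
 * cmd->lock, by exactly one of recv_work() on a reply, nbd_xmit_timeout()
 * on expiry, or nbd_clear_req() on teardown, so each request is completed
 * at most once.
 */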
143
144 struct nbd_cmd {
145         struct nbd_device *nbd;
146         struct mutex lock;
147         int index;
148         int cookie;
149         int retries;
150         blk_status_t status;
151         unsigned long flags;
152         u32 cmd_cookie;
153 };
154
155 #if IS_ENABLED(CONFIG_DEBUG_FS)
156 static struct dentry *nbd_dbg_dir;
157 #endif
158
159 #define nbd_name(nbd) ((nbd)->disk->disk_name)
160
161 #define NBD_DEF_BLKSIZE_BITS 10
162
163 static unsigned int nbds_max = 16;
164 static int max_part = 16;
165 static int part_shift;
166
167 static int nbd_dev_dbg_init(struct nbd_device *nbd);
168 static void nbd_dev_dbg_close(struct nbd_device *nbd);
169 static void nbd_config_put(struct nbd_device *nbd);
170 static void nbd_connect_reply(struct genl_info *info, int index);
171 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
172 static void nbd_dead_link_work(struct work_struct *work);
173 static void nbd_disconnect_and_put(struct nbd_device *nbd);
174
175 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
176 {
177         return disk_to_dev(nbd->disk);
178 }
179
180 static void nbd_requeue_cmd(struct nbd_cmd *cmd)
181 {
182         struct request *req = blk_mq_rq_from_pdu(cmd);
183
184         if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
185                 blk_mq_requeue_request(req, true);
186 }
187
188 #define NBD_COOKIE_BITS 32
189
190 static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
191 {
192         struct request *req = blk_mq_rq_from_pdu(cmd);
193         u32 tag = blk_mq_unique_tag(req);
194         u64 cookie = cmd->cmd_cookie;
195
196         return (cookie << NBD_COOKIE_BITS) | tag;
197 }
198
199 static u32 nbd_handle_to_tag(u64 handle)
200 {
201         return (u32)handle;
202 }
203
204 static u32 nbd_handle_to_cookie(u64 handle)
205 {
206         return (u32)(handle >> NBD_COOKIE_BITS);
207 }
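/*
 * Worked example (illustrative values): with cmd->cmd_cookie == 3 and
 * blk_mq_unique_tag() == 0x00010005, nbd_cmd_handle() returns
 * (3ULL << NBD_COOKIE_BITS) | 0x00010005 == 0x0000000300010005;
 * nbd_handle_to_tag() recovers 0x00010005 and nbd_handle_to_cookie()
 * recovers 3, letting a reply be matched against socket reuse.
 */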
208
209 static const char *nbdcmd_to_ascii(int cmd)
210 {
211         switch (cmd) {
212         case  NBD_CMD_READ: return "read";
213         case NBD_CMD_WRITE: return "write";
214         case  NBD_CMD_DISC: return "disconnect";
215         case NBD_CMD_FLUSH: return "flush";
216         case  NBD_CMD_TRIM: return "trim/discard";
217         }
218         return "invalid";
219 }
220
221 static ssize_t pid_show(struct device *dev,
222                         struct device_attribute *attr, char *buf)
223 {
224         struct gendisk *disk = dev_to_disk(dev);
225         struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
226
227         return sprintf(buf, "%d\n", nbd->pid);
228 }
229
230 static const struct device_attribute pid_attr = {
231         .attr = { .name = "pid", .mode = 0444},
232         .show = pid_show,
233 };
234
235 static ssize_t backend_show(struct device *dev,
236                 struct device_attribute *attr, char *buf)
237 {
238         struct gendisk *disk = dev_to_disk(dev);
239         struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
240
241         return sprintf(buf, "%s\n", nbd->backend ?: "");
242 }
243
244 static const struct device_attribute backend_attr = {
245         .attr = { .name = "backend", .mode = 0444},
246         .show = backend_show,
247 };
248
249 static void nbd_dev_remove(struct nbd_device *nbd)
250 {
251         struct gendisk *disk = nbd->disk;
252
253         del_gendisk(disk);
254         blk_mq_free_tag_set(&nbd->tag_set);
255
256         /*
257          * Remove from idr after del_gendisk() completes, so if the same ID is
258          * reused, the following add_disk() will succeed.
259          */
260         mutex_lock(&nbd_index_mutex);
261         idr_remove(&nbd_index_idr, nbd->index);
262         mutex_unlock(&nbd_index_mutex);
263         destroy_workqueue(nbd->recv_workq);
264         put_disk(disk);
265 }
266
267 static void nbd_dev_remove_work(struct work_struct *work)
268 {
269         nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
270 }
271
272 static void nbd_put(struct nbd_device *nbd)
273 {
274         if (!refcount_dec_and_test(&nbd->refs))
275                 return;
276
277         /* Call del_gendisk() asynchronously to prevent deadlock */
278         if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
279                 queue_work(nbd_del_wq, &nbd->remove_work);
280         else
281                 nbd_dev_remove(nbd);
282 }
283
284 static int nbd_disconnected(struct nbd_config *config)
285 {
286         return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
287                 test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
288 }
289
290 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
291                                 int notify)
292 {
293         if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
294                 struct link_dead_args *args;
295                 args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
296                 if (args) {
297                         INIT_WORK(&args->work, nbd_dead_link_work);
298                         args->index = nbd->index;
299                         queue_work(system_wq, &args->work);
300                 }
301         }
302         if (!nsock->dead) {
303                 kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
304                 if (atomic_dec_return(&nbd->config->live_connections) == 0) {
305                         if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
306                                                &nbd->config->runtime_flags)) {
307                                 set_bit(NBD_RT_DISCONNECTED,
308                                         &nbd->config->runtime_flags);
309                                 dev_info(nbd_to_dev(nbd),
310                                         "Disconnected due to user request.\n");
311                         }
312                 }
313         }
314         nsock->dead = true;
315         nsock->pending = NULL;
316         nsock->sent = 0;
317 }
318
319 static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
320                 loff_t blksize)
321 {
322         if (!blksize)
323                 blksize = 1u << NBD_DEF_BLKSIZE_BITS;
324
325         if (blk_validate_block_size(blksize))
326                 return -EINVAL;
327
328         if (bytesize < 0)
329                 return -EINVAL;
330
331         nbd->config->bytesize = bytesize;
332         nbd->config->blksize_bits = __ffs(blksize);
333
334         if (!nbd->pid)
335                 return 0;
336
337         if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
338                 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
339         blk_queue_logical_block_size(nbd->disk->queue, blksize);
340         blk_queue_physical_block_size(nbd->disk->queue, blksize);
341
342         if (max_part)
343                 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
344         if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
345                 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
346         return 0;
347 }
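/*
 * Worked example (illustrative values): nbd_set_size(nbd, 1 << 30, 4096)
 * stores blksize_bits = __ffs(4096) = 12 and bytesize = 1 GiB, then (once
 * a client is attached) advertises a capacity of bytesize >> 9 == 2097152
 * 512-byte sectors.
 */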
348
349 static void nbd_complete_rq(struct request *req)
350 {
351         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
352
353         dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
354                 cmd->status ? "failed" : "done");
355
356         blk_mq_end_request(req, cmd->status);
357 }
358
359 /*
360  * Forcibly shutdown the socket causing all listeners to error
361  */
362 static void sock_shutdown(struct nbd_device *nbd)
363 {
364         struct nbd_config *config = nbd->config;
365         int i;
366
367         if (config->num_connections == 0)
368                 return;
369         if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
370                 return;
371
372         for (i = 0; i < config->num_connections; i++) {
373                 struct nbd_sock *nsock = config->socks[i];
374                 mutex_lock(&nsock->tx_lock);
375                 nbd_mark_nsock_dead(nbd, nsock, 0);
376                 mutex_unlock(&nsock->tx_lock);
377         }
378         dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
379 }
380
381 static u32 req_to_nbd_cmd_type(struct request *req)
382 {
383         switch (req_op(req)) {
384         case REQ_OP_DISCARD:
385                 return NBD_CMD_TRIM;
386         case REQ_OP_FLUSH:
387                 return NBD_CMD_FLUSH;
388         case REQ_OP_WRITE:
389                 return NBD_CMD_WRITE;
390         case REQ_OP_READ:
391                 return NBD_CMD_READ;
392         default:
393                 return U32_MAX;
394         }
395 }
396
397 static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
398 {
399         if (refcount_inc_not_zero(&nbd->config_refs)) {
400                 /*
401                  * Add smp_mb__after_atomic() to ensure that reading nbd->config_refs
402                  * and reading nbd->config are ordered. It pairs with the barrier in
403                  * nbd_alloc_and_init_config(), preventing nbd->config_refs from being
404                  * seen as set before nbd->config.
405                  */
406                 smp_mb__after_atomic();
407                 return nbd->config;
408         }
409
410         return NULL;
411 }
412
413 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
414 {
415         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
416         struct nbd_device *nbd = cmd->nbd;
417         struct nbd_config *config;
418
419         if (!mutex_trylock(&cmd->lock))
420                 return BLK_EH_RESET_TIMER;
421
422         if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
423                 mutex_unlock(&cmd->lock);
424                 return BLK_EH_DONE;
425         }
426
427         config = nbd_get_config_unlocked(nbd);
428         if (!config) {
429                 cmd->status = BLK_STS_TIMEOUT;
430                 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
431                 mutex_unlock(&cmd->lock);
432                 goto done;
433         }
434
435         if (config->num_connections > 1 ||
436             (config->num_connections == 1 && nbd->tag_set.timeout)) {
437                 dev_err_ratelimited(nbd_to_dev(nbd),
438                                     "Connection timed out, retrying (%d/%d alive)\n",
439                                     atomic_read(&config->live_connections),
440                                     config->num_connections);
441                 /*
442                  * Hooray we have more connections, requeue this IO, the submit
443                  * path will put it on a real connection. Or if only one
444                  * connection is configured, the submit path will wait until
445                  * a new connection is reconfigured or the dead connection timeout expires.
446                  */
447                 if (config->socks) {
448                         if (cmd->index < config->num_connections) {
449                                 struct nbd_sock *nsock =
450                                         config->socks[cmd->index];
451                                 mutex_lock(&nsock->tx_lock);
452                                 /* We can have multiple outstanding requests, so
453                                  * we don't want to mark the nsock dead if we've
454                                  * already reconnected with a new socket; only
455                                  * mark it dead if it's the same socket we
456                                  * were sent out on.
457                                  */
458                                 if (cmd->cookie == nsock->cookie)
459                                         nbd_mark_nsock_dead(nbd, nsock, 1);
460                                 mutex_unlock(&nsock->tx_lock);
461                         }
462                         mutex_unlock(&cmd->lock);
463                         nbd_requeue_cmd(cmd);
464                         nbd_config_put(nbd);
465                         return BLK_EH_DONE;
466                 }
467         }
468
469         if (!nbd->tag_set.timeout) {
470                 /*
471                  * Userspace sets timeout=0 to disable socket disconnection,
472                  * so just warn and reset the timer.
473                  */
474                 struct nbd_sock *nsock = config->socks[cmd->index];
475                 cmd->retries++;
476                 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
477                         req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
478                         (unsigned long long)blk_rq_pos(req) << 9,
479                         blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
480
481                 mutex_lock(&nsock->tx_lock);
482                 if (cmd->cookie != nsock->cookie) {
483                         nbd_requeue_cmd(cmd);
484                         mutex_unlock(&nsock->tx_lock);
485                         mutex_unlock(&cmd->lock);
486                         nbd_config_put(nbd);
487                         return BLK_EH_DONE;
488                 }
489                 mutex_unlock(&nsock->tx_lock);
490                 mutex_unlock(&cmd->lock);
491                 nbd_config_put(nbd);
492                 return BLK_EH_RESET_TIMER;
493         }
494
495         dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
496         set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
497         cmd->status = BLK_STS_IOERR;
498         __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
499         mutex_unlock(&cmd->lock);
500         sock_shutdown(nbd);
501         nbd_config_put(nbd);
502 done:
503         blk_mq_complete_request(req);
504         return BLK_EH_DONE;
505 }
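/*
 * Decision sketch for the handler above: with multiple connections (or a
 * single connection and a user-set timeout) the command is requeued onto
 * a live socket; with tag_set.timeout == 0 the stuck request is logged
 * and the timer re-armed; otherwise NBD_RT_TIMEDOUT is set, the sockets
 * are shut down, and the request completes with BLK_STS_IOERR.
 */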
506
507 static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
508                        struct iov_iter *iter, int msg_flags, int *sent)
509 {
510         int result;
511         struct msghdr msg = {};
512         unsigned int noreclaim_flag;
513
514         if (unlikely(!sock)) {
515                 dev_err_ratelimited(disk_to_dev(nbd->disk),
516                         "Attempted %s on closed socket in sock_xmit\n",
517                         (send ? "send" : "recv"));
518                 return -EINVAL;
519         }
520
521         msg.msg_iter = *iter;
522
523         noreclaim_flag = memalloc_noreclaim_save();
524         do {
525                 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
526                 sock->sk->sk_use_task_frag = false;
527                 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
528
529                 if (send)
530                         result = sock_sendmsg(sock, &msg);
531                 else
532                         result = sock_recvmsg(sock, &msg, msg.msg_flags);
533
534                 if (result <= 0) {
535                         if (result == 0)
536                                 result = -EPIPE; /* short read */
537                         break;
538                 }
539                 if (sent)
540                         *sent += result;
541         } while (msg_data_left(&msg));
542
543         memalloc_noreclaim_restore(noreclaim_flag);
544
545         return result;
546 }
547
548 /*
549  * Send or receive packet. Return a positive value on success and a
550  * negative value on failure; never return 0.
551  */
552 static int sock_xmit(struct nbd_device *nbd, int index, int send,
553                      struct iov_iter *iter, int msg_flags, int *sent)
554 {
555         struct nbd_config *config = nbd->config;
556         struct socket *sock = config->socks[index]->sock;
557
558         return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
559 }
560
561 /*
562  * Different settings for sk->sk_sndtimeo can result in different return values
563  * if there is a signal pending when we enter sendmsg, because reasons?
564  */
565 static inline int was_interrupted(int result)
566 {
567         return result == -ERESTARTSYS || result == -EINTR;
568 }
569
570 /* always call with the tx_lock held */
571 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
572 {
573         struct request *req = blk_mq_rq_from_pdu(cmd);
574         struct nbd_config *config = nbd->config;
575         struct nbd_sock *nsock = config->socks[index];
576         int result;
577         struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
578         struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
579         struct iov_iter from;
580         unsigned long size = blk_rq_bytes(req);
581         struct bio *bio;
582         u64 handle;
583         u32 type;
584         u32 nbd_cmd_flags = 0;
585         int sent = nsock->sent, skip = 0;
586
587         iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
588
589         type = req_to_nbd_cmd_type(req);
590         if (type == U32_MAX)
591                 return -EIO;
592
593         if (rq_data_dir(req) == WRITE &&
594             (config->flags & NBD_FLAG_READ_ONLY)) {
595                 dev_err_ratelimited(disk_to_dev(nbd->disk),
596                                     "Write on read-only\n");
597                 return -EIO;
598         }
599
600         if (req->cmd_flags & REQ_FUA)
601                 nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
602
603         /* We did a partial send previously, and we at least sent the whole
604          * request struct, so just go and send the rest of the pages in the
605          * request.
606          */
607         if (sent) {
608                 if (sent >= sizeof(request)) {
609                         skip = sent - sizeof(request);
610
611                         /* initialize handle for tracing purposes */
612                         handle = nbd_cmd_handle(cmd);
613
614                         goto send_pages;
615                 }
616                 iov_iter_advance(&from, sent);
617         } else {
618                 cmd->cmd_cookie++;
619         }
620         cmd->index = index;
621         cmd->cookie = nsock->cookie;
622         cmd->retries = 0;
623         request.type = htonl(type | nbd_cmd_flags);
624         if (type != NBD_CMD_FLUSH) {
625                 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
626                 request.len = htonl(size);
627         }
628         handle = nbd_cmd_handle(cmd);
629         request.cookie = cpu_to_be64(handle);
630
631         trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
632
633         dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
634                 req, nbdcmd_to_ascii(type),
635                 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
636         result = sock_xmit(nbd, index, 1, &from,
637                         (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
638         trace_nbd_header_sent(req, handle);
639         if (result < 0) {
640                 if (was_interrupted(result)) {
641                         /* If we haven't sent anything we can just return BUSY,
642                          * however if we have sent something we need to make
643                          * sure we only allow this req to be sent until we are
644                          * completely done.
645                          */
646                         if (sent) {
647                                 nsock->pending = req;
648                                 nsock->sent = sent;
649                         }
650                         set_bit(NBD_CMD_REQUEUED, &cmd->flags);
651                         return BLK_STS_RESOURCE;
652                 }
653                 dev_err_ratelimited(disk_to_dev(nbd->disk),
654                         "Send control failed (result %d)\n", result);
655                 return -EAGAIN;
656         }
657 send_pages:
658         if (type != NBD_CMD_WRITE)
659                 goto out;
660
661         bio = req->bio;
662         while (bio) {
663                 struct bio *next = bio->bi_next;
664                 struct bvec_iter iter;
665                 struct bio_vec bvec;
666
667                 bio_for_each_segment(bvec, bio, iter) {
668                         bool is_last = !next && bio_iter_last(bvec, iter);
669                         int flags = is_last ? 0 : MSG_MORE;
670
671                         dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
672                                 req, bvec.bv_len);
673                         iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
674                         if (skip) {
675                                 if (skip >= iov_iter_count(&from)) {
676                                         skip -= iov_iter_count(&from);
677                                         continue;
678                                 }
679                                 iov_iter_advance(&from, skip);
680                                 skip = 0;
681                         }
682                         result = sock_xmit(nbd, index, 1, &from, flags, &sent);
683                         if (result < 0) {
684                                 if (was_interrupted(result)) {
685                                         /* We've already sent the header, we
686                                          * have no choice but to set pending and
687                                          * return BUSY.
688                                          */
689                                         nsock->pending = req;
690                                         nsock->sent = sent;
691                                         set_bit(NBD_CMD_REQUEUED, &cmd->flags);
692                                         return BLK_STS_RESOURCE;
693                                 }
694                                 dev_err(disk_to_dev(nbd->disk),
695                                         "Send data failed (result %d)\n",
696                                         result);
697                                 return -EAGAIN;
698                         }
699                         /*
700                          * The completion might already have come in,
701                          * so break for the last one instead of letting
702                          * the iterator do it. This prevents use-after-free
703                          * of the bio.
704                          */
705                         if (is_last)
706                                 break;
707                 }
708                 bio = next;
709         }
710 out:
711         trace_nbd_payload_sent(req, handle);
712         nsock->pending = NULL;
713         nsock->sent = 0;
714         return 0;
715 }
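/*
 * Wire example (illustrative values): a 4 KiB read at sector 2048 goes out
 * as a struct nbd_request with magic NBD_REQUEST_MAGIC, type NBD_CMD_READ,
 * from = cpu_to_be64(2048ULL << 9) (a 1 MiB offset), len = htonl(4096),
 * and the 64-bit handle above as the cookie; only NBD_CMD_WRITE requests
 * are followed by payload pages.
 */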
716
717 static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
718                           struct nbd_reply *reply)
719 {
720         struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
721         struct iov_iter to;
722         int result;
723
724         reply->magic = 0;
725         iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
726         result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
727         if (result < 0) {
728                 if (!nbd_disconnected(nbd->config))
729                         dev_err(disk_to_dev(nbd->disk),
730                                 "Receive control failed (result %d)\n", result);
731                 return result;
732         }
733
734         if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
735                 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
736                                 (unsigned long)ntohl(reply->magic));
737                 return -EPROTO;
738         }
739
740         return 0;
741 }
742
743 /* NULL returned = something went wrong, inform userspace */
744 static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
745                                         struct nbd_reply *reply)
746 {
747         int result;
748         struct nbd_cmd *cmd;
749         struct request *req = NULL;
750         u64 handle;
751         u16 hwq;
752         u32 tag;
753         int ret = 0;
754
755         handle = be64_to_cpu(reply->cookie);
756         tag = nbd_handle_to_tag(handle);
757         hwq = blk_mq_unique_tag_to_hwq(tag);
758         if (hwq < nbd->tag_set.nr_hw_queues)
759                 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
760                                        blk_mq_unique_tag_to_tag(tag));
761         if (!req || !blk_mq_request_started(req)) {
762                 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
763                         tag, req);
764                 return ERR_PTR(-ENOENT);
765         }
766         trace_nbd_header_received(req, handle);
767         cmd = blk_mq_rq_to_pdu(req);
768
769         mutex_lock(&cmd->lock);
770         if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
771                 dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
772                         tag, cmd->status, cmd->flags);
773                 ret = -ENOENT;
774                 goto out;
775         }
776         if (cmd->index != index) {
777                 dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
778                         tag, index, cmd->index);
779                 ret = -ENOENT;
780                 goto out;
781         }
782         if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
783                 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
784                         req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
785                 ret = -ENOENT;
786                 goto out;
787         }
788         if (cmd->status != BLK_STS_OK) {
789                 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
790                         req);
791                 ret = -ENOENT;
792                 goto out;
793         }
794         if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
795                 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
796                         req);
797                 ret = -ENOENT;
798                 goto out;
799         }
800         if (ntohl(reply->error)) {
801                 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
802                         ntohl(reply->error));
803                 cmd->status = BLK_STS_IOERR;
804                 goto out;
805         }
806
807         dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
808         if (rq_data_dir(req) != WRITE) {
809                 struct req_iterator iter;
810                 struct bio_vec bvec;
811                 struct iov_iter to;
812
813                 rq_for_each_segment(bvec, req, iter) {
814                         iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
815                         result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
816                         if (result < 0) {
817                                 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
818                                         result);
819                                 /*
820                                  * If we've disconnected, we need to make sure we
821                                  * complete this request, otherwise error out
822                                  * and let the timeout stuff handle resubmitting
823                                  * this request onto another connection.
824                                  */
825                                 if (nbd_disconnected(nbd->config)) {
826                                         cmd->status = BLK_STS_IOERR;
827                                         goto out;
828                                 }
829                                 ret = -EIO;
830                                 goto out;
831                         }
832                         dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
833                                 req, bvec.bv_len);
834                 }
835         }
836 out:
837         trace_nbd_payload_received(req, handle);
838         mutex_unlock(&cmd->lock);
839         return ret ? ERR_PTR(ret) : cmd;
840 }
841
842 static void recv_work(struct work_struct *work)
843 {
844         struct recv_thread_args *args = container_of(work,
845                                                      struct recv_thread_args,
846                                                      work);
847         struct nbd_device *nbd = args->nbd;
848         struct nbd_config *config = nbd->config;
849         struct request_queue *q = nbd->disk->queue;
850         struct nbd_sock *nsock = args->nsock;
851         struct nbd_cmd *cmd;
852         struct request *rq;
853
854         while (1) {
855                 struct nbd_reply reply;
856
857                 if (nbd_read_reply(nbd, nsock->sock, &reply))
858                         break;
859
860                 /*
861                  * Grab .q_usage_counter so the request pool won't go away; then no
862                  * request use-after-free is possible during nbd_handle_reply().
863                  * If the queue is frozen, there won't be any inflight requests, so we
864                  * need not handle the incoming garbage message.
865                  */
866                 if (!percpu_ref_tryget(&q->q_usage_counter)) {
867                         dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
868                                 __func__);
869                         break;
870                 }
871
872                 cmd = nbd_handle_reply(nbd, args->index, &reply);
873                 if (IS_ERR(cmd)) {
874                         percpu_ref_put(&q->q_usage_counter);
875                         break;
876                 }
877
878                 rq = blk_mq_rq_from_pdu(cmd);
879                 if (likely(!blk_should_fake_timeout(rq->q))) {
880                         bool complete;
881
882                         mutex_lock(&cmd->lock);
883                         complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
884                                                         &cmd->flags);
885                         mutex_unlock(&cmd->lock);
886                         if (complete)
887                                 blk_mq_complete_request(rq);
888                 }
889                 percpu_ref_put(&q->q_usage_counter);
890         }
891
892         mutex_lock(&nsock->tx_lock);
893         nbd_mark_nsock_dead(nbd, nsock, 1);
894         mutex_unlock(&nsock->tx_lock);
895
896         nbd_config_put(nbd);
897         atomic_dec(&config->recv_threads);
898         wake_up(&config->recv_wq);
899         kfree(args);
900 }
901
902 static bool nbd_clear_req(struct request *req, void *data)
903 {
904         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
905
906         /* don't abort an already completed request */
907         if (blk_mq_request_completed(req))
908                 return true;
909
910         mutex_lock(&cmd->lock);
911         if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
912                 mutex_unlock(&cmd->lock);
913                 return true;
914         }
915         cmd->status = BLK_STS_IOERR;
916         mutex_unlock(&cmd->lock);
917
918         blk_mq_complete_request(req);
919         return true;
920 }
921
922 static void nbd_clear_que(struct nbd_device *nbd)
923 {
924         blk_mq_quiesce_queue(nbd->disk->queue);
925         blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
926         blk_mq_unquiesce_queue(nbd->disk->queue);
927         dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
928 }
929
930 static int find_fallback(struct nbd_device *nbd, int index)
931 {
932         struct nbd_config *config = nbd->config;
933         int new_index = -1;
934         struct nbd_sock *nsock = config->socks[index];
935         int fallback = nsock->fallback_index;
936
937         if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
938                 return new_index;
939
940         if (config->num_connections <= 1) {
941                 dev_err_ratelimited(disk_to_dev(nbd->disk),
942                                     "Dead connection, failed to find a fallback\n");
943                 return new_index;
944         }
945
946         if (fallback >= 0 && fallback < config->num_connections &&
947             !config->socks[fallback]->dead)
948                 return fallback;
949
950         if (nsock->fallback_index < 0 ||
951             nsock->fallback_index >= config->num_connections ||
952             config->socks[nsock->fallback_index]->dead) {
953                 int i;
954                 for (i = 0; i < config->num_connections; i++) {
955                         if (i == index)
956                                 continue;
957                         if (!config->socks[i]->dead) {
958                                 new_index = i;
959                                 break;
960                         }
961                 }
962                 nsock->fallback_index = new_index;
963                 if (new_index < 0) {
964                         dev_err_ratelimited(disk_to_dev(nbd->disk),
965                                             "Dead connection, failed to find a fallback\n");
966                         return new_index;
967                 }
968         }
969         new_index = nsock->fallback_index;
970         return new_index;
971 }
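/*
 * Example (illustrative): with three connections where socks[1] (the
 * failing index) and socks[2] are dead, the scan settles on socks[0],
 * caches it in nsock->fallback_index and returns 0; if every other socket
 * is dead it returns -1 and the caller decides whether to wait for a
 * reconnect.
 */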
972
973 static int wait_for_reconnect(struct nbd_device *nbd)
974 {
975         struct nbd_config *config = nbd->config;
976         if (!config->dead_conn_timeout)
977                 return 0;
978
979         if (!wait_event_timeout(config->conn_wait,
980                                 test_bit(NBD_RT_DISCONNECTED,
981                                          &config->runtime_flags) ||
982                                 atomic_read(&config->live_connections) > 0,
983                                 config->dead_conn_timeout))
984                 return 0;
985
986         return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
987 }
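/*
 * Return sketch: 0 means give up (no dead_conn_timeout configured, the
 * timeout elapsed, or the device was disconnected meanwhile); nonzero
 * means a connection came back in time and the caller should retry the
 * original socket index.
 */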
988
989 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
990 {
991         struct request *req = blk_mq_rq_from_pdu(cmd);
992         struct nbd_device *nbd = cmd->nbd;
993         struct nbd_config *config;
994         struct nbd_sock *nsock;
995         int ret;
996
997         config = nbd_get_config_unlocked(nbd);
998         if (!config) {
999                 dev_err_ratelimited(disk_to_dev(nbd->disk),
1000                                     "Socks array is empty\n");
1001                 return -EINVAL;
1002         }
1003
1004         if (index >= config->num_connections) {
1005                 dev_err_ratelimited(disk_to_dev(nbd->disk),
1006                                     "Attempted send on invalid socket\n");
1007                 nbd_config_put(nbd);
1008                 return -EINVAL;
1009         }
1010         cmd->status = BLK_STS_OK;
1011 again:
1012         nsock = config->socks[index];
1013         mutex_lock(&nsock->tx_lock);
1014         if (nsock->dead) {
1015                 int old_index = index;
1016                 index = find_fallback(nbd, index);
1017                 mutex_unlock(&nsock->tx_lock);
1018                 if (index < 0) {
1019                         if (wait_for_reconnect(nbd)) {
1020                                 index = old_index;
1021                                 goto again;
1022                         }
1023                         /* All the sockets should already be down at this point,
1024                          * we just want to make sure that DISCONNECTED is set so
1025                          * any requests that come in that were queued waiting
1026                          * for the reconnect timer don't trigger the timer again
1027                          * and instead just error out.
1028                          */
1029                         sock_shutdown(nbd);
1030                         nbd_config_put(nbd);
1031                         return -EIO;
1032                 }
1033                 goto again;
1034         }
1035
1036         /* Handle the case that we have a pending request that was partially
1037          * transmitted that _has_ to be serviced first.  We need to call requeue
1038          * here so that it gets put _after_ the request that is already on the
1039          * dispatch list.
1040          */
1041         blk_mq_start_request(req);
1042         if (unlikely(nsock->pending && nsock->pending != req)) {
1043                 nbd_requeue_cmd(cmd);
1044                 ret = 0;
1045                 goto out;
1046         }
1047         /*
1048          * Some failures are related to the link going down, so anything that
1049          * returns EAGAIN can be retried on a different socket.
1050          */
1051         ret = nbd_send_cmd(nbd, cmd, index);
1052         /*
1053          * Access to this flag is protected by cmd->lock, thus it's safe to set
1054          * the flag after nbd_send_cmd() succeeds in sending the request to the server.
1055          */
1056         if (!ret)
1057                 __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
1058         else if (ret == -EAGAIN) {
1059                 dev_err_ratelimited(disk_to_dev(nbd->disk),
1060                                     "Request send failed, requeueing\n");
1061                 nbd_mark_nsock_dead(nbd, nsock, 1);
1062                 nbd_requeue_cmd(cmd);
1063                 ret = 0;
1064         }
1065 out:
1066         mutex_unlock(&nsock->tx_lock);
1067         nbd_config_put(nbd);
1068         return ret;
1069 }
1070
1071 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1072                         const struct blk_mq_queue_data *bd)
1073 {
1074         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1075         int ret;
1076
1077         /*
1078          * Since we look at the bio's to send the request over the network we
1079          * need to make sure the completion work doesn't mark this request done
1080          * before we are done doing our send.  This keeps us from dereferencing
1081          * freed data if we have particularly fast completions (ie we get the
1082          * completion before we exit sock_xmit on the last bvec) or in the case
1083          * that the server is misbehaving (or there was an error) before we're
1084          * done sending everything over the wire.
1085          */
1086         mutex_lock(&cmd->lock);
1087         clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
1088
1089         /* We can be called directly from the user space process, which means we
1090          * could possibly have signals pending so our sendmsg will fail.  In
1091          * this case we need to return that we are busy, otherwise error out as
1092          * appropriate.
1093          */
1094         ret = nbd_handle_cmd(cmd, hctx->queue_num);
1095         if (ret < 0)
1096                 ret = BLK_STS_IOERR;
1097         else if (!ret)
1098                 ret = BLK_STS_OK;
1099         mutex_unlock(&cmd->lock);
1100
1101         return ret;
1102 }
1103
1104 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
1105                                      int *err)
1106 {
1107         struct socket *sock;
1108
1109         *err = 0;
1110         sock = sockfd_lookup(fd, err);
1111         if (!sock)
1112                 return NULL;
1113
1114         if (sock->ops->shutdown == sock_no_shutdown) {
1115                 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1116                 *err = -EINVAL;
1117                 sockfd_put(sock);
1118                 return NULL;
1119         }
1120
1121         return sock;
1122 }
1123
1124 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1125                           bool netlink)
1126 {
1127         struct nbd_config *config = nbd->config;
1128         struct socket *sock;
1129         struct nbd_sock **socks;
1130         struct nbd_sock *nsock;
1131         int err;
1132
1133         /* Arg will be cast to int, check it to avoid overflow */
1134         if (arg > INT_MAX)
1135                 return -EINVAL;
1136         sock = nbd_get_socket(nbd, arg, &err);
1137         if (!sock)
1138                 return err;
1139
1140         /*
1141          * We need to make sure we don't get any errant requests while we're
1142          * reallocating the ->socks array.
1143          */
1144         blk_mq_freeze_queue(nbd->disk->queue);
1145
1146         if (!netlink && !nbd->task_setup &&
1147             !test_bit(NBD_RT_BOUND, &config->runtime_flags))
1148                 nbd->task_setup = current;
1149
1150         if (!netlink &&
1151             (nbd->task_setup != current ||
1152              test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
1153                 dev_err(disk_to_dev(nbd->disk),
1154                         "Device being setup by another task");
1155                 err = -EBUSY;
1156                 goto put_socket;
1157         }
1158
1159         nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
1160         if (!nsock) {
1161                 err = -ENOMEM;
1162                 goto put_socket;
1163         }
1164
1165         socks = krealloc(config->socks, (config->num_connections + 1) *
1166                          sizeof(struct nbd_sock *), GFP_KERNEL);
1167         if (!socks) {
1168                 kfree(nsock);
1169                 err = -ENOMEM;
1170                 goto put_socket;
1171         }
1172
1173         config->socks = socks;
1174
1175         nsock->fallback_index = -1;
1176         nsock->dead = false;
1177         mutex_init(&nsock->tx_lock);
1178         nsock->sock = sock;
1179         nsock->pending = NULL;
1180         nsock->sent = 0;
1181         nsock->cookie = 0;
1182         socks[config->num_connections++] = nsock;
1183         atomic_inc(&config->live_connections);
1184         blk_mq_unfreeze_queue(nbd->disk->queue);
1185
1186         return 0;
1187
1188 put_socket:
1189         blk_mq_unfreeze_queue(nbd->disk->queue);
1190         sockfd_put(sock);
1191         return err;
1192 }
1193
1194 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1195 {
1196         struct nbd_config *config = nbd->config;
1197         struct socket *sock, *old;
1198         struct recv_thread_args *args;
1199         int i;
1200         int err;
1201
1202         sock = nbd_get_socket(nbd, arg, &err);
1203         if (!sock)
1204                 return err;
1205
1206         args = kzalloc(sizeof(*args), GFP_KERNEL);
1207         if (!args) {
1208                 sockfd_put(sock);
1209                 return -ENOMEM;
1210         }
1211
1212         for (i = 0; i < config->num_connections; i++) {
1213                 struct nbd_sock *nsock = config->socks[i];
1214
1215                 if (!nsock->dead)
1216                         continue;
1217
1218                 mutex_lock(&nsock->tx_lock);
1219                 if (!nsock->dead) {
1220                         mutex_unlock(&nsock->tx_lock);
1221                         continue;
1222                 }
1223                 sk_set_memalloc(sock->sk);
1224                 if (nbd->tag_set.timeout)
1225                         sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1226                 atomic_inc(&config->recv_threads);
1227                 refcount_inc(&nbd->config_refs);
1228                 old = nsock->sock;
1229                 nsock->fallback_index = -1;
1230                 nsock->sock = sock;
1231                 nsock->dead = false;
1232                 INIT_WORK(&args->work, recv_work);
1233                 args->index = i;
1234                 args->nbd = nbd;
1235                 args->nsock = nsock;
1236                 nsock->cookie++;
1237                 mutex_unlock(&nsock->tx_lock);
1238                 sockfd_put(old);
1239
1240                 clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1241
1242                 /* We take the tx_lock in an error path in recv_work, so we
1243                  * need to queue_work outside of the tx_lock.
1244                  */
1245                 queue_work(nbd->recv_workq, &args->work);
1246
1247                 atomic_inc(&config->live_connections);
1248                 wake_up(&config->conn_wait);
1249                 return 0;
1250         }
1251         sockfd_put(sock);
1252         kfree(args);
1253         return -ENOSPC;
1254 }
1255
1256 static void nbd_bdev_reset(struct nbd_device *nbd)
1257 {
1258         if (disk_openers(nbd->disk) > 1)
1259                 return;
1260         set_capacity(nbd->disk, 0);
1261 }
1262
1263 static void nbd_parse_flags(struct nbd_device *nbd)
1264 {
1265         struct nbd_config *config = nbd->config;
1266         if (config->flags & NBD_FLAG_READ_ONLY)
1267                 set_disk_ro(nbd->disk, true);
1268         else
1269                 set_disk_ro(nbd->disk, false);
1270         if (config->flags & NBD_FLAG_SEND_FLUSH) {
1271                 if (config->flags & NBD_FLAG_SEND_FUA)
1272                         blk_queue_write_cache(nbd->disk->queue, true, true);
1273                 else
1274                         blk_queue_write_cache(nbd->disk->queue, true, false);
1275         }
1276         else
1277                 blk_queue_write_cache(nbd->disk->queue, false, false);
1278 }
1279
1280 static void send_disconnects(struct nbd_device *nbd)
1281 {
1282         struct nbd_config *config = nbd->config;
1283         struct nbd_request request = {
1284                 .magic = htonl(NBD_REQUEST_MAGIC),
1285                 .type = htonl(NBD_CMD_DISC),
1286         };
1287         struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1288         struct iov_iter from;
1289         int i, ret;
1290
1291         for (i = 0; i < config->num_connections; i++) {
1292                 struct nbd_sock *nsock = config->socks[i];
1293
1294                 iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
1295                 mutex_lock(&nsock->tx_lock);
1296                 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1297                 if (ret < 0)
1298                         dev_err(disk_to_dev(nbd->disk),
1299                                 "Send disconnect failed %d\n", ret);
1300                 mutex_unlock(&nsock->tx_lock);
1301         }
1302 }
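/*
 * Protocol note (per the NBD spec, not enforced here): NBD_CMD_DISC is a
 * bare struct nbd_request with no payload and no reply; the server is
 * expected to close its side, which the receive threads then observe as a
 * read error and treat as a requested disconnect.
 */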
1303
1304 static int nbd_disconnect(struct nbd_device *nbd)
1305 {
1306         struct nbd_config *config = nbd->config;
1307
1308         dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1309         set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
1310         set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
1311         send_disconnects(nbd);
1312         return 0;
1313 }
1314
1315 static void nbd_clear_sock(struct nbd_device *nbd)
1316 {
1317         sock_shutdown(nbd);
1318         nbd_clear_que(nbd);
1319         nbd->task_setup = NULL;
1320 }
1321
1322 static void nbd_config_put(struct nbd_device *nbd)
1323 {
1324         if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1325                                         &nbd->config_lock)) {
1326                 struct nbd_config *config = nbd->config;
1327                 nbd_dev_dbg_close(nbd);
1328                 invalidate_disk(nbd->disk);
1329                 if (nbd->config->bytesize)
1330                         kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
1331                 if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
1332                                        &config->runtime_flags))
1333                         device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1334                 nbd->pid = 0;
1335                 if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
1336                                        &config->runtime_flags)) {
1337                         device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
1338                         kfree(nbd->backend);
1339                         nbd->backend = NULL;
1340                 }
1341                 nbd_clear_sock(nbd);
1342                 if (config->num_connections) {
1343                         int i;
1344                         for (i = 0; i < config->num_connections; i++) {
1345                                 sockfd_put(config->socks[i]->sock);
1346                                 kfree(config->socks[i]);
1347                         }
1348                         kfree(config->socks);
1349                 }
1350                 kfree(nbd->config);
1351                 nbd->config = NULL;
1352
1353                 nbd->tag_set.timeout = 0;
1354                 blk_queue_max_discard_sectors(nbd->disk->queue, 0);
1355
1356                 mutex_unlock(&nbd->config_lock);
1357                 nbd_put(nbd);
1358                 module_put(THIS_MODULE);
1359         }
1360 }
1361
1362 static int nbd_start_device(struct nbd_device *nbd)
1363 {
1364         struct nbd_config *config = nbd->config;
1365         int num_connections = config->num_connections;
1366         int error = 0, i;
1367
1368         if (nbd->pid)
1369                 return -EBUSY;
1370         if (!config->socks)
1371                 return -EINVAL;
1372         if (num_connections > 1 &&
1373             !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
1374                 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1375                 return -EINVAL;
1376         }
1377
1378         blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1379         nbd->pid = task_pid_nr(current);
1380
1381         nbd_parse_flags(nbd);
1382
1383         error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1384         if (error) {
1385                 dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
1386                 return error;
1387         }
1388         set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
1389
1390         nbd_dev_dbg_init(nbd);
1391         for (i = 0; i < num_connections; i++) {
1392                 struct recv_thread_args *args;
1393
1394                 args = kzalloc(sizeof(*args), GFP_KERNEL);
1395                 if (!args) {
1396                         sock_shutdown(nbd);
1397                         /*
1398                          * If num_connections is m (m > 2) and the first n
1399                          * (1 < n < m) kzallocs succeeded but allocation n + 1
1400                          * failed, we still have n recv threads running. So
1401                          * flush the workqueue here to prevent those threads
1402                          * from dropping the last config_refs and trying to destroy
1403                          * the workqueue from inside the workqueue.
1404                          */
1405                         if (i)
1406                                 flush_workqueue(nbd->recv_workq);
1407                         return -ENOMEM;
1408                 }
1409                 sk_set_memalloc(config->socks[i]->sock->sk);
1410                 if (nbd->tag_set.timeout)
1411                         config->socks[i]->sock->sk->sk_sndtimeo =
1412                                 nbd->tag_set.timeout;
1413                 atomic_inc(&config->recv_threads);
1414                 refcount_inc(&nbd->config_refs);
1415                 INIT_WORK(&args->work, recv_work);
1416                 args->nbd = nbd;
1417                 args->nsock = config->socks[i];
1418                 args->index = i;
1419                 queue_work(nbd->recv_workq, &args->work);
1420         }
1421         return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
1422 }
1423
1424 static int nbd_start_device_ioctl(struct nbd_device *nbd)
1425 {
1426         struct nbd_config *config = nbd->config;
1427         int ret;
1428
1429         ret = nbd_start_device(nbd);
1430         if (ret)
1431                 return ret;
1432
1433         if (max_part)
1434                 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
1435         mutex_unlock(&nbd->config_lock);
1436         ret = wait_event_interruptible(config->recv_wq,
1437                                          atomic_read(&config->recv_threads) == 0);
1438         if (ret) {
1439                 sock_shutdown(nbd);
1440                 nbd_clear_que(nbd);
1441         }
1442
1443         flush_workqueue(nbd->recv_workq);
1444         mutex_lock(&nbd->config_lock);
1445         nbd_bdev_reset(nbd);
1446         /* user requested, ignore socket errors */
1447         if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
1448                 ret = 0;
1449         if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
1450                 ret = -ETIMEDOUT;
1451         return ret;
1452 }
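/*
 * Note that NBD_DO_IT does not return while the device is live: the
 * ioctl()ing task blocks in the wait_event_interruptible() above until
 * every recv thread has exited, which is why legacy userspace clients
 * traditionally keep a process parked in this ioctl for the lifetime of
 * the connection.
 */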
1453
1454 static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
1455 {
1456         nbd_clear_sock(nbd);
1457         disk_force_media_change(nbd->disk);
1458         nbd_bdev_reset(nbd);
1459         if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1460                                &nbd->config->runtime_flags))
1461                 nbd_config_put(nbd);
1462 }
1463
1464 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1465 {
1466         nbd->tag_set.timeout = timeout * HZ;
1467         if (timeout)
1468                 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1469         else
1470                 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
1471 }
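/*
 * The timeout argument is in seconds and is converted to jiffies here;
 * e.g. ioctl(fd, NBD_SET_TIMEOUT, 60) arms a 60 second request timeout,
 * while 0 falls back to the 30 second default rather than disabling
 * timeouts altogether.
 */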
1472
1473 /* Must be called with config_lock held */
1474 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1475                        unsigned int cmd, unsigned long arg)
1476 {
1477         struct nbd_config *config = nbd->config;
1478         loff_t bytesize;
1479
1480         switch (cmd) {
1481         case NBD_DISCONNECT:
1482                 return nbd_disconnect(nbd);
1483         case NBD_CLEAR_SOCK:
1484                 nbd_clear_sock_ioctl(nbd);
1485                 return 0;
1486         case NBD_SET_SOCK:
1487                 return nbd_add_socket(nbd, arg, false);
1488         case NBD_SET_BLKSIZE:
1489                 return nbd_set_size(nbd, config->bytesize, arg);
1490         case NBD_SET_SIZE:
1491                 return nbd_set_size(nbd, arg, nbd_blksize(config));
1492         case NBD_SET_SIZE_BLOCKS:
1493                 if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
1494                         return -EINVAL;
1495                 return nbd_set_size(nbd, bytesize, nbd_blksize(config));
1496         case NBD_SET_TIMEOUT:
1497                 nbd_set_cmd_timeout(nbd, arg);
1498                 return 0;
1499
1500         case NBD_SET_FLAGS:
1501                 config->flags = arg;
1502                 return 0;
1503         case NBD_DO_IT:
1504                 return nbd_start_device_ioctl(nbd);
1505         case NBD_CLEAR_QUE:
1506                 /*
1507                  * This is for compatibility only.  The queue is always cleared
1508                  * by NBD_DO_IT or NBD_CLEAR_SOCK.
1509                  */
1510                 return 0;
1511         case NBD_PRINT_DEBUG:
1512                 /*
1513                  * For compatibility only, we no longer keep a list of
1514                  * outstanding requests.
1515                  */
1516                 return 0;
1517         }
1518         return -ENOTTY;
1519 }
1520
1521 static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
1522                      unsigned int cmd, unsigned long arg)
1523 {
1524         struct nbd_device *nbd = bdev->bd_disk->private_data;
1525         struct nbd_config *config = nbd->config;
1526         int error = -EINVAL;
1527
1528         if (!capable(CAP_SYS_ADMIN))
1529                 return -EPERM;
1530
1531         /* The block layer will pass back some non-nbd ioctls in case we have
1532          * special handling for them, but we don't, so just return an error.
1533          */
1534         if (_IOC_TYPE(cmd) != 0xab)
1535                 return -EINVAL;
1536
1537         mutex_lock(&nbd->config_lock);
1538
1539         /* Don't allow ioctl operations on an nbd device that was created with
1540          * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1541          */
1542         if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1543             (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1544                 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1545         else
1546                 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1547         mutex_unlock(&nbd->config_lock);
1548         return error;
1549 }
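/*
 * An illustrative userspace sketch (not driver code) of the legacy ioctl
 * setup sequence; "sock" is assumed to be a socket fd already connected
 * to an NBD server, "dev_size" is a placeholder, and error handling is
 * elided:
 *
 *	int fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(fd, NBD_SET_SIZE, dev_size);
 *	ioctl(fd, NBD_SET_SOCK, sock);
 *	ioctl(fd, NBD_DO_IT);		// blocks until disconnect
 */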
1550
1551 static int nbd_alloc_and_init_config(struct nbd_device *nbd)
1552 {
1553         struct nbd_config *config;
1554
1555         if (WARN_ON(nbd->config))
1556                 return -EINVAL;
1557
1558         if (!try_module_get(THIS_MODULE))
1559                 return -ENODEV;
1560
1561         config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1562         if (!config) {
1563                 module_put(THIS_MODULE);
1564                 return -ENOMEM;
1565         }
1566
1567         atomic_set(&config->recv_threads, 0);
1568         init_waitqueue_head(&config->recv_wq);
1569         init_waitqueue_head(&config->conn_wait);
1570         config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
1571         atomic_set(&config->live_connections, 0);
1572
1573         nbd->config = config;
1574         /*
1575          * Order the nbd->config assignment against refcount_set(&nbd->config_refs, 1);
1576          * the pairing barrier is in nbd_get_config_unlocked(), so that
1577          * function cannot observe a NULL nbd->config after its
1578          * refcount_inc_not_zero() succeeds.
1579          */
1580         smp_mb__before_atomic();
1581         refcount_set(&nbd->config_refs, 1);
1582
1583         return 0;
1584 }
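/*
 * A minimal sketch of the reader side this barrier pairs with; the real
 * nbd_get_config_unlocked() appears earlier in this file:
 *
 *	if (refcount_inc_not_zero(&nbd->config_refs)) {
 *		smp_mb__after_atomic();	// pairs with the barrier above
 *		return nbd->config;	// cannot be NULL here
 *	}
 *	return NULL;
 */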
1585
1586 static int nbd_open(struct gendisk *disk, blk_mode_t mode)
1587 {
1588         struct nbd_device *nbd;
1589         struct nbd_config *config;
1590         int ret = 0;
1591
1592         mutex_lock(&nbd_index_mutex);
1593         nbd = disk->private_data;
1594         if (!nbd) {
1595                 ret = -ENXIO;
1596                 goto out;
1597         }
1598         if (!refcount_inc_not_zero(&nbd->refs)) {
1599                 ret = -ENXIO;
1600                 goto out;
1601         }
1602
1603         config = nbd_get_config_unlocked(nbd);
1604         if (!config) {
1605                 mutex_lock(&nbd->config_lock);
1606                 if (refcount_inc_not_zero(&nbd->config_refs)) {
1607                         mutex_unlock(&nbd->config_lock);
1608                         goto out;
1609                 }
1610                 ret = nbd_alloc_and_init_config(nbd);
1611                 if (ret) {
1612                         mutex_unlock(&nbd->config_lock);
1613                         goto out;
1614                 }
1615
1616                 refcount_inc(&nbd->refs);
1617                 mutex_unlock(&nbd->config_lock);
1618                 if (max_part)
1619                         set_bit(GD_NEED_PART_SCAN, &disk->state);
1620         } else if (nbd_disconnected(config)) {
1621                 if (max_part)
1622                         set_bit(GD_NEED_PART_SCAN, &disk->state);
1623         }
1624 out:
1625         mutex_unlock(&nbd_index_mutex);
1626         return ret;
1627 }
1628
1629 static void nbd_release(struct gendisk *disk)
1630 {
1631         struct nbd_device *nbd = disk->private_data;
1632
1633         if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1634                         disk_openers(disk) == 0)
1635                 nbd_disconnect_and_put(nbd);
1636
1637         nbd_config_put(nbd);
1638         nbd_put(nbd);
1639 }
1640
1641 static void nbd_free_disk(struct gendisk *disk)
1642 {
1643         struct nbd_device *nbd = disk->private_data;
1644
1645         kfree(nbd);
1646 }
1647
1648 static const struct block_device_operations nbd_fops =
1649 {
1650         .owner =        THIS_MODULE,
1651         .open =         nbd_open,
1652         .release =      nbd_release,
1653         .ioctl =        nbd_ioctl,
1654         .compat_ioctl = nbd_ioctl,
1655         .free_disk =    nbd_free_disk,
1656 };
1657
1658 #if IS_ENABLED(CONFIG_DEBUG_FS)
1659
1660 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1661 {
1662         struct nbd_device *nbd = s->private;
1663
1664         if (nbd->pid)
1665                 seq_printf(s, "recv: %d\n", nbd->pid);
1666
1667         return 0;
1668 }
1669
1670 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
1671
1672 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1673 {
1674         struct nbd_device *nbd = s->private;
1675         u32 flags = nbd->config->flags;
1676
1677         seq_printf(s, "Hex: 0x%08x\n\n", flags);
1678
1679         seq_puts(s, "Known flags:\n");
1680
1681         if (flags & NBD_FLAG_HAS_FLAGS)
1682                 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1683         if (flags & NBD_FLAG_READ_ONLY)
1684                 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1685         if (flags & NBD_FLAG_SEND_FLUSH)
1686                 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1687         if (flags & NBD_FLAG_SEND_FUA)
1688                 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1689         if (flags & NBD_FLAG_SEND_TRIM)
1690                 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1691
1692         return 0;
1693 }
1694
1695 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
1696
1697 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1698 {
1699         struct dentry *dir;
1700         struct nbd_config *config = nbd->config;
1701
1702         if (!nbd_dbg_dir)
1703                 return -EIO;
1704
1705         dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1706         if (IS_ERR(dir)) {
1707                 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1708                         nbd_name(nbd));
1709                 return -EIO;
1710         }
1711         config->dbg_dir = dir;
1712
1713         debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
1714         debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1715         debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1716         debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
1717         debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
1718
1719         return 0;
1720 }
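/*
 * The resulting layout under debugfs (typically mounted at
 * /sys/kernel/debug) is, for example for nbd0:
 *
 *	nbd/nbd0/tasks
 *	nbd/nbd0/size_bytes
 *	nbd/nbd0/timeout
 *	nbd/nbd0/blocksize_bits
 *	nbd/nbd0/flags
 */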
1721
1722 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1723 {
1724         debugfs_remove_recursive(nbd->config->dbg_dir);
1725 }
1726
1727 static int nbd_dbg_init(void)
1728 {
1729         struct dentry *dbg_dir;
1730
1731         dbg_dir = debugfs_create_dir("nbd", NULL);
1732         if (IS_ERR(dbg_dir))
1733                 return -EIO;
1734
1735         nbd_dbg_dir = dbg_dir;
1736
1737         return 0;
1738 }
1739
1740 static void nbd_dbg_close(void)
1741 {
1742         debugfs_remove_recursive(nbd_dbg_dir);
1743 }
1744
1745 #else  /* IS_ENABLED(CONFIG_DEBUG_FS) */
1746
1747 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1748 {
1749         return 0;
1750 }
1751
1752 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1753 {
1754 }
1755
1756 static int nbd_dbg_init(void)
1757 {
1758         return 0;
1759 }
1760
1761 static void nbd_dbg_close(void)
1762 {
1763 }
1764
1765 #endif
1766
1767 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1768                             unsigned int hctx_idx, unsigned int numa_node)
1769 {
1770         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1771         cmd->nbd = set->driver_data;
1772         cmd->flags = 0;
1773         mutex_init(&cmd->lock);
1774         return 0;
1775 }
1776
1777 static const struct blk_mq_ops nbd_mq_ops = {
1778         .queue_rq       = nbd_queue_rq,
1779         .complete       = nbd_complete_rq,
1780         .init_request   = nbd_init_request,
1781         .timeout        = nbd_xmit_timeout,
1782 };
1783
1784 static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
1785 {
1786         struct queue_limits lim = {
1787                 .max_hw_sectors         = 65536,
1788                 .max_user_sectors       = 256,
1789                 .max_segments           = USHRT_MAX,
1790                 .max_segment_size       = UINT_MAX,
1791         };
1792         struct nbd_device *nbd;
1793         struct gendisk *disk;
1794         int err = -ENOMEM;
1795
1796         nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1797         if (!nbd)
1798                 goto out;
1799
1800         nbd->tag_set.ops = &nbd_mq_ops;
1801         nbd->tag_set.nr_hw_queues = 1;
1802         nbd->tag_set.queue_depth = 128;
1803         nbd->tag_set.numa_node = NUMA_NO_NODE;
1804         nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1805         nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1806                 BLK_MQ_F_BLOCKING;
1807         nbd->tag_set.driver_data = nbd;
1808         INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
1809         nbd->backend = NULL;
1810
1811         err = blk_mq_alloc_tag_set(&nbd->tag_set);
1812         if (err)
1813                 goto out_free_nbd;
1814
1815         mutex_lock(&nbd_index_mutex);
1816         if (index >= 0) {
1817                 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1818                                 GFP_KERNEL);
1819                 if (err == -ENOSPC)
1820                         err = -EEXIST;
1821         } else {
1822                 err = idr_alloc(&nbd_index_idr, nbd, 0,
1823                                 (MINORMASK >> part_shift) + 1, GFP_KERNEL);
1824                 if (err >= 0)
1825                         index = err;
1826         }
1827         nbd->index = index;
1828         mutex_unlock(&nbd_index_mutex);
1829         if (err < 0)
1830                 goto out_free_tags;
1831
1832         disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
1833         if (IS_ERR(disk)) {
1834                 err = PTR_ERR(disk);
1835                 goto out_free_idr;
1836         }
1837         nbd->disk = disk;
1838
1839         nbd->recv_workq = alloc_workqueue("nbd%d-recv",
1840                                           WQ_MEM_RECLAIM | WQ_HIGHPRI |
1841                                           WQ_UNBOUND, 0, nbd->index);
1842         if (!nbd->recv_workq) {
1843                 dev_err(disk_to_dev(nbd->disk), "Could not allocate nbd recv work queue.\n");
1844                 err = -ENOMEM;
1845                 goto out_err_disk;
1846         }
1847
1848         /*
1849          * Tell the block layer that we are not a rotational device
1850          */
1851         blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1852
1853         mutex_init(&nbd->config_lock);
1854         refcount_set(&nbd->config_refs, 0);
1855         /*
1856          * Start out with zero references to keep other threads from using
1857          * this device until it is fully initialized.
1858          */
1859         refcount_set(&nbd->refs, 0);
1860         INIT_LIST_HEAD(&nbd->list);
1861         disk->major = NBD_MAJOR;
1862         disk->first_minor = index << part_shift;
1863         disk->minors = 1 << part_shift;
1864         disk->fops = &nbd_fops;
1865         disk->private_data = nbd;
1866         sprintf(disk->disk_name, "nbd%d", index);
1867         err = add_disk(disk);
1868         if (err)
1869                 goto out_free_work;
1870
1871         /*
1872          * Now publish the device.
1873          */
1874         refcount_set(&nbd->refs, refs);
1875         nbd_total_devices++;
1876         return nbd;
1877
1878 out_free_work:
1879         destroy_workqueue(nbd->recv_workq);
1880 out_err_disk:
1881         put_disk(disk);
1882 out_free_idr:
1883         mutex_lock(&nbd_index_mutex);
1884         idr_remove(&nbd_index_idr, index);
1885         mutex_unlock(&nbd_index_mutex);
1886 out_free_tags:
1887         blk_mq_free_tag_set(&nbd->tag_set);
1888 out_free_nbd:
1889         kfree(nbd);
1890 out:
1891         return ERR_PTR(err);
1892 }
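/*
 * Callers pass a non-negative index to claim a specific device (in which
 * case idr_alloc() failing with -ENOSPC is reported as -EEXIST), or -1 to
 * have the first free index picked from [0, MINORMASK >> part_shift].
 */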
1893
1894 static struct nbd_device *nbd_find_get_unused(void)
1895 {
1896         struct nbd_device *nbd;
1897         int id;
1898
1899         lockdep_assert_held(&nbd_index_mutex);
1900
1901         idr_for_each_entry(&nbd_index_idr, nbd, id) {
1902                 if (refcount_read(&nbd->config_refs) ||
1903                     test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
1904                         continue;
1905                 if (refcount_inc_not_zero(&nbd->refs))
1906                         return nbd;
1907         }
1908
1909         return NULL;
1910 }
1911
1912 /* Netlink interface. */
1913 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1914         [NBD_ATTR_INDEX]                =       { .type = NLA_U32 },
1915         [NBD_ATTR_SIZE_BYTES]           =       { .type = NLA_U64 },
1916         [NBD_ATTR_BLOCK_SIZE_BYTES]     =       { .type = NLA_U64 },
1917         [NBD_ATTR_TIMEOUT]              =       { .type = NLA_U64 },
1918         [NBD_ATTR_SERVER_FLAGS]         =       { .type = NLA_U64 },
1919         [NBD_ATTR_CLIENT_FLAGS]         =       { .type = NLA_U64 },
1920         [NBD_ATTR_SOCKETS]              =       { .type = NLA_NESTED},
1921         [NBD_ATTR_DEAD_CONN_TIMEOUT]    =       { .type = NLA_U64 },
1922         [NBD_ATTR_DEVICE_LIST]          =       { .type = NLA_NESTED},
1923         [NBD_ATTR_BACKEND_IDENTIFIER]   =       { .type = NLA_STRING},
1924 };
1925
1926 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1927         [NBD_SOCK_FD]                   =       { .type = NLA_U32 },
1928 };
1929
1930 /* We don't use this right now since we don't parse the incoming list, but we
1931  * still want it here so userspace knows what to expect.
1932  */
1933 static const struct nla_policy __attribute__((unused))
1934 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1935         [NBD_DEVICE_INDEX]              =       { .type = NLA_U32 },
1936         [NBD_DEVICE_CONNECTED]          =       { .type = NLA_U8 },
1937 };
1938
1939 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1940 {
1941         struct nbd_config *config = nbd->config;
1942         u64 bsize = nbd_blksize(config);
1943         u64 bytes = config->bytesize;
1944
1945         if (info->attrs[NBD_ATTR_SIZE_BYTES])
1946                 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1947
1948         if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
1949                 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1950
1951         if (bytes != config->bytesize || bsize != nbd_blksize(config))
1952                 return nbd_set_size(nbd, bytes, bsize);
1953         return 0;
1954 }
1955
1956 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1957 {
1958         struct nbd_device *nbd;
1959         struct nbd_config *config;
1960         int index = -1;
1961         int ret;
1962         bool put_dev = false;
1963
1964         if (!netlink_capable(skb, CAP_SYS_ADMIN))
1965                 return -EPERM;
1966
1967         if (info->attrs[NBD_ATTR_INDEX]) {
1968                 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1969
1970                 /*
1971                  * An overly large first_minor can cause duplicate creation of
1972                  * sysfs files/links, since index << part_shift might overflow, and
1973                  * MKDEV() expects first_minor to fit in at most 20 bits.
1974                  */
1975                 if (index < 0 || index > MINORMASK >> part_shift) {
1976                         pr_err("illegal input index %d\n", index);
1977                         return -EINVAL;
1978                 }
1979         }
1980         if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
1981                 pr_err("must specify at least one socket\n");
1982                 return -EINVAL;
1983         }
1984         if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
1985                 pr_err("must specify a size in bytes for the device\n");
1986                 return -EINVAL;
1987         }
1988 again:
1989         mutex_lock(&nbd_index_mutex);
1990         if (index == -1) {
1991                 nbd = nbd_find_get_unused();
1992         } else {
1993                 nbd = idr_find(&nbd_index_idr, index);
1994                 if (nbd) {
1995                         if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
1996                              test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
1997                             !refcount_inc_not_zero(&nbd->refs)) {
1998                                 mutex_unlock(&nbd_index_mutex);
1999                                 pr_err("device at index %d is going down\n",
2000                                         index);
2001                                 return -EINVAL;
2002                         }
2003                 }
2004         }
2005         mutex_unlock(&nbd_index_mutex);
2006
2007         if (!nbd) {
2008                 nbd = nbd_dev_add(index, 2);
2009                 if (IS_ERR(nbd)) {
2010                         pr_err("failed to add new device\n");
2011                         return PTR_ERR(nbd);
2012                 }
2013         }
2014
2015         mutex_lock(&nbd->config_lock);
2016         if (refcount_read(&nbd->config_refs)) {
2017                 mutex_unlock(&nbd->config_lock);
2018                 nbd_put(nbd);
2019                 if (index == -1)
2020                         goto again;
2021                 pr_err("nbd%d already in use\n", index);
2022                 return -EBUSY;
2023         }
2024
2025         ret = nbd_alloc_and_init_config(nbd);
2026         if (ret) {
2027                 mutex_unlock(&nbd->config_lock);
2028                 nbd_put(nbd);
2029                 pr_err("couldn't allocate config\n");
2030                 return ret;
2031         }
2032
2033         config = nbd->config;
2034         set_bit(NBD_RT_BOUND, &config->runtime_flags);
2035         ret = nbd_genl_size_set(info, nbd);
2036         if (ret)
2037                 goto out;
2038
2039         if (info->attrs[NBD_ATTR_TIMEOUT])
2040                 nbd_set_cmd_timeout(nbd,
2041                                     nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2042         if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2043                 config->dead_conn_timeout =
2044                         nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2045                 config->dead_conn_timeout *= HZ;
2046         }
2047         if (info->attrs[NBD_ATTR_SERVER_FLAGS])
2048                 config->flags =
2049                         nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
2050         if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2051                 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2052                 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2053                         /*
2054                          * We have one ref to keep the device around, and one
2055                          * ref for the current operation here, which will be
2056                          * inherited by the config.  If DESTROY_ON_DISCONNECT
2057                          * was already set, we know that extra ref is not held,
2058                          * so there is nothing for put_dev to drop and it stays
2059                          * false.
2060                          */
2061                         if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2062                                               &nbd->flags))
2063                                 put_dev = true;
2064                 } else {
2065                         if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2066                                                &nbd->flags))
2067                                 refcount_inc(&nbd->refs);
2068                 }
2069                 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2070                         set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2071                                 &config->runtime_flags);
2072                 }
2073         }
2074
2075         if (info->attrs[NBD_ATTR_SOCKETS]) {
2076                 struct nlattr *attr;
2077                 int rem, fd;
2078
2079                 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2080                                     rem) {
2081                         struct nlattr *socks[NBD_SOCK_MAX+1];
2082
2083                         if (nla_type(attr) != NBD_SOCK_ITEM) {
2084                                 pr_err("socks must be embedded in a SOCK_ITEM attr\n");
2085                                 ret = -EINVAL;
2086                                 goto out;
2087                         }
2088                         ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2089                                                           attr,
2090                                                           nbd_sock_policy,
2091                                                           info->extack);
2092                         if (ret != 0) {
2093                                 pr_err("error processing sock list\n");
2094                                 ret = -EINVAL;
2095                                 goto out;
2096                         }
2097                         if (!socks[NBD_SOCK_FD])
2098                                 continue;
2099                         fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2100                         ret = nbd_add_socket(nbd, fd, true);
2101                         if (ret)
2102                                 goto out;
2103                 }
2104         }
2105         ret = nbd_start_device(nbd);
2106         if (ret)
2107                 goto out;
2108         if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
2109                 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2110                                           GFP_KERNEL);
2111                 if (!nbd->backend) {
2112                         ret = -ENOMEM;
2113                         goto out;
2114                 }
2115         }
2116         ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
2117         if (ret) {
2118                 dev_err(disk_to_dev(nbd->disk),
2119                         "device_create_file failed for backend!\n");
2120                 goto out;
2121         }
2122         set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
2123 out:
2124         mutex_unlock(&nbd->config_lock);
2125         if (!ret) {
2126                 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
2127                 refcount_inc(&nbd->config_refs);
2128                 nbd_connect_reply(info, nbd->index);
2129         }
2130         nbd_config_put(nbd);
2131         if (put_dev)
2132                 nbd_put(nbd);
2133         return ret;
2134 }
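/*
 * An illustrative userspace sketch (not driver code) of driving
 * NBD_CMD_CONNECT with libnl-genl-3.  The attribute names come from
 * <linux/nbd-netlink.h>; "sock_fd" and "size" are placeholders and error
 * handling is elided:
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int family = genl_ctrl_resolve(sk, NBD_GENL_FAMILY_NAME);
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NBD_CMD_CONNECT, 0);
 *	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, size);
 *	struct nlattr *socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *	struct nlattr *item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 *	nl_send_sync(sk, msg);	// the reply carries NBD_ATTR_INDEX
 */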
2135
2136 static void nbd_disconnect_and_put(struct nbd_device *nbd)
2137 {
2138         mutex_lock(&nbd->config_lock);
2139         nbd_disconnect(nbd);
2140         sock_shutdown(nbd);
2141         wake_up(&nbd->config->conn_wait);
2142         /*
2143          * Make sure the recv threads have finished so we can safely call
2144          * nbd_clear_que() to cancel the inflight I/Os.
2145          */
2146         flush_workqueue(nbd->recv_workq);
2147         nbd_clear_que(nbd);
2148         nbd->task_setup = NULL;
2149         mutex_unlock(&nbd->config_lock);
2150
2151         if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
2152                                &nbd->config->runtime_flags))
2153                 nbd_config_put(nbd);
2154 }
2155
2156 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
2157 {
2158         struct nbd_device *nbd;
2159         int index;
2160
2161         if (!netlink_capable(skb, CAP_SYS_ADMIN))
2162                 return -EPERM;
2163
2164         if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
2165                 pr_err("must specify an index to disconnect\n");
2166                 return -EINVAL;
2167         }
2168         index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2169         mutex_lock(&nbd_index_mutex);
2170         nbd = idr_find(&nbd_index_idr, index);
2171         if (!nbd) {
2172                 mutex_unlock(&nbd_index_mutex);
2173                 pr_err("couldn't find device at index %d\n", index);
2174                 return -EINVAL;
2175         }
2176         if (!refcount_inc_not_zero(&nbd->refs)) {
2177                 mutex_unlock(&nbd_index_mutex);
2178                 pr_err("device at index %d is going down\n", index);
2179                 return -EINVAL;
2180         }
2181         mutex_unlock(&nbd_index_mutex);
2182         if (!refcount_inc_not_zero(&nbd->config_refs))
2183                 goto put_nbd;
2184         nbd_disconnect_and_put(nbd);
2185         nbd_config_put(nbd);
2186 put_nbd:
2187         nbd_put(nbd);
2188         return 0;
2189 }
2190
2191 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2192 {
2193         struct nbd_device *nbd = NULL;
2194         struct nbd_config *config;
2195         int index;
2196         int ret = 0;
2197         bool put_dev = false;
2198
2199         if (!netlink_capable(skb, CAP_SYS_ADMIN))
2200                 return -EPERM;
2201
2202         if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
2203                 pr_err("must specify a device to reconfigure\n");
2204                 return -EINVAL;
2205         }
2206         index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2207         mutex_lock(&nbd_index_mutex);
2208         nbd = idr_find(&nbd_index_idr, index);
2209         if (!nbd) {
2210                 mutex_unlock(&nbd_index_mutex);
2211                 pr_err("couldn't find a device at index %d\n", index);
2212                 return -EINVAL;
2213         }
2214         if (nbd->backend) {
2215                 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
2216                         if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2217                                        nbd->backend)) {
2218                                 mutex_unlock(&nbd_index_mutex);
2219                                 dev_err(nbd_to_dev(nbd),
2220                                         "backend image doesn't match with %s\n",
2221                                         nbd->backend);
2222                                 return -EINVAL;
2223                         }
2224                 } else {
2225                         mutex_unlock(&nbd_index_mutex);
2226                         dev_err(nbd_to_dev(nbd), "must specify backend\n");
2227                         return -EINVAL;
2228                 }
2229         }
2230         if (!refcount_inc_not_zero(&nbd->refs)) {
2231                 mutex_unlock(&nbd_index_mutex);
2232                 pr_err("device at index %d is going down\n", index);
2233                 return -EINVAL;
2234         }
2235         mutex_unlock(&nbd_index_mutex);
2236
2237         config = nbd_get_config_unlocked(nbd);
2238         if (!config) {
2239                 dev_err(nbd_to_dev(nbd),
2240                         "not configured, cannot reconfigure\n");
2241                 nbd_put(nbd);
2242                 return -EINVAL;
2243         }
2244
2245         mutex_lock(&nbd->config_lock);
2246         if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
2247             !nbd->pid) {
2248                 dev_err(nbd_to_dev(nbd),
2249                         "not configured, cannot reconfigure\n");
2250                 ret = -EINVAL;
2251                 goto out;
2252         }
2253
2254         ret = nbd_genl_size_set(info, nbd);
2255         if (ret)
2256                 goto out;
2257
2258         if (info->attrs[NBD_ATTR_TIMEOUT])
2259                 nbd_set_cmd_timeout(nbd,
2260                                     nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2261         if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2262                 config->dead_conn_timeout =
2263                         nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2264                 config->dead_conn_timeout *= HZ;
2265         }
2266         if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2267                 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2268                 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2269                         if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2270                                               &nbd->flags))
2271                                 put_dev = true;
2272                 } else {
2273                         if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2274                                                &nbd->flags))
2275                                 refcount_inc(&nbd->refs);
2276                 }
2277
2278                 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2279                         set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2280                                         &config->runtime_flags);
2281                 } else {
2282                         clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2283                                         &config->runtime_flags);
2284                 }
2285         }
2286
2287         if (info->attrs[NBD_ATTR_SOCKETS]) {
2288                 struct nlattr *attr;
2289                 int rem, fd;
2290
2291                 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2292                                     rem) {
2293                         struct nlattr *socks[NBD_SOCK_MAX+1];
2294
2295                         if (nla_type(attr) != NBD_SOCK_ITEM) {
2296                                 pr_err("socks must be embedded in a SOCK_ITEM attr\n");
2297                                 ret = -EINVAL;
2298                                 goto out;
2299                         }
2300                         ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2301                                                           attr,
2302                                                           nbd_sock_policy,
2303                                                           info->extack);
2304                         if (ret != 0) {
2305                                 pr_err("error processing sock list\n");
2306                                 ret = -EINVAL;
2307                                 goto out;
2308                         }
2309                         if (!socks[NBD_SOCK_FD])
2310                                 continue;
2311                         fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2312                         ret = nbd_reconnect_socket(nbd, fd);
2313                         if (ret) {
2314                                 if (ret == -ENOSPC)
2315                                         ret = 0;
2316                                 goto out;
2317                         }
2318                         dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2319                 }
2320         }
2321 out:
2322         mutex_unlock(&nbd->config_lock);
2323         nbd_config_put(nbd);
2324         nbd_put(nbd);
2325         if (put_dev)
2326                 nbd_put(nbd);
2327         return ret;
2328 }
2329
2330 static const struct genl_small_ops nbd_connect_genl_ops[] = {
2331         {
2332                 .cmd    = NBD_CMD_CONNECT,
2333                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2334                 .doit   = nbd_genl_connect,
2335         },
2336         {
2337                 .cmd    = NBD_CMD_DISCONNECT,
2338                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2339                 .doit   = nbd_genl_disconnect,
2340         },
2341         {
2342                 .cmd    = NBD_CMD_RECONFIGURE,
2343                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2344                 .doit   = nbd_genl_reconfigure,
2345         },
2346         {
2347                 .cmd    = NBD_CMD_STATUS,
2348                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2349                 .doit   = nbd_genl_status,
2350         },
2351 };
2352
2353 static const struct genl_multicast_group nbd_mcast_grps[] = {
2354         { .name = NBD_GENL_MCAST_GROUP_NAME, },
2355 };
2356
2357 static struct genl_family nbd_genl_family __ro_after_init = {
2358         .hdrsize        = 0,
2359         .name           = NBD_GENL_FAMILY_NAME,
2360         .version        = NBD_GENL_VERSION,
2361         .module         = THIS_MODULE,
2362         .small_ops      = nbd_connect_genl_ops,
2363         .n_small_ops    = ARRAY_SIZE(nbd_connect_genl_ops),
2364         .resv_start_op  = NBD_CMD_STATUS + 1,
2365         .maxattr        = NBD_ATTR_MAX,
2366         .netnsok        = 1,
2367         .policy         = nbd_attr_policy,
2368         .mcgrps         = nbd_mcast_grps,
2369         .n_mcgrps       = ARRAY_SIZE(nbd_mcast_grps),
2370 };
2371 MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME);
2372
2373 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2374 {
2375         struct nlattr *dev_opt;
2376         u8 connected = 0;
2377         int ret;
2378
2379         /* This is a little racy, but for status it's ok.  The
2380          * reason we don't take a ref here is that we can't take
2381          * one in the index == -1 case: we would need to drop it
2382          * under nbd_index_mutex, which could deadlock if we are
2383          * configured to remove ourselves once we're
2384          * disconnected.
2385          */
2386         if (refcount_read(&nbd->config_refs))
2387                 connected = 1;
2388         dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
2389         if (!dev_opt)
2390                 return -EMSGSIZE;
2391         ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2392         if (ret)
2393                 return -EMSGSIZE;
2394         ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2395                          connected);
2396         if (ret)
2397                 return -EMSGSIZE;
2398         nla_nest_end(reply, dev_opt);
2399         return 0;
2400 }
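/*
 * Each device thus contributes one NBD_DEVICE_ITEM nest carrying
 * NBD_DEVICE_INDEX (u32) and NBD_DEVICE_CONNECTED (u8); nbd_genl_status()
 * below wraps all of them in a single NBD_ATTR_DEVICE_LIST nest.
 */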
2401
2402 static int status_cb(int id, void *ptr, void *data)
2403 {
2404         struct nbd_device *nbd = ptr;
2405         return populate_nbd_status(nbd, (struct sk_buff *)data);
2406 }
2407
2408 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2409 {
2410         struct nlattr *dev_list;
2411         struct sk_buff *reply;
2412         void *reply_head;
2413         size_t msg_size;
2414         int index = -1;
2415         int ret = -ENOMEM;
2416
2417         if (info->attrs[NBD_ATTR_INDEX])
2418                 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2419
2420         mutex_lock(&nbd_index_mutex);
2421
2422         msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2423                                   nla_attr_size(sizeof(u8)));
2424         msg_size *= (index == -1) ? nbd_total_devices : 1;
2425
2426         reply = genlmsg_new(msg_size, GFP_KERNEL);
2427         if (!reply)
2428                 goto out;
2429         reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2430                                        NBD_CMD_STATUS);
2431         if (!reply_head) {
2432                 nlmsg_free(reply);
2433                 goto out;
2434         }
2435
2436         dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
2437         if (!dev_list) {
2438                 nlmsg_free(reply);
2439                 ret = -EMSGSIZE;
2440                 goto out;
2441         }
2442
2443         if (index == -1) {
2444                 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2445                 if (ret) {
2446                         nlmsg_free(reply);
2447                         goto out;
2448                 }
2449         } else {
2450                 struct nbd_device *nbd;
2451                 nbd = idr_find(&nbd_index_idr, index);
2452                 if (nbd) {
2453                         ret = populate_nbd_status(nbd, reply);
2454                         if (ret) {
2455                                 nlmsg_free(reply);
2456                                 goto out;
2457                         }
2458                 }
2459         }
2460         nla_nest_end(reply, dev_list);
2461         genlmsg_end(reply, reply_head);
2462         ret = genlmsg_reply(reply, info);
2463 out:
2464         mutex_unlock(&nbd_index_mutex);
2465         return ret;
2466 }
2467
2468 static void nbd_connect_reply(struct genl_info *info, int index)
2469 {
2470         struct sk_buff *skb;
2471         void *msg_head;
2472         int ret;
2473
2474         skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2475         if (!skb)
2476                 return;
2477         msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2478                                      NBD_CMD_CONNECT);
2479         if (!msg_head) {
2480                 nlmsg_free(skb);
2481                 return;
2482         }
2483         ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2484         if (ret) {
2485                 nlmsg_free(skb);
2486                 return;
2487         }
2488         genlmsg_end(skb, msg_head);
2489         genlmsg_reply(skb, info);
2490 }
2491
2492 static void nbd_mcast_index(int index)
2493 {
2494         struct sk_buff *skb;
2495         void *msg_head;
2496         int ret;
2497
2498         skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2499         if (!skb)
2500                 return;
2501         msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2502                                      NBD_CMD_LINK_DEAD);
2503         if (!msg_head) {
2504                 nlmsg_free(skb);
2505                 return;
2506         }
2507         ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2508         if (ret) {
2509                 nlmsg_free(skb);
2510                 return;
2511         }
2512         genlmsg_end(skb, msg_head);
2513         genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2514 }
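/*
 * NBD_CMD_LINK_DEAD is multicast on the NBD_GENL_MCAST_GROUP_NAME group
 * (nbd_mcast_grps[0] above, registered as group 0), so a daemon listening
 * there learns the index of a dead link and can supply a replacement
 * socket via NBD_CMD_RECONFIGURE.
 */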
2515
2516 static void nbd_dead_link_work(struct work_struct *work)
2517 {
2518         struct link_dead_args *args = container_of(work, struct link_dead_args,
2519                                                    work);
2520         nbd_mcast_index(args->index);
2521         kfree(args);
2522 }
2523
2524 static int __init nbd_init(void)
2525 {
2526         int i;
2527
2528         BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
2529
2530         if (max_part < 0) {
2531                 pr_err("max_part must be >= 0\n");
2532                 return -EINVAL;
2533         }
2534
2535         part_shift = 0;
2536         if (max_part > 0) {
2537                 part_shift = fls(max_part);
2538
2539                 /*
2540                  * Adjust max_part according to part_shift: it is exported
2541                  * to user space so that users can know the maximum number
2542                  * of partitions the kernel can manage per device.
2543                  *
2544                  * Note that -1 is required because partition 0 is reserved
2545                  * for the whole disk.
2546                  */
2547                 max_part = (1UL << part_shift) - 1;
2548         }
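        /*
         * Worked example: max_part=16 gives part_shift = fls(16) = 5, so
         * each device spans 1 << 5 = 32 minors and max_part is reported
         * back as 31 usable partitions (partition 0 being the whole disk).
         */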
2549
2550         if ((1UL << part_shift) > DISK_MAX_PARTS)
2551                 return -EINVAL;
2552
2553         if (nbds_max > 1UL << (MINORBITS - part_shift))
2554                 return -EINVAL;
2555
2556         if (register_blkdev(NBD_MAJOR, "nbd"))
2557                 return -EIO;
2558
2559         nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
2560         if (!nbd_del_wq) {
2561                 unregister_blkdev(NBD_MAJOR, "nbd");
2562                 return -ENOMEM;
2563         }
2564
2565         if (genl_register_family(&nbd_genl_family)) {
2566                 destroy_workqueue(nbd_del_wq);
2567                 unregister_blkdev(NBD_MAJOR, "nbd");
2568                 return -EINVAL;
2569         }
2570         nbd_dbg_init();
2571
2572         for (i = 0; i < nbds_max; i++)
2573                 nbd_dev_add(i, 1);
2574         return 0;
2575 }
2576
2577 static int nbd_exit_cb(int id, void *ptr, void *data)
2578 {
2579         struct list_head *list = (struct list_head *)data;
2580         struct nbd_device *nbd = ptr;
2581
2582         /* Skip an nbd that is being removed asynchronously */
2583         if (refcount_read(&nbd->refs))
2584                 list_add_tail(&nbd->list, list);
2585
2586         return 0;
2587 }
2588
2589 static void __exit nbd_cleanup(void)
2590 {
2591         struct nbd_device *nbd;
2592         LIST_HEAD(del_list);
2593
2594         /*
2595          * Unregister the netlink interface prior to waiting
2596          * for in-flight netlink commands to complete.
2597          */
2598         genl_unregister_family(&nbd_genl_family);
2599
2600         nbd_dbg_close();
2601
2602         mutex_lock(&nbd_index_mutex);
2603         idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2604         mutex_unlock(&nbd_index_mutex);
2605
2606         while (!list_empty(&del_list)) {
2607                 nbd = list_first_entry(&del_list, struct nbd_device, list);
2608                 list_del_init(&nbd->list);
2609                 if (refcount_read(&nbd->config_refs))
2610                         pr_err("possibly leaking nbd_config (ref %d)\n",
2611                                         refcount_read(&nbd->config_refs));
2612                 if (refcount_read(&nbd->refs) != 1)
2613                         pr_err("possibly leaking a device\n");
2614                 nbd_put(nbd);
2615         }
2616
2617         /* Also wait for nbd_dev_remove_work() to complete */
2618         destroy_workqueue(nbd_del_wq);
2619
2620         idr_destroy(&nbd_index_idr);
2621         unregister_blkdev(NBD_MAJOR, "nbd");
2622 }
2623
2624 module_init(nbd_init);
2625 module_exit(nbd_cleanup);
2626
2627 MODULE_DESCRIPTION("Network Block Device");
2628 MODULE_LICENSE("GPL");
2629
2630 module_param(nbds_max, int, 0444);
2631 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2632 module_param(max_part, int, 0444);
2633 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");