1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Network block device - make block devices work over TCP
5 * Note that you cannot swap over this thing, yet. It seems to work but
6 * deadlocks sometimes - you cannot swap over TCP in general.
8 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
9 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
11 * (part of code stolen from loop.c)
14 #include <linux/major.h>
16 #include <linux/blkdev.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/sched.h>
20 #include <linux/sched/mm.h>
22 #include <linux/bio.h>
23 #include <linux/stat.h>
24 #include <linux/errno.h>
25 #include <linux/file.h>
26 #include <linux/ioctl.h>
27 #include <linux/mutex.h>
28 #include <linux/compiler.h>
29 #include <linux/completion.h>
30 #include <linux/err.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
34 #include <linux/net.h>
35 #include <linux/kthread.h>
36 #include <linux/types.h>
37 #include <linux/debugfs.h>
38 #include <linux/blk-mq.h>
40 #include <linux/uaccess.h>
41 #include <asm/types.h>
43 #include <linux/nbd.h>
44 #include <linux/nbd-netlink.h>
45 #include <net/genetlink.h>
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/nbd.h>
50 static DEFINE_IDR(nbd_index_idr);
51 static DEFINE_MUTEX(nbd_index_mutex);
52 static int nbd_total_devices = 0;
57 struct request *pending;
64 struct recv_thread_args {
65 struct work_struct work;
66 struct nbd_device *nbd;
70 struct link_dead_args {
71 struct work_struct work;
75 #define NBD_RT_TIMEDOUT 0
76 #define NBD_RT_DISCONNECT_REQUESTED 1
77 #define NBD_RT_DISCONNECTED 2
78 #define NBD_RT_HAS_PID_FILE 3
79 #define NBD_RT_HAS_CONFIG_REF 4
80 #define NBD_RT_BOUND 5
81 #define NBD_RT_DESTROY_ON_DISCONNECT 6
82 #define NBD_RT_DISCONNECT_ON_CLOSE 7
84 #define NBD_DESTROY_ON_DISCONNECT 0
85 #define NBD_DISCONNECT_REQUESTED 1
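/*
 * A note on the two flag namespaces above: the NBD_RT_* bits live in
 * nbd_config->runtime_flags and go away with the config, while the
 * NBD_* bits live in nbd_device->flags and persist across
 * configurations, e.g. so a requested disconnect is still visible
 * when the device is set up again.
 */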
89 unsigned long runtime_flags;
90 u64 dead_conn_timeout;
92 struct nbd_sock **socks;
94 atomic_t live_connections;
95 wait_queue_head_t conn_wait;
97 atomic_t recv_threads;
98 wait_queue_head_t recv_wq;
101 #if IS_ENABLED(CONFIG_DEBUG_FS)
102 struct dentry *dbg_dir;
107 struct blk_mq_tag_set tag_set;
110 refcount_t config_refs;
112 struct nbd_config *config;
113 struct mutex config_lock;
114 struct gendisk *disk;
115 struct workqueue_struct *recv_workq;
117 struct list_head list;
118 struct task_struct *task_recv;
119 struct task_struct *task_setup;
121 struct completion *destroy_complete;
125 #define NBD_CMD_REQUEUED 1
128 struct nbd_device *nbd;
138 #if IS_ENABLED(CONFIG_DEBUG_FS)
139 static struct dentry *nbd_dbg_dir;
142 #define nbd_name(nbd) ((nbd)->disk->disk_name)
144 #define NBD_MAGIC 0x68797548
146 #define NBD_DEF_BLKSIZE 1024
148 static unsigned int nbds_max = 16;
149 static int max_part = 16;
150 static int part_shift;
152 static int nbd_dev_dbg_init(struct nbd_device *nbd);
153 static void nbd_dev_dbg_close(struct nbd_device *nbd);
154 static void nbd_config_put(struct nbd_device *nbd);
155 static void nbd_connect_reply(struct genl_info *info, int index);
156 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
157 static void nbd_dead_link_work(struct work_struct *work);
158 static void nbd_disconnect_and_put(struct nbd_device *nbd);
160 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
162 return disk_to_dev(nbd->disk);
165 static void nbd_requeue_cmd(struct nbd_cmd *cmd)
167 struct request *req = blk_mq_rq_from_pdu(cmd);
169 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
170 blk_mq_requeue_request(req, true);
173 #define NBD_COOKIE_BITS 32
175 static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
177 struct request *req = blk_mq_rq_from_pdu(cmd);
178 u32 tag = blk_mq_unique_tag(req);
179 u64 cookie = cmd->cmd_cookie;
181 return (cookie << NBD_COOKIE_BITS) | tag;
184 static u32 nbd_handle_to_tag(u64 handle)
189 static u32 nbd_handle_to_cookie(u64 handle)
191 return (u32)(handle >> NBD_COOKIE_BITS);
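/*
 * Worked example for the handle helpers above: the 64-bit wire handle
 * packs the per-command cookie into the upper 32 bits and the blk-mq
 * unique tag into the lower 32 bits, so cookie 0x2 with tag 0x00010005
 * yields handle 0x0000000200010005. Comparing the cookie from a reply
 * against cmd->cmd_cookie lets the receive path reject a stale reply
 * addressed to an earlier incarnation of the same tag.
 */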
194 static const char *nbdcmd_to_ascii(int cmd)
197 case NBD_CMD_READ: return "read";
198 case NBD_CMD_WRITE: return "write";
199 case NBD_CMD_DISC: return "disconnect";
200 case NBD_CMD_FLUSH: return "flush";
201 case NBD_CMD_TRIM: return "trim/discard";
206 static ssize_t pid_show(struct device *dev,
207 struct device_attribute *attr, char *buf)
209 struct gendisk *disk = dev_to_disk(dev);
210 struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
212 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
215 static const struct device_attribute pid_attr = {
216 .attr = { .name = "pid", .mode = 0444},
220 static void nbd_dev_remove(struct nbd_device *nbd)
222 struct gendisk *disk = nbd->disk;
223 struct request_queue *q;
228 blk_cleanup_queue(q);
229 blk_mq_free_tag_set(&nbd->tag_set);
230 disk->private_data = NULL;
235 * Do this last, just before the nbd is freed, to make sure that
236 * the disk and the related kobject are totally removed and that
237 * the same device cannot be created twice.
240 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
241 complete(nbd->destroy_complete);
246 static void nbd_put(struct nbd_device *nbd)
248 if (refcount_dec_and_mutex_lock(&nbd->refs,
250 idr_remove(&nbd_index_idr, nbd->index);
252 mutex_unlock(&nbd_index_mutex);
256 static int nbd_disconnected(struct nbd_config *config)
258 return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
259 test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
262 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
265 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
266 struct link_dead_args *args;
267 args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
269 INIT_WORK(&args->work, nbd_dead_link_work);
270 args->index = nbd->index;
271 queue_work(system_wq, &args->work);
275 kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
276 if (atomic_dec_return(&nbd->config->live_connections) == 0) {
277 if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
278 &nbd->config->runtime_flags)) {
279 set_bit(NBD_RT_DISCONNECTED,
280 &nbd->config->runtime_flags);
281 dev_info(nbd_to_dev(nbd),
282 "Disconnected due to user request.\n");
287 nsock->pending = NULL;
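/*
 * Summary of the teardown above (a reading aid, not new behavior):
 * marking a socket dead shuts it down in both directions, and when the
 * last live connection goes away after a user-requested disconnect the
 * whole device is flagged NBD_RT_DISCONNECTED. With @notify set, a
 * work item multicasts the dead-link netlink event so userspace gets a
 * chance to hand in a replacement socket.
 */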
291 static void nbd_size_clear(struct nbd_device *nbd)
293 if (nbd->config->bytesize) {
294 set_capacity(nbd->disk, 0);
295 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
299 static void nbd_size_update(struct nbd_device *nbd)
301 struct nbd_config *config = nbd->config;
302 struct block_device *bdev = bdget_disk(nbd->disk, 0);
304 if (config->flags & NBD_FLAG_SEND_TRIM) {
305 nbd->disk->queue->limits.discard_granularity = config->blksize;
306 nbd->disk->queue->limits.discard_alignment = config->blksize;
307 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
309 blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
310 blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
311 set_capacity(nbd->disk, config->bytesize >> 9);
314 bd_set_size(bdev, config->bytesize);
315 set_blocksize(bdev, config->blksize);
317 bdev->bd_invalidated = 1;
320 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
323 static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
326 struct nbd_config *config = nbd->config;
327 config->blksize = blocksize;
328 config->bytesize = blocksize * nr_blocks;
329 if (nbd->task_recv != NULL)
330 nbd_size_update(nbd);
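/*
 * Size arithmetic sketch: with blocksize 4096 and nr_blocks 262144 the
 * device is 4096 * 262144 = 1 GiB, and set_capacity() is fed
 * bytesize >> 9 because the block layer counts capacity in 512-byte
 * sectors regardless of the logical block size.
 */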
333 static void nbd_complete_rq(struct request *req)
335 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
337 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
338 cmd->status ? "failed" : "done");
340 blk_mq_end_request(req, cmd->status);
344 * Forcibly shut down the sockets, causing all listeners to error out.
346 static void sock_shutdown(struct nbd_device *nbd)
348 struct nbd_config *config = nbd->config;
351 if (config->num_connections == 0)
353 if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
356 for (i = 0; i < config->num_connections; i++) {
357 struct nbd_sock *nsock = config->socks[i];
358 mutex_lock(&nsock->tx_lock);
359 nbd_mark_nsock_dead(nbd, nsock, 0);
360 mutex_unlock(&nsock->tx_lock);
362 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
365 static u32 req_to_nbd_cmd_type(struct request *req)
367 switch (req_op(req)) {
371 return NBD_CMD_FLUSH;
373 return NBD_CMD_WRITE;
381 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
384 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
385 struct nbd_device *nbd = cmd->nbd;
386 struct nbd_config *config;
388 if (!mutex_trylock(&cmd->lock))
389 return BLK_EH_RESET_TIMER;
391 if (!refcount_inc_not_zero(&nbd->config_refs)) {
392 cmd->status = BLK_STS_TIMEOUT;
393 mutex_unlock(&cmd->lock);
396 config = nbd->config;
398 if (config->num_connections > 1) {
399 dev_err_ratelimited(nbd_to_dev(nbd),
400 "Connection timed out, retrying (%d/%d alive)\n",
401 atomic_read(&config->live_connections),
402 config->num_connections);
404 * We have more connections, so requeue this IO; the submit
405 * path will put it on a live connection.
407 if (config->socks && config->num_connections > 1) {
408 if (cmd->index < config->num_connections) {
409 struct nbd_sock *nsock =
410 config->socks[cmd->index];
411 mutex_lock(&nsock->tx_lock);
412 /* We can have multiple outstanding requests, so
413 * we don't want to mark the nsock dead if we've
414 * already reconnected with a new socket, so
415 * only mark it dead if it's the same socket we
418 if (cmd->cookie == nsock->cookie)
419 nbd_mark_nsock_dead(nbd, nsock, 1);
420 mutex_unlock(&nsock->tx_lock);
422 mutex_unlock(&cmd->lock);
423 nbd_requeue_cmd(cmd);
429 if (!nbd->tag_set.timeout) {
431 * Userspace sets timeout=0 to disable socket disconnection,
432 * so just warn and reset the timer.
435 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
436 req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
437 (unsigned long long)blk_rq_pos(req) << 9,
438 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
440 mutex_unlock(&cmd->lock);
442 return BLK_EH_RESET_TIMER;
445 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
446 set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
447 cmd->status = BLK_STS_IOERR;
448 mutex_unlock(&cmd->lock);
452 blk_mq_complete_request(req);
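/*
 * Recap of the timeout policy implemented above: with more than one
 * connection the request is requeued and the (possibly stuck) socket
 * is marked dead; with timeout=0 the driver only warns and resets the
 * timer; otherwise NBD_RT_TIMEDOUT is set and the request completes
 * with BLK_STS_IOERR.
 */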
457 * Send or receive packet.
459 static int sock_xmit(struct nbd_device *nbd, int index, int send,
460 struct iov_iter *iter, int msg_flags, int *sent)
462 struct nbd_config *config = nbd->config;
463 struct socket *sock = config->socks[index]->sock;
466 unsigned int noreclaim_flag;
468 if (unlikely(!sock)) {
469 dev_err_ratelimited(disk_to_dev(nbd->disk),
470 "Attempted %s on closed socket in sock_xmit\n",
471 (send ? "send" : "recv"));
475 msg.msg_iter = *iter;
477 noreclaim_flag = memalloc_noreclaim_save();
479 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
482 msg.msg_control = NULL;
483 msg.msg_controllen = 0;
484 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
487 result = sock_sendmsg(sock, &msg);
489 result = sock_recvmsg(sock, &msg, msg.msg_flags);
493 result = -EPIPE; /* short read */
498 } while (msg_data_left(&msg));
500 memalloc_noreclaim_restore(noreclaim_flag);
506 * Different settings for sk->sk_sndtimeo can result in different return values
507 * if there is a signal pending when we enter sendmsg; treat both errnos alike.
509 static inline int was_interrupted(int result)
511 return result == -ERESTARTSYS || result == -EINTR;
514 /* always call with the tx_lock held */
515 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
517 struct request *req = blk_mq_rq_from_pdu(cmd);
518 struct nbd_config *config = nbd->config;
519 struct nbd_sock *nsock = config->socks[index];
521 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
522 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
523 struct iov_iter from;
524 unsigned long size = blk_rq_bytes(req);
528 u32 nbd_cmd_flags = 0;
529 int sent = nsock->sent, skip = 0;
531 iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
533 type = req_to_nbd_cmd_type(req);
537 if (rq_data_dir(req) == WRITE &&
538 (config->flags & NBD_FLAG_READ_ONLY)) {
539 dev_err_ratelimited(disk_to_dev(nbd->disk),
540 "Write on read-only\n");
544 if (req->cmd_flags & REQ_FUA)
545 nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
547 /* We did a partial send previously, and we at least sent the whole
548 * request struct, so just go and send the rest of the pages in the
552 if (sent >= sizeof(request)) {
553 skip = sent - sizeof(request);
555 /* initialize handle for tracing purposes */
556 handle = nbd_cmd_handle(cmd);
560 iov_iter_advance(&from, sent);
565 cmd->cookie = nsock->cookie;
567 request.type = htonl(type | nbd_cmd_flags);
568 if (type != NBD_CMD_FLUSH) {
569 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
570 request.len = htonl(size);
572 handle = nbd_cmd_handle(cmd);
573 memcpy(request.handle, &handle, sizeof(handle));
575 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
577 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
578 req, nbdcmd_to_ascii(type),
579 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
580 result = sock_xmit(nbd, index, 1, &from,
581 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
582 trace_nbd_header_sent(req, handle);
584 if (was_interrupted(result)) {
585 /* If we haven't sent anything we can just return BUSY,
586 * however if we have sent something we need to make
587 * sure we only allow this req to be sent until we are
591 nsock->pending = req;
594 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
595 return BLK_STS_RESOURCE;
597 dev_err_ratelimited(disk_to_dev(nbd->disk),
598 "Send control failed (result %d)\n", result);
602 if (type != NBD_CMD_WRITE)
607 struct bio *next = bio->bi_next;
608 struct bvec_iter iter;
611 bio_for_each_segment(bvec, bio, iter) {
612 bool is_last = !next && bio_iter_last(bvec, iter);
613 int flags = is_last ? 0 : MSG_MORE;
615 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
617 iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
619 if (skip >= iov_iter_count(&from)) {
620 skip -= iov_iter_count(&from);
623 iov_iter_advance(&from, skip);
626 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
628 if (was_interrupted(result)) {
629 /* We've already sent the header, we
630 * have no choice but to set pending and
633 nsock->pending = req;
635 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
636 return BLK_STS_RESOURCE;
638 dev_err(disk_to_dev(nbd->disk),
639 "Send data failed (result %d)\n",
644 * The completion might already have come in,
645 * so break for the last one instead of letting
646 * the iterator do it. This prevents use-after-free
655 trace_nbd_payload_sent(req, handle);
656 nsock->pending = NULL;
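/*
 * Partial-send bookkeeping used above, as a sketch: nsock->sent counts
 * bytes already on the wire for the request parked in nsock->pending.
 * When sendmsg is interrupted mid-request we return BLK_STS_RESOURCE,
 * and the retry resumes at offset "sent" - skipping the already-sent
 * part of the header via iov_iter_advance() and already-sent payload
 * bvecs via "skip" - instead of re-transmitting from the start.
 */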
661 /* NULL returned = something went wrong, inform userspace */
662 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
664 struct nbd_config *config = nbd->config;
666 struct nbd_reply reply;
668 struct request *req = NULL;
672 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
677 iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
678 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
680 if (!nbd_disconnected(config))
681 dev_err(disk_to_dev(nbd->disk),
682 "Receive control failed (result %d)\n", result);
683 return ERR_PTR(result);
686 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
687 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
688 (unsigned long)ntohl(reply.magic));
689 return ERR_PTR(-EPROTO);
692 memcpy(&handle, reply.handle, sizeof(handle));
693 tag = nbd_handle_to_tag(handle);
694 hwq = blk_mq_unique_tag_to_hwq(tag);
695 if (hwq < nbd->tag_set.nr_hw_queues)
696 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
697 blk_mq_unique_tag_to_tag(tag));
698 if (!req || !blk_mq_request_started(req)) {
699 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
701 return ERR_PTR(-ENOENT);
703 trace_nbd_header_received(req, handle);
704 cmd = blk_mq_rq_to_pdu(req);
706 mutex_lock(&cmd->lock);
707 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
708 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
709 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
713 if (cmd->status != BLK_STS_OK) {
714 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
719 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
720 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
725 if (ntohl(reply.error)) {
726 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
728 cmd->status = BLK_STS_IOERR;
732 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
733 if (rq_data_dir(req) != WRITE) {
734 struct req_iterator iter;
737 rq_for_each_segment(bvec, req, iter) {
738 iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
739 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
741 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
744 * If we've disconnected or we only have 1
745 * connection then we need to make sure we
746 * complete this request, otherwise error out
747 * and let the timeout stuff handle resubmitting
748 * this request onto another connection.
750 if (nbd_disconnected(config) ||
751 config->num_connections <= 1) {
752 cmd->status = BLK_STS_IOERR;
758 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
763 trace_nbd_payload_received(req, handle);
764 mutex_unlock(&cmd->lock);
765 return ret ? ERR_PTR(ret) : cmd;
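/*
 * Reply validation ladder in nbd_read_stat(), summarized: check the
 * 32-bit NBD_REPLY_MAGIC, map the handle's tag back to a started
 * request via its hardware queue, compare the handle's cookie against
 * cmd->cmd_cookie, and only then accept the status and (for reads)
 * pull the payload into the request's bvecs with MSG_WAITALL.
 */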
768 static void recv_work(struct work_struct *work)
770 struct recv_thread_args *args = container_of(work,
771 struct recv_thread_args,
773 struct nbd_device *nbd = args->nbd;
774 struct nbd_config *config = nbd->config;
778 cmd = nbd_read_stat(nbd, args->index);
780 struct nbd_sock *nsock = config->socks[args->index];
782 mutex_lock(&nsock->tx_lock);
783 nbd_mark_nsock_dead(nbd, nsock, 1);
784 mutex_unlock(&nsock->tx_lock);
788 blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
790 atomic_dec(&config->recv_threads);
791 wake_up(&config->recv_wq);
796 static bool nbd_clear_req(struct request *req, void *data, bool reserved)
798 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
800 mutex_lock(&cmd->lock);
801 cmd->status = BLK_STS_IOERR;
802 mutex_unlock(&cmd->lock);
804 blk_mq_complete_request(req);
808 static void nbd_clear_que(struct nbd_device *nbd)
810 blk_mq_quiesce_queue(nbd->disk->queue);
811 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
812 blk_mq_unquiesce_queue(nbd->disk->queue);
813 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
816 static int find_fallback(struct nbd_device *nbd, int index)
818 struct nbd_config *config = nbd->config;
820 struct nbd_sock *nsock = config->socks[index];
821 int fallback = nsock->fallback_index;
823 if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
826 if (config->num_connections <= 1) {
827 dev_err_ratelimited(disk_to_dev(nbd->disk),
828 "Attempted send on invalid socket\n");
832 if (fallback >= 0 && fallback < config->num_connections &&
833 !config->socks[fallback]->dead)
836 if (nsock->fallback_index < 0 ||
837 nsock->fallback_index >= config->num_connections ||
838 config->socks[nsock->fallback_index]->dead) {
840 for (i = 0; i < config->num_connections; i++) {
843 if (!config->socks[i]->dead) {
848 nsock->fallback_index = new_index;
850 dev_err_ratelimited(disk_to_dev(nbd->disk),
851 "Dead connection, failed to find a fallback\n");
855 new_index = nsock->fallback_index;
859 static int wait_for_reconnect(struct nbd_device *nbd)
861 struct nbd_config *config = nbd->config;
862 if (!config->dead_conn_timeout)
864 if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
866 return wait_event_timeout(config->conn_wait,
867 atomic_read(&config->live_connections) > 0,
868 config->dead_conn_timeout) > 0;
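/*
 * Example: with a dead connection timeout of 30 seconds (the netlink
 * handlers store NBD_ATTR_DEAD_CONN_TIMEOUT multiplied by HZ), a
 * request that finds no live socket waits here up to 30 seconds for a
 * reconnect before it is failed.
 */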
871 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
873 struct request *req = blk_mq_rq_from_pdu(cmd);
874 struct nbd_device *nbd = cmd->nbd;
875 struct nbd_config *config;
876 struct nbd_sock *nsock;
879 if (!refcount_inc_not_zero(&nbd->config_refs)) {
880 dev_err_ratelimited(disk_to_dev(nbd->disk),
881 "Socks array is empty\n");
882 blk_mq_start_request(req);
885 config = nbd->config;
887 if (index >= config->num_connections) {
888 dev_err_ratelimited(disk_to_dev(nbd->disk),
889 "Attempted send on invalid socket\n");
891 blk_mq_start_request(req);
894 cmd->status = BLK_STS_OK;
896 nsock = config->socks[index];
897 mutex_lock(&nsock->tx_lock);
899 int old_index = index;
900 index = find_fallback(nbd, index);
901 mutex_unlock(&nsock->tx_lock);
903 if (wait_for_reconnect(nbd)) {
907 /* All the sockets should already be down at this point,
908 * we just want to make sure that DISCONNECTED is set so
909 * any requests that come in that were queued waiting
910 * for the reconnect timer don't trigger the timer again
911 * and instead just error out.
915 blk_mq_start_request(req);
921 /* Handle the case that we have a pending request that was partially
922 * transmitted that _has_ to be serviced first. We need to call requeue
923 * here so that it gets put _after_ the request that is already on the
926 blk_mq_start_request(req);
927 if (unlikely(nsock->pending && nsock->pending != req)) {
928 nbd_requeue_cmd(cmd);
933 * Some failures are related to the link going down, so anything that
934 * returns EAGAIN can be retried on a different socket.
936 ret = nbd_send_cmd(nbd, cmd, index);
937 if (ret == -EAGAIN) {
938 dev_err_ratelimited(disk_to_dev(nbd->disk),
939 "Request send failed, requeueing\n");
940 nbd_mark_nsock_dead(nbd, nsock, 1);
941 nbd_requeue_cmd(cmd);
945 mutex_unlock(&nsock->tx_lock);
950 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
951 const struct blk_mq_queue_data *bd)
953 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
957 * Since we look at the bios to send the request over the network we
958 * need to make sure the completion work doesn't mark this request done
959 * before we are done doing our send. This keeps us from dereferencing
960 * freed data if we have particularly fast completions (ie we get the
961 * completion before we exit sock_xmit on the last bvec) or in the case
962 * that the server is misbehaving (or there was an error) before we're
963 * done sending everything over the wire.
965 mutex_lock(&cmd->lock);
966 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
968 /* We can be called directly from the user space process, which means we
969 * could possibly have signals pending so our sendmsg will fail. In
970 * this case we need to return that we are busy, otherwise error out as
973 ret = nbd_handle_cmd(cmd, hctx->queue_num);
978 mutex_unlock(&cmd->lock);
983 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
989 sock = sockfd_lookup(fd, err);
993 if (sock->ops->shutdown == sock_no_shutdown) {
994 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1003 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1006 struct nbd_config *config = nbd->config;
1007 struct socket *sock;
1008 struct nbd_sock **socks;
1009 struct nbd_sock *nsock;
1012 sock = nbd_get_socket(nbd, arg, &err);
1016 if (!netlink && !nbd->task_setup &&
1017 !test_bit(NBD_RT_BOUND, &config->runtime_flags))
1018 nbd->task_setup = current;
1021 (nbd->task_setup != current ||
1022 test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
1023 dev_err(disk_to_dev(nbd->disk),
1024 "Device being setup by another task");
1029 socks = krealloc(config->socks, (config->num_connections + 1) *
1030 sizeof(struct nbd_sock *), GFP_KERNEL);
1036 config->socks = socks;
1038 nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
1044 nsock->fallback_index = -1;
1045 nsock->dead = false;
1046 mutex_init(&nsock->tx_lock);
1048 nsock->pending = NULL;
1051 socks[config->num_connections++] = nsock;
1052 atomic_inc(&config->live_connections);
1057 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1059 struct nbd_config *config = nbd->config;
1060 struct socket *sock, *old;
1061 struct recv_thread_args *args;
1065 sock = nbd_get_socket(nbd, arg, &err);
1069 args = kzalloc(sizeof(*args), GFP_KERNEL);
1075 for (i = 0; i < config->num_connections; i++) {
1076 struct nbd_sock *nsock = config->socks[i];
1081 mutex_lock(&nsock->tx_lock);
1083 mutex_unlock(&nsock->tx_lock);
1086 sk_set_memalloc(sock->sk);
1087 if (nbd->tag_set.timeout)
1088 sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1089 atomic_inc(&config->recv_threads);
1090 refcount_inc(&nbd->config_refs);
1092 nsock->fallback_index = -1;
1094 nsock->dead = false;
1095 INIT_WORK(&args->work, recv_work);
1099 mutex_unlock(&nsock->tx_lock);
1102 clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1104 /* We take the tx_lock in an error path in recv_work, so we
1105 * need to queue_work outside of the tx_lock.
1107 queue_work(nbd->recv_workq, &args->work);
1109 atomic_inc(&config->live_connections);
1110 wake_up(&config->conn_wait);
1118 static void nbd_bdev_reset(struct block_device *bdev)
1120 if (bdev->bd_openers > 1)
1122 bd_set_size(bdev, 0);
1125 static void nbd_parse_flags(struct nbd_device *nbd)
1127 struct nbd_config *config = nbd->config;
1128 if (config->flags & NBD_FLAG_READ_ONLY)
1129 set_disk_ro(nbd->disk, true);
1131 set_disk_ro(nbd->disk, false);
1132 if (config->flags & NBD_FLAG_SEND_TRIM)
1133 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1134 if (config->flags & NBD_FLAG_SEND_FLUSH) {
1135 if (config->flags & NBD_FLAG_SEND_FUA)
1136 blk_queue_write_cache(nbd->disk->queue, true, true);
1138 blk_queue_write_cache(nbd->disk->queue, true, false);
1141 blk_queue_write_cache(nbd->disk->queue, false, false);
1144 static void send_disconnects(struct nbd_device *nbd)
1146 struct nbd_config *config = nbd->config;
1147 struct nbd_request request = {
1148 .magic = htonl(NBD_REQUEST_MAGIC),
1149 .type = htonl(NBD_CMD_DISC),
1151 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1152 struct iov_iter from;
1155 for (i = 0; i < config->num_connections; i++) {
1156 struct nbd_sock *nsock = config->socks[i];
1158 iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
1159 mutex_lock(&nsock->tx_lock);
1160 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1162 dev_err(disk_to_dev(nbd->disk),
1163 "Send disconnect failed %d\n", ret);
1164 mutex_unlock(&nsock->tx_lock);
1168 static int nbd_disconnect(struct nbd_device *nbd)
1170 struct nbd_config *config = nbd->config;
1172 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1173 set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
1174 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
1175 send_disconnects(nbd);
1179 static void nbd_clear_sock(struct nbd_device *nbd)
1183 nbd->task_setup = NULL;
1186 static void nbd_config_put(struct nbd_device *nbd)
1188 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1189 &nbd->config_lock)) {
1190 struct nbd_config *config = nbd->config;
1191 nbd_dev_dbg_close(nbd);
1192 nbd_size_clear(nbd);
1193 if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
1194 &config->runtime_flags))
1195 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1196 nbd->task_recv = NULL;
1197 nbd_clear_sock(nbd);
1198 if (config->num_connections) {
1200 for (i = 0; i < config->num_connections; i++) {
1201 sockfd_put(config->socks[i]->sock);
1202 kfree(config->socks[i]);
1204 kfree(config->socks);
1209 if (nbd->recv_workq)
1210 destroy_workqueue(nbd->recv_workq);
1211 nbd->recv_workq = NULL;
1213 nbd->tag_set.timeout = 0;
1214 nbd->disk->queue->limits.discard_granularity = 0;
1215 nbd->disk->queue->limits.discard_alignment = 0;
1216 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
1217 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1219 mutex_unlock(&nbd->config_lock);
1221 module_put(THIS_MODULE);
1225 static int nbd_start_device(struct nbd_device *nbd)
1227 struct nbd_config *config = nbd->config;
1228 int num_connections = config->num_connections;
1235 if (num_connections > 1 &&
1236 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
1237 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1241 nbd->recv_workq = alloc_workqueue("knbd%d-recv",
1242 WQ_MEM_RECLAIM | WQ_HIGHPRI |
1243 WQ_UNBOUND, 0, nbd->index);
1244 if (!nbd->recv_workq) {
1245 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1249 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1250 nbd->task_recv = current;
1252 nbd_parse_flags(nbd);
1254 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1256 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
1259 set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
1261 nbd_dev_dbg_init(nbd);
1262 for (i = 0; i < num_connections; i++) {
1263 struct recv_thread_args *args;
1265 args = kzalloc(sizeof(*args), GFP_KERNEL);
1269 * If num_connections is m (m > 2) and the first n (1 < n < m)
1270 * kzalloc calls succeed but allocation n + 1 fails, we still
1271 * have n recv threads running. Flush the workqueue here to
1272 * prevent those recv threads from dropping the last config_refs
1273 * and trying to destroy the workqueue from inside the
1274 * workqueue.
1277 flush_workqueue(nbd->recv_workq);
1280 sk_set_memalloc(config->socks[i]->sock->sk);
1281 if (nbd->tag_set.timeout)
1282 config->socks[i]->sock->sk->sk_sndtimeo =
1283 nbd->tag_set.timeout;
1284 atomic_inc(&config->recv_threads);
1285 refcount_inc(&nbd->config_refs);
1286 INIT_WORK(&args->work, recv_work);
1289 queue_work(nbd->recv_workq, &args->work);
1291 nbd_size_update(nbd);
1295 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1297 struct nbd_config *config = nbd->config;
1300 ret = nbd_start_device(nbd);
1305 bdev->bd_invalidated = 1;
1306 mutex_unlock(&nbd->config_lock);
1307 ret = wait_event_interruptible(config->recv_wq,
1308 atomic_read(&config->recv_threads) == 0);
1311 flush_workqueue(nbd->recv_workq);
1313 mutex_lock(&nbd->config_lock);
1314 nbd_bdev_reset(bdev);
1315 /* user requested, ignore socket errors */
1316 if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
1318 if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
1323 static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1324 struct block_device *bdev)
1327 __invalidate_device(bdev, true);
1328 nbd_bdev_reset(bdev);
1329 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1330 &nbd->config->runtime_flags))
1331 nbd_config_put(nbd);
1334 static bool nbd_is_valid_blksize(unsigned long blksize)
1336 if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
1337 blksize > PAGE_SIZE)
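/*
 * In other words, accepted block sizes are powers of two in
 * [512, PAGE_SIZE]; on a system with 4 KiB pages that is exactly
 * 512, 1024, 2048 and 4096 bytes.
 */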
1342 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1344 nbd->tag_set.timeout = timeout * HZ;
1346 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1349 /* Must be called with config_lock held */
1350 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1351 unsigned int cmd, unsigned long arg)
1353 struct nbd_config *config = nbd->config;
1356 case NBD_DISCONNECT:
1357 return nbd_disconnect(nbd);
1358 case NBD_CLEAR_SOCK:
1359 nbd_clear_sock_ioctl(nbd, bdev);
1362 return nbd_add_socket(nbd, arg, false);
1363 case NBD_SET_BLKSIZE:
1365 arg = NBD_DEF_BLKSIZE;
1366 if (!nbd_is_valid_blksize(arg))
1368 nbd_size_set(nbd, arg,
1369 div_s64(config->bytesize, arg));
1372 nbd_size_set(nbd, config->blksize,
1373 div_s64(arg, config->blksize));
1375 case NBD_SET_SIZE_BLOCKS:
1376 nbd_size_set(nbd, config->blksize, arg);
1378 case NBD_SET_TIMEOUT:
1379 nbd_set_cmd_timeout(nbd, arg);
1383 config->flags = arg;
1386 return nbd_start_device_ioctl(nbd, bdev);
1389 * This is for compatibility only. The queue is always cleared
1390 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1393 case NBD_PRINT_DEBUG:
1395 * For compatibility only, we no longer keep a list of
1396 * outstanding requests.
1403 static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1404 unsigned int cmd, unsigned long arg)
1406 struct nbd_device *nbd = bdev->bd_disk->private_data;
1407 struct nbd_config *config = nbd->config;
1408 int error = -EINVAL;
1410 if (!capable(CAP_SYS_ADMIN))
1413 /* The block layer will pass back some non-nbd ioctls in case we have
1414 * special handling for them, but we don't, so just return an error.
1416 if (_IOC_TYPE(cmd) != 0xab)
1419 mutex_lock(&nbd->config_lock);
1421 /* Don't allow ioctl operations on a nbd device that was created with
1422 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1424 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1425 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1426 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1428 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1429 mutex_unlock(&nbd->config_lock);
1433 static struct nbd_config *nbd_alloc_config(void)
1435 struct nbd_config *config;
1437 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1440 atomic_set(&config->recv_threads, 0);
1441 init_waitqueue_head(&config->recv_wq);
1442 init_waitqueue_head(&config->conn_wait);
1443 config->blksize = NBD_DEF_BLKSIZE;
1444 atomic_set(&config->live_connections, 0);
1445 try_module_get(THIS_MODULE);
1449 static int nbd_open(struct block_device *bdev, fmode_t mode)
1451 struct nbd_device *nbd;
1454 mutex_lock(&nbd_index_mutex);
1455 nbd = bdev->bd_disk->private_data;
1460 if (!refcount_inc_not_zero(&nbd->refs)) {
1464 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1465 struct nbd_config *config;
1467 mutex_lock(&nbd->config_lock);
1468 if (refcount_inc_not_zero(&nbd->config_refs)) {
1469 mutex_unlock(&nbd->config_lock);
1472 config = nbd->config = nbd_alloc_config();
1475 mutex_unlock(&nbd->config_lock);
1478 refcount_set(&nbd->config_refs, 1);
1479 refcount_inc(&nbd->refs);
1480 mutex_unlock(&nbd->config_lock);
1481 bdev->bd_invalidated = 1;
1482 } else if (nbd_disconnected(nbd->config)) {
1483 bdev->bd_invalidated = 1;
1486 mutex_unlock(&nbd_index_mutex);
1490 static void nbd_release(struct gendisk *disk, fmode_t mode)
1492 struct nbd_device *nbd = disk->private_data;
1493 struct block_device *bdev = bdget_disk(disk, 0);
1495 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1496 bdev->bd_openers == 0)
1497 nbd_disconnect_and_put(nbd);
1499 nbd_config_put(nbd);
1503 static const struct block_device_operations nbd_fops =
1505 .owner = THIS_MODULE,
1507 .release = nbd_release,
1509 .compat_ioctl = nbd_ioctl,
1512 #if IS_ENABLED(CONFIG_DEBUG_FS)
1514 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1516 struct nbd_device *nbd = s->private;
1519 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
1524 static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1526 return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1529 static const struct file_operations nbd_dbg_tasks_ops = {
1530 .open = nbd_dbg_tasks_open,
1532 .llseek = seq_lseek,
1533 .release = single_release,
1536 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1538 struct nbd_device *nbd = s->private;
1539 u32 flags = nbd->config->flags;
1541 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1543 seq_puts(s, "Known flags:\n");
1545 if (flags & NBD_FLAG_HAS_FLAGS)
1546 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1547 if (flags & NBD_FLAG_READ_ONLY)
1548 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1549 if (flags & NBD_FLAG_SEND_FLUSH)
1550 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1551 if (flags & NBD_FLAG_SEND_FUA)
1552 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1553 if (flags & NBD_FLAG_SEND_TRIM)
1554 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1559 static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1561 return single_open(file, nbd_dbg_flags_show, inode->i_private);
1564 static const struct file_operations nbd_dbg_flags_ops = {
1565 .open = nbd_dbg_flags_open,
1567 .llseek = seq_lseek,
1568 .release = single_release,
1571 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1574 struct nbd_config *config = nbd->config;
1579 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1581 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1585 config->dbg_dir = dir;
1587 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
1588 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1589 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1590 debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
1591 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
1596 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1598 debugfs_remove_recursive(nbd->config->dbg_dir);
1601 static int nbd_dbg_init(void)
1603 struct dentry *dbg_dir;
1605 dbg_dir = debugfs_create_dir("nbd", NULL);
1609 nbd_dbg_dir = dbg_dir;
1614 static void nbd_dbg_close(void)
1616 debugfs_remove_recursive(nbd_dbg_dir);
1619 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1621 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1626 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1630 static int nbd_dbg_init(void)
1635 static void nbd_dbg_close(void)
1641 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1642 unsigned int hctx_idx, unsigned int numa_node)
1644 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1645 cmd->nbd = set->driver_data;
1647 mutex_init(&cmd->lock);
1651 static const struct blk_mq_ops nbd_mq_ops = {
1652 .queue_rq = nbd_queue_rq,
1653 .complete = nbd_complete_rq,
1654 .init_request = nbd_init_request,
1655 .timeout = nbd_xmit_timeout,
1658 static int nbd_dev_add(int index)
1660 struct nbd_device *nbd;
1661 struct gendisk *disk;
1662 struct request_queue *q;
1665 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1669 disk = alloc_disk(1 << part_shift);
1674 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1679 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1688 nbd->tag_set.ops = &nbd_mq_ops;
1689 nbd->tag_set.nr_hw_queues = 1;
1690 nbd->tag_set.queue_depth = 128;
1691 nbd->tag_set.numa_node = NUMA_NO_NODE;
1692 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1693 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1695 nbd->tag_set.driver_data = nbd;
1696 nbd->destroy_complete = NULL;
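/*
 * Tag-set sketch: the device starts with a single hardware queue and a
 * depth of 128; nbd_start_device() later calls
 * blk_mq_update_nr_hw_queues() with the connection count so that each
 * connection gets its own hardware queue, and nbd_queue_rq() uses
 * hctx->queue_num as the socket index.
 */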
1698 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1702 q = blk_mq_init_queue(&nbd->tag_set);
1710 * Tell the block layer that we are not a rotational device
1712 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1713 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
1714 disk->queue->limits.discard_granularity = 0;
1715 disk->queue->limits.discard_alignment = 0;
1716 blk_queue_max_discard_sectors(disk->queue, 0);
1717 blk_queue_max_segment_size(disk->queue, UINT_MAX);
1718 blk_queue_max_segments(disk->queue, USHRT_MAX);
1719 blk_queue_max_hw_sectors(disk->queue, 65536);
1720 disk->queue->limits.max_sectors = 256;
1722 mutex_init(&nbd->config_lock);
1723 refcount_set(&nbd->config_refs, 0);
1724 refcount_set(&nbd->refs, 1);
1725 INIT_LIST_HEAD(&nbd->list);
1726 disk->major = NBD_MAJOR;
1727 disk->first_minor = index << part_shift;
1728 disk->fops = &nbd_fops;
1729 disk->private_data = nbd;
1730 sprintf(disk->disk_name, "nbd%d", index);
1732 nbd_total_devices++;
1736 blk_mq_free_tag_set(&nbd->tag_set);
1738 idr_remove(&nbd_index_idr, index);
1747 static int find_free_cb(int id, void *ptr, void *data)
1749 struct nbd_device *nbd = ptr;
1750 struct nbd_device **found = data;
1752 if (!refcount_read(&nbd->config_refs)) {
1759 /* Netlink interface. */
1760 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1761 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1762 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1763 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1764 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1765 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1766 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1767 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
1768 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
1769 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
1772 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1773 [NBD_SOCK_FD] = { .type = NLA_U32 },
1776 /* We don't use this right now since we don't parse the incoming list, but we
1777 * still want it here so userspace knows what to expect.
1779 static const struct nla_policy __attribute__((unused))
1780 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1781 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1782 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
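/*
 * Netlink usage sketch (hedged; see nbd_genl_connect() below): a
 * client such as nbd-client sends NBD_CMD_CONNECT carrying an optional
 * NBD_ATTR_INDEX (omitted to let the kernel pick a free device),
 * NBD_ATTR_SIZE_BYTES, and a nested NBD_ATTR_SOCKETS list of
 * NBD_SOCK_ITEM attributes, each holding the NBD_SOCK_FD of an
 * already-connected socket; the reply reports the index that was used.
 */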
1785 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1787 struct nbd_config *config = nbd->config;
1788 u64 bsize = config->blksize;
1789 u64 bytes = config->bytesize;
1791 if (info->attrs[NBD_ATTR_SIZE_BYTES])
1792 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1794 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
1795 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1797 bsize = NBD_DEF_BLKSIZE;
1798 if (!nbd_is_valid_blksize(bsize)) {
1799 printk(KERN_ERR "Invalid block size %llu\n", bsize);
1804 if (bytes != config->bytesize || bsize != config->blksize)
1805 nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
1809 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1811 DECLARE_COMPLETION_ONSTACK(destroy_complete);
1812 struct nbd_device *nbd = NULL;
1813 struct nbd_config *config;
1816 bool put_dev = false;
1818 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1821 if (info->attrs[NBD_ATTR_INDEX])
1822 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1823 if (!info->attrs[NBD_ATTR_SOCKETS]) {
1824 printk(KERN_ERR "nbd: must specify at least one socket\n");
1827 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1828 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1832 mutex_lock(&nbd_index_mutex);
1834 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1837 new_index = nbd_dev_add(-1);
1838 if (new_index < 0) {
1839 mutex_unlock(&nbd_index_mutex);
1840 printk(KERN_ERR "nbd: failed to add new device\n");
1843 nbd = idr_find(&nbd_index_idr, new_index);
1846 nbd = idr_find(&nbd_index_idr, index);
1848 ret = nbd_dev_add(index);
1850 mutex_unlock(&nbd_index_mutex);
1851 printk(KERN_ERR "nbd: failed to add new device\n");
1854 nbd = idr_find(&nbd_index_idr, index);
1858 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1860 mutex_unlock(&nbd_index_mutex);
1864 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
1865 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
1866 nbd->destroy_complete = &destroy_complete;
1867 mutex_unlock(&nbd_index_mutex);
1869 /* Wait until the nbd device is totally destroyed */
1870 wait_for_completion(&destroy_complete);
1874 if (!refcount_inc_not_zero(&nbd->refs)) {
1875 mutex_unlock(&nbd_index_mutex);
1878 printk(KERN_ERR "nbd: device at index %d is going down\n",
1882 mutex_unlock(&nbd_index_mutex);
1884 mutex_lock(&nbd->config_lock);
1885 if (refcount_read(&nbd->config_refs)) {
1886 mutex_unlock(&nbd->config_lock);
1890 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1893 if (WARN_ON(nbd->config)) {
1894 mutex_unlock(&nbd->config_lock);
1898 config = nbd->config = nbd_alloc_config();
1900 mutex_unlock(&nbd->config_lock);
1902 printk(KERN_ERR "nbd: couldn't allocate config\n");
1905 refcount_set(&nbd->config_refs, 1);
1906 set_bit(NBD_RT_BOUND, &config->runtime_flags);
1908 ret = nbd_genl_size_set(info, nbd);
1912 if (info->attrs[NBD_ATTR_TIMEOUT])
1913 nbd_set_cmd_timeout(nbd,
1914 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
1915 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1916 config->dead_conn_timeout =
1917 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1918 config->dead_conn_timeout *= HZ;
1920 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1922 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
1923 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1924 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1925 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1926 set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
1927 &config->runtime_flags);
1928 set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
1931 clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
1933 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
1934 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
1935 &config->runtime_flags);
1939 if (info->attrs[NBD_ATTR_SOCKETS]) {
1940 struct nlattr *attr;
1943 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1945 struct nlattr *socks[NBD_SOCK_MAX+1];
1947 if (nla_type(attr) != NBD_SOCK_ITEM) {
1948 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1952 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
1957 printk(KERN_ERR "nbd: error processing sock list\n");
1961 if (!socks[NBD_SOCK_FD])
1963 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1964 ret = nbd_add_socket(nbd, fd, true);
1969 ret = nbd_start_device(nbd);
1971 mutex_unlock(&nbd->config_lock);
1973 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
1974 refcount_inc(&nbd->config_refs);
1975 nbd_connect_reply(info, nbd->index);
1977 nbd_config_put(nbd);
1983 static void nbd_disconnect_and_put(struct nbd_device *nbd)
1985 mutex_lock(&nbd->config_lock);
1986 nbd_disconnect(nbd);
1987 nbd_clear_sock(nbd);
1988 mutex_unlock(&nbd->config_lock);
1990 * Make sure recv thread has finished, so it does not drop the last
1991 * config ref and try to destroy the workqueue from inside the work
1994 flush_workqueue(nbd->recv_workq);
1995 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1996 &nbd->config->runtime_flags))
1997 nbd_config_put(nbd);
2000 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
2002 struct nbd_device *nbd;
2005 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2008 if (!info->attrs[NBD_ATTR_INDEX]) {
2009 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
2012 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2013 mutex_lock(&nbd_index_mutex);
2014 nbd = idr_find(&nbd_index_idr, index);
2016 mutex_unlock(&nbd_index_mutex);
2017 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
2021 if (!refcount_inc_not_zero(&nbd->refs)) {
2022 mutex_unlock(&nbd_index_mutex);
2023 printk(KERN_ERR "nbd: device at index %d is going down\n",
2027 mutex_unlock(&nbd_index_mutex);
2028 if (!refcount_inc_not_zero(&nbd->config_refs)) {
2032 nbd_disconnect_and_put(nbd);
2033 nbd_config_put(nbd);
2038 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2040 struct nbd_device *nbd = NULL;
2041 struct nbd_config *config;
2044 bool put_dev = false;
2046 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2049 if (!info->attrs[NBD_ATTR_INDEX]) {
2050 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
2053 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2054 mutex_lock(&nbd_index_mutex);
2055 nbd = idr_find(&nbd_index_idr, index);
2057 mutex_unlock(&nbd_index_mutex);
2058 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
2062 if (!refcount_inc_not_zero(&nbd->refs)) {
2063 mutex_unlock(&nbd_index_mutex);
2064 printk(KERN_ERR "nbd: device at index %d is going down\n",
2068 mutex_unlock(&nbd_index_mutex);
2070 if (!refcount_inc_not_zero(&nbd->config_refs)) {
2071 dev_err(nbd_to_dev(nbd),
2072 "not configured, cannot reconfigure\n");
2077 mutex_lock(&nbd->config_lock);
2078 config = nbd->config;
2079 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
2081 dev_err(nbd_to_dev(nbd),
2082 "not configured, cannot reconfigure\n");
2087 ret = nbd_genl_size_set(info, nbd);
2091 if (info->attrs[NBD_ATTR_TIMEOUT])
2092 nbd_set_cmd_timeout(nbd,
2093 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2094 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2095 config->dead_conn_timeout =
2096 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2097 config->dead_conn_timeout *= HZ;
2099 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2100 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2101 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2102 if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
2103 &config->runtime_flags))
2105 set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
2107 if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
2108 &config->runtime_flags))
2109 refcount_inc(&nbd->refs);
2110 clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
2113 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2114 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2115 &config->runtime_flags);
2117 clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2118 &config->runtime_flags);
2122 if (info->attrs[NBD_ATTR_SOCKETS]) {
2123 struct nlattr *attr;
2126 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2128 struct nlattr *socks[NBD_SOCK_MAX+1];
2130 if (nla_type(attr) != NBD_SOCK_ITEM) {
2131 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2135 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2140 printk(KERN_ERR "nbd: error processing sock list\n");
2144 if (!socks[NBD_SOCK_FD])
2146 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2147 ret = nbd_reconnect_socket(nbd, fd);
2153 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2157 mutex_unlock(&nbd->config_lock);
2158 nbd_config_put(nbd);
2165 static const struct genl_ops nbd_connect_genl_ops[] = {
2167 .cmd = NBD_CMD_CONNECT,
2168 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2169 .doit = nbd_genl_connect,
2172 .cmd = NBD_CMD_DISCONNECT,
2173 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2174 .doit = nbd_genl_disconnect,
2177 .cmd = NBD_CMD_RECONFIGURE,
2178 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2179 .doit = nbd_genl_reconfigure,
2182 .cmd = NBD_CMD_STATUS,
2183 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2184 .doit = nbd_genl_status,
2188 static const struct genl_multicast_group nbd_mcast_grps[] = {
2189 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2192 static struct genl_family nbd_genl_family __ro_after_init = {
2194 .name = NBD_GENL_FAMILY_NAME,
2195 .version = NBD_GENL_VERSION,
2196 .module = THIS_MODULE,
2197 .ops = nbd_connect_genl_ops,
2198 .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2199 .maxattr = NBD_ATTR_MAX,
2200 .policy = nbd_attr_policy,
2201 .mcgrps = nbd_mcast_grps,
2202 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
2205 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2207 struct nlattr *dev_opt;
2211 /* This is a little racy, but for status it's ok. The
2212 * reason we don't take a ref here is because we can't
2213 * take a ref in the index == -1 case as we would need
2214 * to put under the nbd_index_mutex, which could
2215 * deadlock if we are configured to remove ourselves
2216 * once we're disconnected.
2218 if (refcount_read(&nbd->config_refs))
2220 dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
2223 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2226 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2230 nla_nest_end(reply, dev_opt);
2234 static int status_cb(int id, void *ptr, void *data)
2236 struct nbd_device *nbd = ptr;
2237 return populate_nbd_status(nbd, (struct sk_buff *)data);
2240 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2242 struct nlattr *dev_list;
2243 struct sk_buff *reply;
2249 if (info->attrs[NBD_ATTR_INDEX])
2250 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2252 mutex_lock(&nbd_index_mutex);
2254 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2255 nla_attr_size(sizeof(u8)));
2256 msg_size *= (index == -1) ? nbd_total_devices : 1;
2258 reply = genlmsg_new(msg_size, GFP_KERNEL);
2261 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2268 dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
2270 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2276 struct nbd_device *nbd;
2277 nbd = idr_find(&nbd_index_idr, index);
2279 ret = populate_nbd_status(nbd, reply);
2286 nla_nest_end(reply, dev_list);
2287 genlmsg_end(reply, reply_head);
2288 ret = genlmsg_reply(reply, info);
2290 mutex_unlock(&nbd_index_mutex);
2294 static void nbd_connect_reply(struct genl_info *info, int index)
2296 struct sk_buff *skb;
2300 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2303 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2309 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2314 genlmsg_end(skb, msg_head);
2315 genlmsg_reply(skb, info);
2318 static void nbd_mcast_index(int index)
2320 struct sk_buff *skb;
2324 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2327 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2333 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2338 genlmsg_end(skb, msg_head);
2339 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2342 static void nbd_dead_link_work(struct work_struct *work)
2344 struct link_dead_args *args = container_of(work, struct link_dead_args,
2346 nbd_mcast_index(args->index);
2350 static int __init nbd_init(void)
2354 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
2357 printk(KERN_ERR "nbd: max_part must be >= 0\n");
2363 part_shift = fls(max_part);
2366 * Adjust max_part according to part_shift as it is exported
2367 * to user space so that users can know the maximum number of
2368 * partitions the kernel is able to manage.
2370 * Note that -1 is required because partition 0 is reserved
2371 * for the whole disk.
2373 max_part = (1UL << part_shift) - 1;
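/*
 * Worked example of the defaults: max_part=16 gives part_shift =
 * fls(16) = 5, so max_part becomes (1 << 5) - 1 = 31 and nbd0's
 * partitions occupy minors 0..31 (first_minor = index << 5). This
 * also caps nbds_max at 1UL << (MINORBITS - 5) = 32768 devices.
 */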
2376 if ((1UL << part_shift) > DISK_MAX_PARTS)
2379 if (nbds_max > 1UL << (MINORBITS - part_shift))
2382 if (register_blkdev(NBD_MAJOR, "nbd"))
2385 if (genl_register_family(&nbd_genl_family)) {
2386 unregister_blkdev(NBD_MAJOR, "nbd");
2391 mutex_lock(&nbd_index_mutex);
2392 for (i = 0; i < nbds_max; i++)
2394 mutex_unlock(&nbd_index_mutex);
2398 static int nbd_exit_cb(int id, void *ptr, void *data)
2400 struct list_head *list = (struct list_head *)data;
2401 struct nbd_device *nbd = ptr;
2403 list_add_tail(&nbd->list, list);
2407 static void __exit nbd_cleanup(void)
2409 struct nbd_device *nbd;
2410 LIST_HEAD(del_list);
2414 mutex_lock(&nbd_index_mutex);
2415 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2416 mutex_unlock(&nbd_index_mutex);
2418 while (!list_empty(&del_list)) {
2419 nbd = list_first_entry(&del_list, struct nbd_device, list);
2420 list_del_init(&nbd->list);
2421 if (refcount_read(&nbd->refs) != 1)
2422 printk(KERN_ERR "nbd: possibly leaking a device\n");
2426 idr_destroy(&nbd_index_idr);
2427 genl_unregister_family(&nbd_genl_family);
2428 unregister_blkdev(NBD_MAJOR, "nbd");
2431 module_init(nbd_init);
2432 module_exit(nbd_cleanup);
2434 MODULE_DESCRIPTION("Network Block Device");
2435 MODULE_LICENSE("GPL");
2437 module_param(nbds_max, int, 0444);
2438 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2439 module_param(max_part, int, 0444);
2440 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
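/*
 * Usage example (a sketch of the module parameters declared above):
 *
 *   modprobe nbd nbds_max=32 max_part=8
 *
 * creates 32 network block devices (/dev/nbd0..nbd31), each with
 * partition support.
 */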