// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;
struct nbd_sock {
    struct socket *sock;
    struct mutex tx_lock;
    struct request *pending;
    int sent;
    bool dead;
    int fallback_index;
    int cookie;
};

struct recv_thread_args {
    struct work_struct work;
    struct nbd_device *nbd;
    int index;
};

struct link_dead_args {
    struct work_struct work;
    int index;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6
#define NBD_DISCONNECT_ON_CLOSE		7
struct nbd_config {
    u32 flags;
    unsigned long runtime_flags;
    u64 dead_conn_timeout;

    struct nbd_sock **socks;
    int num_connections;
    atomic_t live_connections;
    wait_queue_head_t conn_wait;

    atomic_t recv_threads;
    wait_queue_head_t recv_wq;
    loff_t blksize;
    loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
    struct dentry *dbg_dir;
#endif
};

struct nbd_device {
    struct blk_mq_tag_set tag_set;

    int index;
    refcount_t config_refs;
    refcount_t refs;
    struct nbd_config *config;
    struct mutex config_lock;
    struct gendisk *disk;

    struct list_head list;
    struct task_struct *task_recv;
    struct task_struct *task_setup;
};
#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
    struct nbd_device *nbd;
    struct mutex lock;
    int index;
    int cookie;
    blk_status_t status;
    unsigned long flags;
    u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
    return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
    struct request *req = blk_mq_rq_from_pdu(cmd);

    if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
        blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32
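/*
 * Request handles sent on the wire pack a per-command cookie into the
 * upper 32 bits and the blk-mq unique tag into the lower 32 bits, so a
 * reply can be matched back to its request and a stale reply (e.g. for
 * a command that already timed out and was requeued) can be detected.
 */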
static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
    struct request *req = blk_mq_rq_from_pdu(cmd);
    u32 tag = blk_mq_unique_tag(req);
    u64 cookie = cmd->cmd_cookie;

    return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
    return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
    return (u32)(handle >> NBD_COOKIE_BITS);
}
static const char *nbdcmd_to_ascii(int cmd)
{
    switch (cmd) {
    case NBD_CMD_READ: return "read";
    case NBD_CMD_WRITE: return "write";
    case NBD_CMD_DISC: return "disconnect";
    case NBD_CMD_FLUSH: return "flush";
    case NBD_CMD_TRIM: return "trim/discard";
    }
    return "invalid";
}

static ssize_t pid_show(struct device *dev,
            struct device_attribute *attr, char *buf)
{
    struct gendisk *disk = dev_to_disk(dev);
    struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

    return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
    .attr = { .name = "pid", .mode = 0444},
    .show = pid_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
    struct gendisk *disk = nbd->disk;
    struct request_queue *q;

    if (disk) {
        q = disk->queue;
        del_gendisk(disk);
        blk_cleanup_queue(q);
        blk_mq_free_tag_set(&nbd->tag_set);
        disk->private_data = NULL;
        put_disk(disk);
    }
    kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
    if (refcount_dec_and_mutex_lock(&nbd->refs,
                    &nbd_index_mutex)) {
        idr_remove(&nbd_index_idr, nbd->index);
        mutex_unlock(&nbd_index_mutex);
        nbd_dev_remove(nbd);
    }
}
static int nbd_disconnected(struct nbd_config *config)
{
    return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
        test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}
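/*
 * Mark one connection dead: shut the socket down, drop any partially
 * sent request, and, when the last live connection goes away after a
 * user-requested disconnect, flip the device to DISCONNECTED. With
 * @notify set, a work item is queued to broadcast the dead link over
 * netlink.
 */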
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
                int notify)
{
    if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
        struct link_dead_args *args;
        args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
        if (args) {
            INIT_WORK(&args->work, nbd_dead_link_work);
            args->index = nbd->index;
            queue_work(system_wq, &args->work);
        }
    }
    if (!nsock->dead) {
        kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
        if (atomic_dec_return(&nbd->config->live_connections) == 0) {
            if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED,
                           &nbd->config->runtime_flags)) {
                set_bit(NBD_DISCONNECTED,
                    &nbd->config->runtime_flags);
                dev_info(nbd_to_dev(nbd),
                    "Disconnected due to user request.\n");
            }
        }
    }
    nsock->dead = true;
    nsock->pending = NULL;
    nsock->sent = 0;
}
static void nbd_size_clear(struct nbd_device *nbd)
{
    if (nbd->config->bytesize) {
        set_capacity(nbd->disk, 0);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
    }
}

static void nbd_size_update(struct nbd_device *nbd)
{
    struct nbd_config *config = nbd->config;
    struct block_device *bdev = bdget_disk(nbd->disk, 0);

    if (config->flags & NBD_FLAG_SEND_TRIM) {
        nbd->disk->queue->limits.discard_granularity = config->blksize;
        nbd->disk->queue->limits.discard_alignment = config->blksize;
        blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
    }
    blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
    blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
    set_capacity(nbd->disk, config->bytesize >> 9);
    if (bdev) {
        if (bdev->bd_disk) {
            bd_set_size(bdev, config->bytesize);
            set_blocksize(bdev, config->blksize);
        } else
            bdev->bd_invalidated = 1;
        bdput(bdev);
    }
    kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
             loff_t nr_blocks)
{
    struct nbd_config *config = nbd->config;
    config->blksize = blocksize;
    config->bytesize = blocksize * nr_blocks;
    if (nbd->task_recv != NULL)
        nbd_size_update(nbd);
}
static void nbd_complete_rq(struct request *req)
{
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

    dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
        cmd->status ? "failed" : "done");

    blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shut down the socket causing all listeners to error.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
    struct nbd_config *config = nbd->config;
    int i;

    if (config->num_connections == 0)
        return;
    if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
        return;

    for (i = 0; i < config->num_connections; i++) {
        struct nbd_sock *nsock = config->socks[i];
        mutex_lock(&nsock->tx_lock);
        nbd_mark_nsock_dead(nbd, nsock, 0);
        mutex_unlock(&nsock->tx_lock);
    }
    dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
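/*
 * blk-mq timeout handler. With more than one connection the timed out
 * command is requeued so the submit path can retry it on a live socket;
 * with a single connection the device is marked NBD_TIMEDOUT and the
 * request fails with BLK_STS_IOERR.
 */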
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                         bool reserved)
{
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
    struct nbd_device *nbd = cmd->nbd;
    struct nbd_config *config;

    if (!refcount_inc_not_zero(&nbd->config_refs)) {
        cmd->status = BLK_STS_TIMEOUT;
        goto done;
    }
    config = nbd->config;

    if (!mutex_trylock(&cmd->lock))
        return BLK_EH_RESET_TIMER;

    if (config->num_connections > 1) {
        dev_err_ratelimited(nbd_to_dev(nbd),
                    "Connection timed out, retrying (%d/%d alive)\n",
                    atomic_read(&config->live_connections),
                    config->num_connections);
        /*
         * Hooray we have more connections, requeue this IO, the submit
         * path will put it on a real connection.
         */
        if (config->socks && config->num_connections > 1) {
            if (cmd->index < config->num_connections) {
                struct nbd_sock *nsock =
                    config->socks[cmd->index];
                mutex_lock(&nsock->tx_lock);
                /* We can have multiple outstanding requests, so
                 * we don't want to mark the nsock dead if we've
                 * already reconnected with a new socket, so
                 * only mark it dead if it's the same socket we
                 * are using.
                 */
                if (cmd->cookie == nsock->cookie)
                    nbd_mark_nsock_dead(nbd, nsock, 1);
                mutex_unlock(&nsock->tx_lock);
            }
            mutex_unlock(&cmd->lock);
            nbd_requeue_cmd(cmd);
            nbd_config_put(nbd);
            return BLK_EH_DONE;
        }
    }
    dev_err_ratelimited(nbd_to_dev(nbd),
                "Connection timed out\n");

    set_bit(NBD_TIMEDOUT, &config->runtime_flags);
    cmd->status = BLK_STS_IOERR;
    mutex_unlock(&cmd->lock);
    sock_shutdown(nbd);
    nbd_config_put(nbd);
done:
    blk_mq_complete_request(req);
    return BLK_EH_DONE;
}
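/*
 * Socket I/O runs under memalloc_noreclaim_save() with GFP_NOIO |
 * __GFP_MEMALLOC so that transmitting on behalf of a block device
 * under memory pressure cannot recurse into filesystem reclaim and
 * deadlock.
 */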
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
             struct iov_iter *iter, int msg_flags, int *sent)
{
    struct nbd_config *config = nbd->config;
    struct socket *sock = config->socks[index]->sock;
    int result;
    struct msghdr msg;
    unsigned int noreclaim_flag;

    if (unlikely(!sock)) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
            "Attempted %s on closed socket in sock_xmit\n",
            (send ? "send" : "recv"));
        return -EINVAL;
    }

    msg.msg_iter = *iter;

    noreclaim_flag = memalloc_noreclaim_save();
    do {
        sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = msg_flags | MSG_NOSIGNAL;

        if (send)
            result = sock_sendmsg(sock, &msg);
        else
            result = sock_recvmsg(sock, &msg, msg.msg_flags);

        if (result <= 0) {
            if (result == 0)
                result = -EPIPE; /* short read */
            break;
        }
        if (sent)
            *sent += result;
    } while (msg_data_left(&msg));

    memalloc_noreclaim_restore(noreclaim_flag);

    return result;
}
/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
    return result == -ERESTARTSYS || result == -EINTR;
}
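/*
 * Each command goes out as a struct nbd_request header (magic, type,
 * handle, offset, length) followed, for writes, by the bio pages. A
 * send interrupted by a signal is recorded in nsock->pending/sent and
 * resumed by the next submission of the same request.
 */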
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
    struct request *req = blk_mq_rq_from_pdu(cmd);
    struct nbd_config *config = nbd->config;
    struct nbd_sock *nsock = config->socks[index];
    int result;
    struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
    struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
    struct iov_iter from;
    unsigned long size = blk_rq_bytes(req);
    struct bio *bio;
    u64 handle;
    u32 type;
    u32 nbd_cmd_flags = 0;
    int sent = nsock->sent, skip = 0;

    iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

    switch (req_op(req)) {
    case REQ_OP_DISCARD:
        type = NBD_CMD_TRIM;
        break;
    case REQ_OP_FLUSH:
        type = NBD_CMD_FLUSH;
        break;
    case REQ_OP_WRITE:
        type = NBD_CMD_WRITE;
        break;
    case REQ_OP_READ:
        type = NBD_CMD_READ;
        break;
    default:
        return -EIO;
    }

    if (rq_data_dir(req) == WRITE &&
        (config->flags & NBD_FLAG_READ_ONLY)) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
                    "Write on read-only\n");
        return -EIO;
    }

    if (req->cmd_flags & REQ_FUA)
        nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

    /* We did a partial send previously, and we at least sent the whole
     * request struct, so just go and send the rest of the pages in the
     * request.
     */
    if (sent) {
        if (sent >= sizeof(request)) {
            skip = sent - sizeof(request);

            /* initialize handle for tracing purposes */
            handle = nbd_cmd_handle(cmd);

            goto send_pages;
        }
        iov_iter_advance(&from, sent);
    } else {
        cmd->cmd_cookie++;
    }
    cmd->index = index;
    cmd->cookie = nsock->cookie;
    request.type = htonl(type | nbd_cmd_flags);
    if (type != NBD_CMD_FLUSH) {
        request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
        request.len = htonl(size);
    }
    handle = nbd_cmd_handle(cmd);
    memcpy(request.handle, &handle, sizeof(handle));

    trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

    dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
        req, nbdcmd_to_ascii(type),
        (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
    result = sock_xmit(nbd, index, 1, &from,
            (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
    trace_nbd_header_sent(req, handle);
    if (result <= 0) {
        if (was_interrupted(result)) {
            /* If we haven't sent anything we can just return BUSY,
             * however if we have sent something we need to make
             * sure we only allow this req to be sent until we are
             * completely done.
             */
            if (sent) {
                nsock->pending = req;
                nsock->sent = sent;
            }
            set_bit(NBD_CMD_REQUEUED, &cmd->flags);
            return BLK_STS_RESOURCE;
        }
        dev_err_ratelimited(disk_to_dev(nbd->disk),
            "Send control failed (result %d)\n", result);
        return -EAGAIN;
    }
send_pages:
    if (type != NBD_CMD_WRITE)
        goto out;

    bio = req->bio;
    while (bio) {
        struct bio *next = bio->bi_next;
        struct bvec_iter iter;
        struct bio_vec bvec;

        bio_for_each_segment(bvec, bio, iter) {
            bool is_last = !next && bio_iter_last(bvec, iter);
            int flags = is_last ? 0 : MSG_MORE;

            dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                req, bvec.bv_len);
            iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
            if (skip) {
                if (skip >= iov_iter_count(&from)) {
                    skip -= iov_iter_count(&from);
                    continue;
                }
                iov_iter_advance(&from, skip);
                skip = 0;
            }
            result = sock_xmit(nbd, index, 1, &from, flags, &sent);
            if (result <= 0) {
                if (was_interrupted(result)) {
                    /* We've already sent the header, we
                     * have no choice but to set pending and
                     * return BUSY.
                     */
                    nsock->pending = req;
                    nsock->sent = sent;
                    set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                    return BLK_STS_RESOURCE;
                }
                dev_err(disk_to_dev(nbd->disk),
                    "Send data failed (result %d)\n",
                    result);
                return -EAGAIN;
            }
            /*
             * The completion might already have come in,
             * so break for the last one instead of letting
             * the iterator do it. This prevents use-after-free
             * of the bio.
             */
            if (is_last)
                break;
        }
        bio = next;
    }
out:
    trace_nbd_payload_sent(req, handle);
    nsock->pending = NULL;
    nsock->sent = 0;
    return 0;
}
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
    struct nbd_config *config = nbd->config;
    int result;
    struct nbd_reply reply;
    struct nbd_cmd *cmd;
    struct request *req = NULL;
    u64 handle;
    u16 hwq;
    u32 tag;
    struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
    struct iov_iter to;
    int ret = 0;

    reply.magic = 0;
    iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
    result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
    if (result <= 0) {
        if (!nbd_disconnected(config))
            dev_err(disk_to_dev(nbd->disk),
                "Receive control failed (result %d)\n", result);
        return ERR_PTR(result);
    }

    if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
        dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                (unsigned long)ntohl(reply.magic));
        return ERR_PTR(-EPROTO);
    }

    memcpy(&handle, reply.handle, sizeof(handle));
    tag = nbd_handle_to_tag(handle);
    hwq = blk_mq_unique_tag_to_hwq(tag);
    if (hwq < nbd->tag_set.nr_hw_queues)
        req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
                       blk_mq_unique_tag_to_tag(tag));
    if (!req || !blk_mq_request_started(req)) {
        dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
            tag, req);
        return ERR_PTR(-ENOENT);
    }
    trace_nbd_header_received(req, handle);
    cmd = blk_mq_rq_to_pdu(req);

    mutex_lock(&cmd->lock);
    if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
        dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
            req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
        ret = -ENOENT;
        goto out;
    }
    if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
        dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
            req);
        ret = -ENOENT;
        goto out;
    }
    if (ntohl(reply.error)) {
        dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
            ntohl(reply.error));
        cmd->status = BLK_STS_IOERR;
        goto out;
    }

    dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
    if (rq_data_dir(req) != WRITE) {
        struct req_iterator iter;
        struct bio_vec bvec;

        rq_for_each_segment(bvec, req, iter) {
            iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
            result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
            if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                    result);
                /*
                 * If we've disconnected or we only have 1
                 * connection then we need to make sure we
                 * complete this request, otherwise error out
                 * and let the timeout stuff handle resubmitting
                 * this request onto another connection.
                 */
                if (nbd_disconnected(config) ||
                    config->num_connections <= 1) {
                    cmd->status = BLK_STS_IOERR;
                    goto out;
                }
                ret = -EIO;
                goto out;
            }
            dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                req, bvec.bv_len);
        }
    }
out:
    trace_nbd_payload_received(req, handle);
    mutex_unlock(&cmd->lock);
    return ret ? ERR_PTR(ret) : cmd;
}
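/*
 * Per-connection receive worker: pulls replies off one socket and
 * completes the matching requests until the socket dies, then marks
 * the connection dead and wakes anyone waiting on config->recv_wq.
 */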
static void recv_work(struct work_struct *work)
{
    struct recv_thread_args *args = container_of(work,
                             struct recv_thread_args,
                             work);
    struct nbd_device *nbd = args->nbd;
    struct nbd_config *config = nbd->config;
    struct nbd_cmd *cmd;

    while (1) {
        cmd = nbd_read_stat(nbd, args->index);
        if (IS_ERR(cmd)) {
            struct nbd_sock *nsock = config->socks[args->index];

            mutex_lock(&nsock->tx_lock);
            nbd_mark_nsock_dead(nbd, nsock, 1);
            mutex_unlock(&nsock->tx_lock);
            break;
        }

        blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
    }
    atomic_dec(&config->recv_threads);
    wake_up(&config->recv_wq);
    nbd_config_put(nbd);
    kfree(args);
}
static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

    cmd->status = BLK_STS_IOERR;
    blk_mq_complete_request(req);
    return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
    blk_mq_quiesce_queue(nbd->disk->queue);
    blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
    blk_mq_unquiesce_queue(nbd->disk->queue);
    dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
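/*
 * Pick a live connection to retry on: reuse the cached fallback_index
 * when that socket is still alive, otherwise scan all connections for
 * one that is not dead.
 */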
static int find_fallback(struct nbd_device *nbd, int index)
{
    struct nbd_config *config = nbd->config;
    int new_index = -1;
    struct nbd_sock *nsock = config->socks[index];
    int fallback = nsock->fallback_index;

    if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
        return new_index;

    if (config->num_connections <= 1) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
                    "Attempted send on invalid socket\n");
        return new_index;
    }

    if (fallback >= 0 && fallback < config->num_connections &&
        !config->socks[fallback]->dead)
        return fallback;

    if (nsock->fallback_index < 0 ||
        nsock->fallback_index >= config->num_connections ||
        config->socks[nsock->fallback_index]->dead) {
        int i;
        for (i = 0; i < config->num_connections; i++) {
            if (i == index)
                continue;
            if (!config->socks[i]->dead) {
                new_index = i;
                break;
            }
        }
        nsock->fallback_index = new_index;
        if (new_index < 0) {
            dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Dead connection, failed to find a fallback\n");
            return new_index;
        }
    }
    new_index = nsock->fallback_index;
    return new_index;
}
static int wait_for_reconnect(struct nbd_device *nbd)
{
    struct nbd_config *config = nbd->config;
    if (!config->dead_conn_timeout)
        return 0;
    if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
        return 0;
    return wait_event_timeout(config->conn_wait,
                  atomic_read(&config->live_connections) > 0,
                  config->dead_conn_timeout) > 0;
}
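/*
 * Submission path: take a config reference, pick (or fall back to) a
 * live connection, and hand the command to nbd_send_cmd() under that
 * connection's tx_lock. EAGAIN from the send marks the socket dead and
 * requeues the request.
 */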
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
    struct request *req = blk_mq_rq_from_pdu(cmd);
    struct nbd_device *nbd = cmd->nbd;
    struct nbd_config *config;
    struct nbd_sock *nsock;
    int ret;

    if (!refcount_inc_not_zero(&nbd->config_refs)) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
                    "Socks array is empty\n");
        blk_mq_start_request(req);
        return -EINVAL;
    }
    config = nbd->config;

    if (index >= config->num_connections) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
                    "Attempted send on invalid socket\n");
        nbd_config_put(nbd);
        blk_mq_start_request(req);
        return -EINVAL;
    }
    cmd->status = BLK_STS_OK;
again:
    nsock = config->socks[index];
    mutex_lock(&nsock->tx_lock);
    if (nsock->dead) {
        int old_index = index;
        index = find_fallback(nbd, index);
        mutex_unlock(&nsock->tx_lock);
        if (index < 0) {
            if (wait_for_reconnect(nbd)) {
                index = old_index;
                goto again;
            }
            /* All the sockets should already be down at this point,
             * we just want to make sure that DISCONNECTED is set so
             * any requests that come in that were queued waiting
             * for the reconnect timer don't trigger the timer again
             * and instead just error out.
             */
            sock_shutdown(nbd);
            nbd_config_put(nbd);
            blk_mq_start_request(req);
            return -EIO;
        }
        goto again;
    }

    /* Handle the case that we have a pending request that was partially
     * transmitted that _has_ to be serviced first. We need to call requeue
     * here so that it gets put _after_ the request that is already on the
     * queue.
     */
    blk_mq_start_request(req);
    if (unlikely(nsock->pending && nsock->pending != req)) {
        nbd_requeue_cmd(cmd);
        ret = 0;
        goto out;
    }
    /*
     * Some failures are related to the link going down, so anything that
     * returns EAGAIN can be retried on a different socket.
     */
    ret = nbd_send_cmd(nbd, cmd, index);
    if (ret == -EAGAIN) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
                    "Request send failed, requeueing\n");
        nbd_mark_nsock_dead(nbd, nsock, 1);
        nbd_requeue_cmd(cmd);
        ret = 0;
    }
out:
    mutex_unlock(&nsock->tx_lock);
    nbd_config_put(nbd);
    return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
            const struct blk_mq_queue_data *bd)
{
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
    int ret;

    /*
     * Since we look at the bio's to send the request over the network we
     * need to make sure the completion work doesn't mark this request done
     * before we are done doing our send. This keeps us from dereferencing
     * freed data if we have particularly fast completions (ie we get the
     * completion before we exit sock_xmit on the last bvec) or in the case
     * that the server is misbehaving (or there was an error) before we're
     * done sending everything over the wire.
     */
    mutex_lock(&cmd->lock);
    clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

    /* We can be called directly from the user space process, which means we
     * could possibly have signals pending so our sendmsg will fail. In
     * this case we need to return that we are busy, otherwise error out as
     * appropriate.
     */
    ret = nbd_handle_cmd(cmd, hctx->queue_num);
    if (ret < 0)
        ret = BLK_STS_IOERR;
    else if (!ret)
        ret = BLK_STS_OK;
    mutex_unlock(&cmd->lock);

    return ret;
}
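/*
 * Attach a new socket (looked up from a userspace fd) to the device,
 * growing the socks array by one. On the ioctl path the first caller
 * becomes the owning task_setup; a different task configuring the same
 * device is rejected.
 */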
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
              bool netlink)
{
    struct nbd_config *config = nbd->config;
    struct socket *sock;
    struct nbd_sock **socks;
    struct nbd_sock *nsock;
    int err;

    sock = sockfd_lookup(arg, &err);
    if (!sock)
        return err;

    if (!netlink && !nbd->task_setup &&
        !test_bit(NBD_BOUND, &config->runtime_flags))
        nbd->task_setup = current;

    if (!netlink &&
        (nbd->task_setup != current ||
         test_bit(NBD_BOUND, &config->runtime_flags))) {
        dev_err(disk_to_dev(nbd->disk),
            "Device being setup by another task");
        sockfd_put(sock);
        return -EBUSY;
    }

    socks = krealloc(config->socks, (config->num_connections + 1) *
             sizeof(struct nbd_sock *), GFP_KERNEL);
    if (!socks) {
        sockfd_put(sock);
        return -ENOMEM;
    }
    nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
    if (!nsock) {
        sockfd_put(sock);
        return -ENOMEM;
    }

    config->socks = socks;

    nsock->fallback_index = -1;
    nsock->dead = false;
    mutex_init(&nsock->tx_lock);
    nsock->sock = sock;
    nsock->pending = NULL;
    nsock->sent = 0;
    nsock->cookie = 0;
    socks[config->num_connections++] = nsock;
    atomic_inc(&config->live_connections);

    return 0;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
    struct nbd_config *config = nbd->config;
    struct socket *sock, *old;
    struct recv_thread_args *args;
    int i;
    int err;

    sock = sockfd_lookup(arg, &err);
    if (!sock)
        return err;

    args = kzalloc(sizeof(*args), GFP_KERNEL);
    if (!args) {
        sockfd_put(sock);
        return -ENOMEM;
    }

    for (i = 0; i < config->num_connections; i++) {
        struct nbd_sock *nsock = config->socks[i];

        if (!nsock->dead)
            continue;

        mutex_lock(&nsock->tx_lock);
        if (!nsock->dead) {
            mutex_unlock(&nsock->tx_lock);
            continue;
        }
        sk_set_memalloc(sock->sk);
        if (nbd->tag_set.timeout)
            sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
        atomic_inc(&config->recv_threads);
        refcount_inc(&nbd->config_refs);
        old = nsock->sock;
        nsock->fallback_index = -1;
        nsock->sock = sock;
        nsock->dead = false;
        INIT_WORK(&args->work, recv_work);
        args->index = i;
        args->nbd = nbd;
        nsock->cookie++;
        mutex_unlock(&nsock->tx_lock);
        sockfd_put(old);

        clear_bit(NBD_DISCONNECTED, &config->runtime_flags);

        /* We take the tx_mutex in an error path in the recv_work, so we
         * need to queue_work outside of the tx_mutex.
         */
        queue_work(recv_workqueue, &args->work);

        atomic_inc(&config->live_connections);
        wake_up(&config->conn_wait);
        return 0;
    }
    sockfd_put(sock);
    kfree(args);
    return -ENOSPC;
}
static void nbd_bdev_reset(struct block_device *bdev)
{
    if (bdev->bd_openers > 1)
        return;
    bd_set_size(bdev, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
    struct nbd_config *config = nbd->config;
    if (config->flags & NBD_FLAG_READ_ONLY)
        set_disk_ro(nbd->disk, true);
    else
        set_disk_ro(nbd->disk, false);
    if (config->flags & NBD_FLAG_SEND_TRIM)
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
    if (config->flags & NBD_FLAG_SEND_FLUSH) {
        if (config->flags & NBD_FLAG_SEND_FUA)
            blk_queue_write_cache(nbd->disk->queue, true, true);
        else
            blk_queue_write_cache(nbd->disk->queue, true, false);
    }
    else
        blk_queue_write_cache(nbd->disk->queue, false, false);
}
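/*
 * Ask the server to drop each connection by sending NBD_CMD_DISC on
 * every socket; failures are logged but otherwise ignored.
 */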
static void send_disconnects(struct nbd_device *nbd)
{
    struct nbd_config *config = nbd->config;
    struct nbd_request request = {
        .magic = htonl(NBD_REQUEST_MAGIC),
        .type = htonl(NBD_CMD_DISC),
    };
    struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
    struct iov_iter from;
    int i, ret;

    for (i = 0; i < config->num_connections; i++) {
        struct nbd_sock *nsock = config->socks[i];

        iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
        mutex_lock(&nsock->tx_lock);
        ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
        if (ret <= 0)
            dev_err(disk_to_dev(nbd->disk),
                "Send disconnect failed %d\n", ret);
        mutex_unlock(&nsock->tx_lock);
    }
}

static int nbd_disconnect(struct nbd_device *nbd)
{
    struct nbd_config *config = nbd->config;

    dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
    set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
    send_disconnects(nbd);
    return 0;
}
static void nbd_clear_sock(struct nbd_device *nbd)
{
    sock_shutdown(nbd);
    nbd_clear_que(nbd);
    nbd->task_setup = NULL;
}

static void nbd_config_put(struct nbd_device *nbd)
{
    if (refcount_dec_and_mutex_lock(&nbd->config_refs,
                    &nbd->config_lock)) {
        struct nbd_config *config = nbd->config;
        nbd_dev_dbg_close(nbd);
        nbd_size_clear(nbd);
        if (test_and_clear_bit(NBD_HAS_PID_FILE,
                       &config->runtime_flags))
            device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
        nbd->task_recv = NULL;
        nbd_clear_sock(nbd);
        if (config->num_connections) {
            int i;
            for (i = 0; i < config->num_connections; i++) {
                sockfd_put(config->socks[i]->sock);
                kfree(config->socks[i]);
            }
            kfree(config->socks);
        }
        kfree(nbd->config);
        nbd->config = NULL;

        nbd->tag_set.timeout = 0;
        nbd->disk->queue->limits.discard_granularity = 0;
        nbd->disk->queue->limits.discard_alignment = 0;
        blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
        blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

        mutex_unlock(&nbd->config_lock);
        nbd_put(nbd);
        module_put(THIS_MODULE);
    }
}
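/*
 * Common start path for both the ioctl and netlink interfaces: one
 * hardware queue and one receive worker per connection, plus a "pid"
 * sysfs attribute exposing the configuring task.
 */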
static int nbd_start_device(struct nbd_device *nbd)
{
    struct nbd_config *config = nbd->config;
    int num_connections = config->num_connections;
    int error = 0, i;

    if (nbd->task_recv)
        return -EBUSY;
    if (!config->socks)
        return -EINVAL;
    if (num_connections > 1 &&
        !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
        dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
        return -EINVAL;
    }

    blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
    nbd->task_recv = current;

    nbd_parse_flags(nbd);

    error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
    if (error) {
        dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
        return error;
    }
    set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

    nbd_dev_dbg_init(nbd);
    for (i = 0; i < num_connections; i++) {
        struct recv_thread_args *args;

        args = kzalloc(sizeof(*args), GFP_KERNEL);
        if (!args) {
            sock_shutdown(nbd);
            return -ENOMEM;
        }
        sk_set_memalloc(config->socks[i]->sock->sk);
        if (nbd->tag_set.timeout)
            config->socks[i]->sock->sk->sk_sndtimeo =
                nbd->tag_set.timeout;
        atomic_inc(&config->recv_threads);
        refcount_inc(&nbd->config_refs);
        INIT_WORK(&args->work, recv_work);
        args->nbd = nbd;
        args->index = i;
        queue_work(recv_workqueue, &args->work);
    }
    nbd_size_update(nbd);
    return error;
}
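/*
 * NBD_DO_IT: start the device and then block in the ioctl until all
 * receive workers have exited, so the calling process effectively owns
 * the device for the duration of the connection.
 */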
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
    struct nbd_config *config = nbd->config;
    int ret;

    ret = nbd_start_device(nbd);
    if (ret)
        return ret;

    if (max_part)
        bdev->bd_invalidated = 1;
    mutex_unlock(&nbd->config_lock);
    ret = wait_event_interruptible(config->recv_wq,
                     atomic_read(&config->recv_threads) == 0);
    if (ret)
        sock_shutdown(nbd);
    mutex_lock(&nbd->config_lock);
    nbd_bdev_reset(bdev);
    /* user requested, ignore socket errors */
    if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
        ret = 0;
    if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
        ret = -ETIMEDOUT;
    return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
                 struct block_device *bdev)
{
    sock_shutdown(nbd);
    nbd_bdev_reset(bdev);
    if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
                   &nbd->config->runtime_flags))
        nbd_config_put(nbd);
}
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
               unsigned int cmd, unsigned long arg)
{
    struct nbd_config *config = nbd->config;

    switch (cmd) {
    case NBD_DISCONNECT:
        return nbd_disconnect(nbd);
    case NBD_CLEAR_SOCK:
        nbd_clear_sock_ioctl(nbd, bdev);
        return 0;
    case NBD_SET_SOCK:
        return nbd_add_socket(nbd, arg, false);
    case NBD_SET_BLKSIZE:
        if (!arg || !is_power_of_2(arg) || arg < 512 ||
            arg > PAGE_SIZE)
            return -EINVAL;
        nbd_size_set(nbd, arg,
                 div_s64(config->bytesize, arg));
        return 0;
    case NBD_SET_SIZE:
        nbd_size_set(nbd, config->blksize,
                 div_s64(arg, config->blksize));
        return 0;
    case NBD_SET_SIZE_BLOCKS:
        nbd_size_set(nbd, config->blksize, arg);
        return 0;
    case NBD_SET_TIMEOUT:
        if (arg) {
            nbd->tag_set.timeout = arg * HZ;
            blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
        }
        return 0;

    case NBD_SET_FLAGS:
        config->flags = arg;
        return 0;
    case NBD_DO_IT:
        return nbd_start_device_ioctl(nbd, bdev);
    case NBD_CLEAR_QUE:
        /*
         * This is for compatibility only.  The queue is always cleared
         * by NBD_DO_IT or NBD_CLEAR_SOCK.
         */
        return 0;
    case NBD_PRINT_DEBUG:
        /*
         * For compatibility only, we no longer keep a list of
         * outstanding requests.
         */
        return 0;
    }
    return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
             unsigned int cmd, unsigned long arg)
{
    struct nbd_device *nbd = bdev->bd_disk->private_data;
    struct nbd_config *config = nbd->config;
    int error = -EINVAL;

    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    /* The block layer will pass back some non-nbd ioctls in case we have
     * special handling for them, but we don't so just return an error.
     */
    if (_IOC_TYPE(cmd) != 0xab)
        return -EINVAL;

    mutex_lock(&nbd->config_lock);

    /* Don't allow ioctl operations on a nbd device that was created with
     * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
     */
    if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
        (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
    else
        dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
    mutex_unlock(&nbd->config_lock);
    return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
    struct nbd_config *config;

    config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
    if (!config)
        return NULL;
    atomic_set(&config->recv_threads, 0);
    init_waitqueue_head(&config->recv_wq);
    init_waitqueue_head(&config->conn_wait);
    config->blksize = 1024;
    atomic_set(&config->live_connections, 0);
    try_module_get(THIS_MODULE);
    return config;
}
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
    struct nbd_device *nbd;
    int ret = 0;

    mutex_lock(&nbd_index_mutex);
    nbd = bdev->bd_disk->private_data;
    if (!nbd) {
        ret = -ENXIO;
        goto out;
    }
    if (!refcount_inc_not_zero(&nbd->refs)) {
        ret = -ENXIO;
        goto out;
    }
    if (!refcount_inc_not_zero(&nbd->config_refs)) {
        struct nbd_config *config;

        mutex_lock(&nbd->config_lock);
        if (refcount_inc_not_zero(&nbd->config_refs)) {
            mutex_unlock(&nbd->config_lock);
            goto out;
        }
        config = nbd->config = nbd_alloc_config();
        if (!config) {
            ret = -ENOMEM;
            mutex_unlock(&nbd->config_lock);
            goto out;
        }
        refcount_set(&nbd->config_refs, 1);
        refcount_inc(&nbd->refs);
        mutex_unlock(&nbd->config_lock);
        bdev->bd_invalidated = 1;
    } else if (nbd_disconnected(nbd->config)) {
        bdev->bd_invalidated = 1;
    }
out:
    mutex_unlock(&nbd_index_mutex);
    return ret;
}
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
    struct nbd_device *nbd = disk->private_data;
    struct block_device *bdev = bdget_disk(disk, 0);

    if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
            bdev->bd_openers == 0)
        nbd_disconnect_and_put(nbd);

    nbd_config_put(nbd);
    nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
    .owner =	THIS_MODULE,
    .open =		nbd_open,
    .release =	nbd_release,
    .ioctl =	nbd_ioctl,
    .compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
    struct nbd_device *nbd = s->private;

    if (nbd->task_recv)
        seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

    return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
    return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
    .open = nbd_dbg_tasks_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
    struct nbd_device *nbd = s->private;
    u32 flags = nbd->config->flags;

    seq_printf(s, "Hex: 0x%08x\n\n", flags);

    seq_puts(s, "Known flags:\n");

    if (flags & NBD_FLAG_HAS_FLAGS)
        seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
    if (flags & NBD_FLAG_READ_ONLY)
        seq_puts(s, "NBD_FLAG_READ_ONLY\n");
    if (flags & NBD_FLAG_SEND_FLUSH)
        seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
    if (flags & NBD_FLAG_SEND_FUA)
        seq_puts(s, "NBD_FLAG_SEND_FUA\n");
    if (flags & NBD_FLAG_SEND_TRIM)
        seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

    return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
    return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
    .open = nbd_dbg_flags_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
    struct dentry *dir;
    struct nbd_config *config = nbd->config;

    if (!nbd_dbg_dir)
        return -EIO;

    dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
    if (!dir) {
        dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
            nbd_name(nbd));
        return -EIO;
    }
    config->dbg_dir = dir;

    debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
    debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
    debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
    debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
    debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

    return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
    debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
    struct dentry *dbg_dir;

    dbg_dir = debugfs_create_dir("nbd", NULL);
    if (!dbg_dir)
        return -EIO;

    nbd_dbg_dir = dbg_dir;

    return 0;
}

static void nbd_dbg_close(void)
{
    debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
    return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
    return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
    cmd->nbd = set->driver_data;
    cmd->flags = 0;
    mutex_init(&cmd->lock);
    return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
    .queue_rq	= nbd_queue_rq,
    .complete	= nbd_complete_rq,
    .init_request	= nbd_init_request,
    .timeout	= nbd_xmit_timeout,
};
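/*
 * Allocate a new nbd_device plus its gendisk and blk-mq queue and
 * publish it in nbd_index_idr; index == -1 asks the IDR for the first
 * free slot.
 */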
static int nbd_dev_add(int index)
{
    struct nbd_device *nbd;
    struct gendisk *disk;
    struct request_queue *q;
    int err = -ENOMEM;

    nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
    if (!nbd)
        goto out;

    disk = alloc_disk(1 << part_shift);
    if (!disk)
        goto out_free_nbd;

    if (index >= 0) {
        err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
                GFP_KERNEL);
        if (err == -ENOSPC)
            err = -EEXIST;
    } else {
        err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
        if (err >= 0)
            index = err;
    }
    if (err < 0)
        goto out_free_disk;

    nbd->index = index;
    nbd->disk = disk;
    nbd->tag_set.ops = &nbd_mq_ops;
    nbd->tag_set.nr_hw_queues = 1;
    nbd->tag_set.queue_depth = 128;
    nbd->tag_set.numa_node = NUMA_NO_NODE;
    nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
    nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
        BLK_MQ_F_BLOCKING;
    nbd->tag_set.driver_data = nbd;

    err = blk_mq_alloc_tag_set(&nbd->tag_set);
    if (err)
        goto out_free_idr;

    q = blk_mq_init_queue(&nbd->tag_set);
    if (IS_ERR(q)) {
        err = PTR_ERR(q);
        goto out_free_tags;
    }
    disk->queue = q;

    /*
     * Tell the block layer that we are not a rotational device
     */
    blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
    blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
    disk->queue->limits.discard_granularity = 0;
    disk->queue->limits.discard_alignment = 0;
    blk_queue_max_discard_sectors(disk->queue, 0);
    blk_queue_max_segment_size(disk->queue, UINT_MAX);
    blk_queue_max_segments(disk->queue, USHRT_MAX);
    blk_queue_max_hw_sectors(disk->queue, 65536);
    disk->queue->limits.max_sectors = 256;

    mutex_init(&nbd->config_lock);
    refcount_set(&nbd->config_refs, 0);
    refcount_set(&nbd->refs, 1);
    INIT_LIST_HEAD(&nbd->list);
    disk->major = NBD_MAJOR;
    disk->first_minor = index << part_shift;
    disk->fops = &nbd_fops;
    disk->private_data = nbd;
    sprintf(disk->disk_name, "nbd%d", index);
    add_disk(disk);
    nbd_total_devices++;
    return index;

out_free_tags:
    blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
    idr_remove(&nbd_index_idr, index);
out_free_disk:
    put_disk(disk);
out_free_nbd:
    kfree(nbd);
out:
    return err;
}
static int find_free_cb(int id, void *ptr, void *data)
{
    struct nbd_device *nbd = ptr;
    struct nbd_device **found = data;

    if (!refcount_read(&nbd->config_refs)) {
        *found = nbd;
        return 1;
    }
    return 0;
}

/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
    [NBD_ATTR_INDEX]		= { .type = NLA_U32 },
    [NBD_ATTR_SIZE_BYTES]		= { .type = NLA_U64 },
    [NBD_ATTR_BLOCK_SIZE_BYTES]	= { .type = NLA_U64 },
    [NBD_ATTR_TIMEOUT]		= { .type = NLA_U64 },
    [NBD_ATTR_SERVER_FLAGS]		= { .type = NLA_U64 },
    [NBD_ATTR_CLIENT_FLAGS]		= { .type = NLA_U64 },
    [NBD_ATTR_SOCKETS]		= { .type = NLA_NESTED},
    [NBD_ATTR_DEAD_CONN_TIMEOUT]	= { .type = NLA_U64 },
    [NBD_ATTR_DEVICE_LIST]		= { .type = NLA_NESTED},
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
    [NBD_SOCK_FD]			= { .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
    [NBD_DEVICE_INDEX]		= { .type = NLA_U32 },
    [NBD_DEVICE_CONNECTED]		= { .type = NLA_U8 },
};
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
    struct nbd_device *nbd = NULL;
    struct nbd_config *config;
    int index = -1;
    int ret;
    bool put_dev = false;

    if (!netlink_capable(skb, CAP_SYS_ADMIN))
        return -EPERM;

    if (info->attrs[NBD_ATTR_INDEX])
        index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
    if (!info->attrs[NBD_ATTR_SOCKETS]) {
        printk(KERN_ERR "nbd: must specify at least one socket\n");
        return -EINVAL;
    }
    if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
        printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
        return -EINVAL;
    }
again:
    mutex_lock(&nbd_index_mutex);
    if (index == -1) {
        ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
        if (ret == 0) {
            int new_index;
            new_index = nbd_dev_add(-1);
            if (new_index < 0) {
                mutex_unlock(&nbd_index_mutex);
                printk(KERN_ERR "nbd: failed to add new device\n");
                return new_index;
            }
            nbd = idr_find(&nbd_index_idr, new_index);
        }
    } else {
        nbd = idr_find(&nbd_index_idr, index);
        if (!nbd) {
            ret = nbd_dev_add(index);
            if (ret < 0) {
                mutex_unlock(&nbd_index_mutex);
                printk(KERN_ERR "nbd: failed to add new device\n");
                return ret;
            }
            nbd = idr_find(&nbd_index_idr, index);
        }
    }
    if (!nbd) {
        printk(KERN_ERR "nbd: couldn't find device at index %d\n",
               index);
        mutex_unlock(&nbd_index_mutex);
        return -EINVAL;
    }
    if (!refcount_inc_not_zero(&nbd->refs)) {
        mutex_unlock(&nbd_index_mutex);
        if (index == -1)
            goto again;
        printk(KERN_ERR "nbd: device at index %d is going down\n",
               index);
        return -EINVAL;
    }
    mutex_unlock(&nbd_index_mutex);

    mutex_lock(&nbd->config_lock);
    if (refcount_read(&nbd->config_refs)) {
        mutex_unlock(&nbd->config_lock);
        nbd_put(nbd);
        if (index == -1)
            goto again;
        printk(KERN_ERR "nbd: nbd%d already in use\n", index);
        return -EBUSY;
    }
    if (WARN_ON(nbd->config)) {
        mutex_unlock(&nbd->config_lock);
        nbd_put(nbd);
        return -EINVAL;
    }
    config = nbd->config = nbd_alloc_config();
    if (!nbd->config) {
        mutex_unlock(&nbd->config_lock);
        nbd_put(nbd);
        printk(KERN_ERR "nbd: couldn't allocate config\n");
        return -ENOMEM;
    }
    refcount_set(&nbd->config_refs, 1);
    set_bit(NBD_BOUND, &config->runtime_flags);

    if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
        u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
        nbd_size_set(nbd, config->blksize,
                 div64_u64(bytes, config->blksize));
    }
    if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
        u64 bsize =
            nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
        nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
    }
    if (info->attrs[NBD_ATTR_TIMEOUT]) {
        u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
        nbd->tag_set.timeout = timeout * HZ;
        blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
    }
    if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
        config->dead_conn_timeout =
            nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
        config->dead_conn_timeout *= HZ;
    }
    if (info->attrs[NBD_ATTR_SERVER_FLAGS])
        config->flags =
            nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
    if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
        u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
        if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
            set_bit(NBD_DESTROY_ON_DISCONNECT,
                &config->runtime_flags);
            put_dev = true;
        }
        if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
            set_bit(NBD_DISCONNECT_ON_CLOSE,
                &config->runtime_flags);
        }
    }

    if (info->attrs[NBD_ATTR_SOCKETS]) {
        struct nlattr *attr;
        int rem, fd;

        nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
                    rem) {
            struct nlattr *socks[NBD_SOCK_MAX+1];

            if (nla_type(attr) != NBD_SOCK_ITEM) {
                printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
                ret = -EINVAL;
                goto out;
            }
            ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
                              attr,
                              nbd_sock_policy,
                              info->extack);
            if (ret != 0) {
                printk(KERN_ERR "nbd: error processing sock list\n");
                ret = -EINVAL;
                goto out;
            }
            if (!socks[NBD_SOCK_FD])
                continue;
            fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
            ret = nbd_add_socket(nbd, fd, true);
            if (ret)
                goto out;
        }
    }
    ret = nbd_start_device(nbd);
out:
    mutex_unlock(&nbd->config_lock);
    if (!ret) {
        set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
        refcount_inc(&nbd->config_refs);
        nbd_connect_reply(info, nbd->index);
    }
    nbd_config_put(nbd);
    if (put_dev)
        nbd_put(nbd);
    return ret;
}
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
    mutex_lock(&nbd->config_lock);
    nbd_disconnect(nbd);
    nbd_clear_sock(nbd);
    mutex_unlock(&nbd->config_lock);
    if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
                   &nbd->config->runtime_flags))
        nbd_config_put(nbd);
}

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
    struct nbd_device *nbd;
    int index;

    if (!netlink_capable(skb, CAP_SYS_ADMIN))
        return -EPERM;

    if (!info->attrs[NBD_ATTR_INDEX]) {
        printk(KERN_ERR "nbd: must specify an index to disconnect\n");
        return -EINVAL;
    }
    index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
    mutex_lock(&nbd_index_mutex);
    nbd = idr_find(&nbd_index_idr, index);
    if (!nbd) {
        mutex_unlock(&nbd_index_mutex);
        printk(KERN_ERR "nbd: couldn't find device at index %d\n",
               index);
        return -EINVAL;
    }
    if (!refcount_inc_not_zero(&nbd->refs)) {
        mutex_unlock(&nbd_index_mutex);
        printk(KERN_ERR "nbd: device at index %d is going down\n",
               index);
        return -EINVAL;
    }
    mutex_unlock(&nbd_index_mutex);
    if (!refcount_inc_not_zero(&nbd->config_refs)) {
        nbd_put(nbd);
        return 0;
    }
    nbd_disconnect_and_put(nbd);
    nbd_config_put(nbd);
    nbd_put(nbd);
    return 0;
}
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
    struct nbd_device *nbd = NULL;
    struct nbd_config *config;
    int index;
    int ret = 0;
    bool put_dev = false;

    if (!netlink_capable(skb, CAP_SYS_ADMIN))
        return -EPERM;

    if (!info->attrs[NBD_ATTR_INDEX]) {
        printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
        return -EINVAL;
    }
    index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
    mutex_lock(&nbd_index_mutex);
    nbd = idr_find(&nbd_index_idr, index);
    if (!nbd) {
        mutex_unlock(&nbd_index_mutex);
        printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
               index);
        return -EINVAL;
    }
    if (!refcount_inc_not_zero(&nbd->refs)) {
        mutex_unlock(&nbd_index_mutex);
        printk(KERN_ERR "nbd: device at index %d is going down\n",
               index);
        return -EINVAL;
    }
    mutex_unlock(&nbd_index_mutex);

    if (!refcount_inc_not_zero(&nbd->config_refs)) {
        dev_err(nbd_to_dev(nbd),
            "not configured, cannot reconfigure\n");
        nbd_put(nbd);
        return -EINVAL;
    }

    mutex_lock(&nbd->config_lock);
    config = nbd->config;
    if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
        !nbd->task_recv) {
        dev_err(nbd_to_dev(nbd),
            "not configured, cannot reconfigure\n");
        ret = -EINVAL;
        goto out;
    }

    if (info->attrs[NBD_ATTR_TIMEOUT]) {
        u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
        nbd->tag_set.timeout = timeout * HZ;
        blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
    }
    if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
        config->dead_conn_timeout =
            nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
        config->dead_conn_timeout *= HZ;
    }
    if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
        u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
        if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
            if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
                          &config->runtime_flags))
                put_dev = true;
        } else {
            if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
                           &config->runtime_flags))
                refcount_inc(&nbd->refs);
        }

        if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
            set_bit(NBD_DISCONNECT_ON_CLOSE,
                    &config->runtime_flags);
        } else {
            clear_bit(NBD_DISCONNECT_ON_CLOSE,
                    &config->runtime_flags);
        }
    }

    if (info->attrs[NBD_ATTR_SOCKETS]) {
        struct nlattr *attr;
        int rem, fd;

        nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
                    rem) {
            struct nlattr *socks[NBD_SOCK_MAX+1];

            if (nla_type(attr) != NBD_SOCK_ITEM) {
                printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
                ret = -EINVAL;
                goto out;
            }
            ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
                              attr,
                              nbd_sock_policy,
                              info->extack);
            if (ret != 0) {
                printk(KERN_ERR "nbd: error processing sock list\n");
                ret = -EINVAL;
                goto out;
            }
            if (!socks[NBD_SOCK_FD])
                continue;
            fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
            ret = nbd_reconnect_socket(nbd, fd);
            if (ret) {
                if (ret == -ENOSPC)
                    ret = 0;
                goto out;
            }
            dev_info(nbd_to_dev(nbd), "reconnected socket\n");
        }
    }
out:
    mutex_unlock(&nbd->config_lock);
    nbd_config_put(nbd);
    nbd_put(nbd);
    if (put_dev)
        nbd_put(nbd);
    return ret;
}
static const struct genl_ops nbd_connect_genl_ops[] = {
    {
        .cmd	= NBD_CMD_CONNECT,
        .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
        .doit	= nbd_genl_connect,
    },
    {
        .cmd	= NBD_CMD_DISCONNECT,
        .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
        .doit	= nbd_genl_disconnect,
    },
    {
        .cmd	= NBD_CMD_RECONFIGURE,
        .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
        .doit	= nbd_genl_reconfigure,
    },
    {
        .cmd	= NBD_CMD_STATUS,
        .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
        .doit	= nbd_genl_status,
    },
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
    { .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
    .hdrsize	= 0,
    .name		= NBD_GENL_FAMILY_NAME,
    .version	= NBD_GENL_VERSION,
    .module		= THIS_MODULE,
    .ops		= nbd_connect_genl_ops,
    .n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
    .maxattr	= NBD_ATTR_MAX,
    .policy = nbd_attr_policy,
    .mcgrps		= nbd_mcast_grps,
    .n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
    struct nlattr *dev_opt;
    u8 connected = 0;
    int ret;

    /* This is a little racy, but for status it's ok. The
     * reason we don't take a ref here is because we can't
     * take a ref in the index == -1 case as we would need
     * to put under the nbd_index_mutex, which could
     * deadlock if we are configured to remove ourselves
     * once we're disconnected.
     */
    if (refcount_read(&nbd->config_refs))
        connected = 1;
    dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
    if (!dev_opt)
        return -EMSGSIZE;
    ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
    if (ret)
        return -EMSGSIZE;
    ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
             connected);
    if (ret)
        return -EMSGSIZE;
    nla_nest_end(reply, dev_opt);
    return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
    struct nbd_device *nbd = ptr;
    return populate_nbd_status(nbd, (struct sk_buff *)data);
}
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
    struct nlattr *dev_list;
    struct sk_buff *reply;
    void *reply_head;
    size_t msg_size;
    int index = -1;
    int ret = -ENOMEM;

    if (info->attrs[NBD_ATTR_INDEX])
        index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

    mutex_lock(&nbd_index_mutex);

    msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
                  nla_attr_size(sizeof(u8)));
    msg_size *= (index == -1) ? nbd_total_devices : 1;

    reply = genlmsg_new(msg_size, GFP_KERNEL);
    if (!reply)
        goto out;
    reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
                       NBD_CMD_STATUS);
    if (!reply_head) {
        nlmsg_free(reply);
        goto out;
    }

    dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
    if (index == -1) {
        ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
        if (ret) {
            nlmsg_free(reply);
            goto out;
        }
    } else {
        struct nbd_device *nbd;
        nbd = idr_find(&nbd_index_idr, index);
        if (nbd) {
            ret = populate_nbd_status(nbd, reply);
            if (ret) {
                nlmsg_free(reply);
                goto out;
            }
        }
    }
    nla_nest_end(reply, dev_list);
    genlmsg_end(reply, reply_head);
    ret = genlmsg_reply(reply, info);
out:
    mutex_unlock(&nbd_index_mutex);
    return ret;
}
static void nbd_connect_reply(struct genl_info *info, int index)
{
    struct sk_buff *skb;
    void *msg_head;
    int ret;

    skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
    if (!skb)
        return;
    msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
                     NBD_CMD_CONNECT);
    if (!msg_head) {
        nlmsg_free(skb);
        return;
    }
    ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
    if (ret) {
        nlmsg_free(skb);
        return;
    }
    genlmsg_end(skb, msg_head);
    genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
    struct sk_buff *skb;
    void *msg_head;
    int ret;

    skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
    if (!skb)
        return;
    msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
                   NBD_CMD_LINK_DEAD);
    if (!msg_head) {
        nlmsg_free(skb);
        return;
    }
    ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
    if (ret) {
        nlmsg_free(skb);
        return;
    }
    genlmsg_end(skb, msg_head);
    genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
    struct link_dead_args *args = container_of(work, struct link_dead_args,
                           work);
    nbd_mcast_index(args->index);
    kfree(args);
}
static int __init nbd_init(void)
{
    int i;

    BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

    if (max_part < 0) {
        printk(KERN_ERR "nbd: max_part must be >= 0\n");
        return -EINVAL;
    }

    part_shift = 0;
    if (max_part > 0) {
        part_shift = fls(max_part);

        /*
         * Adjust max_part according to part_shift as it is exported
         * to user space so that user can know the max number of
         * partition kernel should be able to manage.
         *
         * Note that -1 is required because partition 0 is reserved
         * for the whole disk.
         */
        max_part = (1UL << part_shift) - 1;
    }

    if ((1UL << part_shift) > DISK_MAX_PARTS)
        return -EINVAL;

    if (nbds_max > 1UL << (MINORBITS - part_shift))
        return -EINVAL;
    recv_workqueue = alloc_workqueue("knbd-recv",
                     WQ_MEM_RECLAIM | WQ_HIGHPRI |
                     WQ_UNBOUND, 0);
    if (!recv_workqueue)
        return -ENOMEM;

    if (register_blkdev(NBD_MAJOR, "nbd")) {
        destroy_workqueue(recv_workqueue);
        return -EIO;
    }

    if (genl_register_family(&nbd_genl_family)) {
        unregister_blkdev(NBD_MAJOR, "nbd");
        destroy_workqueue(recv_workqueue);
        return -EINVAL;
    }
    nbd_dbg_init();

    mutex_lock(&nbd_index_mutex);
    for (i = 0; i < nbds_max; i++)
        nbd_dev_add(i);
    mutex_unlock(&nbd_index_mutex);
    return 0;
}
static int nbd_exit_cb(int id, void *ptr, void *data)
{
    struct list_head *list = (struct list_head *)data;
    struct nbd_device *nbd = ptr;

    list_add_tail(&nbd->list, list);
    return 0;
}

static void __exit nbd_cleanup(void)
{
    struct nbd_device *nbd;
    LIST_HEAD(del_list);

    nbd_dbg_close();

    mutex_lock(&nbd_index_mutex);
    idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
    mutex_unlock(&nbd_index_mutex);

    while (!list_empty(&del_list)) {
        nbd = list_first_entry(&del_list, struct nbd_device, list);
        list_del_init(&nbd->list);
        if (refcount_read(&nbd->refs) != 1)
            printk(KERN_ERR "nbd: possibly leaking a device\n");
        nbd_put(nbd);
    }

    idr_destroy(&nbd_index_idr);
    genl_unregister_family(&nbd_genl_family);
    destroy_workqueue(recv_workqueue);
    unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");