1 // SPDX-License-Identifier: GPL-2.0
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
16 #include <net/busy_poll.h>
21 struct nvme_tcp_queue;
23 /* Define the socket priority to use for connections where it is desirable
24 * that the NIC consider performing optimized packet processing or filtering.
25 * A non-zero value is sufficient to indicate general consideration of any
26 * possible optimization. Making it a module param allows for alternative
27 * values that may be unique for some NIC implementations.
29 static int so_priority;
30 module_param(so_priority, int, 0644);
31 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimization priority");
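/*
 * Example (illustrative; the exact module name depends on the build): a
 * non-default priority can be set at load time, e.g.
 * "modprobe nvme_tcp so_priority=6".
 */

/*
 * Per-request send state machine: the command capsule PDU is sent first;
 * write data then goes out either inline right after the command PDU or
 * via an H2C data PDU solicited by an R2T, followed by the data digest
 * when data digests are enabled (see NVME_TCP_SEND_DATA and
 * NVME_TCP_SEND_DDGST below).
 */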
33 enum nvme_tcp_send_state {
34 NVME_TCP_SEND_CMD_PDU = 0,
35 NVME_TCP_SEND_H2C_PDU,
40 struct nvme_tcp_request {
41 struct nvme_request req;
43 struct nvme_tcp_queue *queue;
48 struct list_head entry;
57 enum nvme_tcp_send_state state;
60 enum nvme_tcp_queue_flags {
61 NVME_TCP_Q_ALLOCATED = 0,
63 NVME_TCP_Q_POLLING = 2,
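/*
 * Receive-side state machine: a PDU header is read first, any C2H data is
 * then copied into the request's bio pages, and finally the trailing data
 * digest is collected and verified (see nvme_tcp_recv_state()).
 */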
66 enum nvme_tcp_recv_state {
67 NVME_TCP_RECV_PDU = 0,
73 struct nvme_tcp_queue {
75 struct work_struct io_work;
79 struct mutex send_mutex;
80 struct list_head send_list;
86 size_t data_remaining;
87 size_t ddgst_remaining;
91 struct nvme_tcp_request *request;
94 size_t cmnd_capsule_len;
95 struct nvme_tcp_ctrl *ctrl;
101 struct ahash_request *rcv_hash;
102 struct ahash_request *snd_hash;
106 struct page_frag_cache pf_cache;
108 void (*state_change)(struct sock *);
109 void (*data_ready)(struct sock *);
110 void (*write_space)(struct sock *);
113 struct nvme_tcp_ctrl {
114 /* read only in the hot path */
115 struct nvme_tcp_queue *queues;
116 struct blk_mq_tag_set tag_set;
118 /* other member variables */
119 struct list_head list;
120 struct blk_mq_tag_set admin_tag_set;
121 struct sockaddr_storage addr;
122 struct sockaddr_storage src_addr;
123 struct nvme_ctrl ctrl;
125 struct work_struct err_work;
126 struct delayed_work connect_work;
127 struct nvme_tcp_request async_req;
128 u32 io_queues[HCTX_MAX_TYPES];
131 static LIST_HEAD(nvme_tcp_ctrl_list);
132 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
133 static struct workqueue_struct *nvme_tcp_wq;
134 static struct blk_mq_ops nvme_tcp_mq_ops;
135 static struct blk_mq_ops nvme_tcp_admin_mq_ops;
136 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
138 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
140 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
143 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
145 return queue - queue->ctrl->queues;
148 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
150 u32 queue_idx = nvme_tcp_queue_id(queue);
153 return queue->ctrl->admin_tag_set.tags[queue_idx];
154 return queue->ctrl->tag_set.tags[queue_idx - 1];
157 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
159 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
162 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
164 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
167 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
169 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
172 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
174 return req == &req->queue->ctrl->async_req;
177 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
181 if (unlikely(nvme_tcp_async_req(req)))
182 return false; /* async events don't have a request */
184 rq = blk_mq_rq_from_pdu(req);
186 return rq_data_dir(rq) == WRITE && req->data_len &&
187 req->data_len <= nvme_tcp_inline_data_size(req->queue);
190 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
192 return req->iter.bvec->bv_page;
195 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
197 return req->iter.bvec->bv_offset + req->iter.iov_offset;
200 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
202 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
203 req->pdu_len - req->pdu_sent);
206 static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
208 return req->iter.iov_offset;
211 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
213 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
214 req->pdu_len - req->pdu_sent : 0;
217 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
220 return nvme_tcp_pdu_data_left(req) <= len;
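/*
 * Point req->iter at the request payload: either the special payload bvec
 * (RQF_SPECIAL_PAYLOAD requests) or the bvecs of the current bio, taking
 * into account any part of that bio that was already consumed.
 */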
223 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
226 struct request *rq = blk_mq_rq_from_pdu(req);
232 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
233 vec = &rq->special_vec;
235 size = blk_rq_payload_bytes(rq);
238 struct bio *bio = req->curr_bio;
240 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
241 nsegs = bio_segments(bio);
242 size = bio->bi_iter.bi_size;
243 offset = bio->bi_iter.bi_bvec_done;
246 iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
247 req->iter.iov_offset = offset;
250 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
253 req->data_sent += len;
254 req->pdu_sent += len;
255 iov_iter_advance(&req->iter, len);
256 if (!iov_iter_count(&req->iter) &&
257 req->data_sent < req->data_len) {
258 req->curr_bio = req->curr_bio->bi_next;
259 nvme_tcp_init_iter(req, WRITE);
263 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
266 struct nvme_tcp_queue *queue = req->queue;
269 spin_lock(&queue->lock);
270 empty = list_empty(&queue->send_list) && !queue->request;
271 list_add_tail(&req->entry, &queue->send_list);
272 spin_unlock(&queue->lock);
275 * If we're the first on the send_list, try to send directly;
276 * otherwise queue io_work. Also, only do that if we are on the
277 * same cpu, so we don't introduce contention.
279 if (queue->io_cpu == smp_processor_id() &&
280 sync && empty && mutex_trylock(&queue->send_mutex)) {
281 nvme_tcp_try_send(queue);
282 mutex_unlock(&queue->send_mutex);
284 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
288 static inline struct nvme_tcp_request *
289 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
291 struct nvme_tcp_request *req;
293 spin_lock(&queue->lock);
294 req = list_first_entry_or_null(&queue->send_list,
295 struct nvme_tcp_request, entry);
297 list_del(&req->entry);
298 spin_unlock(&queue->lock);
303 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
306 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
307 crypto_ahash_final(hash);
310 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
311 struct page *page, off_t off, size_t len)
313 struct scatterlist sg;
315 sg_init_marker(&sg, 1);
316 sg_set_page(&sg, page, len, off);
317 ahash_request_set_crypt(hash, &sg, NULL, len);
318 crypto_ahash_update(hash);
321 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
322 void *pdu, size_t len)
324 struct scatterlist sg;
326 sg_init_one(&sg, pdu, len);
327 ahash_request_set_crypt(hash, &sg, pdu + len, len);
328 crypto_ahash_digest(hash);
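/*
 * Header digest handling: the CRC32C digest follows the PDU header on the
 * wire, so verification saves the received value, recomputes the digest
 * over the header in place, and compares the two.
 */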
331 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
332 void *pdu, size_t pdu_len)
334 struct nvme_tcp_hdr *hdr = pdu;
338 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
339 dev_err(queue->ctrl->ctrl.device,
340 "queue %d: header digest flag is cleared\n",
341 nvme_tcp_queue_id(queue));
345 recv_digest = *(__le32 *)(pdu + hdr->hlen);
346 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
347 exp_digest = *(__le32 *)(pdu + hdr->hlen);
348 if (recv_digest != exp_digest) {
349 dev_err(queue->ctrl->ctrl.device,
350 "header digest error: recv %#x expected %#x\n",
351 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
358 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
360 struct nvme_tcp_hdr *hdr = pdu;
361 u8 digest_len = nvme_tcp_hdgst_len(queue);
364 len = le32_to_cpu(hdr->plen) - hdr->hlen -
365 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
367 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
368 dev_err(queue->ctrl->ctrl.device,
369 "queue %d: data digest flag is cleared\n",
370 nvme_tcp_queue_id(queue));
373 crypto_ahash_init(queue->rcv_hash);
378 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
379 struct request *rq, unsigned int hctx_idx)
381 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
383 page_frag_free(req->pdu);
386 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
387 struct request *rq, unsigned int hctx_idx,
388 unsigned int numa_node)
390 struct nvme_tcp_ctrl *ctrl = set->driver_data;
391 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
392 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
393 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
394 u8 hdgst = nvme_tcp_hdgst_len(queue);
396 req->pdu = page_frag_alloc(&queue->pf_cache,
397 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
398 GFP_KERNEL | __GFP_ZERO);
403 nvme_req(rq)->ctrl = &ctrl->ctrl;
408 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
409 unsigned int hctx_idx)
411 struct nvme_tcp_ctrl *ctrl = data;
412 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
414 hctx->driver_data = queue;
418 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
419 unsigned int hctx_idx)
421 struct nvme_tcp_ctrl *ctrl = data;
422 struct nvme_tcp_queue *queue = &ctrl->queues[0];
424 hctx->driver_data = queue;
428 static enum nvme_tcp_recv_state
429 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
431 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
432 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
436 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
438 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
439 nvme_tcp_hdgst_len(queue);
440 queue->pdu_offset = 0;
441 queue->data_remaining = -1;
442 queue->ddgst_remaining = 0;
445 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
447 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
450 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
453 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
454 struct nvme_completion *cqe)
458 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
460 dev_err(queue->ctrl->ctrl.device,
461 "queue %d tag 0x%x not found\n",
462 nvme_tcp_queue_id(queue), cqe->command_id);
463 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
467 nvme_end_request(rq, cqe->status, cqe->result);
473 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
474 struct nvme_tcp_data_pdu *pdu)
478 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
480 dev_err(queue->ctrl->ctrl.device,
481 "queue %d tag %#x not found\n",
482 nvme_tcp_queue_id(queue), pdu->command_id);
486 if (!blk_rq_payload_bytes(rq)) {
487 dev_err(queue->ctrl->ctrl.device,
488 "queue %d tag %#x unexpected data\n",
489 nvme_tcp_queue_id(queue), rq->tag);
493 queue->data_remaining = le32_to_cpu(pdu->data_length);
495 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
496 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
497 dev_err(queue->ctrl->ctrl.device,
498 "queue %d tag %#x SUCCESS set but not last PDU\n",
499 nvme_tcp_queue_id(queue), rq->tag);
500 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
507 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
508 struct nvme_tcp_rsp_pdu *pdu)
510 struct nvme_completion *cqe = &pdu->cqe;
514 * AEN requests are special as they don't time out and can
515 * survive any kind of queue freeze and often don't respond to
516 * aborts. We don't even bother to allocate a struct request
517 * for them but rather special case them here.
519 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
521 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
524 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
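/*
 * Build an H2C data PDU in response to an R2T: sanity check the solicited
 * length and offset against what has already been sent, then fill in the
 * header flags, transfer tag and data window for the next send.
 */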
529 static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
530 struct nvme_tcp_r2t_pdu *pdu)
532 struct nvme_tcp_data_pdu *data = req->pdu;
533 struct nvme_tcp_queue *queue = req->queue;
534 struct request *rq = blk_mq_rq_from_pdu(req);
535 u8 hdgst = nvme_tcp_hdgst_len(queue);
536 u8 ddgst = nvme_tcp_ddgst_len(queue);
538 req->pdu_len = le32_to_cpu(pdu->r2t_length);
541 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
542 dev_err(queue->ctrl->ctrl.device,
543 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
544 rq->tag, req->pdu_len, req->data_len,
549 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
550 dev_err(queue->ctrl->ctrl.device,
551 "req %d unexpected r2t offset %u (expected %zu)\n",
552 rq->tag, le32_to_cpu(pdu->r2t_offset),
557 memset(data, 0, sizeof(*data));
558 data->hdr.type = nvme_tcp_h2c_data;
559 data->hdr.flags = NVME_TCP_F_DATA_LAST;
560 if (queue->hdr_digest)
561 data->hdr.flags |= NVME_TCP_F_HDGST;
562 if (queue->data_digest)
563 data->hdr.flags |= NVME_TCP_F_DDGST;
564 data->hdr.hlen = sizeof(*data);
565 data->hdr.pdo = data->hdr.hlen + hdgst;
567 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
568 data->ttag = pdu->ttag;
569 data->command_id = rq->tag;
570 data->data_offset = cpu_to_le32(req->data_sent);
571 data->data_length = cpu_to_le32(req->pdu_len);
575 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
576 struct nvme_tcp_r2t_pdu *pdu)
578 struct nvme_tcp_request *req;
582 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
584 dev_err(queue->ctrl->ctrl.device,
585 "queue %d tag %#x not found\n",
586 nvme_tcp_queue_id(queue), pdu->command_id);
589 req = blk_mq_rq_to_pdu(rq);
591 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
595 req->state = NVME_TCP_SEND_H2C_PDU;
598 nvme_tcp_queue_request(req, false);
603 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
604 unsigned int *offset, size_t *len)
606 struct nvme_tcp_hdr *hdr;
607 char *pdu = queue->pdu;
608 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
611 ret = skb_copy_bits(skb, *offset,
612 &pdu[queue->pdu_offset], rcv_len);
616 queue->pdu_remaining -= rcv_len;
617 queue->pdu_offset += rcv_len;
620 if (queue->pdu_remaining)
624 if (queue->hdr_digest) {
625 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
631 if (queue->data_digest) {
632 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
638 case nvme_tcp_c2h_data:
639 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
641 nvme_tcp_init_recv_ctx(queue);
642 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
644 nvme_tcp_init_recv_ctx(queue);
645 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
647 dev_err(queue->ctrl->ctrl.device,
648 "unsupported pdu type (%d)\n", hdr->type);
653 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
655 union nvme_result res = {};
657 nvme_end_request(rq, cpu_to_le16(status << 1), res);
660 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
661 unsigned int *offset, size_t *len)
663 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
664 struct nvme_tcp_request *req;
667 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
669 dev_err(queue->ctrl->ctrl.device,
670 "queue %d tag %#x not found\n",
671 nvme_tcp_queue_id(queue), pdu->command_id);
674 req = blk_mq_rq_to_pdu(rq);
679 recv_len = min_t(size_t, *len, queue->data_remaining);
683 if (!iov_iter_count(&req->iter)) {
684 req->curr_bio = req->curr_bio->bi_next;
687 * If we don't have any bios it means that the controller
688 * sent more data than we requested, hence error
690 if (!req->curr_bio) {
691 dev_err(queue->ctrl->ctrl.device,
692 "queue %d no space in request %#x",
693 nvme_tcp_queue_id(queue), rq->tag);
694 nvme_tcp_init_recv_ctx(queue);
697 nvme_tcp_init_iter(req, READ);
700 /* we can read only from what is left in this bio */
701 recv_len = min_t(size_t, recv_len,
702 iov_iter_count(&req->iter));
704 if (queue->data_digest)
705 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
706 &req->iter, recv_len, queue->rcv_hash);
708 ret = skb_copy_datagram_iter(skb, *offset,
709 &req->iter, recv_len);
711 dev_err(queue->ctrl->ctrl.device,
712 "queue %d failed to copy request %#x data",
713 nvme_tcp_queue_id(queue), rq->tag);
719 queue->data_remaining -= recv_len;
722 if (!queue->data_remaining) {
723 if (queue->data_digest) {
724 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
725 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
727 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
728 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
731 nvme_tcp_init_recv_ctx(queue);
738 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
739 struct sk_buff *skb, unsigned int *offset, size_t *len)
741 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
742 char *ddgst = (char *)&queue->recv_ddgst;
743 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
744 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
747 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
751 queue->ddgst_remaining -= recv_len;
754 if (queue->ddgst_remaining)
757 if (queue->recv_ddgst != queue->exp_ddgst) {
758 dev_err(queue->ctrl->ctrl.device,
759 "data digest error: recv %#x expected %#x\n",
760 le32_to_cpu(queue->recv_ddgst),
761 le32_to_cpu(queue->exp_ddgst));
765 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
766 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
769 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
773 nvme_tcp_init_recv_ctx(queue);
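/*
 * ->read_sock() callback: hand the skb bytes to the PDU, data or
 * data-digest handler according to the current receive state, and start
 * error recovery if any of them fails.
 */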
777 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
778 unsigned int offset, size_t len)
780 struct nvme_tcp_queue *queue = desc->arg.data;
781 size_t consumed = len;
785 switch (nvme_tcp_recv_state(queue)) {
786 case NVME_TCP_RECV_PDU:
787 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
789 case NVME_TCP_RECV_DATA:
790 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
792 case NVME_TCP_RECV_DDGST:
793 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
799 dev_err(queue->ctrl->ctrl.device,
800 "receive failed: %d\n", result);
801 queue->rd_enabled = false;
802 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
810 static void nvme_tcp_data_ready(struct sock *sk)
812 struct nvme_tcp_queue *queue;
814 read_lock_bh(&sk->sk_callback_lock);
815 queue = sk->sk_user_data;
816 if (likely(queue && queue->rd_enabled) &&
817 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
818 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
819 read_unlock_bh(&sk->sk_callback_lock);
822 static void nvme_tcp_write_space(struct sock *sk)
824 struct nvme_tcp_queue *queue;
826 read_lock_bh(&sk->sk_callback_lock);
827 queue = sk->sk_user_data;
828 if (likely(queue && sk_stream_is_writeable(sk))) {
829 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
830 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
832 read_unlock_bh(&sk->sk_callback_lock);
835 static void nvme_tcp_state_change(struct sock *sk)
837 struct nvme_tcp_queue *queue;
839 read_lock(&sk->sk_callback_lock);
840 queue = sk->sk_user_data;
844 switch (sk->sk_state) {
851 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
854 dev_info(queue->ctrl->ctrl.device,
855 "queue %d socket state %d\n",
856 nvme_tcp_queue_id(queue), sk->sk_state);
859 queue->state_change(sk);
861 read_unlock(&sk->sk_callback_lock);
864 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
866 queue->request = NULL;
869 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
871 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
874 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
876 struct nvme_tcp_queue *queue = req->queue;
879 struct page *page = nvme_tcp_req_cur_page(req);
880 size_t offset = nvme_tcp_req_cur_offset(req);
881 size_t len = nvme_tcp_req_cur_length(req);
882 bool last = nvme_tcp_pdu_last_send(req, len);
883 int ret, flags = MSG_DONTWAIT;
885 if (last && !queue->data_digest)
888 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
890 /* can't zcopy slab pages */
891 if (unlikely(PageSlab(page))) {
892 ret = sock_no_sendpage(queue->sock, page, offset, len,
895 ret = kernel_sendpage(queue->sock, page, offset, len,
901 nvme_tcp_advance_req(req, ret);
902 if (queue->data_digest)
903 nvme_tcp_ddgst_update(queue->snd_hash, page,
906 /* fully successful last write */
907 if (last && ret == len) {
908 if (queue->data_digest) {
909 nvme_tcp_ddgst_final(queue->snd_hash,
911 req->state = NVME_TCP_SEND_DDGST;
914 nvme_tcp_done_send_req(queue);
922 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
924 struct nvme_tcp_queue *queue = req->queue;
925 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
926 bool inline_data = nvme_tcp_has_inline_data(req);
927 u8 hdgst = nvme_tcp_hdgst_len(queue);
928 int len = sizeof(*pdu) + hdgst - req->offset;
929 int flags = MSG_DONTWAIT;
933 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
937 if (queue->hdr_digest && !req->offset)
938 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
940 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
941 offset_in_page(pdu) + req->offset, len, flags);
942 if (unlikely(ret <= 0))
948 req->state = NVME_TCP_SEND_DATA;
949 if (queue->data_digest)
950 crypto_ahash_init(queue->snd_hash);
951 nvme_tcp_init_iter(req, WRITE);
953 nvme_tcp_done_send_req(queue);
962 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
964 struct nvme_tcp_queue *queue = req->queue;
965 struct nvme_tcp_data_pdu *pdu = req->pdu;
966 u8 hdgst = nvme_tcp_hdgst_len(queue);
967 int len = sizeof(*pdu) - req->offset + hdgst;
970 if (queue->hdr_digest && !req->offset)
971 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
973 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
974 offset_in_page(pdu) + req->offset, len,
975 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
976 if (unlikely(ret <= 0))
981 req->state = NVME_TCP_SEND_DATA;
982 if (queue->data_digest)
983 crypto_ahash_init(queue->snd_hash);
985 nvme_tcp_init_iter(req, WRITE);
993 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
995 struct nvme_tcp_queue *queue = req->queue;
997 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
999 .iov_base = &req->ddgst + req->offset,
1000 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1003 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1004 if (unlikely(ret <= 0))
1007 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
1008 nvme_tcp_done_send_req(queue);
1016 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1018 struct nvme_tcp_request *req;
1021 if (!queue->request) {
1022 queue->request = nvme_tcp_fetch_request(queue);
1023 if (!queue->request)
1026 req = queue->request;
1028 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1029 ret = nvme_tcp_try_send_cmd_pdu(req);
1032 if (!nvme_tcp_has_inline_data(req))
1036 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1037 ret = nvme_tcp_try_send_data_pdu(req);
1042 if (req->state == NVME_TCP_SEND_DATA) {
1043 ret = nvme_tcp_try_send_data(req);
1048 if (req->state == NVME_TCP_SEND_DDGST)
1049 ret = nvme_tcp_try_send_ddgst(req);
1051 if (ret == -EAGAIN) {
1053 } else if (ret < 0) {
1054 dev_err(queue->ctrl->ctrl.device,
1055 "failed to send request %d\n", ret);
1056 if (ret != -EPIPE && ret != -ECONNRESET)
1057 nvme_tcp_fail_request(queue->request);
1058 nvme_tcp_done_send_req(queue);
1063 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1065 struct socket *sock = queue->sock;
1066 struct sock *sk = sock->sk;
1067 read_descriptor_t rd_desc;
1070 rd_desc.arg.data = queue;
1074 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
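/*
 * Per-queue worker: alternate between trying to send and trying to
 * receive within a roughly 1ms budget, then requeue itself on the
 * queue's io_cpu if more work may be pending.
 */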
1079 static void nvme_tcp_io_work(struct work_struct *w)
1081 struct nvme_tcp_queue *queue =
1082 container_of(w, struct nvme_tcp_queue, io_work);
1083 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1086 bool pending = false;
1089 if (mutex_trylock(&queue->send_mutex)) {
1090 result = nvme_tcp_try_send(queue);
1091 mutex_unlock(&queue->send_mutex);
1094 else if (unlikely(result < 0))
1098 result = nvme_tcp_try_recv(queue);
1101 else if (unlikely(result < 0))
1107 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1109 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1112 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1114 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1116 ahash_request_free(queue->rcv_hash);
1117 ahash_request_free(queue->snd_hash);
1118 crypto_free_ahash(tfm);
1121 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1123 struct crypto_ahash *tfm;
1125 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1127 return PTR_ERR(tfm);
1129 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1130 if (!queue->snd_hash)
1132 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1134 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1135 if (!queue->rcv_hash)
1137 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1141 ahash_request_free(queue->snd_hash);
1143 crypto_free_ahash(tfm);
1147 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1149 struct nvme_tcp_request *async = &ctrl->async_req;
1151 page_frag_free(async->pdu);
1154 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1156 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1157 struct nvme_tcp_request *async = &ctrl->async_req;
1158 u8 hdgst = nvme_tcp_hdgst_len(queue);
1160 async->pdu = page_frag_alloc(&queue->pf_cache,
1161 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1162 GFP_KERNEL | __GFP_ZERO);
1166 async->queue = &ctrl->queues[0];
1170 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1172 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1173 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1175 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1178 if (queue->hdr_digest || queue->data_digest)
1179 nvme_tcp_free_crypto(queue);
1181 sock_release(queue->sock);
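/*
 * NVMe/TCP initialization handshake: send an ICReq PDU and validate the
 * controller's ICResp (PDU type and length, PFV, header/data digest
 * settings and CPDA) before the queue carries any capsules.
 */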
1185 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1187 struct nvme_tcp_icreq_pdu *icreq;
1188 struct nvme_tcp_icresp_pdu *icresp;
1189 struct msghdr msg = {};
1191 bool ctrl_hdgst, ctrl_ddgst;
1194 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1198 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1204 icreq->hdr.type = nvme_tcp_icreq;
1205 icreq->hdr.hlen = sizeof(*icreq);
1207 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1208 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1209 icreq->maxr2t = 0; /* single inflight r2t supported */
1210 icreq->hpda = 0; /* no alignment constraint */
1211 if (queue->hdr_digest)
1212 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1213 if (queue->data_digest)
1214 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1216 iov.iov_base = icreq;
1217 iov.iov_len = sizeof(*icreq);
1218 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1222 memset(&msg, 0, sizeof(msg));
1223 iov.iov_base = icresp;
1224 iov.iov_len = sizeof(*icresp);
1225 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1226 iov.iov_len, msg.msg_flags);
1231 if (icresp->hdr.type != nvme_tcp_icresp) {
1232 pr_err("queue %d: bad type returned %d\n",
1233 nvme_tcp_queue_id(queue), icresp->hdr.type);
1237 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1238 pr_err("queue %d: bad pdu length returned %d\n",
1239 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1243 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1244 pr_err("queue %d: bad pfv returned %d\n",
1245 nvme_tcp_queue_id(queue), icresp->pfv);
1249 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1250 if ((queue->data_digest && !ctrl_ddgst) ||
1251 (!queue->data_digest && ctrl_ddgst)) {
1252 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1253 nvme_tcp_queue_id(queue),
1254 queue->data_digest ? "enabled" : "disabled",
1255 ctrl_ddgst ? "enabled" : "disabled");
1259 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1260 if ((queue->hdr_digest && !ctrl_hdgst) ||
1261 (!queue->hdr_digest && ctrl_hdgst)) {
1262 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1263 nvme_tcp_queue_id(queue),
1264 queue->hdr_digest ? "enabled" : "disabled",
1265 ctrl_hdgst ? "enabled" : "disabled");
1269 if (icresp->cpda != 0) {
1270 pr_err("queue %d: unsupported cpda returned %d\n",
1271 nvme_tcp_queue_id(queue), icresp->cpda);
1283 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1285 return nvme_tcp_queue_id(queue) == 0;
1288 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1290 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1291 int qid = nvme_tcp_queue_id(queue);
1293 return !nvme_tcp_admin_queue(queue) &&
1294 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1297 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1299 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1300 int qid = nvme_tcp_queue_id(queue);
1302 return !nvme_tcp_admin_queue(queue) &&
1303 !nvme_tcp_default_queue(queue) &&
1304 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1305 ctrl->io_queues[HCTX_TYPE_READ];
1308 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1310 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1311 int qid = nvme_tcp_queue_id(queue);
1313 return !nvme_tcp_admin_queue(queue) &&
1314 !nvme_tcp_default_queue(queue) &&
1315 !nvme_tcp_read_queue(queue) &&
1316 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1317 ctrl->io_queues[HCTX_TYPE_READ] +
1318 ctrl->io_queues[HCTX_TYPE_POLL];
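/*
 * Pin each queue's io_work to an online CPU chosen from the queue's index
 * within its class (default, read or poll), so queues of a class are
 * spread across the online CPUs.
 */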
1321 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1323 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1324 int qid = nvme_tcp_queue_id(queue);
1327 if (nvme_tcp_default_queue(queue))
1329 else if (nvme_tcp_read_queue(queue))
1330 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1331 else if (nvme_tcp_poll_queue(queue))
1332 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1333 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1334 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1337 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1338 int qid, size_t queue_size)
1340 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1341 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1342 int ret, rcv_pdu_size;
1345 INIT_LIST_HEAD(&queue->send_list);
1346 spin_lock_init(&queue->lock);
1347 mutex_init(&queue->send_mutex);
1348 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1349 queue->queue_size = queue_size;
1352 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1354 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1355 NVME_TCP_ADMIN_CCSZ;
1357 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1358 IPPROTO_TCP, &queue->sock);
1360 dev_err(nctrl->device,
1361 "failed to create socket: %d\n", ret);
1365 /* Single SYN retry */
1366 tcp_sock_set_syncnt(queue->sock->sk, 1);
1368 /* Set TCP no delay */
1369 tcp_sock_set_nodelay(queue->sock->sk);
1372 * Clean up whatever is sitting in the TCP transmit queue on socket
1373 * close. This is done to prevent stale data from being sent should
1374 * the network connection be restored before TCP times out.
1376 sock_no_linger(queue->sock->sk);
1378 if (so_priority > 0)
1379 sock_set_priority(queue->sock->sk, so_priority);
1381 /* Set socket type of service */
1382 if (nctrl->opts->tos >= 0)
1383 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1385 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1386 nvme_tcp_set_queue_io_cpu(queue);
1387 queue->request = NULL;
1388 queue->data_remaining = 0;
1389 queue->ddgst_remaining = 0;
1390 queue->pdu_remaining = 0;
1391 queue->pdu_offset = 0;
1392 sk_set_memalloc(queue->sock->sk);
1394 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1395 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1396 sizeof(ctrl->src_addr));
1398 dev_err(nctrl->device,
1399 "failed to bind queue %d socket %d\n",
1405 queue->hdr_digest = nctrl->opts->hdr_digest;
1406 queue->data_digest = nctrl->opts->data_digest;
1407 if (queue->hdr_digest || queue->data_digest) {
1408 ret = nvme_tcp_alloc_crypto(queue);
1410 dev_err(nctrl->device,
1411 "failed to allocate queue %d crypto\n", qid);
1416 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1417 nvme_tcp_hdgst_len(queue);
1418 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1424 dev_dbg(nctrl->device, "connecting queue %d\n",
1425 nvme_tcp_queue_id(queue));
1427 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1428 sizeof(ctrl->addr), 0);
1430 dev_err(nctrl->device,
1431 "failed to connect socket: %d\n", ret);
1435 ret = nvme_tcp_init_connection(queue);
1437 goto err_init_connect;
1439 queue->rd_enabled = true;
1440 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1441 nvme_tcp_init_recv_ctx(queue);
1443 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1444 queue->sock->sk->sk_user_data = queue;
1445 queue->state_change = queue->sock->sk->sk_state_change;
1446 queue->data_ready = queue->sock->sk->sk_data_ready;
1447 queue->write_space = queue->sock->sk->sk_write_space;
1448 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1449 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1450 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1451 #ifdef CONFIG_NET_RX_BUSY_POLL
1452 queue->sock->sk->sk_ll_usec = 1;
1454 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1459 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1463 if (queue->hdr_digest || queue->data_digest)
1464 nvme_tcp_free_crypto(queue);
1466 sock_release(queue->sock);
1471 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1473 struct socket *sock = queue->sock;
1475 write_lock_bh(&sock->sk->sk_callback_lock);
1476 sock->sk->sk_user_data = NULL;
1477 sock->sk->sk_data_ready = queue->data_ready;
1478 sock->sk->sk_state_change = queue->state_change;
1479 sock->sk->sk_write_space = queue->write_space;
1480 write_unlock_bh(&sock->sk->sk_callback_lock);
1483 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1485 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1486 nvme_tcp_restore_sock_calls(queue);
1487 cancel_work_sync(&queue->io_work);
1490 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1492 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1493 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1495 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1498 __nvme_tcp_stop_queue(queue);
1501 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1503 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1507 ret = nvmf_connect_io_queue(nctrl, idx, false);
1509 ret = nvmf_connect_admin_queue(nctrl);
1512 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1514 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1515 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1516 dev_err(nctrl->device,
1517 "failed to connect queue: %d ret=%d\n", idx, ret);
1522 static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1525 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1526 struct blk_mq_tag_set *set;
1530 set = &ctrl->admin_tag_set;
1531 memset(set, 0, sizeof(*set));
1532 set->ops = &nvme_tcp_admin_mq_ops;
1533 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1534 set->reserved_tags = 2; /* connect + keep-alive */
1535 set->numa_node = NUMA_NO_NODE;
1536 set->flags = BLK_MQ_F_BLOCKING;
1537 set->cmd_size = sizeof(struct nvme_tcp_request);
1538 set->driver_data = ctrl;
1539 set->nr_hw_queues = 1;
1540 set->timeout = ADMIN_TIMEOUT;
1542 set = &ctrl->tag_set;
1543 memset(set, 0, sizeof(*set));
1544 set->ops = &nvme_tcp_mq_ops;
1545 set->queue_depth = nctrl->sqsize + 1;
1546 set->reserved_tags = 1; /* fabric connect */
1547 set->numa_node = NUMA_NO_NODE;
1548 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
1549 set->cmd_size = sizeof(struct nvme_tcp_request);
1550 set->driver_data = ctrl;
1551 set->nr_hw_queues = nctrl->queue_count - 1;
1552 set->timeout = NVME_IO_TIMEOUT;
1553 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1556 ret = blk_mq_alloc_tag_set(set);
1558 return ERR_PTR(ret);
1563 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1565 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1566 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1567 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1570 nvme_tcp_free_queue(ctrl, 0);
1573 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1577 for (i = 1; i < ctrl->queue_count; i++)
1578 nvme_tcp_free_queue(ctrl, i);
1581 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1585 for (i = 1; i < ctrl->queue_count; i++)
1586 nvme_tcp_stop_queue(ctrl, i);
1589 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1593 for (i = 1; i < ctrl->queue_count; i++) {
1594 ret = nvme_tcp_start_queue(ctrl, i);
1596 goto out_stop_queues;
1602 for (i--; i >= 1; i--)
1603 nvme_tcp_stop_queue(ctrl, i);
1607 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1611 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1615 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1617 goto out_free_queue;
1622 nvme_tcp_free_queue(ctrl, 0);
1626 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1630 for (i = 1; i < ctrl->queue_count; i++) {
1631 ret = nvme_tcp_alloc_queue(ctrl, i,
1634 goto out_free_queues;
1640 for (i--; i >= 1; i--)
1641 nvme_tcp_free_queue(ctrl, i);
1646 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1648 unsigned int nr_io_queues;
1650 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1651 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1652 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1654 return nr_io_queues;
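/*
 * Distribute the granted I/O queues between the HCTX classes: when
 * dedicated write queues were requested, read queues are carved out first
 * and the remainder becomes the default (write) class; otherwise reads
 * and writes share the default queues. Any queues left over are handed to
 * the dedicated poll class.
 */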
1657 static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1658 unsigned int nr_io_queues)
1660 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1661 struct nvmf_ctrl_options *opts = nctrl->opts;
1663 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1665 * separate read/write queues
1666 * hand out dedicated default queues only after we have
1667 * sufficient read queues.
1669 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1670 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1671 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1672 min(opts->nr_write_queues, nr_io_queues);
1673 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1676 * shared read/write queues
1677 * either no write queues were requested, or we don't have
1678 * sufficient queue count to have dedicated default queues.
1680 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1681 min(opts->nr_io_queues, nr_io_queues);
1682 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1685 if (opts->nr_poll_queues && nr_io_queues) {
1686 /* map dedicated poll queues only if we have queues left */
1687 ctrl->io_queues[HCTX_TYPE_POLL] =
1688 min(opts->nr_poll_queues, nr_io_queues);
1692 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1694 unsigned int nr_io_queues;
1697 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1698 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1702 ctrl->queue_count = nr_io_queues + 1;
1703 if (ctrl->queue_count < 2)
1706 dev_info(ctrl->device,
1707 "creating %d I/O queues.\n", nr_io_queues);
1709 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1711 return __nvme_tcp_alloc_io_queues(ctrl);
1714 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1716 nvme_tcp_stop_io_queues(ctrl);
1718 blk_cleanup_queue(ctrl->connect_q);
1719 blk_mq_free_tag_set(ctrl->tagset);
1721 nvme_tcp_free_io_queues(ctrl);
1724 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1728 ret = nvme_tcp_alloc_io_queues(ctrl);
1733 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1734 if (IS_ERR(ctrl->tagset)) {
1735 ret = PTR_ERR(ctrl->tagset);
1736 goto out_free_io_queues;
1739 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1740 if (IS_ERR(ctrl->connect_q)) {
1741 ret = PTR_ERR(ctrl->connect_q);
1742 goto out_free_tag_set;
1745 blk_mq_update_nr_hw_queues(ctrl->tagset,
1746 ctrl->queue_count - 1);
1749 ret = nvme_tcp_start_io_queues(ctrl);
1751 goto out_cleanup_connect_q;
1755 out_cleanup_connect_q:
1757 blk_cleanup_queue(ctrl->connect_q);
1760 blk_mq_free_tag_set(ctrl->tagset);
1762 nvme_tcp_free_io_queues(ctrl);
1766 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1768 nvme_tcp_stop_queue(ctrl, 0);
1770 blk_cleanup_queue(ctrl->admin_q);
1771 blk_cleanup_queue(ctrl->fabrics_q);
1772 blk_mq_free_tag_set(ctrl->admin_tagset);
1774 nvme_tcp_free_admin_queue(ctrl);
1777 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1781 error = nvme_tcp_alloc_admin_queue(ctrl);
1786 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1787 if (IS_ERR(ctrl->admin_tagset)) {
1788 error = PTR_ERR(ctrl->admin_tagset);
1789 goto out_free_queue;
1792 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1793 if (IS_ERR(ctrl->fabrics_q)) {
1794 error = PTR_ERR(ctrl->fabrics_q);
1795 goto out_free_tagset;
1798 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1799 if (IS_ERR(ctrl->admin_q)) {
1800 error = PTR_ERR(ctrl->admin_q);
1801 goto out_cleanup_fabrics_q;
1805 error = nvme_tcp_start_queue(ctrl, 0);
1807 goto out_cleanup_queue;
1809 error = nvme_enable_ctrl(ctrl);
1811 goto out_stop_queue;
1813 blk_mq_unquiesce_queue(ctrl->admin_q);
1815 error = nvme_init_identify(ctrl);
1817 goto out_stop_queue;
1822 nvme_tcp_stop_queue(ctrl, 0);
1825 blk_cleanup_queue(ctrl->admin_q);
1826 out_cleanup_fabrics_q:
1828 blk_cleanup_queue(ctrl->fabrics_q);
1831 blk_mq_free_tag_set(ctrl->admin_tagset);
1833 nvme_tcp_free_admin_queue(ctrl);
1837 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1840 blk_mq_quiesce_queue(ctrl->admin_q);
1841 nvme_tcp_stop_queue(ctrl, 0);
1842 if (ctrl->admin_tagset) {
1843 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1844 nvme_cancel_request, ctrl);
1845 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1848 blk_mq_unquiesce_queue(ctrl->admin_q);
1849 nvme_tcp_destroy_admin_queue(ctrl, remove);
1852 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1855 if (ctrl->queue_count <= 1)
1857 nvme_stop_queues(ctrl);
1858 nvme_tcp_stop_io_queues(ctrl);
1860 blk_mq_tagset_busy_iter(ctrl->tagset,
1861 nvme_cancel_request, ctrl);
1862 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1865 nvme_start_queues(ctrl);
1866 nvme_tcp_destroy_io_queues(ctrl, remove);
1869 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1871 /* If we are resetting/deleting then do nothing */
1872 if (ctrl->state != NVME_CTRL_CONNECTING) {
1873 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1874 ctrl->state == NVME_CTRL_LIVE);
1878 if (nvmf_should_reconnect(ctrl)) {
1879 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1880 ctrl->opts->reconnect_delay);
1881 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1882 ctrl->opts->reconnect_delay * HZ);
1884 dev_info(ctrl->device, "Removing controller...\n");
1885 nvme_delete_ctrl(ctrl);
1889 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1891 struct nvmf_ctrl_options *opts = ctrl->opts;
1894 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1899 dev_err(ctrl->device, "icdoff is not supported!\n");
1903 if (opts->queue_size > ctrl->sqsize + 1)
1904 dev_warn(ctrl->device,
1905 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1906 opts->queue_size, ctrl->sqsize + 1);
1908 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1909 dev_warn(ctrl->device,
1910 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1911 ctrl->sqsize + 1, ctrl->maxcmd);
1912 ctrl->sqsize = ctrl->maxcmd - 1;
1915 if (ctrl->queue_count > 1) {
1916 ret = nvme_tcp_configure_io_queues(ctrl, new);
1921 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1923 * state change failure is ok if we're in DELETING state,
1924 * unless we are in the middle of creating a new controller, to
1925 * avoid races with the teardown flow.
1927 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1933 nvme_start_ctrl(ctrl);
1937 if (ctrl->queue_count > 1)
1938 nvme_tcp_destroy_io_queues(ctrl, new);
1940 nvme_tcp_stop_queue(ctrl, 0);
1941 nvme_tcp_destroy_admin_queue(ctrl, new);
1945 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1947 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1948 struct nvme_tcp_ctrl, connect_work);
1949 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1951 ++ctrl->nr_reconnects;
1953 if (nvme_tcp_setup_ctrl(ctrl, false))
1956 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
1957 ctrl->nr_reconnects);
1959 ctrl->nr_reconnects = 0;
1964 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1965 ctrl->nr_reconnects);
1966 nvme_tcp_reconnect_or_remove(ctrl);
1969 static void nvme_tcp_error_recovery_work(struct work_struct *work)
1971 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1972 struct nvme_tcp_ctrl, err_work);
1973 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1975 nvme_stop_keep_alive(ctrl);
1976 nvme_tcp_teardown_io_queues(ctrl, false);
1977 /* unquiesce so that pending requests fail fast */
1978 nvme_start_queues(ctrl);
1979 nvme_tcp_teardown_admin_queue(ctrl, false);
1980 blk_mq_unquiesce_queue(ctrl->admin_q);
1982 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1983 /* state change failure is ok if we're in DELETING state */
1984 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1988 nvme_tcp_reconnect_or_remove(ctrl);
1991 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
1993 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1994 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1996 nvme_tcp_teardown_io_queues(ctrl, shutdown);
1997 blk_mq_quiesce_queue(ctrl->admin_q);
1999 nvme_shutdown_ctrl(ctrl);
2001 nvme_disable_ctrl(ctrl);
2002 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2005 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2007 nvme_tcp_teardown_ctrl(ctrl, true);
2010 static void nvme_reset_ctrl_work(struct work_struct *work)
2012 struct nvme_ctrl *ctrl =
2013 container_of(work, struct nvme_ctrl, reset_work);
2015 nvme_stop_ctrl(ctrl);
2016 nvme_tcp_teardown_ctrl(ctrl, false);
2018 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2019 /* state change failure is ok if we're in DELETING state */
2020 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
2024 if (nvme_tcp_setup_ctrl(ctrl, false))
2030 ++ctrl->nr_reconnects;
2031 nvme_tcp_reconnect_or_remove(ctrl);
2034 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2036 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2038 if (list_empty(&ctrl->list))
2041 mutex_lock(&nvme_tcp_ctrl_mutex);
2042 list_del(&ctrl->list);
2043 mutex_unlock(&nvme_tcp_ctrl_mutex);
2045 nvmf_free_options(nctrl->opts);
2047 kfree(ctrl->queues);
2051 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2053 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2057 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2058 NVME_SGL_FMT_TRANSPORT_A;
2061 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2062 struct nvme_command *c, u32 data_len)
2064 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2066 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2067 sg->length = cpu_to_le32(data_len);
2068 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2071 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2074 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2077 sg->length = cpu_to_le32(data_len);
2078 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2079 NVME_SGL_FMT_TRANSPORT_A;
2082 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2084 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2085 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2086 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2087 struct nvme_command *cmd = &pdu->cmd;
2088 u8 hdgst = nvme_tcp_hdgst_len(queue);
2090 memset(pdu, 0, sizeof(*pdu));
2091 pdu->hdr.type = nvme_tcp_cmd;
2092 if (queue->hdr_digest)
2093 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2094 pdu->hdr.hlen = sizeof(*pdu);
2095 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2097 cmd->common.opcode = nvme_admin_async_event;
2098 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2099 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2100 nvme_tcp_set_sg_null(cmd);
2102 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2103 ctrl->async_req.offset = 0;
2104 ctrl->async_req.curr_bio = NULL;
2105 ctrl->async_req.data_len = 0;
2107 nvme_tcp_queue_request(&ctrl->async_req, true);
2110 static enum blk_eh_timer_return
2111 nvme_tcp_timeout(struct request *rq, bool reserved)
2113 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2114 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2115 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2118 * Restart the timer if a controller reset is already scheduled. Any
2119 * timed out commands would be handled before entering the connecting state.
2122 if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
2123 return BLK_EH_RESET_TIMER;
2125 dev_warn(ctrl->ctrl.device,
2126 "queue %d: timeout request %#x type %d\n",
2127 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2129 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
2131 * Tear down immediately if the controller times out while starting
2132 * or if error recovery has already started. All outstanding
2133 * requests are completed on shutdown, so we return BLK_EH_DONE.
2135 flush_work(&ctrl->err_work);
2136 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2137 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
2141 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
2142 nvme_tcp_error_recovery(&ctrl->ctrl);
2144 return BLK_EH_RESET_TIMER;
2147 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2150 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2151 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2152 struct nvme_command *c = &pdu->cmd;
2154 c->common.flags |= NVME_CMD_SGL_METABUF;
2156 if (!blk_rq_nr_phys_segments(rq))
2157 nvme_tcp_set_sg_null(c);
2158 else if (rq_data_dir(rq) == WRITE &&
2159 req->data_len <= nvme_tcp_inline_data_size(queue))
2160 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2162 nvme_tcp_set_sg_host_data(c, req->data_len);
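/*
 * Prepare the command capsule PDU for a block request: set up the SQE,
 * record the payload length, decide whether write data is small enough to
 * be carried inline in the capsule, and size the header, digests and plen
 * accordingly before mapping the data SGL.
 */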
2167 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2170 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2171 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2172 struct nvme_tcp_queue *queue = req->queue;
2173 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2176 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2180 req->state = NVME_TCP_SEND_CMD_PDU;
2185 req->data_len = blk_rq_nr_phys_segments(rq) ?
2186 blk_rq_payload_bytes(rq) : 0;
2187 req->curr_bio = rq->bio;
2189 if (rq_data_dir(rq) == WRITE &&
2190 req->data_len <= nvme_tcp_inline_data_size(queue))
2191 req->pdu_len = req->data_len;
2192 else if (req->curr_bio)
2193 nvme_tcp_init_iter(req, READ);
2195 pdu->hdr.type = nvme_tcp_cmd;
2197 if (queue->hdr_digest)
2198 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2199 if (queue->data_digest && req->pdu_len) {
2200 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2201 ddgst = nvme_tcp_ddgst_len(queue);
2203 pdu->hdr.hlen = sizeof(*pdu);
2204 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2206 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2208 ret = nvme_tcp_map_data(queue, rq);
2209 if (unlikely(ret)) {
2210 nvme_cleanup_cmd(rq);
2211 dev_err(queue->ctrl->ctrl.device,
2212 "Failed to map data (%d)\n", ret);
2219 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2220 const struct blk_mq_queue_data *bd)
2222 struct nvme_ns *ns = hctx->queue->queuedata;
2223 struct nvme_tcp_queue *queue = hctx->driver_data;
2224 struct request *rq = bd->rq;
2225 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2226 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2229 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2230 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2232 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2236 blk_mq_start_request(rq);
2238 nvme_tcp_queue_request(req, true);
2243 static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2245 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2246 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2248 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2249 /* separate read/write queues */
2250 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2251 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2252 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2253 set->map[HCTX_TYPE_READ].nr_queues =
2254 ctrl->io_queues[HCTX_TYPE_READ];
2255 set->map[HCTX_TYPE_READ].queue_offset =
2256 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2258 /* shared read/write queues */
2259 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2260 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2261 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2262 set->map[HCTX_TYPE_READ].nr_queues =
2263 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2264 set->map[HCTX_TYPE_READ].queue_offset = 0;
2266 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2267 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2269 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2270 /* map dedicated poll queues only if we have queues left */
2271 set->map[HCTX_TYPE_POLL].nr_queues =
2272 ctrl->io_queues[HCTX_TYPE_POLL];
2273 set->map[HCTX_TYPE_POLL].queue_offset =
2274 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2275 ctrl->io_queues[HCTX_TYPE_READ];
2276 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2279 dev_info(ctrl->ctrl.device,
2280 "mapped %d/%d/%d default/read/poll queues.\n",
2281 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2282 ctrl->io_queues[HCTX_TYPE_READ],
2283 ctrl->io_queues[HCTX_TYPE_POLL]);
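/*
 * blk-mq ->poll() callback: busy-poll the socket and reap completions
 * directly; NVME_TCP_Q_POLLING tells data_ready not to schedule io_work
 * while we are polling.
 */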
2288 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2290 struct nvme_tcp_queue *queue = hctx->driver_data;
2291 struct sock *sk = queue->sock->sk;
2293 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2296 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2297 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2298 sk_busy_loop(sk, true);
2299 nvme_tcp_try_recv(queue);
2300 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2301 return queue->nr_cqe;
2304 static struct blk_mq_ops nvme_tcp_mq_ops = {
2305 .queue_rq = nvme_tcp_queue_rq,
2306 .complete = nvme_complete_rq,
2307 .init_request = nvme_tcp_init_request,
2308 .exit_request = nvme_tcp_exit_request,
2309 .init_hctx = nvme_tcp_init_hctx,
2310 .timeout = nvme_tcp_timeout,
2311 .map_queues = nvme_tcp_map_queues,
2312 .poll = nvme_tcp_poll,
2315 static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2316 .queue_rq = nvme_tcp_queue_rq,
2317 .complete = nvme_complete_rq,
2318 .init_request = nvme_tcp_init_request,
2319 .exit_request = nvme_tcp_exit_request,
2320 .init_hctx = nvme_tcp_init_admin_hctx,
2321 .timeout = nvme_tcp_timeout,
2324 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2326 .module = THIS_MODULE,
2327 .flags = NVME_F_FABRICS,
2328 .reg_read32 = nvmf_reg_read32,
2329 .reg_read64 = nvmf_reg_read64,
2330 .reg_write32 = nvmf_reg_write32,
2331 .free_ctrl = nvme_tcp_free_ctrl,
2332 .submit_async_event = nvme_tcp_submit_async_event,
2333 .delete_ctrl = nvme_tcp_delete_ctrl,
2334 .get_address = nvmf_get_address,
2338 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2340 struct nvme_tcp_ctrl *ctrl;
2343 mutex_lock(&nvme_tcp_ctrl_mutex);
2344 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2345 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2349 mutex_unlock(&nvme_tcp_ctrl_mutex);
2354 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2355 struct nvmf_ctrl_options *opts)
2357 struct nvme_tcp_ctrl *ctrl;
2360 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2362 return ERR_PTR(-ENOMEM);
2364 INIT_LIST_HEAD(&ctrl->list);
2365 ctrl->ctrl.opts = opts;
2366 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2367 opts->nr_poll_queues + 1;
2368 ctrl->ctrl.sqsize = opts->queue_size - 1;
2369 ctrl->ctrl.kato = opts->kato;
2371 INIT_DELAYED_WORK(&ctrl->connect_work,
2372 nvme_tcp_reconnect_ctrl_work);
2373 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2374 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2376 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2378 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2379 if (!opts->trsvcid) {
2383 opts->mask |= NVMF_OPT_TRSVCID;
2386 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2387 opts->traddr, opts->trsvcid, &ctrl->addr);
2389 pr_err("malformed address passed: %s:%s\n",
2390 opts->traddr, opts->trsvcid);
2394 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2395 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2396 opts->host_traddr, NULL, &ctrl->src_addr);
2398 pr_err("malformed src address passed: %s\n",
2404 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2409 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2411 if (!ctrl->queues) {
2416 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2418 goto out_kfree_queues;
2420 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2423 goto out_uninit_ctrl;
2426 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2428 goto out_uninit_ctrl;
2430 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2431 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2433 mutex_lock(&nvme_tcp_ctrl_mutex);
2434 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2435 mutex_unlock(&nvme_tcp_ctrl_mutex);
2440 nvme_uninit_ctrl(&ctrl->ctrl);
2441 nvme_put_ctrl(&ctrl->ctrl);
2444 return ERR_PTR(ret);
2446 kfree(ctrl->queues);
2449 return ERR_PTR(ret);
2452 static struct nvmf_transport_ops nvme_tcp_transport = {
2454 .module = THIS_MODULE,
2455 .required_opts = NVMF_OPT_TRADDR,
2456 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2457 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2458 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2459 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2461 .create_ctrl = nvme_tcp_create_ctrl,
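/*
 * Typical userspace usage (illustrative nvme-cli invocation; address,
 * port and NQN are placeholders):
 *
 *     nvme connect -t tcp -a 192.168.0.10 -s 4420 -n <subsys-nqn>
 */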
2464 static int __init nvme_tcp_init_module(void)
2466 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2467 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2471 nvmf_register_transport(&nvme_tcp_transport);
2475 static void __exit nvme_tcp_cleanup_module(void)
2477 struct nvme_tcp_ctrl *ctrl;
2479 nvmf_unregister_transport(&nvme_tcp_transport);
2481 mutex_lock(&nvme_tcp_ctrl_mutex);
2482 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2483 nvme_delete_ctrl(&ctrl->ctrl);
2484 mutex_unlock(&nvme_tcp_ctrl_mutex);
2485 flush_workqueue(nvme_delete_wq);
2487 destroy_workqueue(nvme_tcp_wq);
2490 module_init(nvme_tcp_init_module);
2491 module_exit(nvme_tcp_cleanup_module);
2493 MODULE_LICENSE("GPL v2");