1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP host.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
20 struct nvme_tcp_queue;
22 enum nvme_tcp_send_state {
23 NVME_TCP_SEND_CMD_PDU = 0,
24 NVME_TCP_SEND_H2C_PDU,
29 struct nvme_tcp_request {
30 struct nvme_request req;
32 struct nvme_tcp_queue *queue;
37 struct list_head entry;
46 enum nvme_tcp_send_state state;
49 enum nvme_tcp_queue_flags {
50 NVME_TCP_Q_ALLOCATED = 0,
54 enum nvme_tcp_recv_state {
55 NVME_TCP_RECV_PDU = 0,
61 struct nvme_tcp_queue {
63 struct work_struct io_work;
67 struct list_head send_list;
73 size_t data_remaining;
74 size_t ddgst_remaining;
77 struct nvme_tcp_request *request;
80 size_t cmnd_capsule_len;
81 struct nvme_tcp_ctrl *ctrl;
87 struct ahash_request *rcv_hash;
88 struct ahash_request *snd_hash;
92 struct page_frag_cache pf_cache;
94 void (*state_change)(struct sock *);
95 void (*data_ready)(struct sock *);
96 void (*write_space)(struct sock *);
99 struct nvme_tcp_ctrl {
100 /* read only in the hot path */
101 struct nvme_tcp_queue *queues;
102 struct blk_mq_tag_set tag_set;
104 /* other member variables */
105 struct list_head list;
106 struct blk_mq_tag_set admin_tag_set;
107 struct sockaddr_storage addr;
108 struct sockaddr_storage src_addr;
109 struct nvme_ctrl ctrl;
111 struct work_struct err_work;
112 struct delayed_work connect_work;
113 struct nvme_tcp_request async_req;
116 static LIST_HEAD(nvme_tcp_ctrl_list);
117 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
118 static struct workqueue_struct *nvme_tcp_wq;
119 static struct blk_mq_ops nvme_tcp_mq_ops;
120 static struct blk_mq_ops nvme_tcp_admin_mq_ops;
122 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
124 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
127 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
129 return queue - queue->ctrl->queues;
132 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
134 u32 queue_idx = nvme_tcp_queue_id(queue);
137 return queue->ctrl->admin_tag_set.tags[queue_idx];
138 return queue->ctrl->tag_set.tags[queue_idx - 1];
141 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
143 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
146 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
148 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
151 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
153 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
156 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
158 return req == &req->queue->ctrl->async_req;
161 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
166 if (unlikely(nvme_tcp_async_req(req)))
167 return false; /* async events don't have a request */
169 rq = blk_mq_rq_from_pdu(req);
170 bytes = blk_rq_payload_bytes(rq);
172 return rq_data_dir(rq) == WRITE && bytes &&
173 bytes <= nvme_tcp_inline_data_size(req->queue);
176 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
178 return req->iter.bvec->bv_page;
181 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
183 return req->iter.bvec->bv_offset + req->iter.iov_offset;
186 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
188 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
189 req->pdu_len - req->pdu_sent);
192 static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
194 return req->iter.iov_offset;
197 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
199 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
200 req->pdu_len - req->pdu_sent : 0;
203 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
206 return nvme_tcp_pdu_data_left(req) <= len;
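/*
 * Set up req->iter as a bvec iterator over the request payload, in the
 * given data direction: either the request's special payload
 * (RQF_SPECIAL_PAYLOAD) or the segments of the current bio.
 */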
209 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
212 struct request *rq = blk_mq_rq_from_pdu(req);
218 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
219 vec = &rq->special_vec;
221 size = blk_rq_payload_bytes(rq);
224 struct bio *bio = req->curr_bio;
226 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
227 nsegs = bio_segments(bio);
228 size = bio->bi_iter.bi_size;
229 offset = bio->bi_iter.bi_bvec_done;
232 iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
233 req->iter.iov_offset = offset;
236 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
239 req->data_sent += len;
240 req->pdu_sent += len;
241 iov_iter_advance(&req->iter, len);
242 if (!iov_iter_count(&req->iter) &&
243 req->data_sent < req->data_len) {
244 req->curr_bio = req->curr_bio->bi_next;
245 nvme_tcp_init_iter(req, WRITE);
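/*
 * Add the request to the queue's send list under the queue lock and kick
 * io_work on the queue's assigned CPU to transmit it.
 */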
249 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
251 struct nvme_tcp_queue *queue = req->queue;
253 spin_lock(&queue->lock);
254 list_add_tail(&req->entry, &queue->send_list);
255 spin_unlock(&queue->lock);
257 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
260 static inline struct nvme_tcp_request *
261 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
263 struct nvme_tcp_request *req;
265 spin_lock(&queue->lock);
266 req = list_first_entry_or_null(&queue->send_list,
267 struct nvme_tcp_request, entry);
269 list_del(&req->entry);
270 spin_unlock(&queue->lock);
275 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
278 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
279 crypto_ahash_final(hash);
282 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
283 struct page *page, off_t off, size_t len)
285 struct scatterlist sg;
287 sg_init_marker(&sg, 1);
288 sg_set_page(&sg, page, len, off);
289 ahash_request_set_crypt(hash, &sg, NULL, len);
290 crypto_ahash_update(hash);
293 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
294 void *pdu, size_t len)
296 struct scatterlist sg;
298 sg_init_one(&sg, pdu, len);
299 ahash_request_set_crypt(hash, &sg, pdu + len, len);
300 crypto_ahash_digest(hash);
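/*
 * Verify the PDU header digest: fail if HDGST was negotiated but the flag
 * is cleared, otherwise save the received digest that trails the header,
 * recompute it over the header and compare the two.
 */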
303 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
304 void *pdu, size_t pdu_len)
306 struct nvme_tcp_hdr *hdr = pdu;
310 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
311 dev_err(queue->ctrl->ctrl.device,
312 "queue %d: header digest flag is cleared\n",
313 nvme_tcp_queue_id(queue));
317 recv_digest = *(__le32 *)(pdu + hdr->hlen);
318 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
319 exp_digest = *(__le32 *)(pdu + hdr->hlen);
320 if (recv_digest != exp_digest) {
321 dev_err(queue->ctrl->ctrl.device,
322 "header digest error: recv %#x expected %#x\n",
323 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
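/*
 * Prepare for data digest reception: derive the payload length from plen,
 * error out if a data-bearing PDU arrives without the DDGST flag, and
 * re-initialize the receive hash for the incoming payload.
 */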
330 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
332 struct nvme_tcp_hdr *hdr = pdu;
333 u8 digest_len = nvme_tcp_hdgst_len(queue);
336 len = le32_to_cpu(hdr->plen) - hdr->hlen -
337 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
339 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
340 dev_err(queue->ctrl->ctrl.device,
341 "queue %d: data digest flag is cleared\n",
342 nvme_tcp_queue_id(queue));
345 crypto_ahash_init(queue->rcv_hash);
350 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
351 struct request *rq, unsigned int hctx_idx)
353 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
355 page_frag_free(req->pdu);
358 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
359 struct request *rq, unsigned int hctx_idx,
360 unsigned int numa_node)
362 struct nvme_tcp_ctrl *ctrl = set->driver_data;
363 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
364 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
365 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
366 u8 hdgst = nvme_tcp_hdgst_len(queue);
368 req->pdu = page_frag_alloc(&queue->pf_cache,
369 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
370 GFP_KERNEL | __GFP_ZERO);
375 nvme_req(rq)->ctrl = &ctrl->ctrl;
380 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
381 unsigned int hctx_idx)
383 struct nvme_tcp_ctrl *ctrl = data;
384 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
386 hctx->driver_data = queue;
390 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
391 unsigned int hctx_idx)
393 struct nvme_tcp_ctrl *ctrl = data;
394 struct nvme_tcp_queue *queue = &ctrl->queues[0];
396 hctx->driver_data = queue;
400 static enum nvme_tcp_recv_state
401 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
403 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
404 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
408 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
410 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
411 nvme_tcp_hdgst_len(queue);
412 queue->pdu_offset = 0;
413 queue->data_remaining = -1;
414 queue->ddgst_remaining = 0;
417 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
419 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
422 queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
425 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
426 struct nvme_completion *cqe)
430 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
432 dev_err(queue->ctrl->ctrl.device,
433 "queue %d tag 0x%x not found\n",
434 nvme_tcp_queue_id(queue), cqe->command_id);
435 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
439 nvme_end_request(rq, cqe->status, cqe->result);
444 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
445 struct nvme_tcp_data_pdu *pdu)
449 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
451 dev_err(queue->ctrl->ctrl.device,
452 "queue %d tag %#x not found\n",
453 nvme_tcp_queue_id(queue), pdu->command_id);
457 if (!blk_rq_payload_bytes(rq)) {
458 dev_err(queue->ctrl->ctrl.device,
459 "queue %d tag %#x unexpected data\n",
460 nvme_tcp_queue_id(queue), rq->tag);
464 queue->data_remaining = le32_to_cpu(pdu->data_length);
466 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
467 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
468 dev_err(queue->ctrl->ctrl.device,
469 "queue %d tag %#x SUCCESS set but not last PDU\n",
470 nvme_tcp_queue_id(queue), rq->tag);
471 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
479 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
480 struct nvme_tcp_rsp_pdu *pdu)
482 struct nvme_completion *cqe = &pdu->cqe;
486 * AEN requests are special as they don't time out and can
487 * survive any kind of queue freeze and often don't respond to
488 * aborts. We don't even bother to allocate a struct request
489 * for them but rather special case them here.
491 if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
492 cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
493 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
496 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
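/*
 * Build the H2C data PDU that answers a controller R2T: validate the
 * requested length and offset against what has already been sent, then
 * fill in the header, digest flags, ttag and data offset/length.
 */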
501 static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
502 struct nvme_tcp_r2t_pdu *pdu)
504 struct nvme_tcp_data_pdu *data = req->pdu;
505 struct nvme_tcp_queue *queue = req->queue;
506 struct request *rq = blk_mq_rq_from_pdu(req);
507 u8 hdgst = nvme_tcp_hdgst_len(queue);
508 u8 ddgst = nvme_tcp_ddgst_len(queue);
510 req->pdu_len = le32_to_cpu(pdu->r2t_length);
513 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
514 dev_err(queue->ctrl->ctrl.device,
515 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
516 rq->tag, req->pdu_len, req->data_len,
521 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
522 dev_err(queue->ctrl->ctrl.device,
523 "req %d unexpected r2t offset %u (expected %zu)\n",
524 rq->tag, le32_to_cpu(pdu->r2t_offset),
529 memset(data, 0, sizeof(*data));
530 data->hdr.type = nvme_tcp_h2c_data;
531 data->hdr.flags = NVME_TCP_F_DATA_LAST;
532 if (queue->hdr_digest)
533 data->hdr.flags |= NVME_TCP_F_HDGST;
534 if (queue->data_digest)
535 data->hdr.flags |= NVME_TCP_F_DDGST;
536 data->hdr.hlen = sizeof(*data);
537 data->hdr.pdo = data->hdr.hlen + hdgst;
539 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
540 data->ttag = pdu->ttag;
541 data->command_id = rq->tag;
542 data->data_offset = cpu_to_le32(req->data_sent);
543 data->data_length = cpu_to_le32(req->pdu_len);
547 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
548 struct nvme_tcp_r2t_pdu *pdu)
550 struct nvme_tcp_request *req;
554 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
556 dev_err(queue->ctrl->ctrl.device,
557 "queue %d tag %#x not found\n",
558 nvme_tcp_queue_id(queue), pdu->command_id);
561 req = blk_mq_rq_to_pdu(rq);
563 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
567 req->state = NVME_TCP_SEND_H2C_PDU;
570 nvme_tcp_queue_request(req);
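/*
 * Reassemble a PDU header from the socket: copy up to pdu_remaining bytes
 * and, once the header is complete, verify digests and dispatch by PDU
 * type (C2H data, response capsule or R2T).
 */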
575 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
576 unsigned int *offset, size_t *len)
578 struct nvme_tcp_hdr *hdr;
579 char *pdu = queue->pdu;
580 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
583 ret = skb_copy_bits(skb, *offset,
584 &pdu[queue->pdu_offset], rcv_len);
588 queue->pdu_remaining -= rcv_len;
589 queue->pdu_offset += rcv_len;
592 if (queue->pdu_remaining)
596 if (queue->hdr_digest) {
597 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
603 if (queue->data_digest) {
604 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
610 case nvme_tcp_c2h_data:
611 ret = nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
614 nvme_tcp_init_recv_ctx(queue);
615 ret = nvme_tcp_handle_comp(queue, (void *)queue->pdu);
618 nvme_tcp_init_recv_ctx(queue);
619 ret = nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
622 dev_err(queue->ctrl->ctrl.device,
623 "unsupported pdu type (%d)\n", hdr->type);
630 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
632 union nvme_result res = {};
634 nvme_end_request(rq, cpu_to_le16(status << 1), res);
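/*
 * Copy C2H data from the skb into the request's bio pages, advancing the
 * bvec iterator across bios and updating the data digest when enabled.
 * Once the PDU payload is consumed, either expect a trailing data digest
 * or complete the request if SUCCESS was indicated.
 */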
638 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
639 unsigned int *offset, size_t *len)
641 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
642 struct nvme_tcp_request *req;
645 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
647 dev_err(queue->ctrl->ctrl.device,
648 "queue %d tag %#x not found\n",
649 nvme_tcp_queue_id(queue), pdu->command_id);
652 req = blk_mq_rq_to_pdu(rq);
657 recv_len = min_t(size_t, *len, queue->data_remaining);
661 if (!iov_iter_count(&req->iter)) {
662 req->curr_bio = req->curr_bio->bi_next;
665 * If we don't have any bios it means that the controller
666 * sent more data than we requested, hence error
668 if (!req->curr_bio) {
669 dev_err(queue->ctrl->ctrl.device,
670 "queue %d no space in request %#x",
671 nvme_tcp_queue_id(queue), rq->tag);
672 nvme_tcp_init_recv_ctx(queue);
675 nvme_tcp_init_iter(req, READ);
678 /* we can read only from what is left in this bio */
679 recv_len = min_t(size_t, recv_len,
680 iov_iter_count(&req->iter));
682 if (queue->data_digest)
683 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
684 &req->iter, recv_len, queue->rcv_hash);
686 ret = skb_copy_datagram_iter(skb, *offset,
687 &req->iter, recv_len);
689 dev_err(queue->ctrl->ctrl.device,
690 "queue %d failed to copy request %#x data",
691 nvme_tcp_queue_id(queue), rq->tag);
697 queue->data_remaining -= recv_len;
700 if (!queue->data_remaining) {
701 if (queue->data_digest) {
702 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
703 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
705 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
706 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
707 nvme_tcp_init_recv_ctx(queue);
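/*
 * Receive the 4-byte data digest that trails the C2H data, compare it with
 * the digest computed while copying the payload, and complete the request
 * if the controller indicated SUCCESS.
 */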
714 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
715 struct sk_buff *skb, unsigned int *offset, size_t *len)
717 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
718 char *ddgst = (char *)&queue->recv_ddgst;
719 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
720 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
723 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
727 queue->ddgst_remaining -= recv_len;
730 if (queue->ddgst_remaining)
733 if (queue->recv_ddgst != queue->exp_ddgst) {
734 dev_err(queue->ctrl->ctrl.device,
735 "data digest error: recv %#x expected %#x\n",
736 le32_to_cpu(queue->recv_ddgst),
737 le32_to_cpu(queue->exp_ddgst));
741 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
742 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
745 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
748 nvme_tcp_init_recv_ctx(queue);
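/*
 * tcp_read_sock() callback: consume the skb according to the current
 * receive state (PDU header, data or data digest) and trigger error
 * recovery if any stage fails.
 */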
752 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
753 unsigned int offset, size_t len)
755 struct nvme_tcp_queue *queue = desc->arg.data;
756 size_t consumed = len;
760 switch (nvme_tcp_recv_state(queue)) {
761 case NVME_TCP_RECV_PDU:
762 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
764 case NVME_TCP_RECV_DATA:
765 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
767 case NVME_TCP_RECV_DDGST:
768 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
774 dev_err(queue->ctrl->ctrl.device,
775 "receive failed: %d\n", result);
776 queue->rd_enabled = false;
777 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
785 static void nvme_tcp_data_ready(struct sock *sk)
787 struct nvme_tcp_queue *queue;
789 read_lock(&sk->sk_callback_lock);
790 queue = sk->sk_user_data;
791 if (likely(queue && queue->rd_enabled))
792 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
793 read_unlock(&sk->sk_callback_lock);
796 static void nvme_tcp_write_space(struct sock *sk)
798 struct nvme_tcp_queue *queue;
800 read_lock_bh(&sk->sk_callback_lock);
801 queue = sk->sk_user_data;
802 if (likely(queue && sk_stream_is_writeable(sk))) {
803 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
804 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
806 read_unlock_bh(&sk->sk_callback_lock);
809 static void nvme_tcp_state_change(struct sock *sk)
811 struct nvme_tcp_queue *queue;
813 read_lock(&sk->sk_callback_lock);
814 queue = sk->sk_user_data;
818 switch (sk->sk_state) {
825 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
828 dev_info(queue->ctrl->ctrl.device,
829 "queue %d socket state %d\n",
830 nvme_tcp_queue_id(queue), sk->sk_state);
833 queue->state_change(sk);
835 read_unlock(&sk->sk_callback_lock);
838 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
840 queue->request = NULL;
843 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
845 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
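/*
 * Send the current data pages with kernel_sendpage(), updating the send
 * digest as we go. On the last fully sent chunk, either move on to sending
 * the data digest or mark the request send as done.
 */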
848 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
850 struct nvme_tcp_queue *queue = req->queue;
853 struct page *page = nvme_tcp_req_cur_page(req);
854 size_t offset = nvme_tcp_req_cur_offset(req);
855 size_t len = nvme_tcp_req_cur_length(req);
856 bool last = nvme_tcp_pdu_last_send(req, len);
857 int ret, flags = MSG_DONTWAIT;
859 if (last && !queue->data_digest)
864 ret = kernel_sendpage(queue->sock, page, offset, len, flags);
868 nvme_tcp_advance_req(req, ret);
869 if (queue->data_digest)
870 nvme_tcp_ddgst_update(queue->snd_hash, page,
873 /* fully successful last write */
874 if (last && ret == len) {
875 if (queue->data_digest) {
876 nvme_tcp_ddgst_final(queue->snd_hash,
878 req->state = NVME_TCP_SEND_DDGST;
881 nvme_tcp_done_send_req(queue);
889 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
891 struct nvme_tcp_queue *queue = req->queue;
892 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
893 bool inline_data = nvme_tcp_has_inline_data(req);
894 int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
895 u8 hdgst = nvme_tcp_hdgst_len(queue);
896 int len = sizeof(*pdu) + hdgst - req->offset;
899 if (queue->hdr_digest && !req->offset)
900 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
902 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
903 offset_in_page(pdu) + req->offset, len, flags);
904 if (unlikely(ret <= 0))
910 req->state = NVME_TCP_SEND_DATA;
911 if (queue->data_digest)
912 crypto_ahash_init(queue->snd_hash);
913 nvme_tcp_init_iter(req, WRITE);
915 nvme_tcp_done_send_req(queue);
924 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
926 struct nvme_tcp_queue *queue = req->queue;
927 struct nvme_tcp_data_pdu *pdu = req->pdu;
928 u8 hdgst = nvme_tcp_hdgst_len(queue);
929 int len = sizeof(*pdu) - req->offset + hdgst;
932 if (queue->hdr_digest && !req->offset)
933 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
935 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
936 offset_in_page(pdu) + req->offset, len,
937 MSG_DONTWAIT | MSG_MORE);
938 if (unlikely(ret <= 0))
943 req->state = NVME_TCP_SEND_DATA;
944 if (queue->data_digest)
945 crypto_ahash_init(queue->snd_hash);
947 nvme_tcp_init_iter(req, WRITE);
955 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
957 struct nvme_tcp_queue *queue = req->queue;
959 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
961 .iov_base = &req->ddgst + req->offset,
962 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
965 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
966 if (unlikely(ret <= 0))
969 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
970 nvme_tcp_done_send_req(queue);
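/*
 * Drive the per-request send state machine: command PDU, then (for writes)
 * H2C data PDU, data pages and finally the data digest, fetching the next
 * request from the send list when none is in flight.
 */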
978 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
980 struct nvme_tcp_request *req;
983 if (!queue->request) {
984 queue->request = nvme_tcp_fetch_request(queue);
988 req = queue->request;
990 if (req->state == NVME_TCP_SEND_CMD_PDU) {
991 ret = nvme_tcp_try_send_cmd_pdu(req);
994 if (!nvme_tcp_has_inline_data(req))
998 if (req->state == NVME_TCP_SEND_H2C_PDU) {
999 ret = nvme_tcp_try_send_data_pdu(req);
1004 if (req->state == NVME_TCP_SEND_DATA) {
1005 ret = nvme_tcp_try_send_data(req);
1010 if (req->state == NVME_TCP_SEND_DDGST)
1011 ret = nvme_tcp_try_send_ddgst(req);
1018 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1020 struct sock *sk = queue->sock->sk;
1021 read_descriptor_t rd_desc;
1024 rd_desc.arg.data = queue;
1027 consumed = tcp_read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
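/*
 * Per-queue I/O worker: alternate between sending queued requests and
 * receiving from the socket within a small time quota, then requeue
 * itself to continue processing.
 */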
1032 static void nvme_tcp_io_work(struct work_struct *w)
1034 struct nvme_tcp_queue *queue =
1035 container_of(w, struct nvme_tcp_queue, io_work);
1036 unsigned long start = jiffies + msecs_to_jiffies(1);
1039 bool pending = false;
1042 result = nvme_tcp_try_send(queue);
1045 } else if (unlikely(result < 0)) {
1046 dev_err(queue->ctrl->ctrl.device,
1047 "failed to send request %d\n", result);
1048 if (result != -EPIPE)
1049 nvme_tcp_fail_request(queue->request);
1050 nvme_tcp_done_send_req(queue);
1054 result = nvme_tcp_try_recv(queue);
1061 } while (time_after(jiffies, start)); /* quota is exhausted */
1063 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1066 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1068 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1070 ahash_request_free(queue->rcv_hash);
1071 ahash_request_free(queue->snd_hash);
1072 crypto_free_ahash(tfm);
1075 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1077 struct crypto_ahash *tfm;
1079 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1081 return PTR_ERR(tfm);
1083 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1084 if (!queue->snd_hash)
1086 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1088 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1089 if (!queue->rcv_hash)
1091 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1095 ahash_request_free(queue->snd_hash);
1097 crypto_free_ahash(tfm);
1101 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1103 struct nvme_tcp_request *async = &ctrl->async_req;
1105 page_frag_free(async->pdu);
1108 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1110 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1111 struct nvme_tcp_request *async = &ctrl->async_req;
1112 u8 hdgst = nvme_tcp_hdgst_len(queue);
1114 async->pdu = page_frag_alloc(&queue->pf_cache,
1115 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1116 GFP_KERNEL | __GFP_ZERO);
1120 async->queue = &ctrl->queues[0];
1124 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1126 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1127 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1129 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1132 if (queue->hdr_digest || queue->data_digest)
1133 nvme_tcp_free_crypto(queue);
1135 sock_release(queue->sock);
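/*
 * NVMe/TCP initialize connection: send an ICReq PDU, wait for the ICResp
 * and validate the PFV, PDU length, CPDA and that the digest settings
 * match what the host requested.
 */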
1139 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1141 struct nvme_tcp_icreq_pdu *icreq;
1142 struct nvme_tcp_icresp_pdu *icresp;
1143 struct msghdr msg = {};
1145 bool ctrl_hdgst, ctrl_ddgst;
1148 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1152 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1158 icreq->hdr.type = nvme_tcp_icreq;
1159 icreq->hdr.hlen = sizeof(*icreq);
1161 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1162 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1163 icreq->maxr2t = 0; /* single inflight r2t supported */
1164 icreq->hpda = 0; /* no alignment constraint */
1165 if (queue->hdr_digest)
1166 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1167 if (queue->data_digest)
1168 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1170 iov.iov_base = icreq;
1171 iov.iov_len = sizeof(*icreq);
1172 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1176 memset(&msg, 0, sizeof(msg));
1177 iov.iov_base = icresp;
1178 iov.iov_len = sizeof(*icresp);
1179 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1180 iov.iov_len, msg.msg_flags);
1185 if (icresp->hdr.type != nvme_tcp_icresp) {
1186 pr_err("queue %d: bad type returned %d\n",
1187 nvme_tcp_queue_id(queue), icresp->hdr.type);
1191 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1192 pr_err("queue %d: bad pdu length returned %d\n",
1193 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1197 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1198 pr_err("queue %d: bad pfv returned %d\n",
1199 nvme_tcp_queue_id(queue), icresp->pfv);
1203 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1204 if ((queue->data_digest && !ctrl_ddgst) ||
1205 (!queue->data_digest && ctrl_ddgst)) {
1206 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1207 nvme_tcp_queue_id(queue),
1208 queue->data_digest ? "enabled" : "disabled",
1209 ctrl_ddgst ? "enabled" : "disabled");
1213 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1214 if ((queue->hdr_digest && !ctrl_hdgst) ||
1215 (!queue->hdr_digest && ctrl_hdgst)) {
1216 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1217 nvme_tcp_queue_id(queue),
1218 queue->hdr_digest ? "enabled" : "disabled",
1219 ctrl_hdgst ? "enabled" : "disabled");
1223 if (icresp->cpda != 0) {
1224 pr_err("queue %d: unsupported cpda returned %d\n",
1225 nvme_tcp_queue_id(queue), icresp->cpda);
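/*
 * Allocate and connect one host queue: create the TCP socket, set socket
 * options (SYN count, TCP_NODELAY, SO_LINGER), pick an io_cpu, optionally
 * bind to the host traddr, allocate digests and the receive PDU buffer,
 * connect to the target, run the ICReq/ICResp exchange and install the
 * socket callbacks.
 */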
1237 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1238 int qid, size_t queue_size)
1240 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1241 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1242 struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1243 int ret, opt, rcv_pdu_size, n;
1246 INIT_LIST_HEAD(&queue->send_list);
1247 spin_lock_init(&queue->lock);
1248 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1249 queue->queue_size = queue_size;
1252 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1254 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1255 NVME_TCP_ADMIN_CCSZ;
1257 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1258 IPPROTO_TCP, &queue->sock);
1260 dev_err(ctrl->ctrl.device,
1261 "failed to create socket: %d\n", ret);
1265 /* Single SYN retry */
1267 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1268 (char *)&opt, sizeof(opt));
1270 dev_err(ctrl->ctrl.device,
1271 "failed to set TCP_SYNCNT sock opt %d\n", ret);
1275 /* Set TCP no delay */
1277 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1278 TCP_NODELAY, (char *)&opt, sizeof(opt));
1280 dev_err(ctrl->ctrl.device,
1281 "failed to set TCP_NODELAY sock opt %d\n", ret);
1286 * Clean up whatever is sitting in the TCP transmit queue on socket
1287 * close. This is done to prevent stale data from being sent should
1288 * the network connection be restored before TCP times out.
1290 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1291 (char *)&sol, sizeof(sol));
1293 dev_err(ctrl->ctrl.device,
1294 "failed to set SO_LINGER sock opt %d\n", ret);
1298 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1302 n = (qid - 1) % num_online_cpus();
1303 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1304 queue->request = NULL;
1305 queue->data_remaining = 0;
1306 queue->ddgst_remaining = 0;
1307 queue->pdu_remaining = 0;
1308 queue->pdu_offset = 0;
1309 sk_set_memalloc(queue->sock->sk);
1311 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
1312 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1313 sizeof(ctrl->src_addr));
1315 dev_err(ctrl->ctrl.device,
1316 "failed to bind queue %d socket %d\n",
1322 queue->hdr_digest = nctrl->opts->hdr_digest;
1323 queue->data_digest = nctrl->opts->data_digest;
1324 if (queue->hdr_digest || queue->data_digest) {
1325 ret = nvme_tcp_alloc_crypto(queue);
1327 dev_err(ctrl->ctrl.device,
1328 "failed to allocate queue %d crypto\n", qid);
1333 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1334 nvme_tcp_hdgst_len(queue);
1335 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1341 dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
1342 nvme_tcp_queue_id(queue));
1344 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1345 sizeof(ctrl->addr), 0);
1347 dev_err(ctrl->ctrl.device,
1348 "failed to connect socket: %d\n", ret);
1352 ret = nvme_tcp_init_connection(queue);
1354 goto err_init_connect;
1356 queue->rd_enabled = true;
1357 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1358 nvme_tcp_init_recv_ctx(queue);
1360 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1361 queue->sock->sk->sk_user_data = queue;
1362 queue->state_change = queue->sock->sk->sk_state_change;
1363 queue->data_ready = queue->sock->sk->sk_data_ready;
1364 queue->write_space = queue->sock->sk->sk_write_space;
1365 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1366 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1367 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1368 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1373 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1377 if (queue->hdr_digest || queue->data_digest)
1378 nvme_tcp_free_crypto(queue);
1380 sock_release(queue->sock);
1385 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1387 struct socket *sock = queue->sock;
1389 write_lock_bh(&sock->sk->sk_callback_lock);
1390 sock->sk->sk_user_data = NULL;
1391 sock->sk->sk_data_ready = queue->data_ready;
1392 sock->sk->sk_state_change = queue->state_change;
1393 sock->sk->sk_write_space = queue->write_space;
1394 write_unlock_bh(&sock->sk->sk_callback_lock);
1397 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1399 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1400 nvme_tcp_restore_sock_calls(queue);
1401 cancel_work_sync(&queue->io_work);
1404 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1406 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1407 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1409 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1412 __nvme_tcp_stop_queue(queue);
1415 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1417 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1421 ret = nvmf_connect_io_queue(nctrl, idx, false);
1423 ret = nvmf_connect_admin_queue(nctrl);
1426 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1428 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1429 dev_err(nctrl->device,
1430 "failed to connect queue: %d ret=%d\n", idx, ret);
1435 static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1438 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1439 struct blk_mq_tag_set *set;
1443 set = &ctrl->admin_tag_set;
1444 memset(set, 0, sizeof(*set));
1445 set->ops = &nvme_tcp_admin_mq_ops;
1446 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1447 set->reserved_tags = 2; /* connect + keep-alive */
1448 set->numa_node = NUMA_NO_NODE;
1449 set->cmd_size = sizeof(struct nvme_tcp_request);
1450 set->driver_data = ctrl;
1451 set->nr_hw_queues = 1;
1452 set->timeout = ADMIN_TIMEOUT;
1454 set = &ctrl->tag_set;
1455 memset(set, 0, sizeof(*set));
1456 set->ops = &nvme_tcp_mq_ops;
1457 set->queue_depth = nctrl->sqsize + 1;
1458 set->reserved_tags = 1; /* fabric connect */
1459 set->numa_node = NUMA_NO_NODE;
1460 set->flags = BLK_MQ_F_SHOULD_MERGE;
1461 set->cmd_size = sizeof(struct nvme_tcp_request);
1462 set->driver_data = ctrl;
1463 set->nr_hw_queues = nctrl->queue_count - 1;
1464 set->timeout = NVME_IO_TIMEOUT;
1465 set->nr_maps = 2 /* default + read */;
1468 ret = blk_mq_alloc_tag_set(set);
1470 return ERR_PTR(ret);
1475 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1477 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1478 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1479 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1482 nvme_tcp_free_queue(ctrl, 0);
1485 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1489 for (i = 1; i < ctrl->queue_count; i++)
1490 nvme_tcp_free_queue(ctrl, i);
1493 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1497 for (i = 1; i < ctrl->queue_count; i++)
1498 nvme_tcp_stop_queue(ctrl, i);
1501 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1505 for (i = 1; i < ctrl->queue_count; i++) {
1506 ret = nvme_tcp_start_queue(ctrl, i);
1508 goto out_stop_queues;
1514 for (i--; i >= 1; i--)
1515 nvme_tcp_stop_queue(ctrl, i);
1519 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1523 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1527 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1529 goto out_free_queue;
1534 nvme_tcp_free_queue(ctrl, 0);
1538 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1542 for (i = 1; i < ctrl->queue_count; i++) {
1543 ret = nvme_tcp_alloc_queue(ctrl, i,
1546 goto out_free_queues;
1552 for (i--; i >= 1; i--)
1553 nvme_tcp_free_queue(ctrl, i);
1558 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1560 unsigned int nr_io_queues;
1562 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1563 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1565 return nr_io_queues;
1568 static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
1570 unsigned int nr_io_queues;
1573 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1574 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1578 ctrl->queue_count = nr_io_queues + 1;
1579 if (ctrl->queue_count < 2)
1582 dev_info(ctrl->device,
1583 "creating %d I/O queues.\n", nr_io_queues);
1585 return nvme_tcp_alloc_io_queues(ctrl);
1588 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1590 nvme_tcp_stop_io_queues(ctrl);
1592 blk_cleanup_queue(ctrl->connect_q);
1593 blk_mq_free_tag_set(ctrl->tagset);
1595 nvme_tcp_free_io_queues(ctrl);
1598 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1602 ret = nvme_alloc_io_queues(ctrl);
1607 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1608 if (IS_ERR(ctrl->tagset)) {
1609 ret = PTR_ERR(ctrl->tagset);
1610 goto out_free_io_queues;
1613 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1614 if (IS_ERR(ctrl->connect_q)) {
1615 ret = PTR_ERR(ctrl->connect_q);
1616 goto out_free_tag_set;
1619 blk_mq_update_nr_hw_queues(ctrl->tagset,
1620 ctrl->queue_count - 1);
1623 ret = nvme_tcp_start_io_queues(ctrl);
1625 goto out_cleanup_connect_q;
1629 out_cleanup_connect_q:
1631 blk_cleanup_queue(ctrl->connect_q);
1634 blk_mq_free_tag_set(ctrl->tagset);
1636 nvme_tcp_free_io_queues(ctrl);
1640 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1642 nvme_tcp_stop_queue(ctrl, 0);
1644 blk_cleanup_queue(ctrl->admin_q);
1645 blk_mq_free_tag_set(ctrl->admin_tagset);
1647 nvme_tcp_free_admin_queue(ctrl);
1650 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1654 error = nvme_tcp_alloc_admin_queue(ctrl);
1659 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1660 if (IS_ERR(ctrl->admin_tagset)) {
1661 error = PTR_ERR(ctrl->admin_tagset);
1662 goto out_free_queue;
1665 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1666 if (IS_ERR(ctrl->admin_q)) {
1667 error = PTR_ERR(ctrl->admin_q);
1668 goto out_free_tagset;
1672 error = nvme_tcp_start_queue(ctrl, 0);
1674 goto out_cleanup_queue;
1676 error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
1678 dev_err(ctrl->device,
1679 "prop_get NVME_REG_CAP failed\n");
1680 goto out_stop_queue;
1683 ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
1685 error = nvme_enable_ctrl(ctrl, ctrl->cap);
1687 goto out_stop_queue;
1689 error = nvme_init_identify(ctrl);
1691 goto out_stop_queue;
1696 nvme_tcp_stop_queue(ctrl, 0);
1699 blk_cleanup_queue(ctrl->admin_q);
1702 blk_mq_free_tag_set(ctrl->admin_tagset);
1704 nvme_tcp_free_admin_queue(ctrl);
1708 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1711 blk_mq_quiesce_queue(ctrl->admin_q);
1712 nvme_tcp_stop_queue(ctrl, 0);
1713 blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
1714 blk_mq_unquiesce_queue(ctrl->admin_q);
1715 nvme_tcp_destroy_admin_queue(ctrl, remove);
1718 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1721 if (ctrl->queue_count <= 1)
1723 nvme_stop_queues(ctrl);
1724 nvme_tcp_stop_io_queues(ctrl);
1725 blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
1727 nvme_start_queues(ctrl);
1728 nvme_tcp_destroy_io_queues(ctrl, remove);
1731 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1733 /* If we are resetting/deleting then do nothing */
1734 if (ctrl->state != NVME_CTRL_CONNECTING) {
1735 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1736 ctrl->state == NVME_CTRL_LIVE);
1740 if (nvmf_should_reconnect(ctrl)) {
1741 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1742 ctrl->opts->reconnect_delay);
1743 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1744 ctrl->opts->reconnect_delay * HZ);
1746 dev_info(ctrl->device, "Removing controller...\n");
1747 nvme_delete_ctrl(ctrl);
1751 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1753 struct nvmf_ctrl_options *opts = ctrl->opts;
1756 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1761 dev_err(ctrl->device, "icdoff is not supported!\n");
1765 if (opts->queue_size > ctrl->sqsize + 1)
1766 dev_warn(ctrl->device,
1767 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1768 opts->queue_size, ctrl->sqsize + 1);
1770 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1771 dev_warn(ctrl->device,
1772 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1773 ctrl->sqsize + 1, ctrl->maxcmd);
1774 ctrl->sqsize = ctrl->maxcmd - 1;
1777 if (ctrl->queue_count > 1) {
1778 ret = nvme_tcp_configure_io_queues(ctrl, new);
1783 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1784 /* state change failure is ok if we're in DELETING state */
1785 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1790 nvme_start_ctrl(ctrl);
1794 if (ctrl->queue_count > 1)
1795 nvme_tcp_destroy_io_queues(ctrl, new);
1797 nvme_tcp_stop_queue(ctrl, 0);
1798 nvme_tcp_destroy_admin_queue(ctrl, new);
1802 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1804 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1805 struct nvme_tcp_ctrl, connect_work);
1806 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1808 ++ctrl->nr_reconnects;
1810 if (nvme_tcp_setup_ctrl(ctrl, false))
1813 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
1814 ctrl->nr_reconnects);
1816 ctrl->nr_reconnects = 0;
1821 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1822 ctrl->nr_reconnects);
1823 nvme_tcp_reconnect_or_remove(ctrl);
1826 static void nvme_tcp_error_recovery_work(struct work_struct *work)
1828 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1829 struct nvme_tcp_ctrl, err_work);
1830 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1832 nvme_stop_keep_alive(ctrl);
1833 nvme_tcp_teardown_io_queues(ctrl, false);
1834 /* unquiesce to fast-fail pending requests */
1835 nvme_start_queues(ctrl);
1836 nvme_tcp_teardown_admin_queue(ctrl, false);
1838 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1839 /* state change failure is ok if we're in DELETING state */
1840 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1844 nvme_tcp_reconnect_or_remove(ctrl);
1847 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
1849 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1850 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1852 nvme_tcp_teardown_io_queues(ctrl, shutdown);
1854 nvme_shutdown_ctrl(ctrl);
1856 nvme_disable_ctrl(ctrl, ctrl->cap);
1857 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
1860 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
1862 nvme_tcp_teardown_ctrl(ctrl, true);
1865 static void nvme_reset_ctrl_work(struct work_struct *work)
1867 struct nvme_ctrl *ctrl =
1868 container_of(work, struct nvme_ctrl, reset_work);
1870 nvme_stop_ctrl(ctrl);
1871 nvme_tcp_teardown_ctrl(ctrl, false);
1873 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1874 /* state change failure is ok if we're in DELETING state */
1875 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1879 if (nvme_tcp_setup_ctrl(ctrl, false))
1885 ++ctrl->nr_reconnects;
1886 nvme_tcp_reconnect_or_remove(ctrl);
1889 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
1891 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1893 if (list_empty(&ctrl->list))
1896 mutex_lock(&nvme_tcp_ctrl_mutex);
1897 list_del(&ctrl->list);
1898 mutex_unlock(&nvme_tcp_ctrl_mutex);
1900 nvmf_free_options(nctrl->opts);
1902 kfree(ctrl->queues);
1906 static void nvme_tcp_set_sg_null(struct nvme_command *c)
1908 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1912 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1913 NVME_SGL_FMT_TRANSPORT_A;
1916 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
1917 struct nvme_command *c, u32 data_len)
1919 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1921 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1922 sg->length = cpu_to_le32(data_len);
1923 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1926 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
1929 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1932 sg->length = cpu_to_le32(data_len);
1933 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1934 NVME_SGL_FMT_TRANSPORT_A;
1937 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
1939 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
1940 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1941 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
1942 struct nvme_command *cmd = &pdu->cmd;
1943 u8 hdgst = nvme_tcp_hdgst_len(queue);
1945 memset(pdu, 0, sizeof(*pdu));
1946 pdu->hdr.type = nvme_tcp_cmd;
1947 if (queue->hdr_digest)
1948 pdu->hdr.flags |= NVME_TCP_F_HDGST;
1949 pdu->hdr.hlen = sizeof(*pdu);
1950 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
1952 cmd->common.opcode = nvme_admin_async_event;
1953 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1954 cmd->common.flags |= NVME_CMD_SGL_METABUF;
1955 nvme_tcp_set_sg_null(cmd);
1957 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
1958 ctrl->async_req.offset = 0;
1959 ctrl->async_req.curr_bio = NULL;
1960 ctrl->async_req.data_len = 0;
1962 nvme_tcp_queue_request(&ctrl->async_req);
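/*
 * Block layer timeout handler: if the controller is not LIVE, tear the
 * queues down immediately (error recovery is already running or the
 * controller is still starting); otherwise kick error recovery and reset
 * the timer.
 */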
1965 static enum blk_eh_timer_return
1966 nvme_tcp_timeout(struct request *rq, bool reserved)
1968 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1969 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
1970 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1972 dev_warn(ctrl->ctrl.device,
1973 "queue %d: timeout request %#x type %d\n",
1974 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
1976 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1978 * Teardown immediately if the controller times out while starting
1979 * or if error recovery has already started. All outstanding
1980 * requests are completed on shutdown, so we return BLK_EH_DONE.
1982 flush_work(&ctrl->err_work);
1983 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
1984 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
1988 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1989 nvme_tcp_error_recovery(&ctrl->ctrl);
1991 return BLK_EH_RESET_TIMER;
1994 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
1997 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1998 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1999 struct nvme_command *c = &pdu->cmd;
2001 c->common.flags |= NVME_CMD_SGL_METABUF;
2003 if (rq_data_dir(rq) == WRITE && req->data_len &&
2004 req->data_len <= nvme_tcp_inline_data_size(queue))
2005 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2007 nvme_tcp_set_sg_host_data(c, req->data_len);
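/*
 * Build the command capsule PDU for a request: set up the nvme command,
 * compute the payload length, decide whether write data fits inline,
 * set the header/data digest flags and pdo, and map the SGL.
 */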
2012 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2015 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2016 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2017 struct nvme_tcp_queue *queue = req->queue;
2018 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2021 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2025 req->state = NVME_TCP_SEND_CMD_PDU;
2030 req->data_len = blk_rq_payload_bytes(rq);
2031 req->curr_bio = rq->bio;
2033 if (rq_data_dir(rq) == WRITE &&
2034 req->data_len <= nvme_tcp_inline_data_size(queue))
2035 req->pdu_len = req->data_len;
2036 else if (req->curr_bio)
2037 nvme_tcp_init_iter(req, READ);
2039 pdu->hdr.type = nvme_tcp_cmd;
2041 if (queue->hdr_digest)
2042 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2043 if (queue->data_digest && req->pdu_len) {
2044 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2045 ddgst = nvme_tcp_ddgst_len(queue);
2047 pdu->hdr.hlen = sizeof(*pdu);
2048 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2050 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2052 ret = nvme_tcp_map_data(queue, rq);
2053 if (unlikely(ret)) {
2054 dev_err(queue->ctrl->ctrl.device,
2055 "Failed to map data (%d)\n", ret);
2062 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2063 const struct blk_mq_queue_data *bd)
2065 struct nvme_ns *ns = hctx->queue->queuedata;
2066 struct nvme_tcp_queue *queue = hctx->driver_data;
2067 struct request *rq = bd->rq;
2068 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2069 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2072 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2073 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2075 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2079 blk_mq_start_request(rq);
2081 nvme_tcp_queue_request(req);
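/*
 * Map blk-mq hardware contexts: when nr_write_queues is set, writes use
 * the default map and reads a separate read map; otherwise both maps
 * share the same set of queues.
 */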
2086 static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2088 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2090 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2091 set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
2092 if (ctrl->ctrl.opts->nr_write_queues) {
2093 /* separate read/write queues */
2094 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2095 ctrl->ctrl.opts->nr_write_queues;
2096 set->map[HCTX_TYPE_READ].queue_offset =
2097 ctrl->ctrl.opts->nr_write_queues;
2099 /* mixed read/write queues */
2100 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2101 ctrl->ctrl.opts->nr_io_queues;
2102 set->map[HCTX_TYPE_READ].queue_offset = 0;
2104 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2105 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2109 static struct blk_mq_ops nvme_tcp_mq_ops = {
2110 .queue_rq = nvme_tcp_queue_rq,
2111 .complete = nvme_complete_rq,
2112 .init_request = nvme_tcp_init_request,
2113 .exit_request = nvme_tcp_exit_request,
2114 .init_hctx = nvme_tcp_init_hctx,
2115 .timeout = nvme_tcp_timeout,
2116 .map_queues = nvme_tcp_map_queues,
2119 static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2120 .queue_rq = nvme_tcp_queue_rq,
2121 .complete = nvme_complete_rq,
2122 .init_request = nvme_tcp_init_request,
2123 .exit_request = nvme_tcp_exit_request,
2124 .init_hctx = nvme_tcp_init_admin_hctx,
2125 .timeout = nvme_tcp_timeout,
2128 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2130 .module = THIS_MODULE,
2131 .flags = NVME_F_FABRICS,
2132 .reg_read32 = nvmf_reg_read32,
2133 .reg_read64 = nvmf_reg_read64,
2134 .reg_write32 = nvmf_reg_write32,
2135 .free_ctrl = nvme_tcp_free_ctrl,
2136 .submit_async_event = nvme_tcp_submit_async_event,
2137 .delete_ctrl = nvme_tcp_delete_ctrl,
2138 .get_address = nvmf_get_address,
2142 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2144 struct nvme_tcp_ctrl *ctrl;
2147 mutex_lock(&nvme_tcp_ctrl_mutex);
2148 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2149 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2153 mutex_unlock(&nvme_tcp_ctrl_mutex);
2158 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2159 struct nvmf_ctrl_options *opts)
2161 struct nvme_tcp_ctrl *ctrl;
2164 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2166 return ERR_PTR(-ENOMEM);
2168 INIT_LIST_HEAD(&ctrl->list);
2169 ctrl->ctrl.opts = opts;
2170 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
2171 ctrl->ctrl.sqsize = opts->queue_size - 1;
2172 ctrl->ctrl.kato = opts->kato;
2174 INIT_DELAYED_WORK(&ctrl->connect_work,
2175 nvme_tcp_reconnect_ctrl_work);
2176 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2177 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2179 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2181 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2182 if (!opts->trsvcid) {
2186 opts->mask |= NVMF_OPT_TRSVCID;
2189 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2190 opts->traddr, opts->trsvcid, &ctrl->addr);
2192 pr_err("malformed address passed: %s:%s\n",
2193 opts->traddr, opts->trsvcid);
2197 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2198 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2199 opts->host_traddr, NULL, &ctrl->src_addr);
2201 pr_err("malformed src address passed: %s\n",
2207 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2212 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2214 if (!ctrl->queues) {
2219 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2221 goto out_kfree_queues;
2223 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2226 goto out_uninit_ctrl;
2229 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2231 goto out_uninit_ctrl;
2233 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2234 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2236 nvme_get_ctrl(&ctrl->ctrl);
2238 mutex_lock(&nvme_tcp_ctrl_mutex);
2239 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2240 mutex_unlock(&nvme_tcp_ctrl_mutex);
2245 nvme_uninit_ctrl(&ctrl->ctrl);
2246 nvme_put_ctrl(&ctrl->ctrl);
2249 return ERR_PTR(ret);
2251 kfree(ctrl->queues);
2254 return ERR_PTR(ret);
2257 static struct nvmf_transport_ops nvme_tcp_transport = {
2259 .module = THIS_MODULE,
2260 .required_opts = NVMF_OPT_TRADDR,
2261 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2262 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2263 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2264 NVMF_OPT_NR_WRITE_QUEUES,
2265 .create_ctrl = nvme_tcp_create_ctrl,
2268 static int __init nvme_tcp_init_module(void)
2270 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2271 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2275 nvmf_register_transport(&nvme_tcp_transport);
2279 static void __exit nvme_tcp_cleanup_module(void)
2281 struct nvme_tcp_ctrl *ctrl;
2283 nvmf_unregister_transport(&nvme_tcp_transport);
2285 mutex_lock(&nvme_tcp_ctrl_mutex);
2286 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2287 nvme_delete_ctrl(&ctrl->ctrl);
2288 mutex_unlock(&nvme_tcp_ctrl_mutex);
2289 flush_workqueue(nvme_delete_wq);
2291 destroy_workqueue(nvme_tcp_wq);
2294 module_init(nvme_tcp_init_module);
2295 module_exit(nvme_tcp_cleanup_module);
2297 MODULE_LICENSE("GPL v2");