1 // SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
16 #include <net/busy_poll.h>
21 struct nvme_tcp_queue;
/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module parameter allows for alternative
 * values that may be unique to some NIC implementations.
 */
29 static int so_priority;
30 module_param(so_priority, int, 0644);
31 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
33 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
40 static struct lock_class_key nvme_tcp_sk_key[2];
41 static struct lock_class_key nvme_tcp_slock_key[2];
static void nvme_tcp_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
					      &nvme_tcp_slock_key[0],
					      "sk_lock-AF_INET-NVME",
					      &nvme_tcp_sk_key[0]);
		break;
	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
					      &nvme_tcp_slock_key[1],
					      "sk_lock-AF_INET6-NVME",
					      &nvme_tcp_sk_key[1]);
		break;
	}
}
#else
static void nvme_tcp_reclassify_socket(struct socket *sock) { }
#endif
enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};
78 struct nvme_tcp_request {
79 struct nvme_request req;
81 struct nvme_tcp_queue *queue;
89 struct list_head entry;
90 struct llist_node lentry;
	enum nvme_tcp_send_state state;
};
enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};
enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};
114 struct nvme_tcp_ctrl;
115 struct nvme_tcp_queue {
117 struct work_struct io_work;
120 struct mutex queue_lock;
121 struct mutex send_mutex;
122 struct llist_head req_list;
123 struct list_head send_list;
130 size_t data_remaining;
131 size_t ddgst_remaining;
135 struct nvme_tcp_request *request;
139 size_t cmnd_capsule_len;
140 struct nvme_tcp_ctrl *ctrl;
146 struct ahash_request *rcv_hash;
147 struct ahash_request *snd_hash;
151 struct page_frag_cache pf_cache;
153 void (*state_change)(struct sock *);
154 void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};
158 struct nvme_tcp_ctrl {
159 /* read only in the hot path */
160 struct nvme_tcp_queue *queues;
161 struct blk_mq_tag_set tag_set;
163 /* other member variables */
164 struct list_head list;
165 struct blk_mq_tag_set admin_tag_set;
166 struct sockaddr_storage addr;
167 struct sockaddr_storage src_addr;
168 struct nvme_ctrl ctrl;
170 struct work_struct err_work;
171 struct delayed_work connect_work;
172 struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};
176 static LIST_HEAD(nvme_tcp_ctrl_list);
177 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
178 static struct workqueue_struct *nvme_tcp_wq;
179 static const struct blk_mq_ops nvme_tcp_mq_ops;
180 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
181 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}
static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}
202 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
204 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
207 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
209 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
	if (nvme_is_fabrics(req->req.cmd))
		return NVME_TCP_ADMIN_CCSZ;
	return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
219 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
221 return req == &req->queue->ctrl->async_req;
224 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
228 if (unlikely(nvme_tcp_async_req(req)))
229 return false; /* async events don't have a request */
231 rq = blk_mq_rq_from_pdu(req);
233 return rq_data_dir(rq) == WRITE && req->data_len &&
234 req->data_len <= nvme_tcp_inline_data_size(req);
237 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
239 return req->iter.bvec->bv_page;
242 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
244 return req->iter.bvec->bv_offset + req->iter.iov_offset;
247 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
249 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
250 req->pdu_len - req->pdu_sent);
253 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
255 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
256 req->pdu_len - req->pdu_sent : 0;
static inline bool nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}
265 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
268 struct request *rq = blk_mq_rq_from_pdu(req);
274 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
275 vec = &rq->special_vec;
277 size = blk_rq_payload_bytes(rq);
280 struct bio *bio = req->curr_bio;
284 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
286 bio_for_each_bvec(bv, bio, bi) {
289 size = bio->bi_iter.bi_size;
290 offset = bio->bi_iter.bi_bvec_done;
293 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
294 req->iter.iov_offset = offset;
297 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
300 req->data_sent += len;
301 req->pdu_sent += len;
302 iov_iter_advance(&req->iter, len);
303 if (!iov_iter_count(&req->iter) &&
304 req->data_sent < req->data_len) {
305 req->curr_bio = req->curr_bio->bi_next;
306 nvme_tcp_init_iter(req, WRITE);
310 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
314 /* drain the send queue as much as we can... */
316 ret = nvme_tcp_try_send(queue);
320 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
322 return !list_empty(&queue->send_list) ||
323 !llist_empty(&queue->req_list) || queue->more_requests;
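/*
 * Queue a request for transmission: add it to the lockless req_list and,
 * if we are on the queue's io_cpu and the queue is otherwise idle, send
 * directly under send_mutex; otherwise let io_work pick it up.
 */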
326 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
327 bool sync, bool last)
329 struct nvme_tcp_queue *queue = req->queue;
332 empty = llist_add(&req->lentry, &queue->req_list) &&
333 list_empty(&queue->send_list) && !queue->request;
	/*
	 * If we're the first on the send_list, try to send directly;
	 * otherwise queue io_work. Only do that if we are on the same
	 * CPU, so we don't introduce contention.
	 */
340 if (queue->io_cpu == raw_smp_processor_id() &&
341 sync && empty && mutex_trylock(&queue->send_mutex)) {
342 queue->more_requests = !last;
343 nvme_tcp_send_all(queue);
344 queue->more_requests = false;
345 mutex_unlock(&queue->send_mutex);
348 if (last && nvme_tcp_queue_more(queue))
349 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
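/*
 * Splice the lockless req_list onto send_list; llist_del_all() returns
 * entries newest first, so adding each to the head of send_list restores
 * submission order.
 */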
352 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
354 struct nvme_tcp_request *req;
355 struct llist_node *node;
357 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
358 req = llist_entry(node, struct nvme_tcp_request, lentry);
359 list_add(&req->entry, &queue->send_list);
363 static inline struct nvme_tcp_request *
364 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
366 struct nvme_tcp_request *req;
368 req = list_first_entry_or_null(&queue->send_list,
369 struct nvme_tcp_request, entry);
371 nvme_tcp_process_req_list(queue);
372 req = list_first_entry_or_null(&queue->send_list,
373 struct nvme_tcp_request, entry);
378 list_del(&req->entry);
382 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
385 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
386 crypto_ahash_final(hash);
389 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
390 struct page *page, off_t off, size_t len)
392 struct scatterlist sg;
394 sg_init_marker(&sg, 1);
395 sg_set_page(&sg, page, len, off);
396 ahash_request_set_crypt(hash, &sg, NULL, len);
397 crypto_ahash_update(hash);
400 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
401 void *pdu, size_t len)
403 struct scatterlist sg;
405 sg_init_one(&sg, pdu, len);
406 ahash_request_set_crypt(hash, &sg, pdu + len, len);
407 crypto_ahash_digest(hash);
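/*
 * Recompute the header digest over the received PDU header and compare it
 * to the HDGST value the controller sent.
 */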
410 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
411 void *pdu, size_t pdu_len)
413 struct nvme_tcp_hdr *hdr = pdu;
417 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
418 dev_err(queue->ctrl->ctrl.device,
419 "queue %d: header digest flag is cleared\n",
420 nvme_tcp_queue_id(queue));
424 recv_digest = *(__le32 *)(pdu + hdr->hlen);
425 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
426 exp_digest = *(__le32 *)(pdu + hdr->hlen);
427 if (recv_digest != exp_digest) {
428 dev_err(queue->ctrl->ctrl.device,
429 "header digest error: recv %#x expected %#x\n",
430 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
437 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
439 struct nvme_tcp_hdr *hdr = pdu;
440 u8 digest_len = nvme_tcp_hdgst_len(queue);
443 len = le32_to_cpu(hdr->plen) - hdr->hlen -
444 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
446 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
447 dev_err(queue->ctrl->ctrl.device,
448 "queue %d: data digest flag is cleared\n",
449 nvme_tcp_queue_id(queue));
452 crypto_ahash_init(queue->rcv_hash);
457 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
458 struct request *rq, unsigned int hctx_idx)
460 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
462 page_frag_free(req->pdu);
465 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
466 struct request *rq, unsigned int hctx_idx,
467 unsigned int numa_node)
469 struct nvme_tcp_ctrl *ctrl = set->driver_data;
470 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
471 struct nvme_tcp_cmd_pdu *pdu;
472 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
473 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
474 u8 hdgst = nvme_tcp_hdgst_len(queue);
476 req->pdu = page_frag_alloc(&queue->pf_cache,
477 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
478 GFP_KERNEL | __GFP_ZERO);
484 nvme_req(rq)->ctrl = &ctrl->ctrl;
485 nvme_req(rq)->cmd = &pdu->cmd;
490 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
491 unsigned int hctx_idx)
493 struct nvme_tcp_ctrl *ctrl = data;
494 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
496 hctx->driver_data = queue;
500 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
501 unsigned int hctx_idx)
503 struct nvme_tcp_ctrl *ctrl = data;
504 struct nvme_tcp_queue *queue = &ctrl->queues[0];
506 hctx->driver_data = queue;
510 static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}
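/* Reset receive state to expect the next PDU header (plus header digest). */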
518 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
520 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
521 nvme_tcp_hdgst_len(queue);
522 queue->pdu_offset = 0;
523 queue->data_remaining = -1;
524 queue->ddgst_remaining = 0;
527 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
529 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
532 dev_warn(ctrl->device, "starting error recovery\n");
533 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
536 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
537 struct nvme_completion *cqe)
539 struct nvme_tcp_request *req;
542 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
544 dev_err(queue->ctrl->ctrl.device,
545 "got bad cqe.command_id %#x on queue %d\n",
546 cqe->command_id, nvme_tcp_queue_id(queue));
547 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
551 req = blk_mq_rq_to_pdu(rq);
552 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
553 req->status = cqe->status;
555 if (!nvme_try_complete_req(rq, req->status, cqe->result))
556 nvme_complete_rq(rq);
562 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
563 struct nvme_tcp_data_pdu *pdu)
567 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
569 dev_err(queue->ctrl->ctrl.device,
570 "got bad c2hdata.command_id %#x on queue %d\n",
571 pdu->command_id, nvme_tcp_queue_id(queue));
575 if (!blk_rq_payload_bytes(rq)) {
576 dev_err(queue->ctrl->ctrl.device,
577 "queue %d tag %#x unexpected data\n",
578 nvme_tcp_queue_id(queue), rq->tag);
582 queue->data_remaining = le32_to_cpu(pdu->data_length);
584 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
585 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
586 dev_err(queue->ctrl->ctrl.device,
587 "queue %d tag %#x SUCCESS set but not last PDU\n",
588 nvme_tcp_queue_id(queue), rq->tag);
589 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
596 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
597 struct nvme_tcp_rsp_pdu *pdu)
599 struct nvme_completion *cqe = &pdu->cqe;
	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
608 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
610 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
613 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
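/*
 * Prepare the next H2CData PDU for the current R2T; each PDU carries at
 * most the MAXH2CDATA bytes negotiated in the ICResp.
 */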
618 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
620 struct nvme_tcp_data_pdu *data = req->pdu;
621 struct nvme_tcp_queue *queue = req->queue;
622 struct request *rq = blk_mq_rq_from_pdu(req);
623 u32 h2cdata_sent = req->pdu_len;
624 u8 hdgst = nvme_tcp_hdgst_len(queue);
625 u8 ddgst = nvme_tcp_ddgst_len(queue);
627 req->state = NVME_TCP_SEND_H2C_PDU;
629 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
631 req->h2cdata_left -= req->pdu_len;
632 req->h2cdata_offset += h2cdata_sent;
634 memset(data, 0, sizeof(*data));
635 data->hdr.type = nvme_tcp_h2c_data;
636 if (!req->h2cdata_left)
637 data->hdr.flags = NVME_TCP_F_DATA_LAST;
638 if (queue->hdr_digest)
639 data->hdr.flags |= NVME_TCP_F_HDGST;
640 if (queue->data_digest)
641 data->hdr.flags |= NVME_TCP_F_DDGST;
642 data->hdr.hlen = sizeof(*data);
643 data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
646 data->ttag = req->ttag;
647 data->command_id = nvme_cid(rq);
648 data->data_offset = cpu_to_le32(req->h2cdata_offset);
649 data->data_length = cpu_to_le32(req->pdu_len);
652 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
653 struct nvme_tcp_r2t_pdu *pdu)
655 struct nvme_tcp_request *req;
657 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
658 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
660 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
662 dev_err(queue->ctrl->ctrl.device,
663 "got bad r2t.command_id %#x on queue %d\n",
664 pdu->command_id, nvme_tcp_queue_id(queue));
667 req = blk_mq_rq_to_pdu(rq);
669 if (unlikely(!r2t_length)) {
670 dev_err(queue->ctrl->ctrl.device,
671 "req %d r2t len is %u, probably a bug...\n",
672 rq->tag, r2t_length);
676 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
677 dev_err(queue->ctrl->ctrl.device,
678 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
679 rq->tag, r2t_length, req->data_len, req->data_sent);
683 if (unlikely(r2t_offset < req->data_sent)) {
684 dev_err(queue->ctrl->ctrl.device,
685 "req %d unexpected r2t offset %u (expected %zu)\n",
686 rq->tag, r2t_offset, req->data_sent);
691 req->h2cdata_left = r2t_length;
692 req->h2cdata_offset = r2t_offset;
693 req->ttag = pdu->ttag;
695 nvme_tcp_setup_h2c_data_pdu(req);
696 nvme_tcp_queue_request(req, false, true);
701 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
702 unsigned int *offset, size_t *len)
704 struct nvme_tcp_hdr *hdr;
705 char *pdu = queue->pdu;
706 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
709 ret = skb_copy_bits(skb, *offset,
710 &pdu[queue->pdu_offset], rcv_len);
714 queue->pdu_remaining -= rcv_len;
715 queue->pdu_offset += rcv_len;
718 if (queue->pdu_remaining)
722 if (queue->hdr_digest) {
723 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
729 if (queue->data_digest) {
730 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
745 dev_err(queue->ctrl->ctrl.device,
746 "unsupported pdu type (%d)\n", hdr->type);
751 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
753 union nvme_result res = {};
755 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
756 nvme_complete_rq(rq);
759 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
760 unsigned int *offset, size_t *len)
762 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
765 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
770 recv_len = min_t(size_t, *len, queue->data_remaining);
774 if (!iov_iter_count(&req->iter)) {
775 req->curr_bio = req->curr_bio->bi_next;
			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error.
			 */
781 if (!req->curr_bio) {
782 dev_err(queue->ctrl->ctrl.device,
783 "queue %d no space in request %#x",
784 nvme_tcp_queue_id(queue), rq->tag);
785 nvme_tcp_init_recv_ctx(queue);
788 nvme_tcp_init_iter(req, READ);
791 /* we can read only from what is left in this bio */
792 recv_len = min_t(size_t, recv_len,
793 iov_iter_count(&req->iter));
795 if (queue->data_digest)
796 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
797 &req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
800 &req->iter, recv_len);
802 dev_err(queue->ctrl->ctrl.device,
803 "queue %d failed to copy request %#x data",
804 nvme_tcp_queue_id(queue), rq->tag);
810 queue->data_remaining -= recv_len;
813 if (!queue->data_remaining) {
814 if (queue->data_digest) {
815 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
816 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
818 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
819 nvme_tcp_end_request(rq,
820 le16_to_cpu(req->status));
823 nvme_tcp_init_recv_ctx(queue);
830 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
831 struct sk_buff *skb, unsigned int *offset, size_t *len)
833 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
834 char *ddgst = (char *)&queue->recv_ddgst;
835 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
836 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
839 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
843 queue->ddgst_remaining -= recv_len;
846 if (queue->ddgst_remaining)
849 if (queue->recv_ddgst != queue->exp_ddgst) {
850 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
852 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
854 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
856 dev_err(queue->ctrl->ctrl.device,
857 "data digest error: recv %#x expected %#x\n",
858 le32_to_cpu(queue->recv_ddgst),
859 le32_to_cpu(queue->exp_ddgst));
862 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
863 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
865 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
867 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
871 nvme_tcp_init_recv_ctx(queue);
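/*
 * ->read_sock() callback: consume skb data according to the current receive
 * state (PDU header, data payload, or data digest).
 */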
875 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
876 unsigned int offset, size_t len)
878 struct nvme_tcp_queue *queue = desc->arg.data;
879 size_t consumed = len;
883 switch (nvme_tcp_recv_state(queue)) {
884 case NVME_TCP_RECV_PDU:
885 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
887 case NVME_TCP_RECV_DATA:
888 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
890 case NVME_TCP_RECV_DDGST:
891 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
897 dev_err(queue->ctrl->ctrl.device,
898 "receive failed: %d\n", result);
899 queue->rd_enabled = false;
900 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
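/*
 * Socket ->sk_data_ready hook: kick io_work for this queue, unless it is
 * currently being served by nvme_tcp_poll().
 */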
908 static void nvme_tcp_data_ready(struct sock *sk)
910 struct nvme_tcp_queue *queue;
912 read_lock_bh(&sk->sk_callback_lock);
913 queue = sk->sk_user_data;
914 if (likely(queue && queue->rd_enabled) &&
915 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
916 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
917 read_unlock_bh(&sk->sk_callback_lock);
920 static void nvme_tcp_write_space(struct sock *sk)
922 struct nvme_tcp_queue *queue;
924 read_lock_bh(&sk->sk_callback_lock);
925 queue = sk->sk_user_data;
926 if (likely(queue && sk_stream_is_writeable(sk))) {
927 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
928 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
930 read_unlock_bh(&sk->sk_callback_lock);
933 static void nvme_tcp_state_change(struct sock *sk)
935 struct nvme_tcp_queue *queue;
937 read_lock_bh(&sk->sk_callback_lock);
938 queue = sk->sk_user_data;
942 switch (sk->sk_state) {
948 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
951 dev_info(queue->ctrl->ctrl.device,
952 "queue %d socket state %d\n",
953 nvme_tcp_queue_id(queue), sk->sk_state);
956 queue->state_change(sk);
958 read_unlock_bh(&sk->sk_callback_lock);
961 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
963 queue->request = NULL;
966 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
968 if (nvme_tcp_async_req(req)) {
969 union nvme_result res = {};
971 nvme_complete_async_event(&req->queue->ctrl->ctrl,
972 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
974 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
975 NVME_SC_HOST_PATH_ERROR);
979 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
981 struct nvme_tcp_queue *queue = req->queue;
982 int req_data_len = req->data_len;
983 u32 h2cdata_left = req->h2cdata_left;
986 struct page *page = nvme_tcp_req_cur_page(req);
987 size_t offset = nvme_tcp_req_cur_offset(req);
988 size_t len = nvme_tcp_req_cur_length(req);
989 bool last = nvme_tcp_pdu_last_send(req, len);
990 int req_data_sent = req->data_sent;
991 int ret, flags = MSG_DONTWAIT;
993 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
998 if (sendpage_ok(page)) {
999 ret = kernel_sendpage(queue->sock, page, offset, len,
1002 ret = sock_no_sendpage(queue->sock, page, offset, len,
1008 if (queue->data_digest)
1009 nvme_tcp_ddgst_update(queue->snd_hash, page,
		/*
		 * update the request iterator except for the last payload send
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
1017 if (req_data_sent + ret < req_data_len)
1018 nvme_tcp_advance_req(req, ret);
1020 /* fully successful last send in current PDU */
1021 if (last && ret == len) {
1022 if (queue->data_digest) {
1023 nvme_tcp_ddgst_final(queue->snd_hash,
1025 req->state = NVME_TCP_SEND_DDGST;
1029 nvme_tcp_setup_h2c_data_pdu(req);
1031 nvme_tcp_done_send_req(queue);
1039 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1041 struct nvme_tcp_queue *queue = req->queue;
1042 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1043 bool inline_data = nvme_tcp_has_inline_data(req);
1044 u8 hdgst = nvme_tcp_hdgst_len(queue);
1045 int len = sizeof(*pdu) + hdgst - req->offset;
1046 int flags = MSG_DONTWAIT;
1049 if (inline_data || nvme_tcp_queue_more(queue))
1050 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
1054 if (queue->hdr_digest && !req->offset)
1055 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1057 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1058 offset_in_page(pdu) + req->offset, len, flags);
1059 if (unlikely(ret <= 0))
1065 req->state = NVME_TCP_SEND_DATA;
1066 if (queue->data_digest)
1067 crypto_ahash_init(queue->snd_hash);
1069 nvme_tcp_done_send_req(queue);
1078 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1080 struct nvme_tcp_queue *queue = req->queue;
1081 struct nvme_tcp_data_pdu *pdu = req->pdu;
1082 u8 hdgst = nvme_tcp_hdgst_len(queue);
1083 int len = sizeof(*pdu) - req->offset + hdgst;
1086 if (queue->hdr_digest && !req->offset)
1087 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1089 if (!req->h2cdata_left)
1090 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1091 offset_in_page(pdu) + req->offset, len,
1092 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
1094 ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
1095 offset_in_page(pdu) + req->offset, len,
1096 MSG_DONTWAIT | MSG_MORE);
1097 if (unlikely(ret <= 0))
1102 req->state = NVME_TCP_SEND_DATA;
1103 if (queue->data_digest)
1104 crypto_ahash_init(queue->snd_hash);
1112 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1114 struct nvme_tcp_queue *queue = req->queue;
1115 size_t offset = req->offset;
1116 u32 h2cdata_left = req->h2cdata_left;
1118 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};
1124 if (nvme_tcp_queue_more(queue))
1125 msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;
1129 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1130 if (unlikely(ret <= 0))
1133 if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		if (h2cdata_left)
			nvme_tcp_setup_h2c_data_pdu(req);
		else
			nvme_tcp_done_send_req(queue);
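/*
 * Advance the current request through its send states (command PDU,
 * H2CData PDU, data, data digest).  Returns a positive value when progress
 * was made, 0 when there is nothing more to send, or a negative error.
 */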
1145 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1147 struct nvme_tcp_request *req;
1150 if (!queue->request) {
1151 queue->request = nvme_tcp_fetch_request(queue);
1152 if (!queue->request)
1155 req = queue->request;
1157 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1158 ret = nvme_tcp_try_send_cmd_pdu(req);
1161 if (!nvme_tcp_has_inline_data(req))
1165 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1166 ret = nvme_tcp_try_send_data_pdu(req);
1171 if (req->state == NVME_TCP_SEND_DATA) {
1172 ret = nvme_tcp_try_send_data(req);
1177 if (req->state == NVME_TCP_SEND_DDGST)
1178 ret = nvme_tcp_try_send_ddgst(req);
1180 if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
1183 dev_err(queue->ctrl->ctrl.device,
1184 "failed to send request %d\n", ret);
1185 nvme_tcp_fail_request(queue->request);
1186 nvme_tcp_done_send_req(queue);
1191 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1193 struct socket *sock = queue->sock;
1194 struct sock *sk = sock->sk;
1195 read_descriptor_t rd_desc;
1198 rd_desc.arg.data = queue;
1202 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
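/*
 * Per-queue I/O work: alternate between trying to send and to receive for
 * about a millisecond, and requeue itself while there is work pending.
 */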
1207 static void nvme_tcp_io_work(struct work_struct *w)
1209 struct nvme_tcp_queue *queue =
1210 container_of(w, struct nvme_tcp_queue, io_work);
1211 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1214 bool pending = false;
1217 if (mutex_trylock(&queue->send_mutex)) {
1218 result = nvme_tcp_try_send(queue);
1219 mutex_unlock(&queue->send_mutex);
1222 else if (unlikely(result < 0))
1226 result = nvme_tcp_try_recv(queue);
1229 else if (unlikely(result < 0))
1235 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1237 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1240 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1242 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1244 ahash_request_free(queue->rcv_hash);
1245 ahash_request_free(queue->snd_hash);
1246 crypto_free_ahash(tfm);
1249 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1251 struct crypto_ahash *tfm;
1253 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
1257 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1258 if (!queue->snd_hash)
1260 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1262 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1263 if (!queue->rcv_hash)
1265 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1269 ahash_request_free(queue->snd_hash);
1271 crypto_free_ahash(tfm);
1275 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1277 struct nvme_tcp_request *async = &ctrl->async_req;
1279 page_frag_free(async->pdu);
1282 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1284 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1285 struct nvme_tcp_request *async = &ctrl->async_req;
1286 u8 hdgst = nvme_tcp_hdgst_len(queue);
1288 async->pdu = page_frag_alloc(&queue->pf_cache,
1289 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1290 GFP_KERNEL | __GFP_ZERO);
1294 async->queue = &ctrl->queues[0];
1298 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1301 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1302 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1304 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1307 if (queue->hdr_digest || queue->data_digest)
1308 nvme_tcp_free_crypto(queue);
1310 if (queue->pf_cache.va) {
1311 page = virt_to_head_page(queue->pf_cache.va);
1312 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1313 queue->pf_cache.va = NULL;
1315 sock_release(queue->sock);
1317 mutex_destroy(&queue->send_mutex);
1318 mutex_destroy(&queue->queue_lock);
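/*
 * NVMe/TCP initialization handshake: send an ICReq PDU and validate the
 * controller's ICResp (PFV, header/data digests, CPDA and MAXH2CDATA).
 */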
1321 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1323 struct nvme_tcp_icreq_pdu *icreq;
1324 struct nvme_tcp_icresp_pdu *icresp;
1325 struct msghdr msg = {};
1327 bool ctrl_hdgst, ctrl_ddgst;
1331 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1335 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1341 icreq->hdr.type = nvme_tcp_icreq;
1342 icreq->hdr.hlen = sizeof(*icreq);
1344 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1345 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1346 icreq->maxr2t = 0; /* single inflight r2t supported */
1347 icreq->hpda = 0; /* no alignment constraint */
1348 if (queue->hdr_digest)
1349 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1350 if (queue->data_digest)
1351 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1353 iov.iov_base = icreq;
1354 iov.iov_len = sizeof(*icreq);
1355 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1359 memset(&msg, 0, sizeof(msg));
1360 iov.iov_base = icresp;
1361 iov.iov_len = sizeof(*icresp);
1362 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1363 iov.iov_len, msg.msg_flags);
1368 if (icresp->hdr.type != nvme_tcp_icresp) {
1369 pr_err("queue %d: bad type returned %d\n",
1370 nvme_tcp_queue_id(queue), icresp->hdr.type);
1374 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1375 pr_err("queue %d: bad pdu length returned %d\n",
1376 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1380 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1381 pr_err("queue %d: bad pfv returned %d\n",
1382 nvme_tcp_queue_id(queue), icresp->pfv);
1386 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1387 if ((queue->data_digest && !ctrl_ddgst) ||
1388 (!queue->data_digest && ctrl_ddgst)) {
1389 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1390 nvme_tcp_queue_id(queue),
1391 queue->data_digest ? "enabled" : "disabled",
1392 ctrl_ddgst ? "enabled" : "disabled");
1396 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1397 if ((queue->hdr_digest && !ctrl_hdgst) ||
1398 (!queue->hdr_digest && ctrl_hdgst)) {
1399 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1400 nvme_tcp_queue_id(queue),
1401 queue->hdr_digest ? "enabled" : "disabled",
1402 ctrl_hdgst ? "enabled" : "disabled");
1406 if (icresp->cpda != 0) {
1407 pr_err("queue %d: unsupported cpda returned %d\n",
1408 nvme_tcp_queue_id(queue), icresp->cpda);
1412 maxh2cdata = le32_to_cpu(icresp->maxdata);
1413 if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1414 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1415 nvme_tcp_queue_id(queue), maxh2cdata);
1418 queue->maxh2cdata = maxh2cdata;
1428 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1430 return nvme_tcp_queue_id(queue) == 0;
1433 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1435 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1436 int qid = nvme_tcp_queue_id(queue);
1438 return !nvme_tcp_admin_queue(queue) &&
1439 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1442 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1444 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1445 int qid = nvme_tcp_queue_id(queue);
1447 return !nvme_tcp_admin_queue(queue) &&
1448 !nvme_tcp_default_queue(queue) &&
1449 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1450 ctrl->io_queues[HCTX_TYPE_READ];
1453 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1455 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1456 int qid = nvme_tcp_queue_id(queue);
1458 return !nvme_tcp_admin_queue(queue) &&
1459 !nvme_tcp_default_queue(queue) &&
1460 !nvme_tcp_read_queue(queue) &&
1461 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1462 ctrl->io_queues[HCTX_TYPE_READ] +
1463 ctrl->io_queues[HCTX_TYPE_POLL];
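/*
 * Choose the CPU that will run this queue's io_work: queues of each type
 * (default, read, poll) are spread round-robin over the online CPUs.
 */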
1466 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1468 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1469 int qid = nvme_tcp_queue_id(queue);
1472 if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
1475 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1476 else if (nvme_tcp_poll_queue(queue))
1477 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1478 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1479 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1482 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1483 int qid, size_t queue_size)
1485 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1486 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1487 int ret, rcv_pdu_size;
1489 mutex_init(&queue->queue_lock);
1491 init_llist_head(&queue->req_list);
1492 INIT_LIST_HEAD(&queue->send_list);
1493 mutex_init(&queue->send_mutex);
1494 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1495 queue->queue_size = queue_size;
	if (qid > 0)
		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1501 NVME_TCP_ADMIN_CCSZ;
1503 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1504 IPPROTO_TCP, &queue->sock);
1506 dev_err(nctrl->device,
1507 "failed to create socket: %d\n", ret);
1508 goto err_destroy_mutex;
1511 nvme_tcp_reclassify_socket(queue->sock);
1513 /* Single syn retry */
1514 tcp_sock_set_syncnt(queue->sock->sk, 1);
1516 /* Set TCP no delay */
1517 tcp_sock_set_nodelay(queue->sock->sk);
1520 * Cleanup whatever is sitting in the TCP transmit queue on socket
1521 * close. This is done to prevent stale data from being sent should
1522 * the network connection be restored before TCP times out.
1524 sock_no_linger(queue->sock->sk);
1526 if (so_priority > 0)
1527 sock_set_priority(queue->sock->sk, so_priority);
1529 /* Set socket type of service */
1530 if (nctrl->opts->tos >= 0)
1531 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1533 /* Set 10 seconds timeout for icresp recvmsg */
1534 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1536 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1537 nvme_tcp_set_queue_io_cpu(queue);
1538 queue->request = NULL;
1539 queue->data_remaining = 0;
1540 queue->ddgst_remaining = 0;
1541 queue->pdu_remaining = 0;
1542 queue->pdu_offset = 0;
1543 sk_set_memalloc(queue->sock->sk);
1545 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1546 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1547 sizeof(ctrl->src_addr));
1549 dev_err(nctrl->device,
1550 "failed to bind queue %d socket %d\n",
1556 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1557 char *iface = nctrl->opts->host_iface;
1558 sockptr_t optval = KERNEL_SOCKPTR(iface);
1560 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1561 optval, strlen(iface));
1563 dev_err(nctrl->device,
1564 "failed to bind to interface %s queue %d err %d\n",
1570 queue->hdr_digest = nctrl->opts->hdr_digest;
1571 queue->data_digest = nctrl->opts->data_digest;
1572 if (queue->hdr_digest || queue->data_digest) {
1573 ret = nvme_tcp_alloc_crypto(queue);
1575 dev_err(nctrl->device,
1576 "failed to allocate queue %d crypto\n", qid);
1581 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1582 nvme_tcp_hdgst_len(queue);
1583 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1589 dev_dbg(nctrl->device, "connecting queue %d\n",
1590 nvme_tcp_queue_id(queue));
1592 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1593 sizeof(ctrl->addr), 0);
1595 dev_err(nctrl->device,
1596 "failed to connect socket: %d\n", ret);
1600 ret = nvme_tcp_init_connection(queue);
1602 goto err_init_connect;
1604 queue->rd_enabled = true;
1605 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1606 nvme_tcp_init_recv_ctx(queue);
1608 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1609 queue->sock->sk->sk_user_data = queue;
1610 queue->state_change = queue->sock->sk->sk_state_change;
1611 queue->data_ready = queue->sock->sk->sk_data_ready;
1612 queue->write_space = queue->sock->sk->sk_write_space;
1613 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1614 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1615 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1616 #ifdef CONFIG_NET_RX_BUSY_POLL
1617 queue->sock->sk->sk_ll_usec = 1;
1619 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1624 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1628 if (queue->hdr_digest || queue->data_digest)
1629 nvme_tcp_free_crypto(queue);
1631 sock_release(queue->sock);
1634 mutex_destroy(&queue->send_mutex);
1635 mutex_destroy(&queue->queue_lock);
1639 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1641 struct socket *sock = queue->sock;
1643 write_lock_bh(&sock->sk->sk_callback_lock);
1644 sock->sk->sk_user_data = NULL;
1645 sock->sk->sk_data_ready = queue->data_ready;
1646 sock->sk->sk_state_change = queue->state_change;
1647 sock->sk->sk_write_space = queue->write_space;
1648 write_unlock_bh(&sock->sk->sk_callback_lock);
1651 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1653 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1654 nvme_tcp_restore_sock_calls(queue);
1655 cancel_work_sync(&queue->io_work);
1658 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1660 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1661 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1663 mutex_lock(&queue->queue_lock);
1664 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1665 __nvme_tcp_stop_queue(queue);
1666 mutex_unlock(&queue->queue_lock);
1669 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1671 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	if (idx)
		ret = nvmf_connect_io_queue(nctrl, idx);
	else
		ret = nvmf_connect_admin_queue(nctrl);
	if (!ret) {
		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
	} else {
		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1683 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1684 dev_err(nctrl->device,
1685 "failed to connect queue: %d ret=%d\n", idx, ret);
1690 static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
1692 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1693 struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
1696 memset(set, 0, sizeof(*set));
1697 set->ops = &nvme_tcp_admin_mq_ops;
1698 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1699 set->reserved_tags = NVMF_RESERVED_TAGS;
1700 set->numa_node = nctrl->numa_node;
1701 set->flags = BLK_MQ_F_BLOCKING;
1702 set->cmd_size = sizeof(struct nvme_tcp_request);
1703 set->driver_data = ctrl;
1704 set->nr_hw_queues = 1;
1705 set->timeout = NVME_ADMIN_TIMEOUT;
1706 ret = blk_mq_alloc_tag_set(set);
1708 nctrl->admin_tagset = set;
1712 static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
1714 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1715 struct blk_mq_tag_set *set = &ctrl->tag_set;
1718 memset(set, 0, sizeof(*set));
1719 set->ops = &nvme_tcp_mq_ops;
1720 set->queue_depth = nctrl->sqsize + 1;
1721 set->reserved_tags = NVMF_RESERVED_TAGS;
1722 set->numa_node = nctrl->numa_node;
1723 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
1724 set->cmd_size = sizeof(struct nvme_tcp_request);
1725 set->driver_data = ctrl;
1726 set->nr_hw_queues = nctrl->queue_count - 1;
1727 set->timeout = NVME_IO_TIMEOUT;
1728 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1729 ret = blk_mq_alloc_tag_set(set);
1731 nctrl->tagset = set;
1735 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1737 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1738 cancel_work_sync(&ctrl->async_event_work);
1739 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1740 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1743 nvme_tcp_free_queue(ctrl, 0);
1746 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1750 for (i = 1; i < ctrl->queue_count; i++)
1751 nvme_tcp_free_queue(ctrl, i);
1754 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1758 for (i = 1; i < ctrl->queue_count; i++)
1759 nvme_tcp_stop_queue(ctrl, i);
1762 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1766 for (i = 1; i < ctrl->queue_count; i++) {
1767 ret = nvme_tcp_start_queue(ctrl, i);
1769 goto out_stop_queues;
1775 for (i--; i >= 1; i--)
1776 nvme_tcp_stop_queue(ctrl, i);
1780 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1784 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1788 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1790 goto out_free_queue;
1795 nvme_tcp_free_queue(ctrl, 0);
1799 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1803 for (i = 1; i < ctrl->queue_count; i++) {
1804 ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
1806 goto out_free_queues;
1812 for (i--; i >= 1; i--)
1813 nvme_tcp_free_queue(ctrl, i);
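/*
 * Number of I/O queues to ask the controller for: the sum of the requested
 * nr_io_queues, nr_write_queues and nr_poll_queues, each capped at the
 * number of online CPUs.
 */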
1818 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1820 unsigned int nr_io_queues;
1822 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1823 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1824 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1826 return nr_io_queues;
1829 static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1830 unsigned int nr_io_queues)
1832 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1833 struct nvmf_ctrl_options *opts = nctrl->opts;
1835 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
1841 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1842 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1843 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1844 min(opts->nr_write_queues, nr_io_queues);
1845 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
1852 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1853 min(opts->nr_io_queues, nr_io_queues);
1854 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1857 if (opts->nr_poll_queues && nr_io_queues) {
1858 /* map dedicated poll queues only if we have queues left */
1859 ctrl->io_queues[HCTX_TYPE_POLL] =
1860 min(opts->nr_poll_queues, nr_io_queues);
1864 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1866 unsigned int nr_io_queues;
1869 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1870 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1874 if (nr_io_queues == 0) {
1875 dev_err(ctrl->device,
1876 "unable to set any I/O queues\n");
1880 ctrl->queue_count = nr_io_queues + 1;
1881 dev_info(ctrl->device,
1882 "creating %d I/O queues.\n", nr_io_queues);
1884 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1886 return __nvme_tcp_alloc_io_queues(ctrl);
1889 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1891 nvme_tcp_stop_io_queues(ctrl);
1893 blk_mq_destroy_queue(ctrl->connect_q);
1894 blk_mq_free_tag_set(ctrl->tagset);
1896 nvme_tcp_free_io_queues(ctrl);
1899 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1903 ret = nvme_tcp_alloc_io_queues(ctrl);
1908 ret = nvme_tcp_alloc_tag_set(ctrl);
1910 goto out_free_io_queues;
1912 ret = nvme_ctrl_init_connect_q(ctrl);
1914 goto out_free_tag_set;
1917 ret = nvme_tcp_start_io_queues(ctrl);
1919 goto out_cleanup_connect_q;
1922 nvme_start_queues(ctrl);
1923 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
			/*
			 * If we timed out waiting for freeze we are likely to
			 * be stuck.  Fail the controller initialization just
			 * to be safe.
			 */
			ret = -ENODEV;
			goto out_wait_freeze_timed_out;
1932 blk_mq_update_nr_hw_queues(ctrl->tagset,
1933 ctrl->queue_count - 1);
1934 nvme_unfreeze(ctrl);
1939 out_wait_freeze_timed_out:
1940 nvme_stop_queues(ctrl);
1941 nvme_sync_io_queues(ctrl);
1942 nvme_tcp_stop_io_queues(ctrl);
1943 out_cleanup_connect_q:
1944 nvme_cancel_tagset(ctrl);
1946 blk_mq_destroy_queue(ctrl->connect_q);
1949 blk_mq_free_tag_set(ctrl->tagset);
1951 nvme_tcp_free_io_queues(ctrl);
1955 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1957 nvme_tcp_stop_queue(ctrl, 0);
1959 blk_mq_destroy_queue(ctrl->admin_q);
1960 blk_mq_destroy_queue(ctrl->fabrics_q);
1961 blk_mq_free_tag_set(ctrl->admin_tagset);
1963 nvme_tcp_free_admin_queue(ctrl);
1966 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1970 error = nvme_tcp_alloc_admin_queue(ctrl);
1975 error = nvme_tcp_alloc_admin_tag_set(ctrl);
1977 goto out_free_queue;
1979 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1980 if (IS_ERR(ctrl->fabrics_q)) {
1981 error = PTR_ERR(ctrl->fabrics_q);
1982 goto out_free_tagset;
1985 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1986 if (IS_ERR(ctrl->admin_q)) {
1987 error = PTR_ERR(ctrl->admin_q);
1988 goto out_cleanup_fabrics_q;
1992 error = nvme_tcp_start_queue(ctrl, 0);
1994 goto out_cleanup_queue;
1996 error = nvme_enable_ctrl(ctrl);
1998 goto out_stop_queue;
2000 nvme_start_admin_queue(ctrl);
2002 error = nvme_init_ctrl_finish(ctrl);
2004 goto out_quiesce_queue;
2009 nvme_stop_admin_queue(ctrl);
2010 blk_sync_queue(ctrl->admin_q);
2012 nvme_tcp_stop_queue(ctrl, 0);
2013 nvme_cancel_admin_tagset(ctrl);
2016 blk_mq_destroy_queue(ctrl->admin_q);
2017 out_cleanup_fabrics_q:
2019 blk_mq_destroy_queue(ctrl->fabrics_q);
2022 blk_mq_free_tag_set(ctrl->admin_tagset);
2024 nvme_tcp_free_admin_queue(ctrl);
2028 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2031 nvme_stop_admin_queue(ctrl);
2032 blk_sync_queue(ctrl->admin_q);
2033 nvme_tcp_stop_queue(ctrl, 0);
2034 nvme_cancel_admin_tagset(ctrl);
2036 nvme_start_admin_queue(ctrl);
2037 nvme_tcp_destroy_admin_queue(ctrl, remove);
2040 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2043 if (ctrl->queue_count <= 1)
2045 nvme_stop_admin_queue(ctrl);
2046 nvme_start_freeze(ctrl);
2047 nvme_stop_queues(ctrl);
2048 nvme_sync_io_queues(ctrl);
2049 nvme_tcp_stop_io_queues(ctrl);
2050 nvme_cancel_tagset(ctrl);
2052 nvme_start_queues(ctrl);
2053 nvme_tcp_destroy_io_queues(ctrl, remove);
2056 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2058 /* If we are resetting/deleting then do nothing */
2059 if (ctrl->state != NVME_CTRL_CONNECTING) {
2060 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2061 ctrl->state == NVME_CTRL_LIVE);
2065 if (nvmf_should_reconnect(ctrl)) {
2066 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2067 ctrl->opts->reconnect_delay);
2068 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2069 ctrl->opts->reconnect_delay * HZ);
2071 dev_info(ctrl->device, "Removing controller...\n");
2072 nvme_delete_ctrl(ctrl);
2076 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2078 struct nvmf_ctrl_options *opts = ctrl->opts;
2081 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2087 dev_err(ctrl->device, "icdoff is not supported!\n");
2091 if (!nvme_ctrl_sgl_supported(ctrl)) {
2093 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2097 if (opts->queue_size > ctrl->sqsize + 1)
2098 dev_warn(ctrl->device,
2099 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2100 opts->queue_size, ctrl->sqsize + 1);
2102 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2103 dev_warn(ctrl->device,
2104 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2105 ctrl->sqsize + 1, ctrl->maxcmd);
2106 ctrl->sqsize = ctrl->maxcmd - 1;
2109 if (ctrl->queue_count > 1) {
2110 ret = nvme_tcp_configure_io_queues(ctrl, new);
2115 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
		/*
		 * state change failure is ok if we started ctrl delete,
		 * unless we're in the middle of creating a new controller,
		 * to avoid races with the teardown flow.
		 */
2121 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2122 ctrl->state != NVME_CTRL_DELETING_NOIO);
2128 nvme_start_ctrl(ctrl);
2132 if (ctrl->queue_count > 1) {
2133 nvme_stop_queues(ctrl);
2134 nvme_sync_io_queues(ctrl);
2135 nvme_tcp_stop_io_queues(ctrl);
2136 nvme_cancel_tagset(ctrl);
2137 nvme_tcp_destroy_io_queues(ctrl, new);
2140 nvme_stop_admin_queue(ctrl);
2141 blk_sync_queue(ctrl->admin_q);
2142 nvme_tcp_stop_queue(ctrl, 0);
2143 nvme_cancel_admin_tagset(ctrl);
2144 nvme_tcp_destroy_admin_queue(ctrl, new);
2148 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2150 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2151 struct nvme_tcp_ctrl, connect_work);
2152 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2154 ++ctrl->nr_reconnects;
2156 if (nvme_tcp_setup_ctrl(ctrl, false))
2159 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2160 ctrl->nr_reconnects);
2162 ctrl->nr_reconnects = 0;
2167 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2168 ctrl->nr_reconnects);
2169 nvme_tcp_reconnect_or_remove(ctrl);
2172 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2174 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2175 struct nvme_tcp_ctrl, err_work);
2176 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2178 nvme_auth_stop(ctrl);
2179 nvme_stop_keep_alive(ctrl);
2180 flush_work(&ctrl->async_event_work);
2181 nvme_tcp_teardown_io_queues(ctrl, false);
2182 /* unquiesce to fail fast pending requests */
2183 nvme_start_queues(ctrl);
2184 nvme_tcp_teardown_admin_queue(ctrl, false);
2185 nvme_start_admin_queue(ctrl);
2187 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2188 /* state change failure is ok if we started ctrl delete */
2189 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2190 ctrl->state != NVME_CTRL_DELETING_NOIO);
2194 nvme_tcp_reconnect_or_remove(ctrl);
2197 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2199 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2200 nvme_stop_admin_queue(ctrl);
2202 nvme_shutdown_ctrl(ctrl);
2204 nvme_disable_ctrl(ctrl);
2205 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2208 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2210 nvme_tcp_teardown_ctrl(ctrl, true);
2213 static void nvme_reset_ctrl_work(struct work_struct *work)
2215 struct nvme_ctrl *ctrl =
2216 container_of(work, struct nvme_ctrl, reset_work);
2218 nvme_stop_ctrl(ctrl);
2219 nvme_tcp_teardown_ctrl(ctrl, false);
2221 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2222 /* state change failure is ok if we started ctrl delete */
2223 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2224 ctrl->state != NVME_CTRL_DELETING_NOIO);
2228 if (nvme_tcp_setup_ctrl(ctrl, false))
2234 ++ctrl->nr_reconnects;
2235 nvme_tcp_reconnect_or_remove(ctrl);
2238 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2240 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2241 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2244 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2246 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2248 if (list_empty(&ctrl->list))
2251 mutex_lock(&nvme_tcp_ctrl_mutex);
2252 list_del(&ctrl->list);
2253 mutex_unlock(&nvme_tcp_ctrl_mutex);
2255 nvmf_free_options(nctrl->opts);
2257 kfree(ctrl->queues);
2261 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2263 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2267 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2268 NVME_SGL_FMT_TRANSPORT_A;
2271 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2272 struct nvme_command *c, u32 data_len)
2274 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2276 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2277 sg->length = cpu_to_le32(data_len);
2278 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2281 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2284 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2287 sg->length = cpu_to_le32(data_len);
2288 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2289 NVME_SGL_FMT_TRANSPORT_A;
2292 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2294 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2295 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2296 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2297 struct nvme_command *cmd = &pdu->cmd;
2298 u8 hdgst = nvme_tcp_hdgst_len(queue);
2300 memset(pdu, 0, sizeof(*pdu));
2301 pdu->hdr.type = nvme_tcp_cmd;
2302 if (queue->hdr_digest)
2303 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2304 pdu->hdr.hlen = sizeof(*pdu);
2305 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2307 cmd->common.opcode = nvme_admin_async_event;
2308 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2309 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2310 nvme_tcp_set_sg_null(cmd);
2312 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2313 ctrl->async_req.offset = 0;
2314 ctrl->async_req.curr_bio = NULL;
2315 ctrl->async_req.data_len = 0;
2317 nvme_tcp_queue_request(&ctrl->async_req, true, true);
2320 static void nvme_tcp_complete_timed_out(struct request *rq)
2322 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2323 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2325 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2326 nvmf_complete_timed_out_request(rq);
2329 static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
2331 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2332 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2333 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2335 dev_warn(ctrl->device,
2336 "queue %d: timeout request %#x type %d\n",
2337 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2339 if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
2361 nvme_tcp_error_recovery(ctrl);
2362 return BLK_EH_RESET_TIMER;
2365 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2368 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2369 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2370 struct nvme_command *c = &pdu->cmd;
2372 c->common.flags |= NVME_CMD_SGL_METABUF;
2374 if (!blk_rq_nr_phys_segments(rq))
2375 nvme_tcp_set_sg_null(c);
2376 else if (rq_data_dir(rq) == WRITE &&
2377 req->data_len <= nvme_tcp_inline_data_size(req))
2378 nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->h2cdata_left = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

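/*
 * ->queue_rq: fail fast if the queue is not live and the request must not be
 * accepted, otherwise build the command PDU and hand the request to the send
 * path. bd->last is passed through so the io_work kick can be deferred to
 * ->commit_rqs for batched submissions.
 */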
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}

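/*
 * Distribute the per-type I/O queue counts over the blk-mq hardware context
 * maps. With dedicated write queues, read queues are offset past the default
 * (write) queues; otherwise both types share the same range starting at 0.
 * For example, 4 default + 2 read queues yield queue offsets 0 and 4, with
 * poll queues (if any) placed after both.
 */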
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
			ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}

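/*
 * Polled completion path: busy-poll the socket while its receive queue is
 * empty, then reap whatever has arrived. NVME_TCP_Q_POLLING marks the queue
 * as being polled for the duration. Returns the number of completions seen
 * on this queue.
 */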
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvmf_get_address,
	.stop_ctrl		= nvme_tcp_stop_ctrl,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

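/*
 * Transport ->create_ctrl entry point: allocate and initialize the
 * controller, resolve the target (and optional source) address, reject
 * duplicate connections unless explicitly allowed, then perform the initial
 * connect via nvme_tcp_setup_ctrl() before publishing the controller on
 * nvme_tcp_ctrl_list.
 */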
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}
	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}
	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}
	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}
	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}
	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

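/*
 * Transport registration. Userspace reaches this through /dev/nvme-fabrics,
 * typically via nvme-cli; an illustrative invocation (addresses and NQN are
 * placeholders, exact flags depend on the nvme-cli version) might look like:
 *
 *	nvme connect -t tcp -a 192.168.1.100 -s 4420 \
 *		-n nqn.2014-08.org.nvmexpress:example-subsys
 *
 * Only NVMF_OPT_TRADDR is mandatory; the options in allowed_opts (digests,
 * queue counts, TOS, host interface, ...) are optional.
 */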
static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");