// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
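
/*
 * Per-request send-side state machine: the command PDU goes out first,
 * optionally followed by an H2C data PDU (in response to an R2T), then
 * the data payload itself, and finally the data digest when enabled.
 */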
enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	__le16			status;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}
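
/*
 * Set up the request's iov_iter to walk either the special payload
 * bvec or the bvecs of the current bio, so the send and receive paths
 * can copy data directly to/from the block layer pages.
 */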
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}
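
/*
 * Requests are staged on a lockless llist (req_list) and spliced onto
 * send_list by the sending context; producers stay cheap while actual
 * transmission is serialized under send_mutex.
 */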
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we're first on the send_list, try to send directly;
	 * otherwise queue io_work. Only do so on the same cpu, so we
	 * don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}
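
/*
 * NVMe/TCP header and data digests are CRC32C, computed through the
 * crypto API ahash interface (the "crc32c" tfm allocated in
 * nvme_tcp_alloc_crypto()).
 */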
static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu;
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
			GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	pdu = req->pdu;
	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &pdu->cmd;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return	(queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad cqe.command_id %#x on queue %d\n",
			cqe->command_id, nvme_tcp_queue_id(queue));
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	req = blk_mq_rq_to_pdu(rq);
	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
		req->status = cqe->status;

	if (!nvme_try_complete_req(rq, req->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad c2hdata.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;
	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = nvme_cid(rq);
	data->data_offset = pdu->r2t_offset;
	data->data_length = cpu_to_le32(req->pdu_len);
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	u32 r2t_length = le32_to_cpu(pdu->r2t_length);

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	if (unlikely(!r2t_length)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, r2t_length);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + r2t_length > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, r2t_length, req->data_len, req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
		return -EPROTO;
	}

	nvme_tcp_setup_h2c_data_pdu(req, pdu);
	nvme_tcp_queue_request(req, false, true);
	return 0;
}
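
/*
 * Receive-side state machine: accumulate a PDU header first
 * (NVME_TCP_RECV_PDU), then copy any C2H data into the request's bio
 * pages (NVME_TCP_RECV_DATA), and finally consume and verify the
 * trailing data digest (NVME_TCP_RECV_DDGST).
 */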
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that controller
			 * sent more data than we requested, hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq,
						le16_to_cpu(req->status));
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);

		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}
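
/*
 * Socket callbacks, installed under sk_callback_lock when the queue is
 * allocated. They only schedule io_work so that all real work runs in
 * workqueue context.
 */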
static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}
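
/*
 * Transmit the payload with kernel_sendpage() where the page allows
 * zero-copy (sendpage_ok()), falling back to sock_no_sendpage()
 * otherwise; the data digest is updated incrementally as bytes go out.
 */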
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int req_data_len = req->data_len;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/*
		 * update the request iterator except for the last payload send
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
		if (req_data_sent + ret < req_data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	size_t offset = req->offset;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}
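
/*
 * io_work alternates between sending and receiving under a roughly 1ms
 * budget, and re-queues itself while progress is still pending.
 */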
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending || !queue->rd_enabled)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct page *page;
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	if (queue->pf_cache.va) {
		page = virt_to_head_page(queue->pf_cache.va);
		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
		queue->pf_cache.va = NULL;
	}
	sock_release(queue->sock);
	kfree(queue->pdu);
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
}
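
/*
 * NVMe/TCP connection initialization: send an ICReq PDU and validate
 * the controller's ICResp (PDU type and length, PFV, digest
 * negotiation, CPDA) before the queue is used.
 */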
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ] +
				ctrl->io_queues[HCTX_TYPE_POLL];
}
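
/*
 * Spread queues of each type (default/read/poll) across the online
 * CPUs in order, so io_work affinity lines up with the blk-mq queue
 * mapping below.
 */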
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);
	int n = 0;

	if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
	else if (nvme_tcp_poll_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
				ctrl->io_queues[HCTX_TYPE_READ] - 1;
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}

static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
		int qid, size_t queue_size)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
	int ret, rcv_pdu_size;

	mutex_init(&queue->queue_lock);
	queue->ctrl = ctrl;
	init_llist_head(&queue->req_list);
	INIT_LIST_HEAD(&queue->send_list);
	mutex_init(&queue->send_mutex);
	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
	queue->queue_size = queue_size;

	if (qid > 0)
		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
						NVME_TCP_ADMIN_CCSZ;

	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
			IPPROTO_TCP, &queue->sock);
	if (ret) {
		dev_err(nctrl->device,
			"failed to create socket: %d\n", ret);
		goto err_destroy_mutex;
	}

	/* Single syn retry */
	tcp_sock_set_syncnt(queue->sock->sk, 1);

	/* Set TCP no delay */
	tcp_sock_set_nodelay(queue->sock->sk);

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(queue->sock->sk);

	if (so_priority > 0)
		sock_set_priority(queue->sock->sk, so_priority);

	/* Set socket type of service */
	if (nctrl->opts->tos >= 0)
		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);

	/* Set 10 seconds timeout for icresp recvmsg */
	queue->sock->sk->sk_rcvtimeo = 10 * HZ;

	queue->sock->sk->sk_allocation = GFP_ATOMIC;
	nvme_tcp_set_queue_io_cpu(queue);
	queue->request = NULL;
	queue->data_remaining = 0;
	queue->ddgst_remaining = 0;
	queue->pdu_remaining = 0;
	queue->pdu_offset = 0;
	sk_set_memalloc(queue->sock->sk);

	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
			sizeof(ctrl->src_addr));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind queue %d socket %d\n",
				qid, ret);
			goto err_sock;
		}
	}

	if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
		char *iface = nctrl->opts->host_iface;
		sockptr_t optval = KERNEL_SOCKPTR(iface);

		ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
				      optval, strlen(iface));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind to interface %s queue %d err %d\n",
				iface, qid, ret);
			goto err_sock;
		}
	}

	queue->hdr_digest = nctrl->opts->hdr_digest;
	queue->data_digest = nctrl->opts->data_digest;
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvme_tcp_alloc_crypto(queue);
		if (ret) {
			dev_err(nctrl->device,
				"failed to allocate queue %d crypto\n", qid);
			goto err_sock;
		}
	}

	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
			nvme_tcp_hdgst_len(queue);
	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
	if (!queue->pdu) {
		ret = -ENOMEM;
		goto err_crypto;
	}

	dev_dbg(nctrl->device, "connecting queue %d\n",
			nvme_tcp_queue_id(queue));

	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
		sizeof(ctrl->addr), 0);
	if (ret) {
		dev_err(nctrl->device,
			"failed to connect socket: %d\n", ret);
		goto err_rcv_pdu;
	}

	ret = nvme_tcp_init_connection(queue);
	if (ret)
		goto err_init_connect;

	queue->rd_enabled = true;
	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
	nvme_tcp_init_recv_ctx(queue);

	write_lock_bh(&queue->sock->sk->sk_callback_lock);
	queue->sock->sk->sk_user_data = queue;
	queue->state_change = queue->sock->sk->sk_state_change;
	queue->data_ready = queue->sock->sk->sk_data_ready;
	queue->write_space = queue->sock->sk->sk_write_space;
	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
#ifdef CONFIG_NET_RX_BUSY_POLL
	queue->sock->sk->sk_ll_usec = 1;
#endif
	write_unlock_bh(&queue->sock->sk->sk_callback_lock);

	return 0;

err_init_connect:
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
	kfree(queue->pdu);
err_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);
err_sock:
	sock_release(queue->sock);
	queue->sock = NULL;
err_destroy_mutex:
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
	return ret;
}

static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = NULL;
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	nvme_tcp_restore_sock_calls(queue);
	cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	mutex_lock(&queue->queue_lock);
	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		__nvme_tcp_stop_queue(queue);
	mutex_unlock(&queue->queue_lock);
}

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	int ret;

	if (idx)
		ret = nvmf_connect_io_queue(nctrl, idx);
	else
		ret = nvmf_connect_admin_queue(nctrl);

	if (!ret) {
		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
	} else {
		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
		dev_err(nctrl->device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	}
	return ret;
}

static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
		bool admin)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct blk_mq_tag_set *set;
	int ret;

	if (admin) {
		set = &ctrl->admin_tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_admin_mq_ops;
		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		set->reserved_tags = NVMF_RESERVED_TAGS;
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_BLOCKING;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = 1;
		set->timeout = NVME_ADMIN_TIMEOUT;
	} else {
		set = &ctrl->tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_mq_ops;
		set->queue_depth = nctrl->sqsize + 1;
		set->reserved_tags = NVMF_RESERVED_TAGS;
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = nctrl->queue_count - 1;
		set->timeout = NVME_IO_TIMEOUT;
		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return set;
}

static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
		cancel_work_sync(&ctrl->async_event_work);
		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
	}

	nvme_tcp_free_queue(ctrl, 0);
}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_free_queue(ctrl, i);
}

static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_stop_queue(ctrl, i);
}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_stop_queue(ctrl, i);
	return ret;
}

static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (ret)
		return ret;

	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
	if (ret)
		goto out_free_queue;

	return 0;

out_free_queue:
	nvme_tcp_free_queue(ctrl, 0);
	return ret;
}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_alloc_queue(ctrl, i,
				ctrl->sqsize + 1);
		if (ret)
			goto out_free_queues;
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_free_queue(ctrl, i);

	return ret;
}

static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;

	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());

	return nr_io_queues;
}

static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
		unsigned int nr_io_queues)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvmf_ctrl_options *opts = nctrl->opts;

	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_write_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_io_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	}

	if (opts->nr_poll_queues && nr_io_queues) {
		/* map dedicated poll queues only if we have queues left */
		ctrl->io_queues[HCTX_TYPE_POLL] =
			min(opts->nr_poll_queues, nr_io_queues);
	}
}

static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
	if (ret)
		return ret;

	if (nr_io_queues == 0) {
		dev_err(ctrl->device,
			"unable to set any I/O queues\n");
		return -ENOMEM;
	}

	ctrl->queue_count = nr_io_queues + 1;
	dev_info(ctrl->device,
		"creating %d I/O queues.\n", nr_io_queues);

	nvme_tcp_set_io_queues(ctrl, nr_io_queues);

	return __nvme_tcp_alloc_io_queues(ctrl);
}

static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_io_queues(ctrl);
	if (remove) {
		blk_cleanup_queue(ctrl->connect_q);
		blk_mq_free_tag_set(ctrl->tagset);
	}
	nvme_tcp_free_io_queues(ctrl);
}

static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
	int ret;

	ret = nvme_tcp_alloc_io_queues(ctrl);
	if (ret)
		return ret;

	if (new) {
		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
		if (IS_ERR(ctrl->tagset)) {
			ret = PTR_ERR(ctrl->tagset);
			goto out_free_io_queues;
		}

		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
		if (IS_ERR(ctrl->connect_q)) {
			ret = PTR_ERR(ctrl->connect_q);
			goto out_free_tag_set;
		}
	}

	ret = nvme_tcp_start_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	if (!new) {
		nvme_start_queues(ctrl);
		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
			/*
			 * If we timed out waiting for freeze we are likely to
			 * be stuck. Fail the controller initialization just
			 * to be safe.
			 */
			ret = -ENODEV;
			goto out_wait_freeze_timed_out;
		}
		blk_mq_update_nr_hw_queues(ctrl->tagset,
			ctrl->queue_count - 1);
		nvme_unfreeze(ctrl);
	}

	return 0;

out_wait_freeze_timed_out:
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
	nvme_cancel_tagset(ctrl);
	if (new)
		blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
	if (new)
		blk_mq_free_tag_set(ctrl->tagset);
out_free_io_queues:
	nvme_tcp_free_io_queues(ctrl);
	return ret;
}

static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_queue(ctrl, 0);
	if (remove) {
		blk_cleanup_queue(ctrl->admin_q);
		blk_cleanup_queue(ctrl->fabrics_q);
		blk_mq_free_tag_set(ctrl->admin_tagset);
	}
	nvme_tcp_free_admin_queue(ctrl);
}

static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{
	int error;

	error = nvme_tcp_alloc_admin_queue(ctrl);
	if (error)
		return error;

	if (new) {
		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
		if (IS_ERR(ctrl->admin_tagset)) {
			error = PTR_ERR(ctrl->admin_tagset);
			goto out_free_queue;
		}

		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->fabrics_q)) {
			error = PTR_ERR(ctrl->fabrics_q);
			goto out_free_tagset;
		}

		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->admin_q)) {
			error = PTR_ERR(ctrl->admin_q);
			goto out_cleanup_fabrics_q;
		}
	}

	error = nvme_tcp_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = nvme_enable_ctrl(ctrl);
	if (error)
		goto out_stop_queue;

	nvme_start_admin_queue(ctrl);

	error = nvme_init_ctrl_finish(ctrl);
	if (error)
		goto out_quiesce_queue;

	return 0;

out_quiesce_queue:
	nvme_stop_admin_queue(ctrl);
	blk_sync_queue(ctrl->admin_q);
out_stop_queue:
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->admin_q);
out_cleanup_fabrics_q:
	if (new)
		blk_cleanup_queue(ctrl->fabrics_q);
out_free_tagset:
	if (new)
		blk_mq_free_tag_set(ctrl->admin_tagset);
out_free_queue:
	nvme_tcp_free_admin_queue(ctrl);
	return error;
}

static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
		bool remove)
{
	nvme_stop_admin_queue(ctrl);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	if (remove)
		nvme_start_admin_queue(ctrl);
	nvme_tcp_destroy_admin_queue(ctrl, remove);
}

static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
		bool remove)
{
	if (ctrl->queue_count <= 1)
		return;
	nvme_stop_admin_queue(ctrl);
	nvme_start_freeze(ctrl);
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	nvme_cancel_tagset(ctrl);
	if (remove)
		nvme_start_queues(ctrl);
	nvme_tcp_destroy_io_queues(ctrl, remove);
}

static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->state != NVME_CTRL_CONNECTING) {
		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
			ctrl->state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(ctrl)) {
		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
			ctrl->opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
				ctrl->opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->device, "Removing controller...\n");
		nvme_delete_ctrl(ctrl);
	}
}

static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = nvme_tcp_configure_admin_queue(ctrl, new);
	if (ret)
		return ret;

	if (ctrl->icdoff) {
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "icdoff is not supported!\n");
		goto destroy_admin;
	}

	if (!nvme_ctrl_sgl_supported(ctrl)) {
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
		goto destroy_admin;
	}

	if (opts->queue_size > ctrl->sqsize + 1)
		dev_warn(ctrl->device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->sqsize + 1);

	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
		dev_warn(ctrl->device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->sqsize + 1, ctrl->maxcmd);
		ctrl->sqsize = ctrl->maxcmd - 1;
	}

	if (ctrl->queue_count > 1) {
		ret = nvme_tcp_configure_io_queues(ctrl, new);
		if (ret)
			goto destroy_admin;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
		/*
		 * state change failure is ok if we started ctrl delete,
		 * unless we're during creation of a new controller to
		 * avoid races with teardown flow.
		 */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		WARN_ON_ONCE(new);
		ret = -EINVAL;
		goto destroy_io;
	}

	nvme_start_ctrl(ctrl);
	return 0;

destroy_io:
	if (ctrl->queue_count > 1) {
		nvme_stop_queues(ctrl);
		nvme_sync_io_queues(ctrl);
		nvme_tcp_stop_io_queues(ctrl);
		nvme_cancel_tagset(ctrl);
		nvme_tcp_destroy_io_queues(ctrl, new);
	}
destroy_admin:
	nvme_stop_admin_queue(ctrl);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	nvme_tcp_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
			struct nvme_tcp_ctrl, connect_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	++ctrl->nr_reconnects;

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto requeue;

	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
			ctrl->nr_reconnects);

	ctrl->nr_reconnects = 0;

	return;

requeue:
	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
			ctrl->nr_reconnects);
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
				struct nvme_tcp_ctrl, err_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	nvme_stop_keep_alive(ctrl);
	nvme_tcp_teardown_io_queues(ctrl, false);
	/* unquiesce to fail fast pending requests */
	nvme_start_queues(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, false);
	nvme_start_admin_queue(ctrl);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);

	nvme_tcp_teardown_io_queues(ctrl, shutdown);
	nvme_stop_admin_queue(ctrl);
	if (shutdown)
		nvme_shutdown_ctrl(ctrl);
	else
		nvme_disable_ctrl(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);

	nvme_stop_ctrl(ctrl);
	nvme_tcp_teardown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->nr_reconnects;
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}
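
/*
 * NVMe/TCP uses transport SGLs: a NULL descriptor when there is no
 * data, an in-capsule data descriptor for inline writes, and a
 * host-data transport descriptor otherwise.
 */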
static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req, true, true);
}

static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);
	}
}

static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;

	dev_warn(ctrl->device,
		"queue %d: timeout request %#x type %d\n",
		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}

static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}
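
/*
 * Map blk-mq hardware contexts onto the allocated queue ranges:
 * default (and write) queues first, then read queues (offset past the
 * defaults when dedicated), then poll queues.
 */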
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}

static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");