// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

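/*
 * The budgets bound how much work a single invocation of the io_work
 * handler may do: at most NVMET_TCP_RECV_BUDGET receive operations and
 * NVMET_TCP_SEND_BUDGET send operations per loop iteration, with the loop
 * itself capped at NVMET_TCP_IO_WORK_BUDGET operations before the work
 * item requeues itself (see nvmet_tcp_io_work()).
 */
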
enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	int				nr_mapped;
	struct msghdr			recv_msg;
	struct kvec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	int			cpu;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	int			last_cpu;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

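/*
 * NVMe/TCP digests are CRC32C over the PDU header (HDGST) or the PDU data
 * (DDGST). Both are computed through the kernel crypto API using the
 * per-queue "crc32c" ahash requests set up in nvmet_tcp_alloc_crypto().
 */
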
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
		void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));
}

static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	/* round down: we want the entry containing @offset, not the next one */
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		/* only the first entry may start mid-page */
		sg_offset = 0;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}

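/*
 * Fatal protocol or mapping errors tear the queue down: the receive state
 * machine is parked in NVMET_TCP_RECV_ERR and either the attached controller
 * is fatally failed or, if none is attached yet, the socket is shut down
 * directly.
 */
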
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	sgl_free(cmd->req.sg);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

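/*
 * Completions are pushed onto a lockless llist (resp_list) and spliced onto
 * the ordered resp_send_list only from io_work context, so the completion
 * path and the send path need no shared lock.
 */
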
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;

	node = llist_del_all(&queue->resp_list);
	if (!node)
		return;

	while (node) {
		struct nvmet_tcp_cmd *cmd = llist_entry(node,
					struct nvmet_tcp_cmd, lentry);

		list_add(&cmd->entry, &queue->resp_send_list);
		node = node->next;
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

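/*
 * ->queue_response() is invoked by the nvmet core when a request completes
 * and may run in any context; hence it only queues the command on the
 * lockless resp_list and kicks the io_work on the queue's CPU.
 */
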
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset  = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with this sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled) {
		kfree(cmd->iov);
		sgl_free(cmd->req.sg);
	}

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		/* cast to void * so the offset is in bytes, not __le32s */
		.iov_base = (void *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	/* partial send: resume from cmd->offset on the next invocation */
	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

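/*
 * Send path state machine: a command walks SEND_DATA_PDU -> SEND_DATA ->
 * (optionally SEND_DDGST) -> SEND_RESPONSE, or sends SEND_R2T when host
 * data is still outstanding. Each step may return -EAGAIN on a partial
 * send and is resumed from cmd->offset on the next invocation.
 */
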
static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

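/*
 * Connection establishment: the first PDU on a new queue must be an ICReq.
 * The target validates the PDU length, PFV and HPDA, negotiates header and
 * data digests, and answers with an ICResp before the queue goes live.
 */
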
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	if (!nvme_is_write(cmd->req.cmd) ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_map_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	/* ttag comes from the wire; never index cmds[] with it unchecked */
	if (unlikely(data->ttag >= queue->nr_cmds)) {
		pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
			queue->idx, data->ttag, queue->nr_cmds);
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}
	cmd = &queue->cmds[data->ttag];

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_map_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_map_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

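/*
 * Receive path state machine: RECV_PDU reads and validates a PDU header
 * (and its header digest), RECV_DATA pulls inline or H2C data into the
 * mapped iovec, and RECV_DDGST consumes the trailing data digest when data
 * digests are enabled.
 */
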
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	/* the digest covers only the header itself, not the digest field */
	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd  *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	nvmet_tcp_unmap_pdu_iovec(cmd);

	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
	    cmd->rbytes_done == cmd->req.transfer_len) {
		if (queue->data_digest) {
			nvmet_tcp_prep_recv_ddgst(cmd);
			return 0;
		}
		cmd->req.execute(&cmd->req);
	}

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_tcp_finish_cmd(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
	    cmd->rbytes_done == cmd->req.transfer_len)
		cmd->req.execute(&cmd->req);
	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		schedule_work(&queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

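/*
 * Per-queue worker: alternates receive and send processing under the
 * configured budgets and requeues itself while work remains pending,
 * yielding the CPU between bursts instead of looping indefinitely.
 */
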
static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * We exhausted our budget, requeue ourselves
	 */
	if (pending)
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
}

static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
	nvmet_req_uninit(&cmd->req);
	nvmet_tcp_unmap_pdu_iovec(cmd);
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_finish_cmd(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_tcp_finish_cmd(&queue->connect);
	}
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	flush_work(&queue->io_work);

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

	kfree(queue);
}

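/*
 * Socket callbacks: installed under sk_callback_lock in
 * nvmet_tcp_set_queue_sock(). They only kick the io_work (or schedule queue
 * release); all real processing happens in workqueue context.
 */
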
static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	write_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		sk->sk_user_data = NULL;
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	write_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = queue;
	queue->data_ready = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = nvmet_tcp_data_ready;
	queue->state_change = sock->sk->sk_state_change;
	sock->sk->sk_state_change = nvmet_tcp_state_change;
	queue->write_space = sock->sk->sk_write_space;
	sock->sk->sk_write_space = nvmet_tcp_write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return ret;
}

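/*
 * A freshly accepted socket gets a queue: allocate state, reserve an index,
 * pre-allocate the connect command, initialize the SQ, spread queues across
 * online CPUs round-robin, then install the socket callbacks and kick the
 * io_work.
 */
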
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	port->last_cpu = cpumask_next_wrap(port->last_cpu,
				cpu_online_mask, -1, false);
	queue->cpu = port->last_cpu;
	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		schedule_work(&port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	port->last_cpu = -1;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;
err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);

	sock_release(port->sock);
	kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}

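/*
 * For a port bound to the wildcard address, discovery log pages must report
 * the address the connection actually arrived on rather than 0.0.0.0/::.
 */
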
static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.has_keyed_sgls		= 0,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_scheduled_work();
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_scheduled_work();

	destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */