// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
 */
static int idle_poll_period_usecs;
module_param(idle_poll_period_usecs, int, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs");

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;
	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	int				nr_mapped;
	struct msghdr			recv_msg;
	struct kvec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	unsigned long		poll_end;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);

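/* The transfer tag (ttag) placed in R2T PDUs is simply the command's index
 * in the queue's cmds array; 0xffff marks the initial connect command,
 * which is allocated before that array exists.
 */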
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->sg_idx = 0;
	cmd->nr_mapped = 0;

	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

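/* Compute the crc32c header digest over the PDU header and store it in the
 * digest slot immediately following the header bytes (pdu + len).
 */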
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
		void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));
}

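/* Build a kvec array over the command's scatterlist pages (kmap'ed) so that
 * incoming H2C data can be received directly into the destination buffers
 * via sock_recvmsg(); nvmet_tcp_unmap_pdu_iovec() undoes the mapping.
 */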
static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	sgl_free(cmd->req.sg);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist sg;
	struct kvec *iov;
	int i;

	crypto_ahash_init(hash);
	for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
		sg_init_one(&sg, iov->iov_base, iov->iov_len);
		ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
		crypto_ahash_update(hash);
	}
	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
	crypto_ahash_final(hash);
}

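/* The nvmet_setup_*_pdu() helpers below prepare the next PDU to transmit for
 * a command (C2H data, R2T, or capsule response) and set cmd->state for the
 * send state machine driven by nvmet_tcp_try_send_one().
 */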
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

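/* Pick the next command to transmit: drain the lockless resp_list into
 * resp_send_list first, then set up the appropriate PDU for the head of
 * that list.
 */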
static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled) {
		kfree(cmd->iov);
		sgl_free(cmd->req.sg);
	}

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

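/* Handle the connection initialization request (ICReq): validate the peer's
 * parameters, negotiate header/data digests, and reply with an ICResp before
 * moving the queue to NVMET_TCP_Q_LIVE.
 */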
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	if (!nvme_is_write(cmd->req.cmd) ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_map_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

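/* A host-to-controller data PDU carries (a chunk of) the write data that was
 * previously solicited by an R2T; the ttag locates the command it belongs to.
 */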
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	if (likely(queue->nr_cmds))
		cmd = &queue->cmds[data->ttag];
	else
		cmd = &queue->connect;

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_map_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_map_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

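/* Receive state machine, PDU phase: read the common header first, then the
 * remainder of the PDU based on its type, verifying digests before
 * dispatching to nvmet_tcp_done_recv_pdu().
 */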
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	nvmet_tcp_unmap_pdu_iovec(cmd);
	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_tcp_finish_cmd(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		schedule_work(&queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{
	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
}

static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
		int ops)
{
	if (!idle_poll_period_usecs)
		return false;

	if (ops)
		nvmet_tcp_arm_queue_deadline(queue);

	return !time_after(jiffies, queue->poll_end);
}

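/* Main per-queue worker: alternate between the receive and send budgets
 * until no progress is made or NVMET_TCP_IO_WORK_BUDGET is consumed, then
 * requeue itself if work (or an idle-poll deadline) is still pending.
 */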
static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * Requeue the worker if idle deadline period is in progress or any
	 * ops activity was recorded during the do-while loop above.
	 */
	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}

static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
	nvmet_req_uninit(&cmd->req);
	nvmet_tcp_unmap_pdu_iovec(cmd);
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_finish_cmd(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_tcp_finish_cmd(&queue->connect);
	}
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	flush_work(&queue->io_work);

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

	kfree(queue);
}

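/* The socket callbacks below run in softirq context under sk_callback_lock;
 * they look up the queue from sk_user_data and either kick io_work or
 * schedule queue release on connection teardown.
 */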
static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

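/* Final queue setup: snapshot the socket addresses, apply socket options
 * (linger off, priority, ToS), and install the nvmet callbacks, provided
 * the connection is still established.
 */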
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	ret = 0;
	write_lock_bh(&sock->sk->sk_callback_lock);
	if (sock->sk->sk_state != TCP_ESTABLISHED) {
		/*
		 * If the socket is already closing, don't even start
		 * consuming it
		 */
		ret = -ENOTCONN;
	} else {
		sock->sk->sk_user_data = queue;
		queue->data_ready = sock->sk->sk_data_ready;
		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
		queue->state_change = sock->sk->sk_state_change;
		sock->sk->sk_state_change = nvmet_tcp_state_change;
		queue->write_space = sock->sk->sk_write_space;
		sock->sk->sk_write_space = nvmet_tcp_write_space;
		if (idle_poll_period_usecs)
			nvmet_tcp_arm_queue_deadline(queue);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return ret;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		schedule_work(&port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);

	sock_release(port->sock);
	kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

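/* Allocate the per-queue command array once the SQ size is known from the
 * fabrics connect command; nr_cmds is twice the SQ depth.
 */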
static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_scheduled_work();
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_scheduled_work();

	destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */