nvmet-tcp: support secure channel concatenation
authorHannes Reinecke <hare@kernel.org>
Mon, 24 Feb 2025 12:38:17 +0000 (13:38 +0100)
committerKeith Busch <kbusch@kernel.org>
Thu, 20 Mar 2025 23:53:54 +0000 (16:53 -0700)
Evaluate the SC_C flag during DH-CHAP-HMAC negotiation to check if secure
concatenation as specified in the NVMe Base Specification v2.1, section
8.3.4.3: "Secure Channel Concatenation" is requested. If requested, the
generated PSK is inserted into the keyring once negotiation has finished
allowing for an encrypted connection once the admin queue is restarted.

Signed-off-by: Hannes Reinecke <hare@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/target/auth.c
drivers/nvme/target/core.c
drivers/nvme/target/fabrics-cmd-auth.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/tcp.c

index d0392cc..0b0645a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/ctype.h>
 #include <linux/random.h>
 #include <linux/nvme-auth.h>
+#include <linux/nvme-keyring.h>
 #include <linux/unaligned.h>
 
 #include "nvmet.h"
@@ -165,6 +166,11 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
                goto out_unlock;
        }
 
+       if (nvmet_queue_tls_keyid(sq)) {
+               pr_debug("host %s tls enabled\n", ctrl->hostnqn);
+               goto out_unlock;
+       }
+
        ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
        if (ret < 0) {
                pr_warn("Failed to setup DH group");
@@ -233,6 +239,9 @@ out_unlock:
 void nvmet_auth_sq_free(struct nvmet_sq *sq)
 {
        cancel_delayed_work(&sq->auth_expired_work);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+       sq->tls_key = 0;
+#endif
        kfree(sq->dhchap_c1);
        sq->dhchap_c1 = NULL;
        kfree(sq->dhchap_c2);
@@ -261,6 +270,12 @@ void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
                nvme_auth_free_key(ctrl->ctrl_key);
                ctrl->ctrl_key = NULL;
        }
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+       if (ctrl->tls_key) {
+               key_put(ctrl->tls_key);
+               ctrl->tls_key = NULL;
+       }
+#endif
 }
 
 bool nvmet_check_auth_status(struct nvmet_req *req)
@@ -542,3 +557,58 @@ int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
 
        return ret;
 }
+
+/*
+ * Derive a TLS PSK from the DH-HMAC-CHAP session key and insert it into
+ * the nvme keyring, so the admin queue can be restarted over an encrypted
+ * connection (secure channel concatenation). Failures are logged and
+ * swallowed: authentication itself has already succeeded at this point.
+ */
+void nvmet_auth_insert_psk(struct nvmet_sq *sq)
+{
+       int hash_len = nvme_auth_hmac_hash_len(sq->ctrl->shash_id);
+       u8 *psk, *digest, *tls_psk;
+       size_t psk_len;
+       int ret;
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+       struct key *tls_key = NULL;
+#endif
+
+       /* Generated PSK is keyed on the session key and both challenges */
+       ret = nvme_auth_generate_psk(sq->ctrl->shash_id,
+                                    sq->dhchap_skey,
+                                    sq->dhchap_skey_len,
+                                    sq->dhchap_c1, sq->dhchap_c2,
+                                    hash_len, &psk, &psk_len);
+       if (ret) {
+               pr_warn("%s: ctrl %d qid %d failed to generate PSK, error %d\n",
+                       __func__, sq->ctrl->cntlid, sq->qid, ret);
+               return;
+       }
+       ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
+                                       sq->ctrl->subsysnqn,
+                                       sq->ctrl->hostnqn, &digest);
+       if (ret) {
+               pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
+                       __func__, sq->ctrl->cntlid, sq->qid, ret);
+               goto out_free_psk;
+       }
+       ret = nvme_auth_derive_tls_psk(sq->ctrl->shash_id, psk, psk_len,
+                                      digest, &tls_psk);
+       if (ret) {
+               pr_warn("%s: ctrl %d qid %d failed to derive TLS PSK, error %d\n",
+                       __func__, sq->ctrl->cntlid, sq->qid, ret);
+               goto out_free_digest;
+       }
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+       tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn,
+                                      sq->ctrl->shash_id, tls_psk, psk_len, digest);
+       if (IS_ERR(tls_key)) {
+               pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
+                       __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
+               tls_key = NULL;
+       }
+       /* Replace any previous key; tls_key may be NULL on refresh failure */
+       if (sq->ctrl->tls_key)
+               key_put(sq->ctrl->tls_key);
+       sq->ctrl->tls_key = tls_key;
+#endif
+       /*
+        * nvme_tls_psk_refresh() copies the PSK material and is not compiled
+        * in without TCP_TLS support, so the derived TLS PSK must be freed
+        * on every path (previously it leaked on success and when
+        * CONFIG_NVME_TARGET_TCP_TLS was disabled).
+        */
+       kfree_sensitive(tls_psk);
+out_free_digest:
+       kfree_sensitive(digest);
+out_free_psk:
+       kfree_sensitive(psk);
+}
index 112df89..a058f47 100644 (file)
@@ -1664,11 +1664,12 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
 
        args->status = NVME_SC_SUCCESS;
 
-       pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n",
+       pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s%s.\n",
                nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
                ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
                ctrl->pi_support ? " T10-PI is enabled" : "",
-               nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+               nvmet_has_auth(ctrl, args->sq) ? " with DH-HMAC-CHAP" : "",
+               nvmet_queue_tls_keyid(args->sq) ? ", TLS" : "");
 
        return ctrl;
 
index 43b684a..bf01ec4 100644 (file)
@@ -43,8 +43,26 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
                 data->auth_protocol[0].dhchap.halen,
                 data->auth_protocol[0].dhchap.dhlen);
        req->sq->dhchap_tid = le16_to_cpu(data->t_id);
-       if (data->sc_c)
-               return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+       if (data->sc_c != NVME_AUTH_SECP_NOSC) {
+               if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
+                       return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+               /* Secure concatenation can only be enabled on the admin queue */
+               if (req->sq->qid)
+                       return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+               switch (data->sc_c) {
+               case NVME_AUTH_SECP_NEWTLSPSK:
+                       if (nvmet_queue_tls_keyid(req->sq))
+                               return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+                       break;
+               case NVME_AUTH_SECP_REPLACETLSPSK:
+                       if (!nvmet_queue_tls_keyid(req->sq))
+                               return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+                       break;
+               default:
+                       return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+               }
+               ctrl->concat = true;
+       }
 
        if (data->napd != 1)
                return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
@@ -103,6 +121,12 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
                         nvme_auth_dhgroup_name(fallback_dhgid));
                ctrl->dh_gid = fallback_dhgid;
        }
+       if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) {
+               pr_debug("%s: ctrl %d qid %d: NULL DH group invalid "
+                        "for secure channel concatenation\n", __func__,
+                        ctrl->cntlid, req->sq->qid);
+               return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+       }
        pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
                 __func__, ctrl->cntlid, req->sq->qid,
                 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
@@ -148,12 +172,22 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
        if (memcmp(data->rval, response, data->hl)) {
                pr_info("ctrl %d qid %d host response mismatch\n",
                        ctrl->cntlid, req->sq->qid);
+               pr_debug("ctrl %d qid %d rval %*ph\n",
+                        ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+               pr_debug("ctrl %d qid %d response %*ph\n",
+                        ctrl->cntlid, req->sq->qid, data->hl, response);
                kfree(response);
                return NVME_AUTH_DHCHAP_FAILURE_FAILED;
        }
        kfree(response);
        pr_debug("%s: ctrl %d qid %d host authenticated\n",
                 __func__, ctrl->cntlid, req->sq->qid);
+       if (!data->cvalid && ctrl->concat) {
+               pr_debug("%s: ctrl %d qid %d invalid challenge\n",
+                        __func__, ctrl->cntlid, req->sq->qid);
+               return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+       }
+       req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
        if (data->cvalid) {
                req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
                                             GFP_KERNEL);
@@ -163,11 +197,23 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
                pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
                         __func__, ctrl->cntlid, req->sq->qid, data->hl,
                         req->sq->dhchap_c2);
-       } else {
+       }
+       /*
+        * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message
+        * Sequence Number (SEQNUM): [ .. ]
+        * The value 0h is used to indicate that bidirectional authentication
+        * is not performed, but a challenge value C2 is carried in order to
+        * generate a pre-shared key (PSK) for subsequent establishment of a
+        * secure channel.
+        */
+       if (req->sq->dhchap_s2 == 0) {
+               if (ctrl->concat)
+                       nvmet_auth_insert_psk(req->sq);
                req->sq->authenticated = true;
+               kfree(req->sq->dhchap_c2);
                req->sq->dhchap_c2 = NULL;
-       }
-       req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+       } else if (!data->cvalid)
+               req->sq->authenticated = true;
 
        return 0;
 }
@@ -303,6 +349,8 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
                }
                goto done_kfree;
        case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+               if (ctrl->concat)
+                       nvmet_auth_insert_psk(req->sq);
                req->sq->authenticated = true;
                pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
                         __func__, ctrl->cntlid, req->sq->qid);
index 5e8a3e1..f012bdf 100644 (file)
@@ -236,8 +236,22 @@ err:
 
 static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
 {
-       bool needs_auth = nvmet_has_auth(ctrl);
+       bool needs_auth = nvmet_has_auth(ctrl, sq);
+       key_serial_t keyid = nvmet_queue_tls_keyid(sq);
 
+       /* Do not authenticate I/O queues for secure concatenation */
+       if (ctrl->concat && sq->qid)
+               needs_auth = false;
+
+       /* Trace whether authentication is required, and on which transport */
+       if (keyid)
+               pr_debug("%s: ctrl %d qid %d should %sauthenticate, tls psk %08x\n",
+                        __func__, ctrl->cntlid, sq->qid,
+                        needs_auth ? "" : "not ", keyid);
+       else
+               pr_debug("%s: ctrl %d qid %d should %sauthenticate%s\n",
+                        __func__, ctrl->cntlid, sq->qid,
+                        needs_auth ? "" : "not ",
+                        ctrl->concat ? ", secure concatenation" : "");
+       /* Connect response: controller ID, plus AUTHREQ_ATR if auth is required */
        return (u32)ctrl->cntlid |
                (needs_auth ? NVME_CONNECT_AUTHREQ_ATR : 0);
 }
index e0cfdae..9f6110a 100644 (file)
@@ -164,6 +164,9 @@ struct nvmet_sq {
        u32                     dhchap_s2;
        u8                      *dhchap_skey;
        int                     dhchap_skey_len;
+#endif
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+       struct key              *tls_key;
 #endif
        struct completion       free_done;
        struct completion       confirm_done;
@@ -289,6 +292,7 @@ struct nvmet_ctrl {
        u64                     err_counter;
        struct nvme_error_slot  slots[NVMET_ERROR_LOG_SLOTS];
        bool                    pi_support;
+       bool                    concat;
 #ifdef CONFIG_NVME_TARGET_AUTH
        struct nvme_dhchap_key  *host_key;
        struct nvme_dhchap_key  *ctrl_key;
@@ -297,6 +301,9 @@ struct nvmet_ctrl {
        u8                      dh_gid;
        u8                      *dh_key;
        size_t                  dh_keysize;
+#endif
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+       struct key              *tls_key;
 #endif
        struct nvmet_pr_log_mgr pr_log_mgr;
 };
@@ -853,6 +860,22 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
                bio_put(bio);
 }
 
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+/* Serial number of the TLS PSK attached to this queue, or 0 if none */
+static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq)
+{
+       return sq->tls_key ? key_serial(sq->tls_key) : 0;
+}
+/* Drop the queue's reference to its TLS PSK, if it holds one */
+static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq)
+{
+       if (sq->tls_key) {
+               key_put(sq->tls_key);
+               sq->tls_key = NULL;
+       }
+}
+#else
+/* Without TCP/TLS support a queue never carries a TLS key */
+static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq) { return 0; }
+static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq) {}
+#endif
 #ifdef CONFIG_NVME_TARGET_AUTH
 u32 nvmet_auth_send_data_len(struct nvmet_req *req);
 void nvmet_execute_auth_send(struct nvmet_req *req);
@@ -871,14 +894,15 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
                         unsigned int hash_len);
 int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
                         unsigned int hash_len);
-static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+/* DH-HMAC-CHAP applies only if a host key is set and the queue is not on TLS */
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
 {
-       return ctrl->host_key != NULL;
+       return ctrl->host_key != NULL && !nvmet_queue_tls_keyid(sq);
 }
 int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
                                u8 *buf, int buf_size);
 int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
                            u8 *buf, int buf_size);
+void nvmet_auth_insert_psk(struct nvmet_sq *sq);
 #else
 static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl,
                                  struct nvmet_sq *sq)
@@ -894,11 +918,13 @@ static inline bool nvmet_check_auth_status(struct nvmet_req *req)
 {
        return true;
 }
-static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl,
+                                 struct nvmet_sq *sq)
 {
        return false;
 }
 static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+static inline void nvmet_auth_insert_psk(struct nvmet_sq *sq) {};
 #endif
 
 int nvmet_pr_init_ns(struct nvmet_ns *ns);
index fa59a79..9cf9743 100644 (file)
@@ -1072,10 +1072,11 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 
        if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvmet_tcp_ops))) {
-               pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
+               pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n",
                        req->cmd, req->cmd->common.command_id,
                        req->cmd->common.opcode,
-                       le32_to_cpu(req->cmd->common.dptr.sgl.length));
+                       le32_to_cpu(req->cmd->common.dptr.sgl.length),
+                       le16_to_cpu(req->cqe->status));
 
                nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
                return 0;
@@ -1601,6 +1602,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
        /* stop accepting incoming data */
        queue->rcv_state = NVMET_TCP_RECV_ERR;
 
+       nvmet_sq_put_tls_key(&queue->nvme_sq);
        nvmet_tcp_uninit_data_in_cmds(queue);
        nvmet_sq_destroy(&queue->nvme_sq);
        cancel_work_sync(&queue->io_work);
@@ -1786,6 +1788,27 @@ static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
        return 0;
 }
 
+/*
+ * Look up the TLS PSK negotiated during the handshake and attach it to the
+ * queue's SQ. On lookup failure the queue is marked failed (under state_lock)
+ * and the error is returned so the caller can release the queue.
+ */
+static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue,
+                                   key_serial_t peerid)
+{
+       struct key *tls_key = nvme_tls_key_lookup(peerid);
+       int status = 0;
+
+       if (IS_ERR(tls_key)) {
+               pr_warn("%s: queue %d failed to lookup key %x\n",
+                       __func__, queue->idx, peerid);
+               spin_lock_bh(&queue->state_lock);
+               queue->state = NVMET_TCP_Q_FAILED;
+               spin_unlock_bh(&queue->state_lock);
+               status = PTR_ERR(tls_key);
+       } else {
+               pr_debug("%s: queue %d using TLS PSK %x\n",
+                        __func__, queue->idx, peerid);
+               /* Reference is dropped via nvmet_sq_put_tls_key() at release */
+               queue->nvme_sq.tls_key = tls_key;
+       }
+       return status;
+}
+
 static void nvmet_tcp_tls_handshake_done(void *data, int status,
                                         key_serial_t peerid)
 {
@@ -1806,6 +1829,10 @@ static void nvmet_tcp_tls_handshake_done(void *data, int status,
        spin_unlock_bh(&queue->state_lock);
 
        cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
+
+       if (!status)
+               status = nvmet_tcp_tls_key_lookup(queue, peerid);
+
        if (status)
                nvmet_tcp_schedule_release_queue(queue);
        else