/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"

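/*
 * Property Set: only writes with attribute bit 0 clear (4-byte properties)
 * are accepted, and the only writable property is the Controller
 * Configuration (CC) register; everything else fails with Invalid Field.
 */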
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
        u16 status = 0;

        if (!(req->cmd->prop_set.attrib & 1)) {
                u64 val = le64_to_cpu(req->cmd->prop_set.value);

                switch (le32_to_cpu(req->cmd->prop_set.offset)) {
                case NVME_REG_CC:
                        nvmet_update_cc(req->sq->ctrl, val);
                        break;
                default:
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
        } else {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        nvmet_req_complete(req, status);
}

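/*
 * Property Get: 8-byte reads (attribute bit 0 set) are supported for CAP
 * only; 4-byte reads are supported for VS, CC and CSTS.  The property value
 * is returned in the 64-bit completion result.
 */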
static void nvmet_execute_prop_get(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = 0;
        u64 val = 0;

        if (req->cmd->prop_get.attrib & 1) {
                switch (le32_to_cpu(req->cmd->prop_get.offset)) {
                case NVME_REG_CAP:
                        val = ctrl->cap;
                        break;
                default:
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
        } else {
                switch (le32_to_cpu(req->cmd->prop_get.offset)) {
                case NVME_REG_VS:
                        val = ctrl->subsys->ver;
                        break;
                case NVME_REG_CC:
                        val = ctrl->cc;
                        break;
                case NVME_REG_CSTS:
                        val = ctrl->csts;
                        break;
                default:
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
        }

        req->rsp->result.u64 = cpu_to_le64(val);
        nvmet_req_complete(req, status);
}

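/*
 * Called for fabrics command capsules received on an already connected
 * queue: only Property Set and Property Get are handled here, anything
 * else is rejected as an invalid opcode.
 */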
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        req->ns = NULL;

        switch (cmd->fabrics.fctype) {
        case nvme_fabrics_type_property_set:
                req->data_len = 0;
                req->execute = nvmet_execute_prop_set;
                break;
        case nvme_fabrics_type_property_get:
                req->data_len = 0;
                req->execute = nvmet_execute_prop_get;
                break;
        default:
                pr_err("received unknown capsule type 0x%x\n",
                        cmd->fabrics.fctype);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        return 0;
}

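/*
 * Bind the controller to the queue pair the Connect capsule arrived on.
 * The cmpxchg() on sq->ctrl guards against a queue that is already
 * connected; on success the CQ and SQ are set up with the capsule's
 * qid and sqsize.
 */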
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        u16 qid = le16_to_cpu(c->qid);
        u16 sqsize = le16_to_cpu(c->sqsize);
        struct nvmet_ctrl *old;

        old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
        if (old) {
                pr_warn("queue already connected!\n");
                return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
        }

        nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
        nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
        return 0;
}

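/*
 * Connect on the admin queue (qid 0): validate the connect data, allocate
 * a new controller for this host/subsystem pair and install the admin
 * queue.  The assigned controller ID is returned in the completion result.
 */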
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        struct nvmf_connect_data *d;
        struct nvmet_ctrl *ctrl = NULL;
        u16 status = 0;

        d = kmalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
                status = NVME_SC_INTERNAL;
                goto complete;
        }

        status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
        if (status)
                goto out;

        /* zero out initial completion result, assign values as needed */
        req->rsp->result.u32 = 0;

        if (c->recfmt != 0) {
                pr_warn("invalid connect version (%d).\n",
                        le16_to_cpu(c->recfmt));
                status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
                goto out;
        }

        if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
                pr_warn("connect attempt for invalid controller ID %#x\n",
                        le16_to_cpu(d->cntlid));
                status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
                goto out;
        }

        status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
                                  le32_to_cpu(c->kato), &ctrl);
        if (status)
                goto out;
        uuid_copy(&ctrl->hostid, &d->hostid);

        status = nvmet_install_queue(ctrl, req);
        if (status) {
                nvmet_ctrl_put(ctrl);
                goto out;
        }

        pr_info("creating controller %d for subsystem %s for NQN %s.\n",
                ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
        req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
        kfree(d);
complete:
        nvmet_req_complete(req, status);
}

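/*
 * Connect on an I/O queue: look up the controller previously created by the
 * admin Connect (matched by subsystem NQN, host NQN and controller ID),
 * check the queue ID against the subsystem's max_qid and install the queue.
 */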
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        struct nvmf_connect_data *d;
        struct nvmet_ctrl *ctrl = NULL;
        u16 qid = le16_to_cpu(c->qid);
        u16 status = 0;

        d = kmalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
                status = NVME_SC_INTERNAL;
                goto complete;
        }

        status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
        if (status)
                goto out;

        /* zero out initial completion result, assign values as needed */
        req->rsp->result.u32 = 0;

        if (c->recfmt != 0) {
                pr_warn("invalid connect version (%d).\n",
                        le16_to_cpu(c->recfmt));
                status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
                goto out;
        }

        status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
                                     le16_to_cpu(d->cntlid),
                                     req, &ctrl);
        if (status)
                goto out;

        if (unlikely(qid > ctrl->subsys->max_qid)) {
                pr_warn("invalid queue id (%d)\n", qid);
                status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
                goto out_ctrl_put;
        }

        status = nvmet_install_queue(ctrl, req);
        if (status) {
                /* pass back cntlid that had the issue of installing queue */
                req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
                goto out_ctrl_put;
        }

        pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
        kfree(d);
complete:
        nvmet_req_complete(req, status);
        return;

out_ctrl_put:
        nvmet_ctrl_put(ctrl);
        goto out;
}

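/*
 * Called for command capsules received on a queue that has no controller
 * attached yet: only a Fabrics Connect command is valid here.  qid 0 is
 * routed to the admin connect handler, all other qids to the I/O one.
 */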
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        req->ns = NULL;

        if (cmd->common.opcode != nvme_fabrics_command) {
                pr_err("invalid command 0x%x on unconnected queue.\n",
                        cmd->fabrics.opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
        if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
                pr_err("invalid capsule type 0x%x on unconnected queue.\n",
                        cmd->fabrics.fctype);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        req->data_len = sizeof(struct nvmf_connect_data);
        if (cmd->connect.qid == 0)
                req->execute = nvmet_execute_admin_connect;
        else
                req->execute = nvmet_execute_io_connect;
        return 0;
}