// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

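/*
 * Tear down a file-backed namespace: flush the shared buffered-io workqueue
 * so no queued work still references this file, free the bvec mempool and
 * slab cache, and drop the reference on the backing file.
 */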
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

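/*
 * Open the backing file (O_DIRECT unless buffered_io is enabled), record its
 * size and block-size shift, and create the bio_vec slab cache plus mempool
 * used as a fallback allocation path under memory pressure.
 */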
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	struct kstat stat;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		pr_err("failed to open file %s: (%ld)\n",
				ns->device_path, PTR_ERR(ns->file));
		return PTR_ERR(ns->file);
	}

	ret = vfs_getattr(&ns->file->f_path,
			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
	if (ret)
		goto err;

	ns->size = stat.size;
	ns->blksize_shift = file_inode(ns->file)->i_blkbits;

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);
	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

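/*
 * Translate one scatterlist page into a bio_vec entry; the read/write path
 * builds an array of these and hands it to an iov_iter for submission.
 */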
static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
{
	bv->bv_page = sg_page_iter_page(iter);
	bv->bv_offset = iter->sg->offset;
	bv->bv_len = PAGE_SIZE - iter->sg->offset;
}

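/*
 * Build a bvec-backed iov_iter over the prepared segments and call the
 * backing file's ->read_iter/->write_iter directly.  FUA writes are mapped
 * to IOCB_DSYNC.  If the call completes synchronously (anything other than
 * -EIOCBQUEUED), the completion handler is invoked here when one is set.
 */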
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int ki_flags = 0;
	int rw;
	ssize_t ret;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags = IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	ret = call_iter(iocb, &iter);

	if (ret != -EIOCBQUEUED && iocb->ki_complete)
		iocb->ki_complete(iocb, ret, 0);

	return ret;
}

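/*
 * I/O completion: free the bvec array unless the inline one was used, then
 * complete the request; a short read or write is reported as an internal
 * error.
 */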
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	nvmet_req_complete(req, ret != req->data_len ?
			NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}

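/*
 * Read/write handler: convert the request scatterlist into a bio_vec array
 * (inline, kmalloc'd, or taken from the mempool under memory pressure) and
 * submit it asynchronously in one shot.  When only the small mempool buffer
 * is available, the transfer is instead split into NVMET_MAX_MPOOL_BVEC-sized
 * chunks that are submitted synchronously.
 */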
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
	struct sg_page_iter sg_pg_iter;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->data_len > req->ns->size)) {
		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	req->f.mpool_alloc = false;
	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
		if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
			is_sync = true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
			if (ret < 0)
				goto out;
			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->data_len))
		ret = -EIO;
out:
	if (unlikely(is_sync || ret)) {
		nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
		return;
	}
	req->f.iocb.ki_complete = nvmet_file_io_done;
	nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
}

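/*
 * Buffered-io path: run the same read/write handler, but from the
 * buffered_io_wq workqueue so potentially blocking page-cache I/O happens
 * off the submission path.
 */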
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_rw(req);
}

static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

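/* Flush maps to vfs_fsync() with datasync semantics on the backing file. */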
u16 nvmet_file_flush(struct nvmet_req *req)
{
	if (vfs_fsync(req->ns->file, 1) < 0)
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

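/*
 * Discard (Dataset Management / Deallocate): punch a hole in the backing
 * file for each range in the command, stopping at the first range that
 * fails to copy, falls outside the namespace, or cannot be fallocated.
 */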
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (ret)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
			break;
		}

		if (vfs_fallocate(req->ns->file, mode, offset, len)) {
			ret = NVME_SC_INTERNAL | NVME_SC_DNR;
			break;
		}
	}

	nvmet_req_complete(req, ret);
}

static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

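/*
 * Write Zeroes: zero the target byte range with
 * fallocate(FALLOC_FL_ZERO_RANGE) instead of writing zero-filled buffers.
 */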
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

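/*
 * Parse an I/O command for a file-backed namespace: select the execute
 * handler and expected data length per opcode, or fail with Invalid Opcode.
 */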
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		if (req->ns->buffered_io)
			req->execute = nvmet_file_execute_rw_buffered_io;
		else
			req->execute = nvmet_file_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}