// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scattergather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD	4

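/*
 * Concretely, a request contributes one scatter-gather element for the
 * fuse_in_header, up to one for the packed in args, one for the
 * fuse_out_header and up to one for the packed out args, on top of any page
 * elements; see sg_count_fuse_req() below.
 */
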
/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion in device removal and mounting path
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

#define VQ_NAME_LEN	24

/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;     /* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;
	struct list_head end_reqs;	/* End these requests */
	struct delayed_work dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
	struct kref refcount;
	struct list_head list;    /* on virtio_fs_instances */
	char *tag;
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;               /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};

struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on virt queue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};

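/*
 * Forgets are fire-and-forget: the buffer is allocated with __GFP_NOFAIL in
 * virtio_fs_wake_forget_and_unlock() and freed either by the hiprio
 * completion work once the device has consumed it, or on a permanent send
 * failure.
 */
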
struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight);

static const struct constant_table dax_param_enums[] = {
	{"always",	FUSE_DAX_ALWAYS },
	{"never",	FUSE_DAX_NEVER },
	{"inode",	FUSE_DAX_INODE_USER },
	{}
};

enum {
	OPT_DAX,
	OPT_DAX_ENUM,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
	{}
};

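/*
 * Example usage from a guest (the device's tag is the mount source):
 *
 *   mount -t virtiofs myfs /mnt
 *   mount -t virtiofs -o dax=always myfs /mnt
 *
 * A bare "dax" behaves like "dax=always"; "dax=inode" defers the decision to
 * per-inode hints supplied by the server.
 */
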
static int virtio_fs_parse_param(struct fs_context *fsc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;

	opt = fs_parse(fsc, virtio_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_DAX:
		ctx->dax_mode = FUSE_DAX_ALWAYS;
		break;
	case OPT_DAX_ENUM:
		ctx->dax_mode = result.uint_32;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void virtio_fs_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	kfree(ctx);
}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}

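/*
 * virtio_fs_drain_queue() reinits and then sleeps on in_flight_zero, so the
 * completion must be signalled only here, when the count actually reaches
 * zero.
 */
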
static void release_virtio_fs_obj(struct kref *ref)
{
	struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);

	kfree(vfs->vqs);
	kfree(vfs);
}

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{
	kref_put(&fs->refcount, release_virtio_fs_obj);
}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
	struct virtio_fs *vfs = fiq->priv;

	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(vfs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in flight requests to finish. */
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	flush_work(&fsvq->done_work);
	flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		virtio_fs_drain_queue(fsvq);
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between ->remove and ->kill_sb
	 * paths. We don't want both of these draining queues at the
	 * same time. Current completion logic reinits completion
	 * and that means there should not be any other thread
	 * doing reinit or waiting for completion already.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = true;
		spin_unlock(&fsvq->lock);
	}
}

/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_fs *fs)
{
	struct virtio_fs *fs2;
	bool duplicate = false;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0)
			duplicate = true;
	}

	if (!duplicate)
		list_add_tail(&fs->list, &virtio_fs_instances);

	mutex_unlock(&virtio_fs_mutex);

	if (duplicate)
		return -EEXIST;
	return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kref_get(&fs->refcount);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{
	unsigned int i;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		if (!fsvq->fud)
			continue;

		fuse_dev_free(fsvq->fud);
		fsvq->fud = NULL;
	}
}

/* Read filesystem name from virtio config into fs->tag (must kfree() it). */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';
	return 0;
}

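/*
 * Note: the config-space tag field is not NUL-terminated when the tag
 * occupies the entire array, which is why the length is probed with memchr()
 * above rather than strlen().
 */
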
/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			kfree(req);
			dec_in_flight_req(fsvq);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		ret = virtio_fs_enqueue_req(fsvq, req, true);
		if (ret < 0) {
			if (ret == -ENOMEM || ret == -ENOSPC) {
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				schedule_delayed_work(&fsvq->dispatch_work,
						      msecs_to_jiffies(1));
				spin_unlock(&fsvq->lock);
				return;
			}
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}

static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
						  struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	unsigned int offset = 0;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int len;
	unsigned int i;

	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
	      fuse_len_args(num_out, args->out_args);

	req->argbuf = kmalloc(len, GFP_ATOMIC);
	if (!req->argbuf)
		return -ENOMEM;

	for (i = 0; i < num_in; i++) {
		memcpy(req->argbuf + offset,
		       args->in_args[i].value,
		       args->in_args[i].size);
		offset += args->in_args[i].size;
	}

	return 0;
}

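/*
 * The bounce buffer is needed because args may point into the submitting
 * task's stack, and stack memory cannot be handed to the virtqueue for DMA
 * (see the comment in virtio_fs_enqueue_req()).
 */
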
/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
	unsigned int remaining;
	unsigned int offset;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int i;

	remaining = req->out.h.len - sizeof(req->out.h);
	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

	for (i = 0; i < num_out; i++) {
		unsigned int argsize = args->out_args[i].size;

		if (args->out_argvar &&
		    i == args->out_numargs - 1 &&
		    argsize > remaining) {
			argsize = remaining;
		}

		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
		offset += argsize;

		if (i != args->out_numargs - 1)
			remaining -= argsize;
	}

	/* Store the actual size of the variable-length arg */
	if (args->out_argvar)
		args->out_args[args->out_numargs - 1].size = remaining;

	kfree(req->argbuf);
	req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct fuse_args *args;
	struct fuse_args_pages *ap;
	unsigned int len, i, thislen;
	struct page *page;

	/*
	 * TODO verify that server properly follows FUSE protocol
	 * (oh.uniq, oh.len)
	 */
	args = req->args;
	copy_args_from_argbuf(args, req);

	if (args->out_pages && args->page_zeroing) {
		len = args->out_args[args->out_numargs - 1].size;
		ap = container_of(args, typeof(*ap), args);
		for (i = 0; i < ap->num_pages; i++) {
			thislen = ap->descs[i].length;
			if (len < thislen) {
				WARN_ON(ap->descs[i].offset);
				page = ap->pages[i];
				zero_user_segment(page, len, thislen);
				len = 0;
			} else {
				len -= thislen;
			}
		}
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
	spin_lock(&fsvq->lock);
	dec_in_flight_req(fsvq);
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
	struct virtio_fs_req_work *w =
		container_of(work, typeof(*w), done_work);

	virtio_fs_request_complete(w->req, w->fsvq);
	kfree(w);
}

static void virtio_fs_requests_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct virtqueue *vq = fsvq->vq;
	struct fuse_req *req;
	struct fuse_req *next;
	unsigned int len;
	LIST_HEAD(reqs);

	/* Collect completed requests off the virtqueue */
	spin_lock(&fsvq->lock);
	do {
		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_lock(&fpq->lock);
			list_move_tail(&req->list, &reqs);
			spin_unlock(&fpq->lock);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);

	/* End requests */
	list_for_each_entry_safe(req, next, &reqs, list) {
		list_del_init(&req->list);

		/* blocking async request completes in a worker context */
		if (req->args->may_block) {
			struct virtio_fs_req_work *w;

			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
			w->fsvq = fsvq;
			w->req = req;
			schedule_work(&w->done_work);
		} else {
			virtio_fs_request_complete(req, fsvq);
		}
	}
}

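/*
 * Completions that may block (req->args->may_block) are bounced to their own
 * work item above so they cannot stall completion of the other requests
 * collected off the virtqueue.
 */
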
/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

	schedule_work(&fsvq->done_work);
}

static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{
	strscpy(fsvq->name, name, VQ_NAME_LEN);
	spin_lock_init(&fsvq->lock);
	INIT_LIST_HEAD(&fsvq->queued_reqs);
	INIT_LIST_HEAD(&fsvq->end_reqs);
	init_completion(&fsvq->in_flight_zero);

	if (vq_type == VQ_REQUEST) {
		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_request_dispatch_work);
	} else {
		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_hiprio_dispatch_work);
	}
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{
	struct virtqueue **vqs;
	vq_callback_t **callbacks;
	const char **names;
	unsigned int i;
	int ret = 0;

	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
			&fs->num_request_queues);
	if (fs->num_request_queues == 0)
		return -EINVAL;

	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
	if (!fs->vqs)
		return -ENOMEM;

	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
					GFP_KERNEL);
	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
	if (!vqs || !callbacks || !names) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the hiprio/forget request virtqueue */
	callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

	/* Initialize the requests virtqueues */
	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		char vq_name[VQ_NAME_LEN];

		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
		callbacks[i] = virtio_fs_vq_done;
		names[i] = fs->vqs[i].name;
	}

	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
	if (ret < 0)
		goto out;

	for (i = 0; i < fs->nvqs; i++)
		fs->vqs[i].vq = vqs[i];

	virtio_fs_start_all_queues(fs);
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (ret) {
		kfree(fs->vqs);
		fs->vqs = NULL;
	}
	return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
				  struct virtio_fs *fs)
{
	vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
					PFN_DEV | PFN_MAP);
	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

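/*
 * Partial-mapping semantics: if the requested range extends past the end of
 * the DAX window, fewer than nr_pages are reported and the caller is
 * expected to retry for the remainder.
 */
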
static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
	if (rc < 0)
		return rc;
	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
	.zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
	struct dax_device *dax_dev = data;

	kill_dax(dax_dev);
	put_dax(dax_dev);
}

static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap. So
	 * initialize a struct resource from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->range = (struct range) {
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};
	pgmap->nr_range = 1;

	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	fs->window_len = (phys_addr_t) cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
	if (IS_ERR(fs->dax_dev))
		return PTR_ERR(fs->dax_dev);

	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}

static int virtio_fs_probe(struct virtio_device *vdev)
{
	struct virtio_fs *fs;
	int ret;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return -ENOMEM;
	kref_init(&fs->refcount);
	vdev->priv = fs;

	ret = virtio_fs_read_tag(vdev, fs);
	if (ret < 0)
		goto out;

	ret = virtio_fs_setup_vqs(vdev, fs);
	if (ret < 0)
		goto out;

	/* TODO vq affinity */

	ret = virtio_fs_setup_dax(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	/* Bring the device online in case the filesystem is mounted and
	 * requests need to be sent before we return.
	 */
	virtio_device_ready(vdev);

	ret = virtio_fs_add_instance(fs);
	if (ret < 0)
		goto out_vqs;

	return 0;

out_vqs:
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);
	kfree(fs->vqs);

out:
	vdev->priv = NULL;
	kfree(fs);
	return ret;
}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = false;
		spin_unlock(&fsvq->lock);
	}
}

static void virtio_fs_remove(struct virtio_device *vdev)
{
	struct virtio_fs *fs = vdev->priv;

	mutex_lock(&virtio_fs_mutex);
	/* This device is going away. No one should get new reference */
	list_del_init(&fs->list);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);

	vdev->priv = NULL;
	/* Put device reference on virtio_fs object */
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
	/* TODO need to save state here */
	pr_warn("virtio-fs: suspend/resume not yet supported\n");

	return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
	/* TODO need to restore state here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
	{},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= feature_table,
	.feature_table_size	= ARRAY_SIZE(feature_table),
	.probe			= virtio_fs_probe,
	.remove			= virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze			= virtio_fs_freeze,
	.restore		= virtio_fs_restore,
#endif
};

static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	struct fuse_forget_link *link;
	struct virtio_fs_forget *forget;
	struct virtio_fs_forget_req *req;
	struct virtio_fs *fs;
	struct virtio_fs_vq *fsvq;
	u64 unique;

	link = fuse_dequeue_forget(fiq, 1, NULL);
	unique = fuse_get_unique(fiq);

	fs = fiq->priv;
	fsvq = &fs->vqs[VQ_HIPRIO];
	spin_unlock(&fiq->lock);

	/* Allocate a buffer for the request */
	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
	req = &forget->req;

	req->ih = (struct fuse_in_header){
		.opcode = FUSE_FORGET,
		.nodeid = link->forget_one.nodeid,
		.unique = unique,
		.len = sizeof(*req),
	};
	req->arg = (struct fuse_forget_in){
		.nlookup = link->forget_one.nlookup,
	};

	send_forget_request(fsvq, forget, false);
	kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	/*
	 * TODO interrupts.
	 *
	 * Normal fs operations on local filesystems aren't interruptible.
	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
	 * with a shared lock between host and guest.
	 */
	spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
					unsigned int num_pages,
					unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		this_len = min(page_descs[i].length, total_len);
		total_len -= this_len;
	}

	return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
	unsigned int size, total_sgs = 1 /* fuse_in_header */;

	if (args->in_numargs - args->in_pages)
		total_sgs += 1;

	if (args->in_pages) {
		size = args->in_args[args->in_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	if (!test_bit(FR_ISREPLY, &req->flags))
		return total_sgs;

	total_sgs += 1 /* fuse_out_header */;

	if (args->out_numargs - args->out_pages)
		total_sgs += 1;

	if (args->out_pages) {
		size = args->out_args[args->out_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	return total_sgs;
}

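/*
 * For illustration: a FUSE_READ whose reply data lands in guest pages needs
 * one element for the fuse_in_header, one for the packed fuse_read_in arg,
 * one for the fuse_out_header and one per data page; the non-page elements
 * are what FUSE_HEADER_OVERHEAD budgets for.
 */
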
/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
				       struct page **pages,
				       struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		sg_init_table(&sg[i], 1);
		this_len = min(page_descs[i].length, total_len);
		sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
		total_len -= this_len;
	}

	return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
	unsigned int total_sgs = 0;
	unsigned int len;

	len = fuse_len_args(numargs - argpages, args);
	if (len)
		sg_init_one(&sg[total_sgs++], argbuf, len);

	if (argpages)
		total_sgs += sg_init_fuse_pages(&sg[total_sgs],
						ap->pages, ap->descs,
						ap->num_pages,
						args[numargs - 1].size);

	if (len_used)
		*len_used = len;

	return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight)
{
	/* requests need at least 4 elements */
	struct scatterlist *stack_sgs[6];
	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
	struct scatterlist **sgs = stack_sgs;
	struct scatterlist *sg = stack_sg;
	struct virtqueue *vq;
	struct fuse_args *args = req->args;
	unsigned int argbuf_used = 0;
	unsigned int out_sgs = 0;
	unsigned int in_sgs = 0;
	unsigned int total_sgs;
	unsigned int i;
	int ret;
	bool notify;
	struct fuse_pqueue *fpq;

	/* Does the sglist fit on the stack? */
	total_sgs = sg_count_fuse_req(req);
	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
		sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
		if (!sgs || !sg) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Use a bounce buffer since stack args cannot be mapped */
	ret = copy_args_to_argbuf(req);
	if (ret < 0)
		goto out;

	/* Request elements */
	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
				     (struct fuse_arg *)args->in_args,
				     args->in_numargs, args->in_pages,
				     req->argbuf, &argbuf_used);

	/* Reply elements */
	if (test_bit(FR_ISREPLY, &req->flags)) {
		sg_init_one(&sg[out_sgs + in_sgs++],
			    &req->out.h, sizeof(req->out.h));
		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
					    args->out_args, args->out_numargs,
					    args->out_pages,
					    req->argbuf + argbuf_used, NULL);
	}

	WARN_ON(out_sgs + in_sgs != total_sgs);

	for (i = 0; i < total_sgs; i++)
		sgs[i] = &sg[i];

	spin_lock(&fsvq->lock);

	if (!fsvq->connected) {
		spin_unlock(&fsvq->lock);
		ret = -ENOTCONN;
		goto out;
	}

	vq = fsvq->vq;
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&fsvq->lock);
		goto out;
	}

	/* Request successfully sent. */
	fpq = &fsvq->fud->pq;
	spin_lock(&fpq->lock);
	list_add_tail(&req->list, fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);

out:
	if (ret < 0 && req->argbuf) {
		kfree(req->argbuf);
		req->argbuf = NULL;
	}
	if (sgs != stack_sgs) {
		kfree(sgs);
		kfree(sg);
	}

	return ret;
}

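/*
 * Contract with callers: -ENOMEM and -ENOSPC indicate a transient condition
 * (virtqueue full or atomic allocation failure); both submission paths react
 * by queueing the request and rescheduling dispatch_work rather than failing
 * the request outright.
 */
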
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
	struct virtio_fs *fs;
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	int ret;

	WARN_ON(list_empty(&fiq->pending));
	req = list_last_entry(&fiq->pending, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	WARN_ON(!list_empty(&fiq->pending));
	spin_unlock(&fiq->lock);

	fs = fiq->priv;

	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
		 __func__, req->in.h.opcode, req->in.h.unique,
		 req->in.h.nodeid, req->in.h.len,
		 fuse_len_args(req->args->out_numargs, req->args->out_args));

	fsvq = &fs->vqs[queue_id];
	ret = virtio_fs_enqueue_req(fsvq, req, false);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			/*
			 * Virtqueue full. Retry submission from worker
			 * context as we might be holding fc->bg_lock.
			 */
			spin_lock(&fsvq->lock);
			list_add_tail(&req->list, &fsvq->queued_reqs);
			inc_in_flight_req(fsvq);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			spin_unlock(&fsvq->lock);
			return;
		}
		req->out.h.error = ret;
		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

		/* Can't end request in submission context. Use a worker */
		spin_lock(&fsvq->lock);
		list_add_tail(&req->list, &fsvq->end_reqs);
		schedule_delayed_work(&fsvq->dispatch_work, 0);
		spin_unlock(&fsvq->lock);
		return;
	}
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
	.wake_forget_and_unlock		= virtio_fs_wake_forget_and_unlock,
	.wake_interrupt_and_unlock	= virtio_fs_wake_interrupt_and_unlock,
	.wake_pending_and_unlock	= virtio_fs_wake_pending_and_unlock,
	.release			= virtio_fs_fiq_release,
};

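/*
 * These ops stand in for the /dev/fuse character-device transport: instead
 * of a userspace daemon reading requests from a device file, queued requests
 * are placed directly on a virtqueue for the virtio-fs device to service.
 */
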
static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
	ctx->rootmode = S_IFDIR;
	ctx->default_permissions = 1;
	ctx->allow_other = 1;
	ctx->max_read = UINT_MAX;
	ctx->blksize = 512;
	ctx->destroy = true;
	ctx->no_control = true;
	ctx->no_force_umount = true;
}

static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *fs = fc->iq.priv;
	struct fuse_fs_context *ctx = fsc->fs_private;
	unsigned int i;
	int err;

	virtio_fs_ctx_set_defaults(ctx);
	mutex_lock(&virtio_fs_mutex);

	/* After holding mutex, make sure virtiofs device is still there.
	 * Though we are holding a reference to it, driver ->remove might
	 * still have cleaned up virtual queues. In that case bail out.
	 */
	err = -EINVAL;
	if (list_empty(&fs->list)) {
		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
		goto err;
	}

	err = -ENOMEM;
	/* Allocate fuse_dev for hiprio and notification queues */
	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fsvq->fud = fuse_dev_alloc();
		if (!fsvq->fud)
			goto err_free_fuse_devs;
	}

	/* virtiofs allocates and installs its own fuse devices */
	ctx->fudptr = NULL;
	if (ctx->dax_mode != FUSE_DAX_NEVER) {
		if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
			err = -EINVAL;
			pr_err("virtio-fs: dax can't be enabled as filesystem"
			       " device does not support it.\n");
			goto err_free_fuse_devs;
		}
		ctx->dax_dev = fs->dax_dev;
	}
	err = fuse_fill_super_common(sb, ctx);
	if (err < 0)
		goto err_free_fuse_devs;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fuse_dev_install(fsvq->fud, fc);
	}

	/* Previous unmount will stop all queues. Start these again */
	virtio_fs_start_all_queues(fs);
	fuse_send_init(fm);
	mutex_unlock(&virtio_fs_mutex);
	return 0;

err_free_fuse_devs:
	virtio_fs_free_devs(fs);
err:
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which
	 * will free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop forget queue. Soon destroy will be sent */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues
	 * and drain one more time and free fuse devices. Freeing fuse
	 * devices will drop their reference on fuse_conn and that in
	 * turn will drop its reference on virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	/* If mount failed, we can still be called without any fc */
	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			virtio_fs_conn_destroy(fm);
	}
	kill_anon_super(sb);
	fuse_mount_destroy(fm);
}

static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{
	struct fuse_mount *fsc_fm = fsc->s_fs_info;
	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

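/*
 * Superblocks are keyed on the virtio_fs instance (fc->iq.priv), so mounting
 * the same tag multiple times shares one superblock and fuse connection.
 */
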
static int virtio_fs_get_tree(struct fs_context *fsc)
{
	struct virtio_fs *fs;
	struct super_block *sb;
	struct fuse_conn *fc = NULL;
	struct fuse_mount *fm;
	unsigned int virtqueue_size;
	int err = -EIO;

	/* This gets a reference on virtio_fs object. This ptr gets installed
	 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
	 * to drop the reference to this object.
	 */
	fs = virtio_fs_find_instance(fsc->source);
	if (!fs) {
		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
		return -EINVAL;
	}

	virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
	if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
		goto out_err;

	err = -ENOMEM;
	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
	if (!fc)
		goto out_err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		goto out_err;

	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
	fc->release = fuse_free_conn;
	fc->delete_stale = true;
	fc->auto_submounts = true;
	fc->sync_fs = true;

	/* Tell FUSE to split requests that exceed the virtqueue's size */
	fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
				    virtqueue_size - FUSE_HEADER_OVERHEAD);

	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = virtio_fs_fill_super(sb, fsc);
		if (err) {
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	WARN_ON(fsc->root);
	fsc->root = dget(sb->s_root);
	return 0;

out_err:
	kfree(fc);
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static const struct fs_context_operations virtio_fs_context_ops = {
	.free		= virtio_fs_free_fsc,
	.parse_param	= virtio_fs_parse_param,
	.get_tree	= virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT)
		return fuse_init_fs_context_submount(fsc);

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &virtio_fs_context_ops;
	return 0;
}

static struct file_system_type virtio_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb	= virtio_kill_sb,
};

static int __init virtio_fs_init(void)
{
	int ret;

	ret = register_virtio_driver(&virtio_fs_driver);
	if (ret < 0)
		return ret;

	ret = register_filesystem(&virtio_fs_type);
	if (ret < 0) {
		unregister_virtio_driver(&virtio_fs_driver);
		return ret;
	}

	return 0;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);