/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
        void *screen_base = (void __force *) info->screen_base;
        struct page *page;

        if (is_vmalloc_addr(screen_base + offs))
                page = vmalloc_to_page(screen_base + offs);
        else
                page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

        return page;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
                                                                 unsigned long offset,
                                                                 struct page *page)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct list_head *pos = &fbdefio->pagelist;
        unsigned long pgoff = offset >> PAGE_SHIFT;
        struct fb_deferred_io_pageref *pageref, *cur;

        if (WARN_ON_ONCE(pgoff >= info->npagerefs))
                return NULL; /* incorrect allocation size */

        /* 1:1 mapping between pageref and page offset */
        pageref = &info->pagerefs[pgoff];

        /*
         * This check is to catch the case where a new process could start
         * writing to the same page through a new mapping of the same file.
         * This new access can cause a call to .page_mkwrite even if the
         * original process' PTE is marked writable.
         */
        if (!list_empty(&pageref->list))
                goto pageref_already_added;

        pageref->page = page;
        pageref->offset = pgoff << PAGE_SHIFT;

        if (unlikely(fbdefio->sort_pagelist)) {
                /*
                 * We loop through the list of pagerefs before adding in
                 * order to keep the pagerefs sorted. This has significant
                 * overhead of O(n^2) with n being the number of written
                 * pages. If possible, drivers should try to work with
                 * unsorted page lists instead.
                 */
                list_for_each_entry(cur, &info->fbdefio->pagelist, list) {
                        if (cur->offset > pageref->offset)
                                break;
                }
                pos = &cur->list;
        }

        list_add_tail(&pageref->list, pos);

pageref_already_added:
        return pageref;
}

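/*
 * Example: a driver that wants the offset-sorted behaviour above opts in
 * through its struct fb_deferred_io before deferred I/O is initialized.
 * A minimal sketch, with foo_* as placeholder driver names:
 *
 *      static struct fb_deferred_io foo_defio = {
 *              .delay          = HZ / 10,
 *              .sort_pagelist  = true,         // pay the O(n^2) insert cost
 *              .deferred_io    = foo_deferred_io,
 *      };
 */
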
static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
                                       struct fb_info *info)
{
        list_del_init(&pageref->list);
}

/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
        unsigned long offset;
        struct page *page;
        struct fb_info *info = vmf->vma->vm_private_data;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= info->fix.smem_len)
                return VM_FAULT_SIGBUS;

        page = fb_deferred_io_page(info, offset);
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);

        if (vmf->vma->vm_file)
                page->mapping = vmf->vma->vm_file->f_mapping;
        else
                printk(KERN_ERR "no mapping available\n");

        BUG_ON(!page->mapping);
        page->index = vmf->pgoff; /* for page_mkclean() */

        vmf->page = page;
        return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct fb_info *info = file->private_data;
        struct inode *inode = file_inode(file);
        int err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;

        /* Skip if deferred io is compiled-in but disabled on this fbdev */
        if (!info->fbdefio)
                return 0;

        inode_lock(inode);
        /* Kill off the delayed work */
        cancel_delayed_work_sync(&info->deferred_work);

        /* Run it immediately */
        schedule_delayed_work(&info->deferred_work, 0);
        inode_unlock(inode);

        return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

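/*
 * Example: since this handler cancels the delayed work and requeues it with
 * no delay, userspace can force out pending framebuffer writes with a plain
 * fsync() on the device node. A hypothetical client (assuming /dev/fb0 is a
 * deferred-I/O fbdev of size len):
 *
 *      int fd = open("/dev/fb0", O_RDWR);
 *      void *fb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *      memset(fb, 0x00, len);  // draw
 *      fsync(fd);              // flush the dirty pages to the device now
 */
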
/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
                                            struct page *page)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct fb_deferred_io_pageref *pageref;
        vm_fault_t ret;

        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);

        /* first write in this cycle, notify the driver */
        if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
                fbdefio->first_io(info);

        pageref = fb_deferred_io_pageref_get(info, offset, page);
        if (WARN_ON_ONCE(!pageref)) {
                ret = VM_FAULT_OOM;
                goto err_mutex_unlock;
        }

        /*
         * We want the page to remain locked from ->page_mkwrite until
         * the PTE is marked dirty to avoid page_mkclean() being called
         * before the PTE is updated, which would leave the page ignored
         * by defio.
         * Do this by locking the page here and informing the caller
         * about it with VM_FAULT_LOCKED.
         */
        lock_page(pageref->page);

        mutex_unlock(&fbdefio->lock);

        /* come back after delay to process the deferred IO */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);
        return VM_FAULT_LOCKED;

err_mutex_unlock:
        mutex_unlock(&fbdefio->lock);
        return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
        unsigned long offset = vmf->address - vmf->vma->vm_start;
        struct page *page = vmf->page;

        file_update_time(vmf->vma->vm_file);

        return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
        struct fb_info *info = vmf->vma->vm_private_data;

        return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .fault          = fb_deferred_io_fault,
        .page_mkwrite   = fb_deferred_io_mkwrite,
};

static const struct address_space_operations fb_deferred_io_aops = {
        .dirty_folio    = noop_dirty_folio,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
        vma->vm_ops = &fb_deferred_io_vm_ops;
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        if (!(info->flags & FBINFO_VIRTFB))
                vma->vm_flags |= VM_IO;
        vma->vm_private_data = info;

        return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);

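/*
 * Drivers expose this helper through their fb_ops. A minimal sketch, with
 * foo_* as a placeholder name; the read/write callbacks shown are the
 * generic system-memory helpers such drivers typically pair with it:
 *
 *      static const struct fb_ops foo_fb_ops = {
 *              .owner          = THIS_MODULE,
 *              .fb_read        = fb_sys_read,
 *              .fb_write       = fb_sys_write,
 *              .fb_mmap        = fb_deferred_io_mmap,
 *      };
 */
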
/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
        struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
        struct fb_deferred_io_pageref *pageref, *next;
        struct fb_deferred_io *fbdefio = info->fbdefio;

        /* here we mkclean the pages, then do all deferred IO */
        mutex_lock(&fbdefio->lock);
        list_for_each_entry(pageref, &fbdefio->pagelist, list) {
                struct page *cur = pageref->page;

                lock_page(cur);
                page_mkclean(cur);
                unlock_page(cur);
        }

        /* driver's callback with pagelist */
        fbdefio->deferred_io(info, &fbdefio->pagelist);

        /* clear the list */
        list_for_each_entry_safe(pageref, next, &fbdefio->pagelist, list)
                fb_deferred_io_pageref_put(pageref, info);

        mutex_unlock(&fbdefio->lock);
}

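/*
 * The driver half of this handshake is the deferred_io callback invoked
 * above: it receives the pagerefs collected since the last run and writes
 * each dirty page out to the hardware. A sketch with hypothetical foo_*
 * names:
 *
 *      static void foo_deferred_io(struct fb_info *info, struct list_head *pagelist)
 *      {
 *              struct fb_deferred_io_pageref *pageref;
 *
 *              list_for_each_entry(pageref, pagelist, list)
 *                      foo_write_page_to_device(info, pageref->offset);
 *      }
 */
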
int fb_deferred_io_init(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct fb_deferred_io_pageref *pagerefs;
        unsigned long npagerefs, i;
        int ret;

        BUG_ON(!fbdefio);

        if (WARN_ON(!info->fix.smem_len))
                return -EINVAL;

        mutex_init(&fbdefio->lock);
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagelist);
        if (fbdefio->delay == 0) /* set a default of 1 s */
                fbdefio->delay = HZ;

        npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

        /* alloc a page ref for each page of the display memory */
        pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
        if (!pagerefs) {
                ret = -ENOMEM;
                goto err;
        }
        for (i = 0; i < npagerefs; ++i)
                INIT_LIST_HEAD(&pagerefs[i].list);
        info->npagerefs = npagerefs;
        info->pagerefs = pagerefs;

        return 0;

err:
        mutex_destroy(&fbdefio->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

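/*
 * Typical probe-time setup, as a sketch (foo_* names are placeholders): the
 * driver fills in a struct fb_deferred_io, points info->fbdefio at it and
 * initializes deferred I/O before registering the framebuffer:
 *
 *      static struct fb_deferred_io foo_defio = {
 *              .delay          = HZ / 20,      // flush at most 20 times per second
 *              .deferred_io    = foo_deferred_io,
 *      };
 *
 *      info->fbdefio = &foo_defio;
 *      ret = fb_deferred_io_init(info);
 *      if (ret)
 *              goto err;
 *      ret = register_framebuffer(info);
 */
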
void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
{
        file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;

        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);

        /* clear out the mapping that we setup */
        for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
                page = fb_deferred_io_page(info, i);
                page->mapping = NULL;
        }

        kvfree(info->pagerefs);
        mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
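
/*
 * Sketch of the matching teardown in a hypothetical driver's remove path:
 * deferred I/O must be cleaned up before the backing framebuffer memory is
 * released, because the loop above still dereferences its pages:
 *
 *      unregister_framebuffer(info);
 *      fb_deferred_io_cleanup(info);
 *      vfree(info->screen_buffer);
 */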