fbdev: Refactor implementation of page_mkwrite
drivers/video/fbdev/core/fb_defio.c [linux-2.6-microblaze.git]
/*
 *  linux/drivers/video/fbdev/core/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
        void *screen_base = (void __force *) info->screen_base;
        struct page *page;

        if (is_vmalloc_addr(screen_base + offs))
                page = vmalloc_to_page(screen_base + offs);
        else
                page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

        return page;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
                                                                 unsigned long offset,
                                                                 struct page *page)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct list_head *pos = &fbdefio->pagelist;
        unsigned long pgoff = offset >> PAGE_SHIFT;
        struct fb_deferred_io_pageref *pageref, *cur;

        if (WARN_ON_ONCE(pgoff >= info->npagerefs))
                return NULL; /* incorrect allocation size */

        /* 1:1 mapping between pageref and page offset */
        pageref = &info->pagerefs[pgoff];

        /*
         * This check is to catch the case where a new process could start
         * writing to the same page through a new PTE. This new access
         * can cause a call to .page_mkwrite even if the original process'
         * PTE is marked writable.
         */
        if (!list_empty(&pageref->list))
                goto pageref_already_added;

        pageref->page = page;
        pageref->offset = pgoff << PAGE_SHIFT;

        if (unlikely(fbdefio->sort_pagelist)) {
                /*
                 * We loop through the list of pagerefs before adding in
                 * order to keep the pagerefs sorted. This has significant
                 * overhead of O(n^2) with n being the number of written
                 * pages. If possible, drivers should try to work with
                 * unsorted page lists instead.
                 */
                list_for_each_entry(cur, &fbdefio->pagelist, list) {
                        if (cur->offset > pageref->offset)
                                break;
                }
                pos = &cur->list;
        }

        list_add_tail(&pageref->list, pos);

pageref_already_added:
        return pageref;
}

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
                                       struct fb_info *info)
{
        list_del_init(&pageref->list);
}

/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
        unsigned long offset;
        struct page *page;
        struct fb_info *info = vmf->vma->vm_private_data;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= info->fix.smem_len)
                return VM_FAULT_SIGBUS;

        page = fb_deferred_io_page(info, offset);
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);

        if (vmf->vma->vm_file)
                page->mapping = vmf->vma->vm_file->f_mapping;
        else
                printk(KERN_ERR "no mapping available\n");

        BUG_ON(!page->mapping);
        page->index = vmf->pgoff; /* for page_mkclean() */

        vmf->page = page;
        return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct fb_info *info = file->private_data;
        struct inode *inode = file_inode(file);
        int err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;

        /* Skip if deferred io is compiled-in but disabled on this fbdev */
        if (!info->fbdefio)
                return 0;

        inode_lock(inode);
        /* Kill off the delayed work */
        cancel_delayed_work_sync(&info->deferred_work);

        /* Run it immediately */
        schedule_delayed_work(&info->deferred_work, 0);
        inode_unlock(inode);

        return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
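
/*
 * A minimal sketch (not part of this file) of how fb_deferred_io_fsync()
 * is typically wired up: the fbdev core points the fsync handler of the
 * /dev/fb* file_operations at it when deferred I/O is compiled in, which
 * is why the function above tolerates an fbdev without fbdefio set.
 * example_fb_fops is a hypothetical name; all other fields are omitted.
 *
 *	static const struct file_operations example_fb_fops = {
 *		.owner	= THIS_MODULE,
 *	#ifdef CONFIG_FB_DEFERRED_IO
 *		.fsync	= fb_deferred_io_fsync,
 *	#endif
 *	};
 */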

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
                                            struct page *page)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct fb_deferred_io_pageref *pageref;
        vm_fault_t ret;

        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);

        /* first write in this cycle, notify the driver */
        if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
                fbdefio->first_io(info);

        pageref = fb_deferred_io_pageref_get(info, offset, page);
        if (WARN_ON_ONCE(!pageref)) {
                ret = VM_FAULT_OOM;
                goto err_mutex_unlock;
        }

        /*
         * We want the page to remain locked from ->page_mkwrite until
         * the PTE is marked dirty to avoid page_mkclean() being called
         * before the PTE is updated, which would leave the page ignored
         * by defio.
         * Do this by locking the page here and informing the caller
         * about it with VM_FAULT_LOCKED.
         */
        lock_page(pageref->page);

        mutex_unlock(&fbdefio->lock);

        /* come back after delay to process the deferred IO */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);
        return VM_FAULT_LOCKED;

err_mutex_unlock:
        mutex_unlock(&fbdefio->lock);
        return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
        unsigned long offset = vmf->address - vmf->vma->vm_start;
        struct page *page = vmf->page;

        file_update_time(vmf->vma->vm_file);

        return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
        struct fb_info *info = vmf->vma->vm_private_data;

        return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .fault          = fb_deferred_io_fault,
        .page_mkwrite   = fb_deferred_io_mkwrite,
};

static const struct address_space_operations fb_deferred_io_aops = {
        .dirty_folio    = noop_dirty_folio,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
        vma->vm_ops = &fb_deferred_io_vm_ops;
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        if (!(info->flags & FBINFO_VIRTFB))
                vma->vm_flags |= VM_IO;
        vma->vm_private_data = info;
        return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
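
/*
 * A minimal sketch (not part of this file), assuming a driver with a
 * vmalloc()'ed shadow framebuffer: such drivers set FBINFO_VIRTFB so the
 * mapping above is not marked VM_IO, and fb_deferred_io_page() resolves
 * pages via vmalloc_to_page(). example_init_vmem() is a hypothetical name.
 *
 *	static int example_init_vmem(struct fb_info *info, size_t vmem_size)
 *	{
 *		void *vmem = vmalloc(vmem_size);
 *
 *		if (!vmem)
 *			return -ENOMEM;
 *		info->screen_base = (char __force __iomem *)vmem;
 *		info->fix.smem_len = vmem_size;
 *		info->flags |= FBINFO_VIRTFB;
 *		return 0;
 *	}
 */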

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
        struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
        struct fb_deferred_io_pageref *pageref, *next;
        struct fb_deferred_io *fbdefio = info->fbdefio;

        /* here we mkclean the pages, then do all deferred IO */
        mutex_lock(&fbdefio->lock);
        list_for_each_entry(pageref, &fbdefio->pagelist, list) {
                struct page *cur = pageref->page;
                lock_page(cur);
                page_mkclean(cur);
                unlock_page(cur);
        }

        /* driver's callback with pagelist */
        fbdefio->deferred_io(info, &fbdefio->pagelist);

        /* clear the list */
        list_for_each_entry_safe(pageref, next, &fbdefio->pagelist, list)
                fb_deferred_io_pageref_put(pageref, info);

        mutex_unlock(&fbdefio->lock);
}
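
/*
 * A minimal sketch (not part of this file) of the driver-side deferred_io
 * callback invoked above: it walks the list of struct fb_deferred_io_pageref
 * entries and flushes each dirty page to the device.
 * example_deferred_io() and example_flush_page() are hypothetical names.
 *
 *	static void example_deferred_io(struct fb_info *info,
 *					struct list_head *pagelist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagelist, list)
 *			example_flush_page(info, pageref->offset, PAGE_SIZE);
 *	}
 */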

int fb_deferred_io_init(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct fb_deferred_io_pageref *pagerefs;
        unsigned long npagerefs, i;
        int ret;

        BUG_ON(!fbdefio);

        if (WARN_ON(!info->fix.smem_len))
                return -EINVAL;

        mutex_init(&fbdefio->lock);
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagelist);
        if (fbdefio->delay == 0) /* set a default of 1 s */
                fbdefio->delay = HZ;

        npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

        /* alloc a page ref for each page of the display memory */
        pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
        if (!pagerefs) {
                ret = -ENOMEM;
                goto err;
        }
        for (i = 0; i < npagerefs; ++i)
                INIT_LIST_HEAD(&pagerefs[i].list);
        info->npagerefs = npagerefs;
        info->pagerefs = pagerefs;

        return 0;

err:
        mutex_destroy(&fbdefio->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
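
/*
 * A minimal sketch (not part of this file) of how a driver enables
 * deferred I/O: it fills in a struct fb_deferred_io with a flush delay
 * and its deferred_io callback, points info->fbdefio at it, and calls
 * fb_deferred_io_init() before registering the framebuffer (on some
 * kernel versions the fbdev core makes this call itself during
 * register_framebuffer()). All example_* names are hypothetical.
 *
 *	static struct fb_deferred_io example_defio = {
 *		.delay		= HZ / 4,
 *		.deferred_io	= example_deferred_io,
 *	};
 *
 *	static int example_probe(struct fb_info *info)
 *	{
 *		info->fbdefio = &example_defio;
 *		return fb_deferred_io_init(info);
 *	}
 */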

void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
{
        file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;

        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);

        /* clear out the mapping that we set up */
        for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
                page = fb_deferred_io_page(info, i);
                page->mapping = NULL;
        }

        kvfree(info->pagerefs);
        mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);