GNU Linux-libre 4.9.309-gnu1
drivers/video/fbdev/core/fb_defio.c
/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
        void *screen_base = (void __force *) info->screen_base;
        struct page *page;

        if (is_vmalloc_addr(screen_base + offs))
                page = vmalloc_to_page(screen_base + offs);
        else
                page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

        return page;
}

/* this is to find and return the vmalloc-ed fb pages */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        unsigned long offset;
        struct page *page;
        struct fb_info *info = vma->vm_private_data;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= info->fix.smem_len)
                return VM_FAULT_SIGBUS;

        page = fb_deferred_io_page(info, offset);
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);

        if (vma->vm_file)
                page->mapping = vma->vm_file->f_mapping;
        else
                printk(KERN_ERR "no mapping available\n");

        BUG_ON(!page->mapping);
        page->index = vmf->pgoff;

        vmf->page = page;
        return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct fb_info *info = file->private_data;
        struct inode *inode = file_inode(file);
        int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        /* Skip if deferred io is compiled-in but disabled on this fbdev */
        if (!info->fbdefio)
                return 0;

        inode_lock(inode);
        /* Kill off the delayed work */
        cancel_delayed_work_sync(&info->deferred_work);

        /* Run it immediately */
        schedule_delayed_work(&info->deferred_work, 0);
        inode_unlock(inode);

        return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
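
/*
 * A userspace sketch (illustrative, not part of this file) of how the
 * fsync path above is reached once the fbdev core wires it up as the
 * device file's fsync operation.  The device node, the write, and len
 * (the smem_len reported by FBIOGET_FSCREENINFO) are assumptions made
 * for the example:
 *
 *      int fd = open("/dev/fb0", O_RDWR);
 *      unsigned char *fb = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                               MAP_SHARED, fd, 0);
 *
 *      fb[0] = 0xff;   first write faults into fb_deferred_io_mkwrite()
 *      fsync(fd);      cancels the pending delay and flushes right away
 */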

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
                                  struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct fb_info *info = vma->vm_private_data;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *cur;

        /* This is the callback we get when userspace first tries to
        write to the page. We schedule a workqueue. That workqueue
        will eventually mkclean the touched pages and execute the
        deferred framebuffer IO. Then if userspace touches a page
        again, we repeat the same scheme. */

        file_update_time(vma->vm_file);

        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);

        /* first write in this cycle, notify the driver */
        if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
                fbdefio->first_io(info);

        /*
         * We want the page to remain locked from ->page_mkwrite until
         * the PTE is marked dirty to avoid page_mkclean() being called
         * before the PTE is updated, which would leave the page ignored
         * by defio.
         * Do this by locking the page here and informing the caller
         * about it with VM_FAULT_LOCKED.
         */
        lock_page(page);

        /* we walk the pagelist before adding in order to keep
        the pagelist sorted by page index */
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                /* this check is to catch the case where a new
                process could start writing to the same page
                through a new pte. this new access can cause the
                mkwrite even when the original process's pte is
                already marked writable */
                if (unlikely(cur == page))
                        goto page_already_added;
                else if (cur->index > page->index)
                        break;
        }

        list_add_tail(&page->lru, &cur->lru);

page_already_added:
        mutex_unlock(&fbdefio->lock);

        /* come back after delay to process the deferred IO */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);
        return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .fault          = fb_deferred_io_fault,
        .page_mkwrite   = fb_deferred_io_mkwrite,
};

static int fb_deferred_io_set_page_dirty(struct page *page)
{
        if (!PageDirty(page))
                SetPageDirty(page);
        return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
        .set_page_dirty = fb_deferred_io_set_page_dirty,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
        vma->vm_ops = &fb_deferred_io_vm_ops;
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        if (!(info->flags & FBINFO_VIRTFB))
                vma->vm_flags |= VM_IO;
        vma->vm_private_data = info;
        return 0;
}
EXPORT_SYMBOL(fb_deferred_io_mmap);

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
        struct fb_info *info = container_of(work, struct fb_info,
                                                deferred_work.work);
        struct list_head *node, *next;
        struct page *cur;
        struct fb_deferred_io *fbdefio = info->fbdefio;

        /* here we mkclean the pages, then do all deferred IO */
        mutex_lock(&fbdefio->lock);
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                lock_page(cur);
                page_mkclean(cur);
                unlock_page(cur);
        }

        /* driver's callback with pagelist */
        fbdefio->deferred_io(info, &fbdefio->pagelist);

        /* clear the list */
        list_for_each_safe(node, next, &fbdefio->pagelist) {
                list_del(node);
        }
        mutex_unlock(&fbdefio->lock);
}
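
/*
 * A sketch of the driver-side deferred_io callback invoked above.  The
 * function name and the hardware transfer step are hypothetical; E-Ink
 * style drivers follow this pattern of walking the dirty pagelist and
 * pushing each page's worth of framebuffer memory to the device:
 *
 *      static void example_defio_callback(struct fb_info *info,
 *                                         struct list_head *pagelist)
 *      {
 *              struct page *page;
 *
 *              list_for_each_entry(page, pagelist, lru) {
 *                      unsigned long offset = page->index << PAGE_SHIFT;
 *
 *                      ...push PAGE_SIZE bytes starting at
 *                      info->screen_base + offset to the hardware...
 *              }
 *      }
 */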

void fb_deferred_io_init(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;

        BUG_ON(!fbdefio);
        mutex_init(&fbdefio->lock);
        info->fbops->fb_mmap = fb_deferred_io_mmap;
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagelist);
        if (fbdefio->delay == 0) /* set a default of 1 s */
                fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
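
/*
 * A minimal wiring sketch, assuming a hypothetical driver with a
 * vmalloc'ed framebuffer (all names are illustrative).  The driver
 * fills in a struct fb_deferred_io, points info->fbdefio at it, and
 * calls fb_deferred_io_init() before registering; the delay of HZ / 4
 * is an example choice (the default set above is one second):
 *
 *      static struct fb_deferred_io example_defio = {
 *              .delay          = HZ / 4,
 *              .deferred_io    = example_defio_callback,
 *      };
 *
 *      info->fbdefio = &example_defio;
 *      fb_deferred_io_init(info);
 *      register_framebuffer(info);
 */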

void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
{
        file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;

        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);

        /* clear out the mapping that we setup */
        for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
                page = fb_deferred_io_page(info, i);
                page->mapping = NULL;
        }

        info->fbops->fb_mmap = NULL;
        mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
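
/*
 * A matching teardown sketch for the wiring example above (hypothetical
 * remove path).  Deferred IO must be cleaned up before the framebuffer
 * memory is freed, because fb_deferred_io_cleanup() walks every page of
 * smem to clear its mapping:
 *
 *      unregister_framebuffer(info);
 *      fb_deferred_io_cleanup(info);
 *      vfree((void __force *)info->screen_base);
 */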