/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

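/*
 * Usage sketch (illustrative, not part of this file): a driver enables
 * deferred I/O by pointing info->fbdefio at a struct fb_deferred_io and
 * calling fb_deferred_io_init(). my_deferred_io() and my_flush_page()
 * below are hypothetical driver code:
 *
 *	static void my_deferred_io(struct fb_info *info,
 *				   struct list_head *pagereflist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagereflist, list)
 *			my_flush_page(info, pageref->offset);
 *	}
 *
 *	static struct fb_deferred_io my_defio = {
 *		.delay		= HZ / 10,
 *		.deferred_io	= my_deferred_io,
 *	};
 *
 *	info->fbdefio = &my_defio;
 *	fb_deferred_io_init(info);
 */
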
static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	const void *screen_buffer = info->screen_buffer;
	struct page *page = NULL;

	if (fbdefio->get_page)
		return fbdefio->get_page(info, offs);

	if (is_vmalloc_addr(screen_buffer + offs))
		page = vmalloc_to_page(screen_buffer + offs);
	else if (info->fix.smem_start)
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	if (page)
		get_page(page);

	return page;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_lookup(struct fb_info *info,
								    unsigned long offset,
								    struct page *page)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref;

	if (fb_WARN_ON_ONCE(info, pgoff >= info->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &info->pagerefs[pgoff];

	if (pageref->page)
		goto out;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;
	INIT_LIST_HEAD(&pageref->list);

out:
	if (fb_WARN_ON_ONCE(info, pageref->page != page))
		return NULL; /* inconsistent state */
	return pageref;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								 unsigned long offset,
								 struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *pos = &fbdefio->pagereflist;
	struct fb_deferred_io_pageref *pageref, *cur;

	pageref = fb_deferred_io_pageref_lookup(info, offset, page);
	if (!pageref)
		return NULL;

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* find and return the page backing the faulting framebuffer offset */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_get_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	if (!vmf->vma->vm_file)
		fb_err(info, "no mapping available\n");

	BUG_ON(!info->fbdefio->mapping);

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
					    struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid mapping_wrprotect_range()
	 * being called before the PTE is updated, which would leave
	 * the page ignored by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio->lock);
	return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault = fb_deferred_io_fault,
	.page_mkwrite = fb_deferred_io_mkwrite,
};

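/*
 * Deferred I/O tracks dirty pages itself via the pageref list, so the
 * mapping needs no real writeback; a no-op ->dirty_folio is enough.
 */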
static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio = noop_dirty_folio,
};

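/*
 * Set up a userspace mapping for deferred I/O. Drivers typically call
 * this from their fb_ops.fb_mmap implementation.
 */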
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	vma->vm_ops = &fb_deferred_io_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	if (!(info->flags & FBINFO_VIRTFB))
		vm_flags_set(vma, VM_IO);
	vma->vm_private_data = info;
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we wrprotect the page's mappings, then do all deferred IO. */
	mutex_lock(&fbdefio->lock);
#ifdef CONFIG_MMU
	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
		struct page *page = pageref->page;
		pgoff_t pgoff = pageref->offset >> PAGE_SHIFT;

		mapping_wrprotect_range(fbdefio->mapping, pgoff,
					page_to_pfn(page), 1);
	}
#endif

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio->lock);
}

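/*
 * Initialize the per-page state for deferred I/O. Drivers set
 * info->fbdefio and typically call this before registering the
 * framebuffer; info->fix.smem_len must already be set.
 */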
int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs;
	int ret;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagereflist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs) {
		ret = -ENOMEM;
		goto err;
	}
	info->npagerefs = npagerefs;
	info->pagerefs = pagerefs;

	return 0;

err:
	mutex_destroy(&fbdefio->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	fbdefio->mapping = file->f_mapping;
	file->f_mapping->a_ops = &fb_deferred_io_aops;
	fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

static void fb_deferred_io_lastclose(struct fb_info *info)
{
	flush_delayed_work(&info->deferred_work);
}

void fb_deferred_io_release(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	if (!--fbdefio->open_count)
		fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	fb_deferred_io_lastclose(info);

	kvfree(info->pagerefs);
	mutex_destroy(&fbdefio->lock);
	fbdefio->mapping = NULL;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);