/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	const void *screen_buffer = info->screen_buffer;
	struct page *page = NULL;

	if (fbdefio->get_page)
		return fbdefio->get_page(info, offs);

	if (is_vmalloc_addr(screen_buffer + offs))
		page = vmalloc_to_page(screen_buffer + offs);
	else if (info->fix.smem_start)
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	if (page)
		get_page(page);

	return page;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_lookup(struct fb_info *info,
								    unsigned long offset,
								    struct page *page)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref;

	if (fb_WARN_ON_ONCE(info, pgoff >= info->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &info->pagerefs[pgoff];

	if (pageref->page)
		goto out;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;
	INIT_LIST_HEAD(&pageref->list);

out:
	if (fb_WARN_ON_ONCE(info, pageref->page != page))
		return NULL; /* inconsistent state */
	return pageref;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								 unsigned long offset,
								 struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *pos = &fbdefio->pagereflist;
	struct fb_deferred_io_pageref *pageref, *cur;

	pageref = fb_deferred_io_pageref_lookup(info, offset, page);
	if (!pageref)
		return NULL;

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* find and return the page backing the given offset into the framebuffer memory */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_get_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	if (!vmf->vma->vm_file)
		fb_err(info, "no mapping available\n");

	BUG_ON(!info->fbdefio->mapping);

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
					    struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid mapping_wrprotect_range()
	 * being called before the PTE is updated, which would leave
	 * the page ignored by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio->lock);
	return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

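/*
 * Dirty tracking happens through ->page_mkwrite and the pageref list
 * alone, so the mapping needs no writeback of its own: noop_dirty_folio
 * keeps written folios off the core-mm dirty lists.
 */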
static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio	= noop_dirty_folio,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	vma->vm_ops = &fb_deferred_io_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	if (!(info->flags & FBINFO_VIRTFB))
		vm_flags_set(vma, VM_IO);
	vma->vm_private_data = info;
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
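
/*
 * Illustrative sketch, not part of this file: a driver that supplies its
 * own fb_ops.fb_mmap for a deferred-I/O framebuffer would simply forward
 * to the helper above (the foo_ name is hypothetical):
 *
 *	static int foo_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 *	{
 *		return fb_deferred_io_mmap(info, vma);
 *	}
 */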

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we wrprotect the page's mappings, then do all deferred IO. */
	mutex_lock(&fbdefio->lock);
#ifdef CONFIG_MMU
	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
		struct page *page = pageref->page;
		pgoff_t pgoff = pageref->offset >> PAGE_SHIFT;

		mapping_wrprotect_range(fbdefio->mapping, pgoff,
					page_to_pfn(page), 1);
	}
#endif

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio->lock);
}
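
/*
 * Illustrative sketch, not part of this file: the driver's .deferred_io
 * callback invoked above receives the pageref list and typically walks
 * it to flush the touched pages to the device (foo_flush_page is
 * hypothetical):
 *
 *	static void foo_deferred_io(struct fb_info *info,
 *				    struct list_head *pagereflist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagereflist, list)
 *			foo_flush_page(info, pageref->offset);
 *	}
 */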

int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs;
	int ret;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagereflist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs) {
		ret = -ENOMEM;
		goto err;
	}
	info->npagerefs = npagerefs;
	info->pagerefs = pagerefs;

	return 0;

err:
	mutex_destroy(&fbdefio->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
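
/*
 * Illustrative sketch, not part of this file: a typical caller declares a
 * struct fb_deferred_io, points info->fbdefio at it and runs the init
 * above before register_framebuffer() (foo_ names are hypothetical):
 *
 *	static struct fb_deferred_io foo_defio = {
 *		.delay		= HZ / 20,
 *		.deferred_io	= foo_deferred_io,
 *	};
 *
 *	info->fbdefio = &foo_defio;
 *	fb_deferred_io_init(info);
 */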

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	fbdefio->mapping = file->f_mapping;
	file->f_mapping->a_ops = &fb_deferred_io_aops;
	fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

static void fb_deferred_io_lastclose(struct fb_info *info)
{
	flush_delayed_work(&info->deferred_work);
}

void fb_deferred_io_release(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	if (!--fbdefio->open_count)
		fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	fb_deferred_io_lastclose(info);

	kvfree(info->pagerefs);
	mutex_destroy(&fbdefio->lock);
	fbdefio->mapping = NULL;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);