Lines Matching +full:start +full:- +full:up

  1  // SPDX-License-Identifier: GPL-2.0 OR MIT
 20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 31  * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
 32  * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
 52  * struct vmw_bo_dirty - Dirty information for buffer objects
 53  * @start: First currently dirty bit
 65  unsigned long start;
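
The lines above show the two dirty-tracking methods and part of struct vmw_bo_dirty. As a reading aid, here is a minimal, standalone C sketch of that bookkeeping, using only the fields that appear in the matched lines (start, end, method, change_count, ref_count, bitmap_size, size, bitmap); the exact layout in the kernel source may differ, and the MODEL_ names are placeholders, not the driver's identifiers.

#include <stddef.h>

enum vmw_bo_dirty_method_model {
	MODEL_DIRTY_PAGETABLE,	/* scan page tables for hardware dirty bits */
	MODEL_DIRTY_MKWRITE,	/* write-protect and record pages on write faults */
};

struct vmw_bo_dirty_model {
	unsigned long start;		/* first currently dirty bit (page) */
	unsigned long end;		/* one past the last dirty bit; start > end means empty */
	enum vmw_bo_dirty_method_model method;
	unsigned int change_count;	/* consecutive scans suggesting a method switch */
	unsigned int ref_count;		/* number of dirty-tracking users */
	unsigned long bitmap_size;	/* tracked size in pages */
	size_t size;			/* accounted allocation size */
	unsigned long bitmap[];		/* one bit per page */
};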

 76  * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
 81  * dirty-tracking method.
 85  struct vmw_bo_dirty *dirty = vbo->dirty;
 86  pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
 87  struct address_space *mapping = vbo->base.bdev->dev_mapping;
 92  offset, dirty->bitmap_size,
 93  offset, &dirty->bitmap[0],
 94  &dirty->start, &dirty->end);
 96  dirty->change_count++;
 98  dirty->change_count = 0;
100  if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
101  dirty->change_count = 0;
102  dirty->method = VMW_BO_DIRTY_MKWRITE;
104  offset, dirty->bitmap_size);
106  offset, dirty->bitmap_size,
107  offset, &dirty->bitmap[0],
108  &dirty->start, &dirty->end);
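
The scan above drives an adaptive choice of tracking method: each periodic page-table scan either bumps or resets change_count, and once it exceeds VMW_DIRTY_NUM_CHANGE_TRIGGERS the whole range is write-protected and the method switches to MKWRITE. A standalone sketch of that heuristic follows; the condition that increments change_count sits on a source line not included in this match set, so it is passed in as a flag, and the trigger constant's value is assumed.

#include <stdbool.h>

#define MODEL_NUM_CHANGE_TRIGGERS 2	/* assumed value; the driver uses VMW_DIRTY_NUM_CHANGE_TRIGGERS */

struct scan_state {
	int mkwrite_mode;		/* 0 while page-table scanning */
	unsigned int change_count;
};

/* One periodic scan step while in page-table mode. */
static void scan_pagetable_step(struct scan_state *s, bool scan_suggests_switch)
{
	if (scan_suggests_switch)
		s->change_count++;
	else
		s->change_count = 0;

	if (s->change_count > MODEL_NUM_CHANGE_TRIGGERS) {
		/* Switch method: in the driver this is where the whole
		 * range is write-protected and re-recorded. */
		s->change_count = 0;
		s->mkwrite_mode = 1;
	}
}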

113  * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
116  * Write-protect pages written to so that consecutive write accesses will
119  * This function may change the dirty-tracking method.
123  struct vmw_bo_dirty *dirty = vbo->dirty;
124  unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
125  struct address_space *mapping = vbo->base.bdev->dev_mapping;
128  if (dirty->end <= dirty->start)
131  num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
132  dirty->start + offset,
133  dirty->end - dirty->start);
135  if (100UL * num_marked / dirty->bitmap_size >
137  dirty->change_count++;
139  dirty->change_count = 0;
142  if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
143  pgoff_t start = 0;
144  pgoff_t end = dirty->bitmap_size;
146  dirty->method = VMW_BO_DIRTY_PAGETABLE;
148  &dirty->bitmap[0],
149  &start, &end);
150  bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
151  if (dirty->start < dirty->end)
152  bitmap_set(&dirty->bitmap[0], dirty->start,
153  dirty->end - dirty->start);
154  dirty->change_count = 0;
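
The mkwrite-side scan works the other way around: it re-write-protects only the currently dirty span, and if the share of pages marked per scan stays above a percentage threshold (the threshold itself is on a line not shown here) for several consecutive scans, it switches back to page-table scanning, folding the coarse [start, end) span into the bitmap as the listing shows. A sketch of the switch-back decision, with assumed threshold values:

#define MODEL_DIRTY_PERCENTAGE 10	/* assumed threshold, percent */
#define MODEL_NUM_TRIGGERS 2		/* assumed */

struct mkwrite_state {
	int pagetable_mode;		/* set when we fall back to page-table scanning */
	unsigned int change_count;
};

static void scan_mkwrite_step(struct mkwrite_state *s, unsigned long num_marked,
			      unsigned long bitmap_size)
{
	if (100UL * num_marked / bitmap_size > MODEL_DIRTY_PERCENTAGE)
		s->change_count++;
	else
		s->change_count = 0;

	if (s->change_count > MODEL_NUM_TRIGGERS) {
		s->pagetable_mode = 1;	/* most pages are dirty anyway; scan page tables instead */
		s->change_count = 0;
	}
}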

159  * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
167  struct vmw_bo_dirty *dirty = vbo->dirty;
169  if (dirty->method == VMW_BO_DIRTY_PAGETABLE)

176  * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
179  * @start: First page of the range within the buffer object.
184  * up all dirty pages.
187  pgoff_t start, pgoff_t end)
189  struct vmw_bo_dirty *dirty = vbo->dirty;
190  unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
191  struct address_space *mapping = vbo->base.bdev->dev_mapping;
193  if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
196  wp_shared_mapping_range(mapping, start + offset, end - start);
197  clean_record_shared_mapping_range(mapping, start + offset,
198  end - start, offset,
199  &dirty->bitmap[0], &dirty->start,
200  &dirty->end);

204  * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
206  * @start: First page of the range within the buffer object.
212  pgoff_t start, pgoff_t end)
214  unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
215  struct address_space *mapping = vbo->base.bdev->dev_mapping;
217  vmw_bo_dirty_pre_unmap(vbo, start, end);
218  unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
219  (loff_t) (end - start) << PAGE_SHIFT);
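
vmw_bo_dirty_pre_unmap() harvests dirty bits before the page-table entries are zapped; vmw_bo_dirty_unmap() then converts the page range within the buffer object into a byte range of the shared mapping, as the shifted expressions above show. A small standalone sketch of that arithmetic, assuming 4 KiB pages for the example:

#include <stdint.h>

#define MODEL_PAGE_SHIFT 12	/* assumed 4 KiB pages */

/* Translate a [start, end) page range inside a BO into a byte offset and
 * length within the device address space, given the BO's VMA-node start page. */
static void bo_pages_to_bytes(unsigned long vma_node_start, unsigned long start,
			      unsigned long end, uint64_t *byte_off, uint64_t *byte_len)
{
	*byte_off = (uint64_t)(vma_node_start + start) << MODEL_PAGE_SHIFT;
	*byte_len = (uint64_t)(end - start) << MODEL_PAGE_SHIFT;
}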

223  * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
226  * This function registers a dirty-tracking user to a buffer object.
227  * A user can be for example a resource or a vma in a special user-space
230  * Return: Zero on success, -ENOMEM on memory allocation failure.
234  struct vmw_bo_dirty *dirty = vbo->dirty;
235  pgoff_t num_pages = vbo->base.num_pages;
244  dirty->ref_count++;
258  ret = -ENOMEM;
262  dirty->size = acc_size;
263  dirty->bitmap_size = num_pages;
264  dirty->start = dirty->bitmap_size;
265  dirty->end = 0;
266  dirty->ref_count = 1;
268  dirty->method = VMW_BO_DIRTY_PAGETABLE;
270  struct address_space *mapping = vbo->base.bdev->dev_mapping;
271  pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
273  dirty->method = VMW_BO_DIRTY_MKWRITE;
275  /* Write-protect and then pick up already dirty bits */
279  &dirty->bitmap[0],
280  &dirty->start, &dirty->end);
283  vbo->dirty = dirty;

293  * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
296  * This function releases a dirty-tracking user from a buffer object.
297  * If the reference count reaches zero, then the dirty-tracking object is
300  * Return: Zero on success, -ENOMEM on memory allocation failure.
304  struct vmw_bo_dirty *dirty = vbo->dirty;
306  if (dirty && --dirty->ref_count == 0) {
307  size_t acc_size = dirty->size;
311  vbo->dirty = NULL;
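
Taken together, vmw_bo_dirty_add() and vmw_bo_dirty_release() implement reference-counted setup and teardown: the first user allocates the tracking structure and bitmap and initialises the span to empty (start = bitmap_size, end = 0), later users only bump ref_count, and the structure is freed when the count returns to zero. A simplified userspace model of that pairing, with allocation, accounting, and the initial write-protect pass reduced to plain calloc()/free():

#include <stdlib.h>

struct dirty_model {
	unsigned long start, end;	/* start > end means "nothing dirty" */
	unsigned long bitmap_size;
	unsigned int ref_count;
	unsigned long *bitmap;
};

/* First caller allocates; later callers just take a reference. Returns 0 or -1. */
static int dirty_add(struct dirty_model **slot, unsigned long num_pages)
{
	struct dirty_model *d = *slot;

	if (d) {
		d->ref_count++;
		return 0;
	}

	d = calloc(1, sizeof(*d));
	if (!d)
		return -1;			/* mirrors the -ENOMEM path */
	d->bitmap = calloc((num_pages + 63) / 64, sizeof(*d->bitmap));
	if (!d->bitmap) {
		free(d);
		return -1;
	}
	d->bitmap_size = num_pages;
	d->start = num_pages;			/* empty span */
	d->end = 0;
	d->ref_count = 1;
	*slot = d;
	return 0;
}

static void dirty_release(struct dirty_model **slot)
{
	struct dirty_model *d = *slot;

	if (d && --d->ref_count == 0) {
		free(d->bitmap);
		free(d);
		*slot = NULL;
	}
}

A user that starts tracking would call dirty_add() once and dirty_release() when it stops; the bitmap survives as long as at least one user remains.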

316  * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
320  * This function will pick up all dirty ranges affecting the resource from
327  struct vmw_buffer_object *vbo = res->backup;
328  struct vmw_bo_dirty *dirty = vbo->dirty;
329  pgoff_t start, cur, end;
330  unsigned long res_start = res->backup_offset;
331  unsigned long res_end = res->backup_offset + res->backup_size;
337  if (res_start >= dirty->end || res_end <= dirty->start)
340  cur = max(res_start, dirty->start);
341  res_end = max(res_end, dirty->end);
345  start = find_next_bit(&dirty->bitmap[0], res_end, cur);
346  if (start >= res_end)
349  end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
351  num = end - start;
352  bitmap_clear(&dirty->bitmap[0], start, num);
353  vmw_resource_dirty_update(res, start, end);
356  if (res_start <= dirty->start && res_end > dirty->start)
357  dirty->start = res_end;
358  if (res_start < dirty->end && res_end >= dirty->end)
359  dirty->end = res_start;
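
The transfer loop above walks the bitmap with find_next_bit()/find_next_zero_bit(), handing each contiguous dirty run to vmw_resource_dirty_update() and clearing it from the buffer object's bitmap. The following standalone sketch reproduces that walk with naive bit helpers standing in for the kernel bitmap API:

/* Return the first index in [from, size) whose bit equals want_set, or size. */
static unsigned long next_bit_model(const unsigned long *map, unsigned long size,
				    unsigned long from, int want_set)
{
	for (; from < size; from++)
		if ((int)((map[from / 64] >> (from % 64)) & 1) == want_set)
			break;
	return from;
}

/* Hand every contiguous run of set bits in [cur, limit) to 'update' and clear it. */
static void transfer_runs(unsigned long *map, unsigned long cur, unsigned long limit,
			  void (*update)(unsigned long start, unsigned long end))
{
	while (cur < limit) {
		unsigned long start = next_bit_model(map, limit, cur, 1);
		unsigned long end, i;

		if (start >= limit)
			break;
		end = next_bit_model(map, limit, start + 1, 0);

		for (i = start; i < end; i++)
			map[i / 64] &= ~(1UL << (i % 64));	/* bitmap_clear() equivalent */
		update(start, end);				/* vmw_resource_dirty_update() stand-in */
		cur = end;
	}
}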

363  * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
372  unsigned long res_start = res->backup_offset;
373  unsigned long res_end = res->backup_offset + res->backup_size;
374  struct vmw_buffer_object *vbo = res->backup;
375  struct vmw_bo_dirty *dirty = vbo->dirty;
380  if (res_start >= dirty->end || res_end <= dirty->start)
383  res_start = max(res_start, dirty->start);
384  res_end = min(res_end, dirty->end);
385  bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);
387  if (res_start <= dirty->start && res_end > dirty->start)
388  dirty->start = res_end;
389  if (res_start < dirty->end && res_end >= dirty->end)
390  dirty->end = res_start;
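
Both vmw_bo_dirty_transfer_to_res() and vmw_bo_dirty_clear_res() finish with the same adjustment of the coarse span: if the range just consumed covered the old start, the start moves up past it, and if it covered the old end, the end moves down to it. In isolation that step looks like this:

/* Shrink a coarse [start, end) dirty span after [res_start, res_end) has
 * been transferred or cleared from the bitmap. */
static void shrink_span(unsigned long *start, unsigned long *end,
			unsigned long res_start, unsigned long res_end)
{
	if (res_start <= *start && res_end > *start)
		*start = res_end;	/* consumed range covered the old start */
	if (res_start < *end && res_end >= *end)
		*end = res_start;	/* consumed range covered the old end */
}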

In vmw_bo_vm_mkwrite():
395  struct vm_area_struct *vma = vmf->vma;
397  vma->vm_private_data;
408  save_flags = vmf->flags;
409  vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
411  vmf->flags = save_flags;
415  page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
416  if (unlikely(page_offset >= bo->num_pages)) {
421  if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
422  !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
423  struct vmw_bo_dirty *dirty = vbo->dirty;
425  __set_bit(page_offset, &dirty->bitmap[0]);
426  dirty->start = min(dirty->start, page_offset);
427  dirty->end = max(dirty->end, page_offset + 1);
431  dma_resv_unlock(bo->base.resv);
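
In the MKWRITE method, the mkwrite fault handler above is what actually records dirt: the faulting page's bit is set and the coarse span is widened around it, exactly the min()/max() updates shown. A standalone version of that bookkeeping:

struct span_model {
	unsigned long start, end;	/* start > end means "empty" */
};

/* Record a write fault on page_offset: set its bit and widen the span. */
static void record_write(unsigned long *bitmap, struct span_model *s,
			 unsigned long page_offset)
{
	bitmap[page_offset / 64] |= 1UL << (page_offset % 64);	/* __set_bit() equivalent */
	if (page_offset < s->start)
		s->start = page_offset;
	if (page_offset + 1 > s->end)
		s->end = page_offset + 1;
}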

In vmw_bo_vm_fault():
437  struct vm_area_struct *vma = vmf->vma;
439  vma->vm_private_data;
450  num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
453  if (vbo->dirty) {
457  page_offset = vmf->pgoff -
458  drm_vma_node_start(&bo->base.vma_node);
459  if (page_offset >= bo->num_pages ||
472  * sure the page protection is write-enabled so we don't get
475  if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
476  prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
478  prot = vm_get_page_prot(vma->vm_flags);
481  if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
485  dma_resv_unlock(bo->base.resv);
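
The ordinary fault handler chooses the page protection based on the tracking method: with MKWRITE tracking it installs pages with the protection of a non-shared mapping, which is write-protected here, so the first write to each page faults again and reaches the mkwrite handler; otherwise, per the comment fragment at line 472, the protection stays write-enabled to avoid needless write faults. A tiny model of that decision:

#include <stdbool.h>

enum fault_prot_model { FAULT_PROT_WRITE_PROTECTED, FAULT_PROT_WRITE_ENABLED };

/* Decide how new PTEs are installed at fault time. */
static enum fault_prot_model choose_fault_prot(bool mkwrite_tracking_active)
{
	/* Write-protect initially only when mkwrite tracking needs to see
	 * the first write to each page; otherwise avoid extra write faults. */
	return mkwrite_tracking_active ? FAULT_PROT_WRITE_PROTECTED
				       : FAULT_PROT_WRITE_ENABLED;
}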

In vmw_bo_vm_huge_fault():
494  struct vm_area_struct *vma = vmf->vma;
496  vma->vm_private_data;
502  bool write = vmf->flags & FAULT_FLAG_WRITE;
504  (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
520  /* Always do write dirty-tracking and COW on PTE level. */
521  if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
528  if (vbo->dirty) {
532  page_offset = vmf->pgoff -
533  drm_vma_node_start(&bo->base.vma_node);
534  if (page_offset >= bo->num_pages ||
546  prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
548  prot = vm_get_page_prot(vma->vm_flags);
552  if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
556  dma_resv_unlock(bo->base.resv);
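
The huge-fault handler applies the policy stated in the comment at line 520: write dirty-tracking and copy-on-write are always handled at PTE granularity, so a write fault on a dirty-tracked buffer object or on a COW mapping is pushed back to the ordinary fault path rather than being served with a huge page. Roughly:

#include <stdbool.h>

enum huge_fault_model { SERVE_HUGE, FALL_BACK_TO_PTE };

static enum huge_fault_model huge_fault_policy(bool write, bool dirty_tracked,
					       bool is_cow_mapping)
{
	/* Dirty tracking and COW need per-page resolution. */
	if (write && (dirty_tracked || is_cow_mapping))
		return FALL_BACK_TO_PTE;
	return SERVE_HUGE;
}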