Lines Matching full:range
46 * space is chosen for the range size. Ranges are expected to be
53 * range allocation, notifier allocation, and invalidations.
74 * optionally migrate the range to device memory, and create GPU bindings.
92 * range RB tree and list, as well as the range's DMA mappings and sequence
94 * except for the recheck range's pages being valid
114 * device memory at the range granularity. For example, GPU SVM currently does
115 * not support mixing RAM and device memory pages within a range. This means
116 * that upon GPU fault, the entire range can be migrated to device memory, and
117 * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device
118 * memory storage within a range could be added in the future if required.
120 * The reasoning for only supporting range granularity is as follows: it
121 * simplifies the implementation, and range sizes are driver-defined and should
130 * being that a subset of the range still has CPU and GPU mappings. If the
131 * backing store for the range is in device memory, a subset of the backing
132 * store has references. One option would be to split the range and device
134 * complicated. Given that partial unmappings are rare and driver-defined range
137 * With no support for range splitting, upon partial unmapping of a range, the
138 * driver is expected to invalidate and destroy the entire range. If the range
158 * int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range)
162 * driver_alloc_and_setup_memory_for_bind(gpusvm, range);
165 * if (drm_gpusvm_range_pages_valid(range))
166 * driver_commit_bind(gpusvm, range);
185 * range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
188 * if (IS_ERR(range)) {
189 * err = PTR_ERR(range);
193 * if (driver_migration_policy(range)) {
196 * err = drm_gpusvm_migrate_to_devmem(gpusvm, range,
204 * err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
207 * drm_gpusvm_range_evict(gpusvm, range);
213 * err = driver_bind_range(gpusvm, range);
227 * struct drm_gpusvm_range *range)
232 * if (range->flags.partial_unmap)
233 * drm_gpusvm_range_evict(gpusvm, range);
235 * driver_unbind_range(range);
236 * drm_gpusvm_range_remove(gpusvm, range);
243 * for_each_range_in_garbage_collector(gpusvm, range)
244 * __driver_garbage_collector(gpusvm, range);
256 * struct drm_gpusvm_range *range = NULL;
260 * drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
262 * drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
267 * drm_gpusvm_range_set_unmapped(range, mmu_range);
268 * driver_garbage_collector_add(gpusvm, range);
274 * npages_in_range() - Calculate the number of pages in a given range
275 * @start: The start address of the range
276 * @end: The end address of the range
278 * This macro calculates the number of pages in a given memory range,
281 * determine the number of pages in the range.
283 * Return: The number of pages in the specified range.
383 * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
385 * @start: Start address of the range
386 * @end: End address of the range
410 * @start__: Start address of the range
411 * @end__: End address of the range
532 * @mm_range: Range of the GPU SVM.
535 * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
661 struct drm_gpusvm_range *range, *__next; in drm_gpusvm_fini() local
669 drm_gpusvm_for_each_range_safe(range, __next, notifier, 0, in drm_gpusvm_fini()
671 drm_gpusvm_range_remove(gpusvm, range); in drm_gpusvm_fini()
741 * drm_gpusvm_range_insert() - Insert GPU SVM range
743 * @range: Pointer to the GPU SVM range structure
745 * This function inserts the GPU SVM range into the notifier RB tree and list.
748 struct drm_gpusvm_range *range) in drm_gpusvm_range_insert() argument
754 interval_tree_insert(&range->itree, ¬ifier->root); in drm_gpusvm_range_insert()
756 node = rb_prev(&range->itree.rb); in drm_gpusvm_range_insert()
762 list_add(&range->entry, head); in drm_gpusvm_range_insert()
767 * __drm_gpusvm_range_remove() - Remove GPU SVM range
769 * @range: Pointer to the GPU SVM range structure
 * 771 * This function removes the GPU SVM range from the notifier RB tree and list.
774 struct drm_gpusvm_range *range) in __drm_gpusvm_range_remove() argument
776 interval_tree_remove(&range->itree, ¬ifier->root); in __drm_gpusvm_range_remove()
777 list_del(&range->entry); in __drm_gpusvm_range_remove()
781 * drm_gpusvm_range_alloc() - Allocate GPU SVM range
788 * This function allocates and initializes the GPU SVM range structure.
790 * Return: Pointer to the allocated GPU SVM range on success, ERR_PTR() on failure.
798 struct drm_gpusvm_range *range; in drm_gpusvm_range_alloc() local
801 range = gpusvm->ops->range_alloc(gpusvm); in drm_gpusvm_range_alloc()
803 range = kzalloc(sizeof(*range), GFP_KERNEL); in drm_gpusvm_range_alloc()
805 if (!range) in drm_gpusvm_range_alloc()
808 kref_init(&range->refcount); in drm_gpusvm_range_alloc()
809 range->gpusvm = gpusvm; in drm_gpusvm_range_alloc()
810 range->notifier = notifier; in drm_gpusvm_range_alloc()
811 range->itree.start = ALIGN_DOWN(fault_addr, chunk_size); in drm_gpusvm_range_alloc()
812 range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1; in drm_gpusvm_range_alloc()
813 INIT_LIST_HEAD(&range->entry); in drm_gpusvm_range_alloc()
814 range->notifier_seq = LONG_MAX; in drm_gpusvm_range_alloc()
815 range->flags.migrate_devmem = migrate_devmem ? 1 : 0; in drm_gpusvm_range_alloc()
817 return range; in drm_gpusvm_range_alloc()
887 * drm_gpusvm_range_chunk_size() - Determine chunk size for GPU SVM range
896 * This function determines the chunk size for the GPU SVM range based on the
934 struct drm_gpusvm_range *range; in drm_gpusvm_range_chunk_size() local
936 range = drm_gpusvm_range_find(notifier, start, end); in drm_gpusvm_range_chunk_size()
937 if (range) { in drm_gpusvm_range_chunk_size()
943 * XXX: Only create range on pages CPU has faulted in. Without in drm_gpusvm_range_chunk_size()
984 * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
 * 991 * This function finds or inserts a newly allocated GPU SVM range based on the
992 * fault address. Caller must hold a lock to protect range lookup and insertion.
994 * Return: Pointer to the GPU SVM range on success, ERR_PTR() on failure.
1004 struct drm_gpusvm_range *range; in drm_gpusvm_range_find_or_insert() local
1051 range = drm_gpusvm_range_find(notifier, fault_addr, fault_addr + 1); in drm_gpusvm_range_find_or_insert()
1052 if (range) in drm_gpusvm_range_find_or_insert()
1071 range = drm_gpusvm_range_alloc(gpusvm, notifier, fault_addr, chunk_size, in drm_gpusvm_range_find_or_insert()
1073 if (IS_ERR(range)) { in drm_gpusvm_range_find_or_insert()
1074 err = PTR_ERR(range); in drm_gpusvm_range_find_or_insert()
1078 drm_gpusvm_range_insert(notifier, range); in drm_gpusvm_range_find_or_insert()
1086 return range; in drm_gpusvm_range_find_or_insert()
1102 * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
1104 * @range: Pointer to the GPU SVM range structure
 * 1107 * This function unmaps pages associated with a GPU SVM range. Assumes and
1111 struct drm_gpusvm_range *range, in __drm_gpusvm_range_unmap_pages() argument
1115 struct drm_pagemap *dpagemap = range->dpagemap; in __drm_gpusvm_range_unmap_pages()
1120 if (range->flags.has_dma_mapping) { in __drm_gpusvm_range_unmap_pages()
1122 .__flags = range->flags.__flags, in __drm_gpusvm_range_unmap_pages()
1126 struct drm_pagemap_device_addr *addr = &range->dma_addr[j]; in __drm_gpusvm_range_unmap_pages()
1142 WRITE_ONCE(range->flags.__flags, flags.__flags); in __drm_gpusvm_range_unmap_pages()
1144 range->dpagemap = NULL; in __drm_gpusvm_range_unmap_pages()
1149 * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
1151 * @range: Pointer to the GPU SVM range structure
1153 * This function frees the dma address array associated with a GPU SVM range.
1156 struct drm_gpusvm_range *range) in drm_gpusvm_range_free_pages() argument
1160 if (range->dma_addr) { in drm_gpusvm_range_free_pages()
1161 kvfree(range->dma_addr); in drm_gpusvm_range_free_pages()
1162 range->dma_addr = NULL; in drm_gpusvm_range_free_pages()
1167 * drm_gpusvm_range_remove() - Remove GPU SVM range
1169 * @range: Pointer to the GPU SVM range to be removed
1171 * This function removes the specified GPU SVM range and also removes the parent
1173 * hold a lock to protect range and notifier removal.
1176 struct drm_gpusvm_range *range) in drm_gpusvm_range_remove() argument
1178 unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), in drm_gpusvm_range_remove()
1179 drm_gpusvm_range_end(range)); in drm_gpusvm_range_remove()
1185 drm_gpusvm_range_start(range)); in drm_gpusvm_range_remove()
1190 __drm_gpusvm_range_unmap_pages(gpusvm, range, npages); in drm_gpusvm_range_remove()
1191 drm_gpusvm_range_free_pages(gpusvm, range); in drm_gpusvm_range_remove()
1192 __drm_gpusvm_range_remove(notifier, range); in drm_gpusvm_range_remove()
1195 drm_gpusvm_range_put(range); in drm_gpusvm_range_remove()
1207 * drm_gpusvm_range_get() - Get a reference to GPU SVM range
1208 * @range: Pointer to the GPU SVM range
1210 * This function increments the reference count of the specified GPU SVM range.
1212 * Return: Pointer to the GPU SVM range.
1215 drm_gpusvm_range_get(struct drm_gpusvm_range *range) in drm_gpusvm_range_get() argument
1217 kref_get(&range->refcount); in drm_gpusvm_range_get()
1219 return range; in drm_gpusvm_range_get()
1224 * drm_gpusvm_range_destroy() - Destroy GPU SVM range
1225 * @refcount: Pointer to the reference counter embedded in the GPU SVM range
1227 * This function destroys the specified GPU SVM range when its reference count
1228 * reaches zero. If a custom range-free function is provided, it is invoked to
1229 * free the range; otherwise, the range is deallocated using kfree().
1233 struct drm_gpusvm_range *range = in drm_gpusvm_range_destroy() local
1235 struct drm_gpusvm *gpusvm = range->gpusvm; in drm_gpusvm_range_destroy()
1238 gpusvm->ops->range_free(range); in drm_gpusvm_range_destroy()
1240 kfree(range); in drm_gpusvm_range_destroy()
1244 * drm_gpusvm_range_put() - Put a reference to GPU SVM range
1245 * @range: Pointer to the GPU SVM range
1247 * This function decrements the reference count of the specified GPU SVM range
1250 void drm_gpusvm_range_put(struct drm_gpusvm_range *range) in drm_gpusvm_range_put() argument
1252 kref_put(&range->refcount, drm_gpusvm_range_destroy); in drm_gpusvm_range_put()
1257 * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
1259 * @range: Pointer to the GPU SVM range structure
 * 1261 * This function determines if a GPU SVM range's pages are valid. Expected to be
1265 * function is required for finer grained checking (i.e., per range) if pages
1268 * Return: True if GPU SVM range has valid pages, False otherwise
1271 struct drm_gpusvm_range *range) in drm_gpusvm_range_pages_valid() argument
1275 return range->flags.has_devmem_pages || range->flags.has_dma_mapping; in drm_gpusvm_range_pages_valid()
1280 * drm_gpusvm_range_pages_valid_unlocked() - GPU SVM range pages valid unlocked
1282 * @range: Pointer to the GPU SVM range structure
 * 1284 * This function determines if a GPU SVM range's pages are valid. Expected to be
1287 * Return: True if GPU SVM range has valid pages, False otherwise
1291 struct drm_gpusvm_range *range) in drm_gpusvm_range_pages_valid_unlocked() argument
1295 if (!range->dma_addr) in drm_gpusvm_range_pages_valid_unlocked()
1299 pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range); in drm_gpusvm_range_pages_valid_unlocked()
1301 drm_gpusvm_range_free_pages(gpusvm, range); in drm_gpusvm_range_pages_valid_unlocked()
1308 * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
1310 * @range: Pointer to the GPU SVM range structure
1313 * This function gets pages for a GPU SVM range and ensures they are mapped for
1319 struct drm_gpusvm_range *range, in drm_gpusvm_range_get_pages() argument
1322 struct mmu_interval_notifier *notifier = &range->notifier->notifier; in drm_gpusvm_range_get_pages()
1327 .start = drm_gpusvm_range_start(range), in drm_gpusvm_range_get_pages()
1328 .end = drm_gpusvm_range_end(range), in drm_gpusvm_range_get_pages()
1336 unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), in drm_gpusvm_range_get_pages()
1337 drm_gpusvm_range_end(range)); in drm_gpusvm_range_get_pages()
1349 if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range)) in drm_gpusvm_range_get_pages()
1390 flags.__flags = range->flags.__flags; in drm_gpusvm_range_get_pages()
1403 if (!range->dma_addr) { in drm_gpusvm_range_get_pages()
1406 range->dma_addr = kvmalloc_array(npages, in drm_gpusvm_range_get_pages()
1407 sizeof(*range->dma_addr), in drm_gpusvm_range_get_pages()
1409 if (!range->dma_addr) { in drm_gpusvm_range_get_pages()
1447 range->dma_addr[j] = in drm_gpusvm_range_get_pages()
1453 range->dma_addr[j].addr)) { in drm_gpusvm_range_get_pages()
1481 range->dma_addr[j] = drm_pagemap_device_addr_encode in drm_gpusvm_range_get_pages()
1492 range->dpagemap = dpagemap; in drm_gpusvm_range_get_pages()
1496 WRITE_ONCE(range->flags.__flags, flags.__flags); in drm_gpusvm_range_get_pages()
1501 range->notifier_seq = hmm_range.notifier_seq; in drm_gpusvm_range_get_pages()
1506 __drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped); in drm_gpusvm_range_get_pages()
1517 * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
1519 * @range: Pointer to the GPU SVM range structure
1522 * This function unmaps pages associated with a GPU SVM range. If @in_notifier
1525 * each GPU SVM range attached to notifier in gpusvm->ops->invalidate for IOMMU
1529 struct drm_gpusvm_range *range, in drm_gpusvm_range_unmap_pages() argument
1532 unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), in drm_gpusvm_range_unmap_pages()
1533 drm_gpusvm_range_end(range)); in drm_gpusvm_range_unmap_pages()
1540 __drm_gpusvm_range_unmap_pages(gpusvm, range, npages); in drm_gpusvm_range_unmap_pages()
1665 * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
1667 * @range: Pointer to the GPU SVM range structure
1674 * This function migrates the specified GPU SVM range to device memory. It
1677 * safely reference @range until ops->devmem_release is called which only upon
1684 struct drm_gpusvm_range *range, in drm_gpusvm_migrate_to_devmem() argument
1689 unsigned long start = drm_gpusvm_range_start(range), in drm_gpusvm_migrate_to_devmem()
1690 end = drm_gpusvm_range_end(range); in drm_gpusvm_migrate_to_devmem()
1708 if (!range->flags.migrate_devmem) in drm_gpusvm_migrate_to_devmem()
1785 /* Upon success bind devmem allocation to range and zdd */ in drm_gpusvm_migrate_to_devmem()
1886 * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM
1972 * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
1979 * This internal function performs the migration of the specified GPU SVM range
2099 * drm_gpusvm_range_evict - Evict GPU SVM range
2100 * @range: Pointer to the GPU SVM range to be removed
2102 * This function evicts the specified GPU SVM range. This function will not
2108 struct drm_gpusvm_range *range) in drm_gpusvm_range_evict() argument
2110 struct mmu_interval_notifier *notifier = &range->notifier->notifier; in drm_gpusvm_range_evict()
2114 .start = drm_gpusvm_range_start(range), in drm_gpusvm_range_evict()
2115 .end = drm_gpusvm_range_end(range), in drm_gpusvm_range_evict()
2121 unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), in drm_gpusvm_range_evict()
2122 drm_gpusvm_range_end(range)); in drm_gpusvm_range_evict()
2168 * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler)
2171 * This function is a page fault handler used to migrate a GPU SVM range to RAM.
2172 * It retrieves the GPU SVM range information from the faulting page and invokes
2173 * the internal migration function to migrate the range back to RAM.
2210 * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
2223 struct drm_gpusvm_range *range = NULL; in drm_gpusvm_has_mapping() local
2225 drm_gpusvm_for_each_range(range, notifier, start, end) in drm_gpusvm_has_mapping()
2234 * drm_gpusvm_range_set_unmapped() - Mark a GPU SVM range as unmapped
2235 * @range: Pointer to the GPU SVM range structure.
2236 * @mmu_range: Pointer to the MMU notifier range structure.
2238 * This function marks a GPU SVM range as unmapped and sets the partial_unmap flag
2239 * if the range partially falls within the provided MMU notifier range.
2241 void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range, in drm_gpusvm_range_set_unmapped() argument
2244 lockdep_assert_held_write(&range->gpusvm->notifier_lock); in drm_gpusvm_range_set_unmapped()
2246 range->flags.unmapped = true; in drm_gpusvm_range_set_unmapped()
2247 if (drm_gpusvm_range_start(range) < mmu_range->start || in drm_gpusvm_range_set_unmapped()
2248 drm_gpusvm_range_end(range) > mmu_range->end) in drm_gpusvm_range_set_unmapped()
2249 range->flags.partial_unmap = true; in drm_gpusvm_range_set_unmapped()