/linux/drivers/gpu/drm/
  drm_mm.c
    336   u64 hole_start;  (in find_hole_addr(), local)
    342   hole_start = __drm_mm_hole_node_start(node);  (in find_hole_addr())
    344   if (addr < hole_start)  (in find_hole_addr())
    346   else if (addr > hole_start + node->hole_size)  (in find_hole_addr())
    453   u64 hole_start, hole_end;  (in drm_mm_reserve_node(), local)
    466   adj_start = hole_start = __drm_mm_hole_node_start(hole);  (in drm_mm_reserve_node())
    467   adj_end = hole_end = hole_start + hole->hole_size;  (in drm_mm_reserve_node())
    483   if (node->start > hole_start)  (in drm_mm_reserve_node())
    543   u64 hole_start = __drm_mm_hole_node_start(hole);  (in drm_mm_insert_node_in_range(), local)
    544   u64 hole_end = hole_start …  (in drm_mm_insert_node_in_range())
    750   u64 hole_start, hole_end;  (in drm_mm_scan_add_block(), local)
    881   u64 hole_start, hole_end;  (in drm_mm_scan_color_evict(), local)
    … more matches not shown
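The find_hole_addr() matches above encode an interval test made while descending the hole tree: an address is covered by a hole when it lies between hole_start and hole_start + hole_size, smaller addresses send the search left, larger ones right. A standalone sketch of just that comparison, with illustrative names only (this is not the drm_mm code itself):

    /* Standalone sketch: the comparison find_hole_addr() applies at each tree
     * node.  A hole starting at hole_start and spanning hole_size covers addr
     * when hole_start <= addr <= hole_start + hole_size. */
    #include <stdint.h>
    #include <stdio.h>

    enum direction { GO_LEFT, GO_RIGHT, FOUND };

    static enum direction classify(uint64_t addr, uint64_t hole_start,
                                   uint64_t hole_size)
    {
        if (addr < hole_start)
            return GO_LEFT;
        else if (addr > hole_start + hole_size)
            return GO_RIGHT;
        return FOUND;
    }

    int main(void)
    {
        printf("%d\n", classify(0x2000, 0x1000, 0x3000)); /* 2: FOUND    */
        printf("%d\n", classify(0x0800, 0x1000, 0x3000)); /* 0: GO_LEFT  */
        printf("%d\n", classify(0x5000, 0x1000, 0x3000)); /* 1: GO_RIGHT */
        return 0;
    }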
/linux/drivers/gpu/drm/i915/selftests/
  i915_gem_gtt.c
    245   u64 hole_start, u64 hole_end,  (in lowlevel_hole(), argument)
    259   for (size = 12; (hole_end - hole_start) >> size; size++) {  (in lowlevel_hole())
    266   hole_size = (hole_end - hole_start) >> aligned_size;  (in lowlevel_hole())
    272   __func__, hole_start, hole_end, size, hole_size);  (in lowlevel_hole())
    288   GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);  (in lowlevel_hole())
    312   u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);  (in lowlevel_hole())
    320   hole_end = hole_start; /* quit */  (in lowlevel_hole())
    371   u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);  (in lowlevel_hole())
    410   u64 hole_start, u64 hole_end,  (in fill_hole(), argument)
    413   const u64 hole_size = hole_end - hole_start;  (in fill_hole())
    636   walk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)  (in walk_hole(), argument)
    719   pot_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)  (in pot_hole(), argument)
    794   drunk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)  (in drunk_hole(), argument)
    905   __shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)  (in __shrink_hole(), argument)
    983   shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)  (in shrink_hole(), argument)
    1005  shrink_boom(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)  (in shrink_boom(), argument)
    1148  misaligned_pin(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)  (in misaligned_pin(), argument)
    1195  exercise_ppgtt(struct drm_i915_private *dev_priv, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time))  (in exercise_ppgtt(), argument)
    1281  exercise_ggtt(struct drm_i915_private *i915, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time))  (in exercise_ggtt(), argument)
    1285  u64 hole_start, hole_end, last = 0;  (in exercise_ggtt(), local)
    1458  exercise_mock(struct drm_i915_private *i915, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time))  (in exercise_mock(), argument)
    … more matches not shown
/linux/drivers/gpu/drm/xe/
  xe_ggtt.c
    845   u64 hole_start, hole_end, hole_size;  (in xe_ggtt_largest_hole(), local)
    850   drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {  (in xe_ggtt_largest_hole())
    851   hole_start = max(hole_start, hole_min_start);  (in xe_ggtt_largest_hole())
    852   hole_start = ALIGN(hole_start, alignment);  (in xe_ggtt_largest_hole())
    854   if (hole_start >= hole_end)  (in xe_ggtt_largest_hole())
    856   hole_size = hole_end - hole_start;  (in xe_ggtt_largest_hole())
    945   u64 hole_start, hole_end, hole_size;  (in xe_ggtt_print_holes(), local)
    951   drm_mm_for_each_hole(entry, mm, hole_start, hole_end…  (in xe_ggtt_print_holes())
    … more matches not shown
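xe_ggtt_largest_hole() applies the same clamp-align-measure step to every hole: raise hole_start to a minimum offset, align it up, skip holes that collapse to nothing, and remember the largest remaining size. A minimal standalone sketch of that pattern, assuming a plain array of holes instead of a drm_mm (the names and the ALIGN_POW2 helper are illustrative; like the kernel's ALIGN, it expects a power-of-two alignment):

    /* Standalone sketch of the clamp-align-measure loop shown above;
     * not the xe driver code. */
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    struct hole { uint64_t start, end; };

    static uint64_t largest_usable_hole(const struct hole *holes, int n,
                                        uint64_t hole_min_start, uint64_t alignment)
    {
        uint64_t max_hole = 0;

        for (int i = 0; i < n; i++) {
            uint64_t hole_start = holes[i].start;
            uint64_t hole_end = holes[i].end;

            if (hole_start < hole_min_start)          /* max(hole_start, hole_min_start) */
                hole_start = hole_min_start;
            hole_start = ALIGN_POW2(hole_start, alignment);

            if (hole_start >= hole_end)               /* nothing usable left */
                continue;

            uint64_t hole_size = hole_end - hole_start;
            if (hole_size > max_hole)
                max_hole = hole_size;
        }
        return max_hole;
    }

    int main(void)
    {
        struct hole holes[] = { { 0x0000, 0x3000 }, { 0x8000, 0x9000 } };

        /* First hole is clamped to 0x1000 and aligned, leaving 0x2000 bytes. */
        printf("0x%llx\n", (unsigned long long)
               largest_usable_hole(holes, 2, 0x1000, 0x1000));
        return 0;
    }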
/linux/drivers/gpu/drm/tests/
  drm_mm_test.c
    41    u64 hole_start, __always_unused hole_end;  (in assert_no_holes(), local)
    45    drm_mm_for_each_hole(hole, mm, hole_start, hole_end)  (in assert_no_holes())
    66    u64 hole_start, hole_end;  (in assert_one_hole(), local)
    74    drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {  (in assert_one_hole())
    75    if (start != hole_start || end != hole_end) {  (in assert_one_hole())
    79    hole_start, hole_end, start, end);  (in assert_one_hole())
/linux/arch/sparc/kernel/
  adi_64.c
    163   unsigned long end_addr, hole_start, hole_end;  (in alloc_tag_store(), local)
    167   hole_start = 0;  (in alloc_tag_store())
    198   (tag_desc->end > hole_start))  (in alloc_tag_store())
    199   hole_start = tag_desc->end;  (in alloc_tag_store())
    273   if (tmp_addr < hole_start) {  (in alloc_tag_store())
    277   tmp_addr = hole_start + 1;  (in alloc_tag_store())
/linux/fs/btrfs/tests/
  raid-stripe-tree-tests.c
    50    u64 hole_start = logical1 + SZ_256K;  (in test_punch_hole_3extents(), local)
    133   ret = btrfs_delete_raid_extent(trans, hole_start, hole_len);  (in test_punch_hole_3extents())
    136   hole_start, hole_start + hole_len);  (in test_punch_hole_3extents())
    367   u64 hole_start = logical1 + SZ_32K;  (in test_punch_hole(), local)
    369   u64 logical2 = hole_start + hole_len;  (in test_punch_hole())
    427   ret = btrfs_delete_raid_extent(trans, hole_start, hole_len);  (in test_punch_hole())
    430   hole_start, hole_start + hole_len);  (in test_punch_hole())
    479   ret = btrfs_get_raid_extent_offset(fs_info, hole_start,  (in test_punch_hole())
    … more matches not shown
/linux/include/drm/
  drm_mm.h
    382   * @hole_start: ulong variable to assign the hole start to on each iteration
    391   * We need to inline list_for_each_entry in order to be able to set hole_start
    394   #define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \  (macro argument)
    398   hole_start = drm_mm_hole_node_start(pos), \
    399   hole_end = hole_start + pos->hole_size, \
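The macro above walks every hole in a struct drm_mm and recomputes hole_start and hole_end from the cursor node on each iteration, which is how the xe_ggtt.c, intel_ggtt.c and drm_mm_test.c hits elsewhere in this listing use it. A minimal usage sketch, assuming the normal kernel <drm/drm_mm.h> context; total_hole_space() is a hypothetical helper, not a kernel function:

    /* Hypothetical helper, kernel-context sketch only: sum up all hole space
     * in an allocator, mirroring the call sites in xe_ggtt.c and intel_ggtt.c. */
    static u64 total_hole_space(struct drm_mm *mm)
    {
        struct drm_mm_node *entry;
        u64 hole_start, hole_end, total = 0;

        drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
            total += hole_end - hole_start;

        return total;
    }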
/linux/fs/bcachefs/
  fs-io.c
    652   u64 hole_start, hole_end;  (in __bchfs_fallocate(), local)
    671   hole_start = iter.pos.offset;  (in __bchfs_fallocate())
    694   &hole_start,  (in __bchfs_fallocate())
    699   &hole_start,  (in __bchfs_fallocate())
    705   bch2_btree_iter_set_pos(trans, &iter, POS(iter.pos.inode, hole_start));  (in __bchfs_fallocate())
    710   if (hole_start == hole_end)  (in __bchfs_fallocate())
    714   sectors = hole_end - hole_start;  (in __bchfs_fallocate())
    731   if (bch2_mark_pagecache_reserved(inode, &hole_start,  (in __bchfs_fallocate())
    734   bch2_mark_pagecache_reserved(inode, &hole_start,  (in __bchfs_fallocate())
  fs-io-pagecache.c
    801   u64 *hole_start,  (in bch2_clamp_data_hole(), argument)
    809   *hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;  (in bch2_clamp_data_hole())
    813   *hole_start = ret;  (in bch2_clamp_data_hole())
    815   if (*hole_start == *hole_end)  (in bch2_clamp_data_hole())
    819   *hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;  (in bch2_clamp_data_hole())
  alloc_background.c
    1137  struct bpos hole_start = bkey_start_pos(k.k);  (in bch2_get_key_or_real_bucket_hole(), local)
    1139  if (!*ca || !bucket_valid(*ca, hole_start.offset)) {  (in bch2_get_key_or_real_bucket_hole())
    1140  if (!next_bucket(c, ca, &hole_start))  (in bch2_get_key_or_real_bucket_hole())
    1143  bch2_btree_iter_set_pos(trans, iter, hole_start);  (in bch2_get_key_or_real_bucket_hole())
    1148  bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);  (in bch2_get_key_or_real_bucket_hole())
/linux/fs/hugetlbfs/
  inode.c
    676   loff_t hole_start, hole_end;  (in hugetlbfs_punch_hole(), local)
    679   * hole_start and hole_end indicate the full pages within the hole.  (in hugetlbfs_punch_hole())
    681   hole_start = round_up(offset, hpage_size);  (in hugetlbfs_punch_hole())
    695   if (offset < hole_start)  (in hugetlbfs_punch_hole())
    697   offset, min(offset + len, hole_start));  (in hugetlbfs_punch_hole())
    700   if (hole_end > hole_start) {  (in hugetlbfs_punch_hole())
    703   hole_start >> PAGE_SHIFT,  (in hugetlbfs_punch_hole())
    708   if ((offset + len) > hole_end && (offset + len) > hole_start)  (in hugetlbfs_punch_hole())
    715   if (hole_end > hole_start)  (in hugetlbfs_punch_hole())
    716   remove_inode_hugepages(inode, hole_start, hole_end…  (in hugetlbfs_punch_hole())
    … more matches not shown
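hugetlbfs_punch_hole() rounds the requested range inward so that hole_start and hole_end cover only whole huge pages, while the partial pages at either edge are zeroed rather than removed. A standalone illustration of that arithmetic (rounding hole_end down is an assumption based on the "full pages within the hole" comment, since that source line is not shown here):

    /* Standalone illustration, not the hugetlbfs code: given a punch-hole
     * request [offset, offset + len), compute the sub-range made of whole
     * huge pages. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t round_up_u64(uint64_t x, uint64_t align)
    {
        return (x + align - 1) / align * align;
    }

    static uint64_t round_down_u64(uint64_t x, uint64_t align)
    {
        return x / align * align;
    }

    int main(void)
    {
        const uint64_t hpage_size = 2ULL << 20;      /* 2 MiB huge pages        */
        uint64_t offset = 3 * (1ULL << 20);          /* request starts at 3 MiB */
        uint64_t len    = 7 * (1ULL << 20);          /* ... and is 7 MiB long   */

        uint64_t hole_start = round_up_u64(offset, hpage_size);          /* 4 MiB  */
        uint64_t hole_end   = round_down_u64(offset + len, hpage_size);  /* 10 MiB */

        /* Whole pages in [hole_start, hole_end) can be removed outright;
         * the partial pages at either edge must be zeroed instead. */
        printf("full-page hole: [%llu, %llu)\n",
               (unsigned long long)hole_start, (unsigned long long)hole_end);
        return 0;
    }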
/linux/drivers/gpu/drm/i915/gt/
  intel_ggtt.c
    872   unsigned long hole_start, hole_end;  (in init_ggtt(), local)
    943   drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {  (in init_ggtt())
    946   hole_start, hole_end);  (in init_ggtt())
    947   ggtt->vm.clear_range(&ggtt->vm, hole_start,  (in init_ggtt())
    948   hole_end - hole_start);  (in init_ggtt())
/linux/fs/btrfs/
  zoned.c
    1027  * @hole_start: the position of the hole to allocate the region
    1034  u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,  (in btrfs_find_allocatable_zones(), argument)
    1040  u64 pos = hole_start;  (in btrfs_find_allocatable_zones())
    1045  ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));  (in btrfs_find_allocatable_zones())
  inode.c
    5097  u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);  (in btrfs_cont_expand(), local)
    5113  if (size <= hole_start)  (in btrfs_cont_expand())
    5116  btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,  (in btrfs_cont_expand())
    5118  cur_offset = hole_start;  (in btrfs_cont_expand())
    5174  btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);  (in btrfs_cont_expand())