Searched refs:prealloc (Results 1 – 25 of 35) sorted by relevance

/linux/fs/btrfs/
extent-io-tree.c
166 static struct extent_state *alloc_extent_state_atomic(struct extent_state *prealloc) in alloc_extent_state_atomic() argument
168 if (!prealloc) in alloc_extent_state_atomic()
169 prealloc = alloc_extent_state(GFP_ATOMIC); in alloc_extent_state_atomic()
171 return prealloc; in alloc_extent_state_atomic()
491 * struct 'prealloc' as the newly created second half. 'split' indicates an
497 * prealloc: [orig->start, split - 1]
504 struct extent_state *prealloc, u64 split) in split_state() argument
512 prealloc->start = orig->start; in split_state()
513 prealloc->end = split - 1; in split_state()
514 prealloc->state = orig->state; in split_state()
618 struct extent_state *prealloc = NULL; btrfs_clear_extent_bit_changeset() local
1053 struct extent_state *prealloc = NULL; set_extent_bit() local
1323 struct extent_state *prealloc = NULL; btrfs_convert_extent_bit() local
[all...]
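
The extent-io-tree.c hits above show the core btrfs prealloc idiom: callers allocate a spare extent_state before taking the tree lock, alloc_extent_state_atomic() falls back to GFP_ATOMIC only when no spare was passed in, and split_state() itself never allocates, so it cannot fail at the point of no return. A minimal userspace sketch of that shape (illustrative names, not the kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct state { long start, end; };

    /* Like alloc_extent_state_atomic(): reuse the caller's spare if
     * present, otherwise fall back to an on-the-spot allocation. */
    static struct state *alloc_state_atomic(struct state *prealloc)
    {
        if (!prealloc)
            prealloc = malloc(sizeof(*prealloc));
        return prealloc;
    }

    /* Like split_state(): the spare becomes [orig->start, split - 1]
     * and orig keeps [split, orig->end]; no failure path here. */
    static void split_state(struct state *orig, struct state *prealloc,
                            long split)
    {
        prealloc->start = orig->start;
        prealloc->end = split - 1;
        orig->start = split;
    }

    int main(void)
    {
        struct state orig = { 0, 99 };
        struct state *prealloc = alloc_state_atomic(NULL);

        if (!prealloc)
            return 1;
        split_state(&orig, prealloc, 50);
        printf("left [%ld,%ld] right [%ld,%ld]\n",
               prealloc->start, prealloc->end, orig.start, orig.end);
        free(prealloc);
        return 0;
    }
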
ulist.c
53 ulist->prealloc = NULL; in ulist_init()
72 kfree(ulist->prealloc); in ulist_release()
73 ulist->prealloc = NULL; in ulist_release()
113 if (!ulist->prealloc) in ulist_prealloc()
114 ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask); in ulist_prealloc()
218 if (ulist->prealloc) { in ulist_add_merge()
219 node = ulist->prealloc; in ulist_add_merge()
220 ulist->prealloc = NULL; in ulist_add_merge()
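
ulist.c keeps at most one spare node: ulist_prealloc() stocks it with the caller's gfp_mask, and ulist_add_merge() consumes it and clears the pointer so the spare is handed out exactly once. A userspace model of that consume-and-clear step (hypothetical names):

    #include <stdlib.h>

    struct ulist_node { unsigned long val; };
    struct ulist { struct ulist_node *prealloc; };

    /* Like ulist_prealloc(): stock the spare only if it is empty. */
    static void ulist_prealloc(struct ulist *u)
    {
        if (!u->prealloc)
            u->prealloc = calloc(1, sizeof(*u->prealloc));
    }

    /* Like the ulist_add_merge() path: prefer the spare and clear it,
     * falling back to a fresh allocation when none is stocked. */
    static struct ulist_node *ulist_get_node(struct ulist *u)
    {
        struct ulist_node *n = u->prealloc;

        if (n)
            u->prealloc = NULL;
        else
            n = calloc(1, sizeof(*n));
        return n;
    }

    int main(void)
    {
        struct ulist u = { 0 };

        ulist_prealloc(&u);
        free(ulist_get_node(&u));   /* consumes the spare */
        return 0;
    }
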
qgroup.c
196 * Must be called with qgroup_lock held and @prealloc preallocated.
198 * The control on the lifespan of @prealloc would be transferred to this
199 * function, thus caller should no longer touch @prealloc.
202 struct btrfs_qgroup *prealloc, in add_qgroup_rb() argument
207 /* Caller must have pre-allocated @prealloc. */ in add_qgroup_rb()
208 ASSERT(prealloc); in add_qgroup_rb()
210 prealloc->qgroupid = qgroupid; in add_qgroup_rb()
211 node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp); in add_qgroup_rb()
213 kfree(prealloc); in add_qgroup_rb()
217 INIT_LIST_HEAD(&prealloc->groups); in add_qgroup_rb()
271 __add_relation_rb(struct btrfs_qgroup_list *prealloc, struct btrfs_qgroup *member, struct btrfs_qgroup *parent) __add_relation_rb() argument
298 add_relation_rb(struct btrfs_fs_info *fs_info, struct btrfs_qgroup_list *prealloc, u64 memberid, u64 parentid) add_relation_rb() argument
459 struct btrfs_qgroup *prealloc; btrfs_read_qgroup_config() local
998 struct btrfs_qgroup *prealloc = NULL; btrfs_quota_enable() local
1529 btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst, struct btrfs_qgroup_list *prealloc) btrfs_add_qgroup_relation() argument
1661 struct btrfs_qgroup *prealloc = NULL; btrfs_create_qgroup() local
3302 struct btrfs_qgroup *prealloc; btrfs_qgroup_inherit() local
[all...]
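
The qgroup.c comment spells out the ownership rule: once @prealloc is handed to add_qgroup_rb(), its lifetime belongs to the callee, which frees it when rb_find_add() reports an existing node, so the caller must not touch it afterwards. A sketch of the same transfer over a simple list (userspace model, not the btrfs code):

    #include <stdlib.h>

    struct qgroup { unsigned long id; struct qgroup *next; };

    static struct qgroup *qgroup_tree;

    /* Like add_qgroup_rb(): ownership of @prealloc transfers here. On a
     * duplicate id the spare is freed and the existing node returned,
     * so the caller must not dereference @prealloc after this call. */
    static struct qgroup *add_qgroup(struct qgroup *prealloc, unsigned long id)
    {
        struct qgroup *q;

        for (q = qgroup_tree; q; q = q->next) {
            if (q->id == id) {
                free(prealloc);          /* duplicate: discard the spare */
                return q;
            }
        }
        prealloc->id = id;
        prealloc->next = qgroup_tree;
        qgroup_tree = prealloc;
        return prealloc;
    }

    int main(void)
    {
        struct qgroup *a = calloc(1, sizeof(*a));
        struct qgroup *b = calloc(1, sizeof(*b));

        add_qgroup(a, 5);
        add_qgroup(b, 5);   /* duplicate: b is freed by the callee */
        free(qgroup_tree);
        return 0;
    }
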
ulist.h
44 struct ulist_node *prealloc; member
extent_io.c
785 struct btrfs_folio_state *prealloc) in attach_extent_buffer_folio() argument
807 /* Already mapped, just free prealloc */ in attach_extent_buffer_folio()
809 btrfs_free_folio_state(prealloc); in attach_extent_buffer_folio()
813 if (prealloc) in attach_extent_buffer_folio()
815 folio_attach_private(folio, prealloc); in attach_extent_buffer_folio()
3173 struct btrfs_folio_state *prealloc, in attach_eb_folio_to_filemap() argument
3234 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc); in attach_eb_folio_to_filemap()
3256 struct btrfs_folio_state *prealloc = NULL; in alloc_extent_buffer() local
3301 prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA); in alloc_extent_buffer()
3302 if (IS_ERR(prealloc)) { in alloc_extent_buffer()
[all...]
qgroup.h
342 struct btrfs_qgroup_list *prealloc);
400 /* Reserve metadata space for pertrans and prealloc type */
/linux/lib/
stackdepot.c
293 static bool depot_init_pool(void **prealloc) in depot_init_pool() argument
305 if (!new_pool && *prealloc) { in depot_init_pool()
307 WRITE_ONCE(new_pool, *prealloc); in depot_init_pool()
308 *prealloc = NULL; in depot_init_pool()
312 return false; /* new_pool and *prealloc are NULL */ in depot_init_pool()
340 static void depot_keep_new_pool(void **prealloc) in depot_keep_new_pool() argument
351 WRITE_ONCE(new_pool, *prealloc); in depot_keep_new_pool()
352 *prealloc = NULL; in depot_keep_new_pool()
359 static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size) in depot_pop_free_pool() argument
368 if (!depot_init_pool(prealloc)) in depot_pop_free_pool()
430 depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc) depot_alloc_stack() argument
642 void *prealloc = NULL; stack_depot_save_flags() local
[all...]
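
stackdepot.c passes the spare as void ** so the callee can signal consumption: depot_keep_new_pool() steals *prealloc into new_pool and writes NULL back, leaving any unconsumed spare for the caller to free on exit. A compact userspace model:

    #include <stdlib.h>

    static void *new_pool;

    /* Like depot_keep_new_pool(): take ownership through the double
     * pointer and clear it, so the caller's cleanup free() is a no-op. */
    static void keep_new_pool(void **prealloc)
    {
        if (!new_pool && *prealloc) {
            new_pool = *prealloc;
            *prealloc = NULL;
        }
    }

    int main(void)
    {
        void *prealloc = malloc(4096);

        keep_new_pool(&prealloc);
        free(prealloc);              /* NULL if the pool kept it */
        free(new_pool);
        return 0;
    }
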
/linux/tools/testing/selftests/net/
ioam6.sh
210 encap ioam6 trace prealloc type 0x800000 ns 0 size 4 dev veth0 &>/dev/null
693 encap ioam6 mode $mode trace prealloc type 0x800000 size 4 \
699 encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \
734 encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \
740 encap ioam6 mode $mode_tunsrc trace prealloc type 0x800000 ns 0 size 4 \
774 encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \
780 encap ioam6 mode $mode_tundst trace prealloc type 0x800000 ns 0 size 4 \
811 encap ioam6 mode $mode trace prealloc ns 0 size 4 \
817 encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \
842 encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \
[all...]
lwt_dst_cache_ref_loop.sh
59 encap ioam6 trace prealloc type 0x800000 ns 0 size 4 \
168 encap ioam6 trace prealloc type 0x800000 ns 1 size 4 \
/linux/arch/powerpc/platforms/ps3/
setup.c
113 static void __init prealloc(struct ps3_prealloc *p) in prealloc() function
132 #define prealloc_ps3fb_videomemory() prealloc(&ps3fb_videomemory)
155 #define prealloc_ps3flash_bounce_buffer() prealloc(&ps3flash_bounce_buffer)
/linux/fs/ext4/
extents_status.c
179 struct extent_status *prealloc);
182 struct extent_status *prealloc);
188 struct pending_reservation **prealloc);
819 struct extent_status *prealloc) in __es_insert_extent() argument
859 if (prealloc) in __es_insert_extent()
860 es = prealloc; in __es_insert_extent()
1406 * @prealloc - pre-allocated es to avoid memory allocation failures
1415 struct extent_status *prealloc) in __es_remove_extent() argument
1462 err = __es_insert_extent(inode, &newes, prealloc); in __es_remove_extent()
1985 * @prealloc - preallocated pending entry
1991 __insert_pending(struct inode *inode, ext4_lblk_t lblk, struct pending_reservation **prealloc) __insert_pending() argument
2229 __revise_pending(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, struct pending_reservation **prealloc) __revise_pending() argument
[all...]
/linux/net/sched/
sch_gred.c
484 struct gred_sched_data **prealloc, in gred_change_vq() argument
496 table->tab[dp] = q = *prealloc; in gred_change_vq()
497 *prealloc = NULL; in gred_change_vq()
651 struct gred_sched_data *prealloc; in gred_change() local
703 prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); in gred_change()
706 err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc, in gred_change()
721 kfree(prealloc); in gred_change()
728 kfree(prealloc); in gred_change()
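
sch_gred.c shows the caller's half of the same contract: gred_change() allocates one spare with GFP_KERNEL up front, gred_change_vq() steals it through the double pointer when a new virtual queue is needed, and both exit paths kfree() the handle, which is a safe no-op once it has been cleared. A userspace model:

    #include <stdlib.h>

    #define MAX_DPS 4

    struct vq { int dp; };
    static struct vq *table[MAX_DPS];

    /* Like gred_change_vq(): install the caller's spare in an empty
     * slot and clear the handle so the caller does not free it. */
    static int change_vq(int dp, struct vq **prealloc)
    {
        if (!table[dp]) {
            if (!*prealloc)
                return -1;           /* spare already consumed */
            table[dp] = *prealloc;
            *prealloc = NULL;
        }
        table[dp]->dp = dp;
        return 0;
    }

    int main(void)
    {
        struct vq *prealloc = calloc(1, sizeof(*prealloc));
        int err = change_vq(2, &prealloc);

        free(prealloc);              /* no-op if the table took it */
        return err ? 1 : 0;
    }
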
/linux/drivers/gpu/drm/msm/
msm_gem_vma.c
117 /** @prealloc: Tracking for pre-allocated MMU pgtable pages */
118 struct msm_mmu_prealloc prealloc; member
654 vm->mmu->prealloc = &job->prealloc; in msm_vma_job_run()
679 vm->mmu->prealloc = NULL; in msm_vma_job_run()
706 vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc); in msm_vma_job_free()
708 atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight); in msm_vma_job_free()
1076 mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova); in prealloc_count()
1092 * Determine the amount of memory to prealloc for pgtables. For sparse images,
1144 atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight); in vm_bind_prealloc_count()
[all...]
msm_mmu.h
61 * @prealloc: pre-allocated pages for pgtable
66 struct msm_mmu_prealloc *prealloc; member
/linux/drivers/media/platform/renesas/vsp1/
vsp1_dl.c
1114 unsigned int prealloc) in vsp1_dlm_create() argument
1140 * memory. An extra body is allocated on top of the prealloc to account in vsp1_dlm_create()
1149 dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1, in vsp1_dlm_create()
1154 for (i = 0; i < prealloc; ++i) { in vsp1_dlm_create()
1171 dlm->list_count = prealloc; in vsp1_dlm_create()
1175 VSP1_EXTCMD_AUTOFLD, prealloc); in vsp1_dlm_create()
vsp1_dl.h
56 unsigned int prealloc);
/linux/drivers/gpu/drm/i915/display/
intel_fbdev.c
220 bool prealloc = false; in intel_fbdev_driver_fbdev_probe() local
245 prealloc = true; in intel_fbdev_driver_fbdev_probe()
290 if (!intel_bo_is_shmem(obj) && !prealloc) in intel_fbdev_driver_fbdev_probe()
/linux/fs/xfs/
xfs_iomap.c
410 /* no dq, or over hi wmark, squash the prealloc completely */ in xfs_quota_calc_throttle()
533 * XFS_MAX_BMBT_EXTLEN is not a power of two value but we round the prealloc in xfs_iomap_prealloc_size()
536 * prealloc size, we round up first, apply appropriate throttling, round in xfs_iomap_prealloc_size()
551 * Check each quota to cap the prealloc size, provide a shift value to in xfs_iomap_prealloc_size()
565 * The final prealloc size is set to the minimum of free space available in xfs_iomap_prealloc_size()
587 * available, squash the prealloc hard. This can happen if we in xfs_iomap_prealloc_size()
1397 xfs_filblks_t prealloc, in xfs_bmapi_reserve_delalloc() argument
1410 whichfork == XFS_COW_FORK && !prealloc; in xfs_bmapi_reserve_delalloc()
1414 * Cap the alloc length. Keep track of prealloc so we know whether to in xfs_bmapi_reserve_delalloc()
1418 alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN); in xfs_bmapi_reserve_delalloc()
[all...]
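
The xfs_iomap.c comments describe the speculative prealloc sizing arithmetic: the candidate size is rounded up to a power of two, throttled by right shifts derived from free-space and quota pressure, rounded back down so the result stays a power of two, and finally capped by what is actually available. A standalone model of that arithmetic (the shift values are illustrative, not XFS's heuristics):

    #include <stdio.h>

    /* Round v down/up to a power of two (v > 0). */
    static unsigned long rounddown_pow2(unsigned long v)
    {
        while (v & (v - 1))
            v &= v - 1;              /* clear lowest set bit */
        return v;
    }

    static unsigned long roundup_pow2(unsigned long v)
    {
        unsigned long r = rounddown_pow2(v);

        return r == v ? r : r << 1;
    }

    /* Model of the ordering in xfs_iomap_prealloc_size(): round up
     * first, apply the throttle shift, round back down, then clamp
     * to the free space actually available. */
    static unsigned long prealloc_size(unsigned long want,
                                       unsigned int throttle_shift,
                                       unsigned long freesp)
    {
        unsigned long alloc = roundup_pow2(want) >> throttle_shift;

        alloc = rounddown_pow2(alloc ? alloc : 1);
        return alloc < freesp ? alloc : freesp;
    }

    int main(void)
    {
        /* 1000 -> 1024, >>2 -> 256, already a power of two, fits. */
        printf("%lu\n", prealloc_size(1000, 2, 4096));
        return 0;
    }
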
xfs_dquot.c
125 int prealloc = 0; in xfs_qm_adjust_dqlimits() local
132 prealloc = 1; in xfs_qm_adjust_dqlimits()
136 prealloc = 1; in xfs_qm_adjust_dqlimits()
147 if (prealloc) in xfs_qm_adjust_dqlimits()
659 /* initialize the dquot speculative prealloc thresholds */ in xfs_dquot_from_disk()
/linux/kernel/bpf/
hashtab.c
421 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc_check() local
439 if (lru && !prealloc) in htab_map_alloc_check()
477 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc() local
560 if (prealloc) { in htab_map_alloc()
990 bool prealloc = htab_is_prealloc(htab); in alloc_htab_elem() local
994 if (prealloc) { in alloc_htab_elem()
1030 if (prealloc) { in alloc_htab_elem()
1047 if (!prealloc) in alloc_htab_elem()
2217 bool prealloc = htab_is_prealloc(htab); in htab_map_mem_usage() local
2225 if (prealloc) { in htab_map_mem_usage()
[all...]
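
In hashtab.c the default is full preallocation of all elements at map creation; BPF_F_NO_PREALLOC switches to allocate-on-insert, and LRU hash maps reject that flag since they rely on the preallocated pool. From userspace the choice is made at creation time; a minimal libbpf sketch (assumes libbpf is available and the process has permission to create maps):

    #include <stdio.h>
    #include <bpf/bpf.h>

    int main(void)
    {
        /* Elements allocated on first insert rather than up front. */
        LIBBPF_OPTS(bpf_map_create_opts, opts,
                    .map_flags = BPF_F_NO_PREALLOC);
        int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "no_prealloc",
                                sizeof(int), sizeof(long), 1024, &opts);

        if (fd < 0) {
            perror("bpf_map_create");
            return 1;
        }
        printf("created map fd %d\n", fd);
        return 0;
    }
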
/linux/drivers/md/
dm-cache-target.c
1403 struct dm_bio_prison_cell_v2 *prealloc; in mg_lock_writes() local
1405 prealloc = alloc_prison_cell(cache); in mg_lock_writes()
1415 prealloc, &mg->cell); in mg_lock_writes()
1417 free_prison_cell(cache, prealloc); in mg_lock_writes()
1422 if (mg->cell != prealloc) in mg_lock_writes()
1423 free_prison_cell(cache, prealloc); in mg_lock_writes()
1533 struct dm_bio_prison_cell_v2 *prealloc; in invalidate_lock() local
1535 prealloc = alloc_prison_cell(cache); in invalidate_lock()
1539 READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell); in invalidate_lock()
1541 free_prison_cell(cache, prealloc); in invalidate_lock()
[all...]
/linux/fs/sysfs/
file.c
244 .prealloc = true,
249 .prealloc = true,
255 .prealloc = true,
/linux/sound/hda/common/
Kconfig
95 via a proc file (/proc/asound/card*/pcm*/sub*/prealloc), too.
/linux/mm/
memory.c
919 struct folio **prealloc, struct page *page) in copy_present_page() argument
924 new_folio = *prealloc; in copy_present_page()
929 * We have a prealloc page, all good! Take it in copy_present_page()
936 *prealloc = NULL; in copy_present_page()
985 int max_nr, int *rss, struct folio **prealloc) in copy_present_ptes() argument
1003 if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) { in copy_present_ptes()
1040 addr, rss, prealloc, page); in copy_present_ptes()
1092 struct folio *prealloc = NULL; in copy_pte_range() local
1172 /* copy_present_ptes() will clear `*prealloc' if consumed */ in copy_pte_range()
1175 ptent, addr, max_nr, rss, &prealloc); in copy_pte_range()
[all...]
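
memory.c uses prealloc for a retry loop: copy_pte_range() cannot allocate a destination folio while it holds the page table locks, so when copy_present_ptes() needs one it returns an error, the locks are dropped, a folio is preallocated, and the loop re-enters with the spare in hand; the callee clears *prealloc when it consumes it. The control flow in miniature (userspace model):

    #include <stdlib.h>

    struct folio { char data[64]; };

    /* Like copy_present_page(): only usable if a spare folio exists;
     * consume it and clear the caller's handle. */
    static int copy_page_step(struct folio **prealloc)
    {
        if (!*prealloc)
            return -11;              /* model of -EAGAIN */
        /* ... copy source data into *prealloc here ... */
        *prealloc = NULL;            /* consumed */
        return 0;
    }

    /* Like copy_pte_range(): allocation is forbidden in the "locked"
     * region, so back off, preallocate, and retry the loop. */
    static int copy_range(void)
    {
        struct folio *prealloc = NULL;
        int ret;

    again:
        /* lock_page_tables(); */
        ret = copy_page_step(&prealloc);
        /* unlock_page_tables(); */
        if (ret == -11) {
            prealloc = malloc(sizeof(*prealloc));
            if (!prealloc)
                return -12;          /* model of -ENOMEM */
            goto again;
        }
        free(prealloc);              /* NULL-safe if consumed */
        return ret;
    }

    int main(void) { return copy_range() ? 1 : 0; }
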
/linux/include/linux/
kernfs.h
313 * "prealloc" causes a buffer to be allocated at open for
316 * ->prealloc. Provide ->read and ->write with ->prealloc.
318 bool prealloc; member
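
kernfs.h documents the remaining flavor: with ->prealloc set, the buffer for a sysfs-style file is allocated once at open, so the ->read/->write handlers never allocate and a write cannot fail for lack of memory (the sysfs file.c hits above set it on its ops). A userspace model of the open-time allocation:

    #include <stdlib.h>

    struct open_file {               /* model of a kernfs open file */
        char *prealloc_buf;
    };

    struct file_ops {
        int prealloc;                /* model of kernfs_ops.prealloc */
    };

    /* Like kernfs open with ->prealloc: allocate the I/O buffer once
     * here so read/write handlers never allocate (and never fail). */
    static int kf_open(const struct file_ops *ops, struct open_file *of)
    {
        if (ops->prealloc) {
            of->prealloc_buf = malloc(4096);
            if (!of->prealloc_buf)
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct file_ops ops = { .prealloc = 1 };
        struct open_file of = { 0 };
        int err = kf_open(&ops, &of);

        free(of.prealloc_buf);
        return err ? 1 : 0;
    }
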
