Searched refs:tsdn (Results 1 – 25 of 74) sorted by relevance

/src/contrib/jemalloc/src/

extent.c
    16: static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    18: static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
    20: static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
    22: static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    24: static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    37: static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
    38: static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    41: static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    43: static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
    61: extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    [all …]
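
Every hit above follows the same convention: tsdn_t *tsdn is threaded through as the first parameter. A tsdn_t is jemalloc's nullable handle to thread-specific data ("tsd or NULL"), so paths that can run before TSD exists pass NULL and callees must check. A minimal sketch of the idea, with deliberately simplified types (the real definitions in tsd.h use a distinct wrapper type rather than a plain typedef):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in; jemalloc's tsd_t carries far more per-thread state. */
typedef struct tsd_s {
	int reentrancy_level;
} tsd_t;

/* "tsd or NULL": in jemalloc proper this is a distinct type, so the
 * compiler forces an explicit tsdn_tsd() conversion. */
typedef tsd_t tsdn_t;

static inline bool
tsdn_null(const tsdn_t *tsdn) {
	return tsdn == NULL;
}

static inline tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
	assert(!tsdn_null(tsdn)); /* callers must check tsdn_null() first */
	return tsdn;
}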

pa_extra.c
    12: pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
    13:     malloc_mutex_prefork(tsdn, &shard->pac.decay_dirty.mtx);
    14:     malloc_mutex_prefork(tsdn, &shard->pac.decay_muzzy.mtx);
    18: pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
    20:     sec_prefork2(tsdn, &shard->hpa_sec);
    25: pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) {
    26:     malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
    28:     hpa_shard_prefork3(tsdn, &shard->hpa_shard);
    33: pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) {
    34:     ecache_prefork(tsdn, &shard->pac.ecache_dirty);
    [all …]
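
The pa_shard_prefork0..4 family above is jemalloc's fork-safety machinery: before fork(), every mutex is acquired in a fixed phase order so the child never inherits a lock held mid-operation, and postfork handlers release (parent) or reinitialize (child) them. A hedged sketch of the general pattern using plain pthreads; the phase split and mutex names here are illustrative, not jemalloc's:

#include <pthread.h>

static pthread_mutex_t decay_mtx = PTHREAD_MUTEX_INITIALIZER; /* "phase 0" */
static pthread_mutex_t grow_mtx = PTHREAD_MUTEX_INITIALIZER;  /* "phase 3" */

/* Acquire everything, lowest phase first, so lock order is global. */
static void
prefork(void) {
	pthread_mutex_lock(&decay_mtx);
	pthread_mutex_lock(&grow_mtx);
}

/* Parent: plain unlock, in reverse order. */
static void
postfork_parent(void) {
	pthread_mutex_unlock(&grow_mtx);
	pthread_mutex_unlock(&decay_mtx);
}

/* Child: jemalloc reinitializes rather than unlocking, since unlocking
 * a mutex on behalf of another thread is undefined; simplified here. */
static void
postfork_child(void) {
	pthread_mutex_init(&grow_mtx, NULL);
	pthread_mutex_init(&decay_mtx, NULL);
}

static void
install_fork_handlers(void) {
	pthread_atfork(prefork, postfork_parent, postfork_child);
}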

hpa.c
    11: static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
    14: static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
    16: static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    18: static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    20: static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    22: static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
    24: static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
    85: hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) {
    86:     return (hpdata_t *)base_alloc(tsdn, central->base, sizeof(hpdata_t),
    91: hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
    [all …]

arena.c
    63: static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    65: static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
    68: arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
    74: arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    85: arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    92:     arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
    96:     base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
   102:     LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
   109:     uint64_t nmalloc = locked_read_u64(tsdn,
   115:     uint64_t ndalloc = locked_read_u64(tsdn,
    [all …]

large.c
    14: large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
    17:     return large_palloc(tsdn, arena, usize, CACHELINE, zero);
    21: large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    27:     assert(!tsdn_null(tsdn) || arena != NULL);
    34:     if (likely(!tsdn_null(tsdn))) {
    35:         arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
    37:     if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
    45:     malloc_mutex_lock(tsdn, &arena->large_mtx);
    47:     malloc_mutex_unlock(tsdn, &arena->large_mtx);
    50:     arena_decay_tick(tsdn, arena);
    [all …]
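
large_palloc() above is the canonical consumer of the handle: it asserts that a NULL tsdn is only legal when the caller has already pinned an arena, and touches thread state only behind a tsdn_null() check. A simplified rendering of that control flow; the types and arena_choose_stub() are illustrative stand-ins for arena_choose_maybe_huge():

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s { int unused; } tsd_t;
typedef tsd_t tsdn_t;           /* simplified "tsd or NULL" handle */
typedef struct arena_s arena_t;

static bool tsdn_null(const tsdn_t *t) { return t == NULL; }
static tsd_t *tsdn_tsd(tsdn_t *t) { return t; }

/* Hypothetical stub: per-thread policy picks an arena when none is pinned. */
static arena_t *
arena_choose_stub(tsd_t *tsd, arena_t *arena) {
	(void)tsd;
	return arena;
}

static void *
large_palloc_sketch(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	/* A NULL handle is only legal if the caller already chose an arena. */
	assert(!tsdn_null(tsdn) || arena != NULL);
	if (!tsdn_null(tsdn)) {
		/* Thread state is available: let per-thread policy pick. */
		arena = arena_choose_stub(tsdn_tsd(tsdn), arena);
	}
	if (arena == NULL) {
		return NULL; /* allocation fails */
	}
	/* ... allocate usize bytes as a large extent from arena ... */
	(void)usize;
	return NULL; /* extent allocation elided */
}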

prof.c
   304: prof_idump(tsdn_t *tsdn) {
   310:     if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
   313:     tsd = tsdn_tsd(tsdn);
   343: prof_gdump(tsdn_t *tsdn) {
   349:     if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
   352:     tsd = tsdn_tsd(tsdn);
   370: prof_thr_uid_alloc(tsdn_t *tsdn) {
   373:     malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
   376:     malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
   415: prof_active_get(tsdn_t *tsdn) {
    [all …]

emap.c
    19: emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    21:     witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
    27:     rtree_leaf_elm_t *elm1 = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
    32:         rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
    36:     rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state);
    38:     emap_assert_mapped(tsdn, emap, edata);
    42: emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    45:     witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
    69:     rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
    76:     rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
    [all …]

sec.c
     6: static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
     9: static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    11: static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    13: static void sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    24: sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
    34:     void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE);
    89: sec_shard_pick(tsdn_t *tsdn, sec_t *sec) {
    95:     if (tsdn_null(tsdn)) {
    98:     tsd_t *tsd = tsdn_tsd(tsdn);
   121: sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
    [all …]
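
sec_shard_pick() above shows another degradation point: the small extent cache (sec) picks a per-thread shard from tsd, and with a NULL handle it falls back to a fixed shard. A sketch of that shape; the hashing is illustrative (jemalloc caches the pick in tsd rather than rehashing):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct tsd_s { uint32_t shard_seed; } tsd_t;
typedef tsd_t tsdn_t; /* simplified "tsd or NULL" handle */

static bool tsdn_null(const tsdn_t *t) { return t == NULL; }
static tsd_t *tsdn_tsd(tsdn_t *t) { return t; }

typedef struct sec_shard_sketch_s { int id; } sec_shard_sketch_t;

static sec_shard_sketch_t *
sec_shard_pick_sketch(tsdn_t *tsdn, sec_shard_sketch_t *shards,
    size_t nshards) {
	if (tsdn_null(tsdn)) {
		/* No thread state: deterministic fallback shard. */
		return &shards[0];
	}
	tsd_t *tsd = tsdn_tsd(tsdn);
	return &shards[tsd->shard_seed % nshards];
}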

pac.c
     7: static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
    10: static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    12: static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    14: static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    16: static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
    38: pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
    49:     if (ecache_init(tsdn, &pac->ecache_dirty, extent_state_dirty, ind,
    57:     if (ecache_init(tsdn, &pac->ecache_muzzy, extent_state_muzzy, ind,
    67:     if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained,
   112: pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
    [all …]

edata_cache.c
    22: edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
    23:     malloc_mutex_lock(tsdn, &edata_cache->mtx);
    26:     malloc_mutex_unlock(tsdn, &edata_cache->mtx);
    27:     return base_alloc_edata(tsdn, edata_cache->base);
    31:     malloc_mutex_unlock(tsdn, &edata_cache->mtx);
    36: edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
    37:     malloc_mutex_lock(tsdn, &edata_cache->mtx);
    40:     malloc_mutex_unlock(tsdn, &edata_cache->mtx);
    44: edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
    45:     malloc_mutex_prefork(tsdn, &edata_cache->mtx);
    [all …]
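
edata_cache.c above is a plain locked freelist: take the mutex, pop a cached edata_t if one is available, otherwise drop the lock before falling back to the slower base allocator. The shape, with illustrative stubs for the list and the fallback:

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct edata_sketch_s { struct edata_sketch_s *next; } edata_sketch_t;

typedef struct edata_cache_sketch_s {
	pthread_mutex_t mtx;
	edata_sketch_t *avail; /* freelist of recycled extent metadata */
} edata_cache_sketch_t;

/* Stand-in for base_alloc_edata(): the slow-path metadata allocator. */
static edata_sketch_t *
base_alloc_edata_stub(void) {
	return calloc(1, sizeof(edata_sketch_t));
}

static edata_sketch_t *
edata_cache_get_sketch(edata_cache_sketch_t *cache) {
	pthread_mutex_lock(&cache->mtx);
	edata_sketch_t *e = cache->avail;
	if (e == NULL) {
		/* Empty: release the lock before the slow path, as in
		 * edata_cache_get() above. */
		pthread_mutex_unlock(&cache->mtx);
		return base_alloc_edata_stub();
	}
	cache->avail = e->next;
	pthread_mutex_unlock(&cache->mtx);
	return e;
}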

pa.c
    32: pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
    43:     if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
    68: pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
    74:     if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai,
    85: pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) {
    88:     sec_disable(tsdn, &shard->hpa_sec);
    89:     hpa_shard_disable(tsdn, &shard->hpa_shard);
    94: pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) {
    97:     sec_flush(tsdn, &shard->hpa_sec);
   107: pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
    [all …]

background_thread.c
    59: void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
    60: void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
    61: void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
    62: void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
    63: bool background_thread_stats_read(tsdn_t *tsdn,
    65: void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
    72: background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
    73:     background_thread_wakeup_time_set(tsdn, info, 0);
   120: background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
   135:     background_thread_wakeup_time_set(tsdn, info,
    [all …]

base.c
    40: base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
    54:     addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
    62: base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
    90:     if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) {
    93:     if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) {
    96:     if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) {
    99:     if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) {
   139: base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
   141:     malloc_mutex_assert_owner(tsdn, &base->mtx);
   243: base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
    [all …]

san_bump.c
    11: san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
    15: san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
    22:     malloc_mutex_lock(tsdn, &sba->mtx);
    32:     bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks,
    45:     edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac,
    58:     malloc_mutex_unlock(tsdn, &sba->mtx);
    65:     extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy);
    68:     san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false,
    71:     if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero,
    73:         extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
    [all …]

/src/contrib/jemalloc/include/jemalloc/internal/

arena_externs.h
    37: void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    40: void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    45: void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
    46: edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    48: void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    50: void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    52: void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    54: bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    57: void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    59: uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
    [all …]

lockedint.h
    32: # define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
    33: # define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
    34: # define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
    35: # define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
    36:     malloc_mutex_postfork_parent(tsdn, &(mu))
    37: # define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
    38:     malloc_mutex_postfork_child(tsdn, &(mu))
    43: # define LOCKEDINT_MTX_LOCK(tsdn, mu)
    44: # define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
    45: # define LOCKEDINT_MTX_PREFORK(tsdn, mu)
    [all …]
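
The empty definitions at lines 43-45 are the point of this header: the same call sites compile both ways. When the locked stats counters genuinely need a mutex, LOCKEDINT_MTX_LOCK expands to malloc_mutex_lock(); when the platform can keep the counters with atomics instead, it expands to nothing. A reduction of the pattern; STATS_NEED_MUTEX is a stand-in for jemalloc's real configuration guard (which keys off 64-bit atomic availability):

/* Illustrative reduction of the lockedint.h pattern. */
#ifdef STATS_NEED_MUTEX
# define LOCKEDINT_MTX_LOCK(tsdn, mu)   malloc_mutex_lock(tsdn, &(mu))
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
#else
/* Counters are atomics; the "lock" compiles away entirely. */
# define LOCKEDINT_MTX_LOCK(tsdn, mu)
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
#endif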

jemalloc_internal_inlines_c.h
    30: iaalloc(tsdn_t *tsdn, const void *ptr) {
    33:     return arena_aalloc(tsdn, ptr);
    37: isalloc(tsdn_t *tsdn, const void *ptr) {
    40:     return arena_salloc(tsdn, ptr);
    44: iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    50:     if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
    51:         witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
    55:     ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
    57:     arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
    69: ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    [all …]

pai.h
     9:     edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
    18:     size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
    21:     bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    24:     bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    26:     void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    29:     void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
    31:     uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
    40: pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
    43:     return self->alloc(tsdn, self, size, alignment, zero, guarded,
    48: pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
    [all …]
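
pai.h (the page allocator interface) is the polymorphism point that pac.c, hpa.c, and sec.c above all implement: a struct of function pointers plus thin pai_alloc()/pai_dalloc() wrappers that forward tsdn and self. A trimmed sketch; the real alloc signature carries additional guarded/deferred-work parameters:

#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s tsd_t;
typedef tsd_t tsdn_t;
typedef struct edata_s edata_t;

/* Vtable each page-allocator backend fills in. */
typedef struct pai_s pai_t;
struct pai_s {
	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
	    size_t alignment, bool zero);
	void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata);
};

/* Thin dispatch wrappers in the style of pai_alloc()/pai_dalloc(). */
static inline edata_t *
pai_alloc_sketch(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
    bool zero) {
	return self->alloc(tsdn, self, size, alignment, zero);
}

static inline void
pai_dalloc_sketch(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
	self->dalloc(tsdn, self, edata);
}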

arena_inlines_b.h
   118: arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
   119:     if (unlikely(tsdn_null(tsdn))) {
   122:     tsd_t *tsd = tsdn_tsd(tsdn);
   135:     arena_decay(tsdn, arena, false, false);
   140: arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
   141:     arena_decay_ticks(tsdn, arena, 1);
   145: arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
   147:     assert(!tsdn_null(tsdn) || tcache == NULL);
   151:         return tcache_alloc_small(tsdn_tsd(tsdn), arena,
   155:         return tcache_alloc_large(tsdn_tsd(tsdn), arena,
    [all …]

ehooks.h
    44: void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
    64: bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
    85: ehooks_pre_reentrancy(tsdn_t *tsdn) {
    86:     tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
    91: ehooks_post_reentrancy(tsdn_t *tsdn) {
    92:     tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
   191: ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
   197:     ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
   200:     ehooks_pre_reentrancy(tsdn);
   203:     ehooks_post_reentrancy(tsdn);
    [all …]
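
ehooks_pre_reentrancy()/ehooks_post_reentrancy() above bracket every call into user-supplied extent hooks: if a hook calls back into malloc, the raised reentrancy level steers the allocator onto a safe path. Note the recovery step: a NULL tsdn forces a tsd_fetch(). A sketch, with a stubbed fetch:

#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s { int reentrancy_level; } tsd_t;
typedef tsd_t tsdn_t; /* simplified "tsd or NULL" handle */

static bool tsdn_null(const tsdn_t *t) { return t == NULL; }
static tsd_t *tsdn_tsd(tsdn_t *t) { return t; }

/* Stand-in for tsd_fetch(): always yields a tsd, initializing if needed. */
static tsd_t *
tsd_fetch_stub(void) {
	static tsd_t tsd; /* illustrative only; real TSD is per-thread */
	return &tsd;
}

static void
pre_reentrancy_sketch(tsdn_t *tsdn) {
	/* User hooks may re-enter malloc; recover a tsd even from NULL. */
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch_stub() : tsdn_tsd(tsdn);
	tsd->reentrancy_level++;
}

static void
post_reentrancy_sketch(tsdn_t *tsdn) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch_stub() : tsdn_tsd(tsdn);
	tsd->reentrancy_level--;
}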

pa.h
   137: bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
   146: bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
   153: void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);
   159: void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);
   166: void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);
   169: edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
   173: bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
   179: bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
   188: void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
   190: bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
    [all …]

emap.h
    14:     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
    37: void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
    40: void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    58: edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
    61: edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
    63: void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    71: bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    93: void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    96: void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
    97: void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
    [all …]

mutex.h
   138: void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
   139: void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
   140: void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
   143: void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
   159: mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
   163:     if (data->prev_owner != tsdn) {
   164:         data->prev_owner = tsdn;
   172: malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
   173:     witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
   179:     mutex_owner_stats_update(tsdn, mutex);
    [all …]
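
mutex.h ties tsdn to jemalloc's lock-order (witness) checking: malloc_mutex_trylock() first asserts, against state reachable from the tsdn, that the calling thread does not already own the mutex, and on success records the owner for profiling stats. A loose reduction; note jemalloc's convention that a true return means failure:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s { int unused; } tsd_t;
typedef tsd_t tsdn_t;

typedef struct malloc_mutex_sketch_s {
	pthread_mutex_t lock;
	const tsdn_t *owner; /* current owner; stand-in for witness data */
} malloc_mutex_sketch_t;

/* Returns true on failure to acquire, matching jemalloc's convention. */
static bool
mutex_trylock_sketch(tsdn_t *tsdn, malloc_mutex_sketch_t *mu) {
	/* Witness check reduced to its essence: this thread must not
	 * already own the mutex (cf. witness_assert_not_owner() above). */
	assert(tsdn == NULL || mu->owner != tsdn);
	if (pthread_mutex_trylock(&mu->lock) != 0) {
		return true;
	}
	mu->owner = tsdn; /* cf. mutex_owner_stats_update() above */
	return false;
}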

extent.h
    22: edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    25: edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    28: void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    30: edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    33: void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
    34: void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    36: void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    38: edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    41: void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    43: void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    [all …]

rtree.h
   134: rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
   180: rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
   239: rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
   242:     uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
   282: rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
   297: rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
   304:     rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
   309: rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
   313:     uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
   338: rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    [all …]
