1 #include "jemalloc/internal/jemalloc_preamble.h"
2 #include "jemalloc/internal/jemalloc_internal_includes.h"
3
4 #include "jemalloc/internal/assert.h"
5 #include "jemalloc/internal/ctl.h"
6 #include "jemalloc/internal/extent_dss.h"
7 #include "jemalloc/internal/extent_mmap.h"
8 #include "jemalloc/internal/inspect.h"
9 #include "jemalloc/internal/mutex.h"
10 #include "jemalloc/internal/nstime.h"
11 #include "jemalloc/internal/peak_event.h"
12 #include "jemalloc/internal/prof_data.h"
13 #include "jemalloc/internal/prof_log.h"
14 #include "jemalloc/internal/prof_recent.h"
15 #include "jemalloc/internal/prof_stats.h"
16 #include "jemalloc/internal/prof_sys.h"
17 #include "jemalloc/internal/safety_check.h"
18 #include "jemalloc/internal/sc.h"
19 #include "jemalloc/internal/util.h"
20
21 /******************************************************************************/
22 /* Data. */
23
/*
 * ctl_mtx protects the following:
 * - ctl_stats->*
 */
static malloc_mutex_t ctl_mtx;
/* True once ctl initialization has completed; guarded by ctl_mtx. */
static bool ctl_initialized;
/* Global stats snapshot exposed via mallctl; contents guarded by ctl_mtx. */
static ctl_stats_t *ctl_stats;
/* Arena-related ctl state; see ctl_arenas_t for layout. */
static ctl_arenas_t *ctl_arenas;
32
33 /******************************************************************************/
34 /* Helpers for named and indexed nodes. */
35
36 static const ctl_named_node_t *
ctl_named_node(const ctl_node_t * node)37 ctl_named_node(const ctl_node_t *node) {
38 return ((node->named) ? (const ctl_named_node_t *)node : NULL);
39 }
40
41 static const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t * node,size_t index)42 ctl_named_children(const ctl_named_node_t *node, size_t index) {
43 const ctl_named_node_t *children = ctl_named_node(node->children);
44
45 return (children ? &children[index] : NULL);
46 }
47
48 static const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t * node)49 ctl_indexed_node(const ctl_node_t *node) {
50 return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
51 }
52
53 /******************************************************************************/
54 /* Function prototypes for non-inline static functions. */
55
/*
 * Declares the handler for a terminal (leaf) mallctl node; the signature
 * mirrors the mib-based mallctl entry points (old/new value in/out).
 */
#define CTL_PROTO(n) \
static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

/* Declares the index resolver for an indexed node (e.g. "arena.<i>"). */
#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
    const size_t *mib, size_t miblen, size_t i);
63
64 CTL_PROTO(version)
65 CTL_PROTO(epoch)
66 CTL_PROTO(background_thread)
67 CTL_PROTO(max_background_threads)
68 CTL_PROTO(thread_tcache_enabled)
69 CTL_PROTO(thread_tcache_flush)
70 CTL_PROTO(thread_peak_read)
71 CTL_PROTO(thread_peak_reset)
72 CTL_PROTO(thread_prof_name)
73 CTL_PROTO(thread_prof_active)
74 CTL_PROTO(thread_arena)
75 CTL_PROTO(thread_allocated)
76 CTL_PROTO(thread_allocatedp)
77 CTL_PROTO(thread_deallocated)
78 CTL_PROTO(thread_deallocatedp)
79 CTL_PROTO(thread_idle)
80 CTL_PROTO(config_cache_oblivious)
81 CTL_PROTO(config_debug)
82 CTL_PROTO(config_fill)
83 CTL_PROTO(config_lazy_lock)
84 CTL_PROTO(config_malloc_conf)
85 CTL_PROTO(config_opt_safety_checks)
86 CTL_PROTO(config_prof)
87 CTL_PROTO(config_prof_libgcc)
88 CTL_PROTO(config_prof_libunwind)
89 CTL_PROTO(config_stats)
90 CTL_PROTO(config_utrace)
91 CTL_PROTO(config_xmalloc)
92 CTL_PROTO(opt_abort)
93 CTL_PROTO(opt_abort_conf)
94 CTL_PROTO(opt_cache_oblivious)
95 CTL_PROTO(opt_trust_madvise)
96 CTL_PROTO(opt_confirm_conf)
97 CTL_PROTO(opt_hpa)
98 CTL_PROTO(opt_hpa_slab_max_alloc)
99 CTL_PROTO(opt_hpa_hugification_threshold)
100 CTL_PROTO(opt_hpa_hugify_delay_ms)
101 CTL_PROTO(opt_hpa_min_purge_interval_ms)
102 CTL_PROTO(opt_hpa_dirty_mult)
103 CTL_PROTO(opt_hpa_sec_nshards)
104 CTL_PROTO(opt_hpa_sec_max_alloc)
105 CTL_PROTO(opt_hpa_sec_max_bytes)
106 CTL_PROTO(opt_hpa_sec_bytes_after_flush)
107 CTL_PROTO(opt_hpa_sec_batch_fill_extra)
108 CTL_PROTO(opt_metadata_thp)
109 CTL_PROTO(opt_retain)
110 CTL_PROTO(opt_dss)
111 CTL_PROTO(opt_narenas)
112 CTL_PROTO(opt_percpu_arena)
113 CTL_PROTO(opt_oversize_threshold)
114 CTL_PROTO(opt_background_thread)
115 CTL_PROTO(opt_mutex_max_spin)
116 CTL_PROTO(opt_max_background_threads)
117 CTL_PROTO(opt_dirty_decay_ms)
118 CTL_PROTO(opt_muzzy_decay_ms)
119 CTL_PROTO(opt_stats_print)
120 CTL_PROTO(opt_stats_print_opts)
121 CTL_PROTO(opt_stats_interval)
122 CTL_PROTO(opt_stats_interval_opts)
123 CTL_PROTO(opt_junk)
124 CTL_PROTO(opt_zero)
125 CTL_PROTO(opt_utrace)
126 CTL_PROTO(opt_xmalloc)
127 CTL_PROTO(opt_experimental_infallible_new)
128 CTL_PROTO(opt_tcache)
129 CTL_PROTO(opt_tcache_max)
130 CTL_PROTO(opt_tcache_nslots_small_min)
131 CTL_PROTO(opt_tcache_nslots_small_max)
132 CTL_PROTO(opt_tcache_nslots_large)
133 CTL_PROTO(opt_lg_tcache_nslots_mul)
134 CTL_PROTO(opt_tcache_gc_incr_bytes)
135 CTL_PROTO(opt_tcache_gc_delay_bytes)
136 CTL_PROTO(opt_lg_tcache_flush_small_div)
137 CTL_PROTO(opt_lg_tcache_flush_large_div)
138 CTL_PROTO(opt_thp)
139 CTL_PROTO(opt_lg_extent_max_active_fit)
140 CTL_PROTO(opt_prof)
141 CTL_PROTO(opt_prof_prefix)
142 CTL_PROTO(opt_prof_active)
143 CTL_PROTO(opt_prof_thread_active_init)
144 CTL_PROTO(opt_lg_prof_sample)
145 CTL_PROTO(opt_lg_prof_interval)
146 CTL_PROTO(opt_prof_gdump)
147 CTL_PROTO(opt_prof_final)
148 CTL_PROTO(opt_prof_leak)
149 CTL_PROTO(opt_prof_leak_error)
150 CTL_PROTO(opt_prof_accum)
151 CTL_PROTO(opt_prof_recent_alloc_max)
152 CTL_PROTO(opt_prof_stats)
153 CTL_PROTO(opt_prof_sys_thread_name)
154 CTL_PROTO(opt_prof_time_res)
155 CTL_PROTO(opt_lg_san_uaf_align)
156 CTL_PROTO(opt_zero_realloc)
157 CTL_PROTO(tcache_create)
158 CTL_PROTO(tcache_flush)
159 CTL_PROTO(tcache_destroy)
160 CTL_PROTO(arena_i_initialized)
161 CTL_PROTO(arena_i_decay)
162 CTL_PROTO(arena_i_purge)
163 CTL_PROTO(arena_i_reset)
164 CTL_PROTO(arena_i_destroy)
165 CTL_PROTO(arena_i_dss)
166 CTL_PROTO(arena_i_oversize_threshold)
167 CTL_PROTO(arena_i_dirty_decay_ms)
168 CTL_PROTO(arena_i_muzzy_decay_ms)
169 CTL_PROTO(arena_i_extent_hooks)
170 CTL_PROTO(arena_i_retain_grow_limit)
171 INDEX_PROTO(arena_i)
172 CTL_PROTO(arenas_bin_i_size)
173 CTL_PROTO(arenas_bin_i_nregs)
174 CTL_PROTO(arenas_bin_i_slab_size)
175 CTL_PROTO(arenas_bin_i_nshards)
176 INDEX_PROTO(arenas_bin_i)
177 CTL_PROTO(arenas_lextent_i_size)
178 INDEX_PROTO(arenas_lextent_i)
179 CTL_PROTO(arenas_narenas)
180 CTL_PROTO(arenas_dirty_decay_ms)
181 CTL_PROTO(arenas_muzzy_decay_ms)
182 CTL_PROTO(arenas_quantum)
183 CTL_PROTO(arenas_page)
184 CTL_PROTO(arenas_tcache_max)
185 CTL_PROTO(arenas_nbins)
186 CTL_PROTO(arenas_nhbins)
187 CTL_PROTO(arenas_nlextents)
188 CTL_PROTO(arenas_create)
189 CTL_PROTO(arenas_lookup)
190 CTL_PROTO(prof_thread_active_init)
191 CTL_PROTO(prof_active)
192 CTL_PROTO(prof_dump)
193 CTL_PROTO(prof_gdump)
194 CTL_PROTO(prof_prefix)
195 CTL_PROTO(prof_reset)
196 CTL_PROTO(prof_interval)
197 CTL_PROTO(lg_prof_sample)
198 CTL_PROTO(prof_log_start)
199 CTL_PROTO(prof_log_stop)
200 CTL_PROTO(prof_stats_bins_i_live)
201 CTL_PROTO(prof_stats_bins_i_accum)
202 INDEX_PROTO(prof_stats_bins_i)
203 CTL_PROTO(prof_stats_lextents_i_live)
204 CTL_PROTO(prof_stats_lextents_i_accum)
205 INDEX_PROTO(prof_stats_lextents_i)
206 CTL_PROTO(stats_arenas_i_small_allocated)
207 CTL_PROTO(stats_arenas_i_small_nmalloc)
208 CTL_PROTO(stats_arenas_i_small_ndalloc)
209 CTL_PROTO(stats_arenas_i_small_nrequests)
210 CTL_PROTO(stats_arenas_i_small_nfills)
211 CTL_PROTO(stats_arenas_i_small_nflushes)
212 CTL_PROTO(stats_arenas_i_large_allocated)
213 CTL_PROTO(stats_arenas_i_large_nmalloc)
214 CTL_PROTO(stats_arenas_i_large_ndalloc)
215 CTL_PROTO(stats_arenas_i_large_nrequests)
216 CTL_PROTO(stats_arenas_i_large_nfills)
217 CTL_PROTO(stats_arenas_i_large_nflushes)
218 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
219 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
220 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
221 CTL_PROTO(stats_arenas_i_bins_j_curregs)
222 CTL_PROTO(stats_arenas_i_bins_j_nfills)
223 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
224 CTL_PROTO(stats_arenas_i_bins_j_nslabs)
225 CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
226 CTL_PROTO(stats_arenas_i_bins_j_curslabs)
227 CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs)
228 INDEX_PROTO(stats_arenas_i_bins_j)
229 CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
230 CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
231 CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
232 CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
233 INDEX_PROTO(stats_arenas_i_lextents_j)
234 CTL_PROTO(stats_arenas_i_extents_j_ndirty)
235 CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
236 CTL_PROTO(stats_arenas_i_extents_j_nretained)
237 CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
238 CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
239 CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
240 INDEX_PROTO(stats_arenas_i_extents_j)
241 CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
242 CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
243 CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
244 CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
245
246 /* We have a set of stats for full slabs. */
247 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
248 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
249 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
250 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
251 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)
252 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)
253
254 /* A parallel set for the empty slabs. */
255 CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)
256 CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)
257 CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)
258 CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)
259 CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)
260 CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)
261
262 /*
263 * And one for the slabs that are neither empty nor full, but indexed by how
264 * full they are.
265 */
266 CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)
267 CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)
268 CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)
269 CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)
270 CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
271 CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
272
273 INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
274 CTL_PROTO(stats_arenas_i_nthreads)
275 CTL_PROTO(stats_arenas_i_uptime)
276 CTL_PROTO(stats_arenas_i_dss)
277 CTL_PROTO(stats_arenas_i_dirty_decay_ms)
278 CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
279 CTL_PROTO(stats_arenas_i_pactive)
280 CTL_PROTO(stats_arenas_i_pdirty)
281 CTL_PROTO(stats_arenas_i_pmuzzy)
282 CTL_PROTO(stats_arenas_i_mapped)
283 CTL_PROTO(stats_arenas_i_retained)
284 CTL_PROTO(stats_arenas_i_extent_avail)
285 CTL_PROTO(stats_arenas_i_dirty_npurge)
286 CTL_PROTO(stats_arenas_i_dirty_nmadvise)
287 CTL_PROTO(stats_arenas_i_dirty_purged)
288 CTL_PROTO(stats_arenas_i_muzzy_npurge)
289 CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
290 CTL_PROTO(stats_arenas_i_muzzy_purged)
291 CTL_PROTO(stats_arenas_i_base)
292 CTL_PROTO(stats_arenas_i_internal)
293 CTL_PROTO(stats_arenas_i_metadata_thp)
294 CTL_PROTO(stats_arenas_i_tcache_bytes)
295 CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
296 CTL_PROTO(stats_arenas_i_resident)
297 CTL_PROTO(stats_arenas_i_abandoned_vm)
298 CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
299 INDEX_PROTO(stats_arenas_i)
300 CTL_PROTO(stats_allocated)
301 CTL_PROTO(stats_active)
302 CTL_PROTO(stats_background_thread_num_threads)
303 CTL_PROTO(stats_background_thread_num_runs)
304 CTL_PROTO(stats_background_thread_run_interval)
305 CTL_PROTO(stats_metadata)
306 CTL_PROTO(stats_metadata_thp)
307 CTL_PROTO(stats_resident)
308 CTL_PROTO(stats_mapped)
309 CTL_PROTO(stats_retained)
310 CTL_PROTO(stats_zero_reallocs)
311 CTL_PROTO(experimental_hooks_install)
312 CTL_PROTO(experimental_hooks_remove)
313 CTL_PROTO(experimental_hooks_prof_backtrace)
314 CTL_PROTO(experimental_hooks_prof_dump)
315 CTL_PROTO(experimental_hooks_safety_check_abort)
316 CTL_PROTO(experimental_thread_activity_callback)
317 CTL_PROTO(experimental_utilization_query)
318 CTL_PROTO(experimental_utilization_batch_query)
319 CTL_PROTO(experimental_arenas_i_pactivep)
320 INDEX_PROTO(experimental_arenas_i)
321 CTL_PROTO(experimental_prof_recent_alloc_max)
322 CTL_PROTO(experimental_prof_recent_alloc_dump)
323 CTL_PROTO(experimental_batch_alloc)
324 CTL_PROTO(experimental_arenas_create_ext)
325
/*
 * Declares the full set of per-mutex stats leaf handlers (one CTL_PROTO per
 * mutex profiling counter) for the given node-name infix n.
 */
#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
CTL_PROTO(stats_##n##_num_wait) \
CTL_PROTO(stats_##n##_num_spin_acq) \
CTL_PROTO(stats_##n##_num_owner_switch) \
CTL_PROTO(stats_##n##_total_wait_time) \
CTL_PROTO(stats_##n##_max_wait_time) \
CTL_PROTO(stats_##n##_max_num_thds)
334
335 /* Global mutexes. */
336 #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
337 MUTEX_PROF_GLOBAL_MUTEXES
338 #undef OP
339
340 /* Per arena mutexes. */
341 #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
342 MUTEX_PROF_ARENA_MUTEXES
343 #undef OP
344
345 /* Arena bin mutexes. */
346 MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
347 #undef MUTEX_STATS_CTL_PROTO_GEN
348
349 CTL_PROTO(stats_mutexes_reset)
350
351 /******************************************************************************/
352 /* mallctl tree. */
353
/* Initializer for a named node: {named = true} tag plus the node's name. */
#define NAME(n) {true}, n
/* Initializer for an interior node: child count, child array, no handler. */
#define CHILD(t, c) \
sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
(ctl_node_t *)c##_node, \
NULL
/* Initializer for a leaf node: no children, just the ctl handler. */
#define CTL(c) 0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i) {false}, i##_index
366
/* mallctl "thread.tcache.*" subtree. */
static const ctl_named_node_t thread_tcache_node[] = {
	{NAME("enabled"), CTL(thread_tcache_enabled)},
	{NAME("flush"), CTL(thread_tcache_flush)}
};

/* mallctl "thread.peak.*" subtree. */
static const ctl_named_node_t thread_peak_node[] = {
	{NAME("read"), CTL(thread_peak_read)},
	{NAME("reset"), CTL(thread_peak_reset)},
};

/* mallctl "thread.prof.*" subtree. */
static const ctl_named_node_t thread_prof_node[] = {
	{NAME("name"), CTL(thread_prof_name)},
	{NAME("active"), CTL(thread_prof_active)}
};

/* mallctl "thread.*" subtree. */
static const ctl_named_node_t thread_node[] = {
	{NAME("arena"), CTL(thread_arena)},
	{NAME("allocated"), CTL(thread_allocated)},
	{NAME("allocatedp"), CTL(thread_allocatedp)},
	{NAME("deallocated"), CTL(thread_deallocated)},
	{NAME("deallocatedp"), CTL(thread_deallocatedp)},
	{NAME("tcache"), CHILD(named, thread_tcache)},
	{NAME("peak"), CHILD(named, thread_peak)},
	{NAME("prof"), CHILD(named, thread_prof)},
	{NAME("idle"), CTL(thread_idle)}
};

/* mallctl "config.*" subtree: compile-time configuration flags. */
static const ctl_named_node_t config_node[] = {
	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
	{NAME("debug"), CTL(config_debug)},
	{NAME("fill"), CTL(config_fill)},
	{NAME("lazy_lock"), CTL(config_lazy_lock)},
	{NAME("malloc_conf"), CTL(config_malloc_conf)},
	{NAME("opt_safety_checks"), CTL(config_opt_safety_checks)},
	{NAME("prof"), CTL(config_prof)},
	{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"), CTL(config_stats)},
	{NAME("utrace"), CTL(config_utrace)},
	{NAME("xmalloc"), CTL(config_xmalloc)}
};
408
/* mallctl "opt.*" subtree: run-time option values (read-only). */
static const ctl_named_node_t opt_node[] = {
	{NAME("abort"), CTL(opt_abort)},
	{NAME("abort_conf"), CTL(opt_abort_conf)},
	{NAME("cache_oblivious"), CTL(opt_cache_oblivious)},
	{NAME("trust_madvise"), CTL(opt_trust_madvise)},
	{NAME("confirm_conf"), CTL(opt_confirm_conf)},
	{NAME("hpa"), CTL(opt_hpa)},
	{NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
	{NAME("hpa_hugification_threshold"),
	    CTL(opt_hpa_hugification_threshold)},
	{NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
	{NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
	{NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
	{NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
	{NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
	{NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
	{NAME("hpa_sec_bytes_after_flush"),
	    CTL(opt_hpa_sec_bytes_after_flush)},
	{NAME("hpa_sec_batch_fill_extra"),
	    CTL(opt_hpa_sec_batch_fill_extra)},
	{NAME("metadata_thp"), CTL(opt_metadata_thp)},
	{NAME("retain"), CTL(opt_retain)},
	{NAME("dss"), CTL(opt_dss)},
	{NAME("narenas"), CTL(opt_narenas)},
	{NAME("percpu_arena"), CTL(opt_percpu_arena)},
	{NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
	{NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)},
	{NAME("background_thread"), CTL(opt_background_thread)},
	{NAME("max_background_threads"), CTL(opt_max_background_threads)},
	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
	{NAME("stats_print"), CTL(opt_stats_print)},
	{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
	{NAME("stats_interval"), CTL(opt_stats_interval)},
	{NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
	{NAME("junk"), CTL(opt_junk)},
	{NAME("zero"), CTL(opt_zero)},
	{NAME("utrace"), CTL(opt_utrace)},
	{NAME("xmalloc"), CTL(opt_xmalloc)},
	{NAME("experimental_infallible_new"),
	    CTL(opt_experimental_infallible_new)},
	{NAME("tcache"), CTL(opt_tcache)},
	{NAME("tcache_max"), CTL(opt_tcache_max)},
	{NAME("tcache_nslots_small_min"),
	    CTL(opt_tcache_nslots_small_min)},
	{NAME("tcache_nslots_small_max"),
	    CTL(opt_tcache_nslots_small_max)},
	{NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)},
	{NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)},
	{NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)},
	{NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)},
	{NAME("lg_tcache_flush_small_div"),
	    CTL(opt_lg_tcache_flush_small_div)},
	{NAME("lg_tcache_flush_large_div"),
	    CTL(opt_lg_tcache_flush_large_div)},
	{NAME("thp"), CTL(opt_thp)},
	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
	{NAME("prof"), CTL(opt_prof)},
	{NAME("prof_prefix"), CTL(opt_prof_prefix)},
	{NAME("prof_active"), CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"), CTL(opt_prof_gdump)},
	{NAME("prof_final"), CTL(opt_prof_final)},
	{NAME("prof_leak"), CTL(opt_prof_leak)},
	{NAME("prof_leak_error"), CTL(opt_prof_leak_error)},
	{NAME("prof_accum"), CTL(opt_prof_accum)},
	{NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)},
	{NAME("prof_stats"), CTL(opt_prof_stats)},
	{NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)},
	/* Note: node name differs from the handler's "time_res" suffix. */
	{NAME("prof_time_resolution"), CTL(opt_prof_time_res)},
	{NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)},
	{NAME("zero_realloc"), CTL(opt_zero_realloc)}
};
484
/* mallctl "tcache.*" subtree: explicit tcache management. */
static const ctl_named_node_t tcache_node[] = {
	{NAME("create"), CTL(tcache_create)},
	{NAME("flush"), CTL(tcache_flush)},
	{NAME("destroy"), CTL(tcache_destroy)}
};

/* mallctl "arena.<i>.*" subtree: per-arena operations. */
static const ctl_named_node_t arena_i_node[] = {
	{NAME("initialized"), CTL(arena_i_initialized)},
	{NAME("decay"), CTL(arena_i_decay)},
	{NAME("purge"), CTL(arena_i_purge)},
	{NAME("reset"), CTL(arena_i_reset)},
	{NAME("destroy"), CTL(arena_i_destroy)},
	{NAME("dss"), CTL(arena_i_dss)},
	/*
	 * Undocumented for now, since we anticipate an arena API in flux after
	 * we cut the last 5-series release.
	 */
	{NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
	{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
	{NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
};
/* Single unnamed wrapper returned by the index function for "arena.<i>". */
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""), CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

/* mallctl "arenas.bin.<i>.*" subtree: per-size-class bin metadata. */
static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"), CTL(arenas_bin_i_size)},
	{NAME("nregs"), CTL(arenas_bin_i_nregs)},
	{NAME("slab_size"), CTL(arenas_bin_i_slab_size)},
	{NAME("nshards"), CTL(arenas_bin_i_nshards)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""), CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

/* mallctl "arenas.lextent.<i>.*" subtree: large size-class metadata. */
static const ctl_named_node_t arenas_lextent_i_node[] = {
	{NAME("size"), CTL(arenas_lextent_i_size)}
};
static const ctl_named_node_t super_arenas_lextent_i_node[] = {
	{NAME(""), CHILD(named, arenas_lextent_i)}
};

static const ctl_indexed_node_t arenas_lextent_node[] = {
	{INDEX(arenas_lextent_i)}
};

/* mallctl "arenas.*" subtree. */
static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"), CTL(arenas_narenas)},
	{NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
	{NAME("quantum"), CTL(arenas_quantum)},
	{NAME("page"), CTL(arenas_page)},
	{NAME("tcache_max"), CTL(arenas_tcache_max)},
	{NAME("nbins"), CTL(arenas_nbins)},
	{NAME("nhbins"), CTL(arenas_nhbins)},
	{NAME("bin"), CHILD(indexed, arenas_bin)},
	{NAME("nlextents"), CTL(arenas_nlextents)},
	{NAME("lextent"), CHILD(indexed, arenas_lextent)},
	{NAME("create"), CTL(arenas_create)},
	{NAME("lookup"), CTL(arenas_lookup)}
};
556
/* mallctl "prof.stats.bins.<i>.*" subtree. */
static const ctl_named_node_t prof_stats_bins_i_node[] = {
	{NAME("live"), CTL(prof_stats_bins_i_live)},
	{NAME("accum"), CTL(prof_stats_bins_i_accum)}
};

static const ctl_named_node_t super_prof_stats_bins_i_node[] = {
	{NAME(""), CHILD(named, prof_stats_bins_i)}
};

static const ctl_indexed_node_t prof_stats_bins_node[] = {
	{INDEX(prof_stats_bins_i)}
};

/* mallctl "prof.stats.lextents.<i>.*" subtree. */
static const ctl_named_node_t prof_stats_lextents_i_node[] = {
	{NAME("live"), CTL(prof_stats_lextents_i_live)},
	{NAME("accum"), CTL(prof_stats_lextents_i_accum)}
};

static const ctl_named_node_t super_prof_stats_lextents_i_node[] = {
	{NAME(""), CHILD(named, prof_stats_lextents_i)}
};

static const ctl_indexed_node_t prof_stats_lextents_node[] = {
	{INDEX(prof_stats_lextents_i)}
};

static const ctl_named_node_t prof_stats_node[] = {
	{NAME("bins"), CHILD(indexed, prof_stats_bins)},
	{NAME("lextents"), CHILD(indexed, prof_stats_lextents)},
};

/* mallctl "prof.*" subtree: heap profiling control. */
static const ctl_named_node_t prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"), CTL(prof_active)},
	{NAME("dump"), CTL(prof_dump)},
	{NAME("gdump"), CTL(prof_gdump)},
	{NAME("prefix"), CTL(prof_prefix)},
	{NAME("reset"), CTL(prof_reset)},
	{NAME("interval"), CTL(prof_interval)},
	/* Note: handler name is lg_prof_sample, node name is "lg_sample". */
	{NAME("lg_sample"), CTL(lg_prof_sample)},
	{NAME("log_start"), CTL(prof_log_start)},
	{NAME("log_stop"), CTL(prof_log_stop)},
	{NAME("stats"), CHILD(named, prof_stats)}
};
601
/* mallctl "stats.arenas.<i>.small.*" subtree. */
static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)},
	{NAME("nfills"), CTL(stats_arenas_i_small_nfills)},
	{NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)}
};

/* mallctl "stats.arenas.<i>.large.*" subtree. */
static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)},
	{NAME("nfills"), CTL(stats_arenas_i_large_nfills)},
	{NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)}
};
619
/*
 * Defines the stats node array for one profiled mutex: one leaf per
 * mutex profiling counter, under the given node-name prefix.
 */
#define MUTEX_PROF_DATA_NODE(prefix) \
static const ctl_named_node_t stats_##prefix##_node[] = { \
    {NAME("num_ops"), \
        CTL(stats_##prefix##_num_ops)}, \
    {NAME("num_wait"), \
        CTL(stats_##prefix##_num_wait)}, \
    {NAME("num_spin_acq"), \
        CTL(stats_##prefix##_num_spin_acq)}, \
    {NAME("num_owner_switch"), \
        CTL(stats_##prefix##_num_owner_switch)}, \
    {NAME("total_wait_time"), \
        CTL(stats_##prefix##_total_wait_time)}, \
    {NAME("max_wait_time"), \
        CTL(stats_##prefix##_max_wait_time)}, \
    {NAME("max_num_thds"), \
        CTL(stats_##prefix##_max_num_thds)} \
    /* Note that # of current waiting thread not provided. */ \
};
638
MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)

/* mallctl "stats.arenas.<i>.bins.<j>.*" subtree. */
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
	{NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
	{NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
	{NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)},
	{NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
};

static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

/* mallctl "stats.arenas.<i>.lextents.<j>.*" subtree. */
static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
	{NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
	{NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
};
static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
	{INDEX(stats_arenas_i_lextents_j)}
};

/* mallctl "stats.arenas.<i>.extents.<j>.*" subtree. */
static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
	{NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)},
	{NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)},
	{NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)},
	{NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)},
	{NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)},
	{NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
};

static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_extents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
	{INDEX(stats_arenas_i_extents_j)}
};
693
/* Instantiate one stats node array per profiled arena mutex. */
#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* mallctl "stats.arenas.<i>.mutexes.*": one child per arena mutex. */
static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
703
/* mallctl "stats.arenas.<i>.hpa_shard.full_slabs.*" subtree. */
static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
	{NAME("npageslabs_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
	{NAME("npageslabs_huge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)},
	{NAME("nactive_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)},
	{NAME("nactive_huge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)},
	{NAME("ndirty_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)},
	{NAME("ndirty_huge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}
};

/* Parallel subtree for empty slabs. */
static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = {
	{NAME("npageslabs_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)},
	{NAME("npageslabs_huge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)},
	{NAME("nactive_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)},
	{NAME("nactive_huge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)},
	{NAME("ndirty_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)},
	{NAME("ndirty_huge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}
};

/* Indexed subtree for slabs that are neither empty nor full. */
static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
	{NAME("npageslabs_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)},
	{NAME("npageslabs_huge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)},
	{NAME("nactive_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)},
	{NAME("nactive_huge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)},
	{NAME("ndirty_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)},
	{NAME("ndirty_huge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}
};

static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
	{NAME(""),
	    CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}
};

static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
{
	{INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}
};

/* mallctl "stats.arenas.<i>.hpa_shard.*" subtree. */
static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
	{NAME("full_slabs"), CHILD(named,
	    stats_arenas_i_hpa_shard_full_slabs)},
	{NAME("empty_slabs"), CHILD(named,
	    stats_arenas_i_hpa_shard_empty_slabs)},
	{NAME("nonfull_slabs"), CHILD(indexed,
	    stats_arenas_i_hpa_shard_nonfull_slabs)},

	{NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
	{NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
	{NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
	{NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
};
772
/*
 * stats.arenas.<i>.* nodes: per-arena statistics leaves plus the named /
 * indexed subtrees (small, large, bins, lextents, extents, mutexes,
 * hpa_shard).  Entry order defines the numeric MIB components, so do not
 * reorder.
 */
static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
	{NAME("uptime"),	CTL(stats_arenas_i_uptime)},
	{NAME("dss"),		CTL(stats_arenas_i_dss)},
	{NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
	{NAME("pmuzzy"),	CTL(stats_arenas_i_pmuzzy)},
	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
	{NAME("retained"),	CTL(stats_arenas_i_retained)},
	{NAME("extent_avail"),	CTL(stats_arenas_i_extent_avail)},
	{NAME("dirty_npurge"),	CTL(stats_arenas_i_dirty_npurge)},
	{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
	{NAME("dirty_purged"),	CTL(stats_arenas_i_dirty_purged)},
	{NAME("muzzy_npurge"),	CTL(stats_arenas_i_muzzy_npurge)},
	{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
	{NAME("muzzy_purged"),	CTL(stats_arenas_i_muzzy_purged)},
	{NAME("base"),		CTL(stats_arenas_i_base)},
	{NAME("internal"),	CTL(stats_arenas_i_internal)},
	{NAME("metadata_thp"),	CTL(stats_arenas_i_metadata_thp)},
	{NAME("tcache_bytes"),	CTL(stats_arenas_i_tcache_bytes)},
	{NAME("tcache_stashed_bytes"),
	    CTL(stats_arenas_i_tcache_stashed_bytes)},
	{NAME("resident"),	CTL(stats_arenas_i_resident)},
	{NAME("abandoned_vm"),	CTL(stats_arenas_i_abandoned_vm)},
	{NAME("hpa_sec_bytes"),	CTL(stats_arenas_i_hpa_sec_bytes)},
	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lextents"),	CHILD(indexed, stats_arenas_i_lextents)},
	{NAME("extents"),	CHILD(indexed, stats_arenas_i_extents)},
	{NAME("mutexes"),	CHILD(named, stats_arenas_i_mutexes)},
	{NAME("hpa_shard"),	CHILD(named, stats_arenas_i_hpa_shard)}
};
/* Anonymous ("") wrapper returned by the indexed stats.arenas node below. */
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

/* stats.background_thread.* leaves. */
static const ctl_named_node_t stats_background_thread_node[] = {
	{NAME("num_threads"),	CTL(stats_background_thread_num_threads)},
	{NAME("num_runs"),	CTL(stats_background_thread_num_runs)},
	{NAME("run_interval"),	CTL(stats_background_thread_run_interval)}
};
821
/* Expand one per-mutex data subtree for each profiled global mutex. */
#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* stats.mutexes.*: one subtree per global mutex, plus the reset leaf. */
static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	{NAME("reset"),		CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE

/* Top-level stats.* leaves and subtrees. */
static const ctl_named_node_t stats_node[] = {
	{NAME("allocated"),	CTL(stats_allocated)},
	{NAME("active"),	CTL(stats_active)},
	{NAME("metadata"),	CTL(stats_metadata)},
	{NAME("metadata_thp"),	CTL(stats_metadata_thp)},
	{NAME("resident"),	CTL(stats_resident)},
	{NAME("mapped"),	CTL(stats_mapped)},
	{NAME("retained"),	CTL(stats_retained)},
	{NAME("background_thread"),
	    CHILD(named, stats_background_thread)},
	{NAME("mutexes"),	CHILD(named, stats_mutexes)},
	{NAME("arenas"),	CHILD(indexed, stats_arenas)},
	{NAME("zero_reallocs"),	CTL(stats_zero_reallocs)},
};
848
/* experimental.hooks.* leaves. */
static const ctl_named_node_t experimental_hooks_node[] = {
	{NAME("install"),	CTL(experimental_hooks_install)},
	{NAME("remove"),	CTL(experimental_hooks_remove)},
	{NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)},
	{NAME("prof_dump"),	CTL(experimental_hooks_prof_dump)},
	{NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)},
};

/* experimental.thread.* leaves. */
static const ctl_named_node_t experimental_thread_node[] = {
	{NAME("activity_callback"),
	    CTL(experimental_thread_activity_callback)}
};

/* experimental.utilization.* leaves. */
static const ctl_named_node_t experimental_utilization_node[] = {
	{NAME("query"),		CTL(experimental_utilization_query)},
	{NAME("batch_query"),	CTL(experimental_utilization_batch_query)}
};

/* experimental.arenas.<i>.* leaf plus its anonymous wrapper. */
static const ctl_named_node_t experimental_arenas_i_node[] = {
	{NAME("pactivep"),	CTL(experimental_arenas_i_pactivep)}
};
static const ctl_named_node_t super_experimental_arenas_i_node[] = {
	{NAME(""),		CHILD(named, experimental_arenas_i)}
};

static const ctl_indexed_node_t experimental_arenas_node[] = {
	{INDEX(experimental_arenas_i)}
};

/* experimental.prof_recent.* leaves. */
static const ctl_named_node_t experimental_prof_recent_node[] = {
	{NAME("alloc_max"),	CTL(experimental_prof_recent_alloc_max)},
	{NAME("alloc_dump"),	CTL(experimental_prof_recent_alloc_dump)},
};

/* Top-level experimental.* subtrees and leaves. */
static const ctl_named_node_t experimental_node[] = {
	{NAME("hooks"),		CHILD(named, experimental_hooks)},
	{NAME("utilization"),	CHILD(named, experimental_utilization)},
	{NAME("arenas"),	CHILD(indexed, experimental_arenas)},
	{NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
	{NAME("prof_recent"),	CHILD(named, experimental_prof_recent)},
	{NAME("batch_alloc"),	CTL(experimental_batch_alloc)},
	{NAME("thread"),	CHILD(named, experimental_thread)}
};
892
/* Top level of the mallctl namespace. */
static const ctl_named_node_t root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("background_thread"),	CTL(background_thread)},
	{NAME("max_background_threads"),	CTL(max_background_threads)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),		CHILD(named, opt)},
	{NAME("tcache"),	CHILD(named, tcache)},
	{NAME("arena"),		CHILD(indexed, arena)},
	{NAME("arenas"),	CHILD(named, arenas)},
	{NAME("prof"),		CHILD(named, prof)},
	{NAME("stats"),		CHILD(named, stats)},
	{NAME("experimental"),	CHILD(named, experimental)}
};
/* Single-entry ("") wrapper that anchors all name/MIB lookups. */
static const ctl_named_node_t super_root_node[] = {
	{NAME(""),		CHILD(named, root)}
};
911
912 #undef NAME
913 #undef CHILD
914 #undef CTL
915 #undef INDEX
916
917 /******************************************************************************/
918
919 /*
920 * Sets *dst + *src non-atomically. This is safe, since everything is
921 * synchronized by the ctl mutex.
922 */
923 static void
ctl_accum_locked_u64(locked_u64_t * dst,locked_u64_t * src)924 ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
925 locked_inc_u64_unsynchronized(dst,
926 locked_read_u64_unsynchronized(src));
927 }
928
929 static void
ctl_accum_atomic_zu(atomic_zu_t * dst,atomic_zu_t * src)930 ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
931 size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
932 size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
933 atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
934 }
935
936 /******************************************************************************/
937
938 static unsigned
arenas_i2a_impl(size_t i,bool compat,bool validate)939 arenas_i2a_impl(size_t i, bool compat, bool validate) {
940 unsigned a;
941
942 switch (i) {
943 case MALLCTL_ARENAS_ALL:
944 a = 0;
945 break;
946 case MALLCTL_ARENAS_DESTROYED:
947 a = 1;
948 break;
949 default:
950 if (compat && i == ctl_arenas->narenas) {
951 /*
952 * Provide deprecated backward compatibility for
953 * accessing the merged stats at index narenas rather
954 * than via MALLCTL_ARENAS_ALL. This is scheduled for
955 * removal in 6.0.0.
956 */
957 a = 0;
958 } else if (validate && i >= ctl_arenas->narenas) {
959 a = UINT_MAX;
960 } else {
961 /*
962 * This function should never be called for an index
963 * more than one past the range of indices that have
964 * initialized ctl data.
965 */
966 assert(i < ctl_arenas->narenas || (!validate && i ==
967 ctl_arenas->narenas));
968 a = (unsigned)i + 2;
969 }
970 break;
971 }
972
973 return a;
974 }
975
/* Index-to-slot conversion with compat enabled and no range validation. */
static unsigned
arenas_i2a(size_t i) {
	return arenas_i2a_impl(i, true, false);
}
980
/*
 * Look up (and, when init is true, lazily allocate) the ctl_arena_t for arena
 * index i.  New entries are carved out of the b0 base allocator; with stats
 * enabled the ctl_arena_t and its stats are a single allocation.  Returns
 * NULL only on allocation failure.
 */
static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
	ctl_arena_t *ret;

	/* Compat-mode lookups never initialize. */
	assert(!compat || !init);

	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
	if (init && ret == NULL) {
		if (config_stats) {
			/*
			 * One base allocation covers both the ctl_arena_t and
			 * its stats.
			 */
			struct container_s {
				ctl_arena_t ctl_arena;
				ctl_arena_stats_t astats;
			};
			struct container_s *cont =
			    (struct container_s *)base_alloc(tsd_tsdn(tsd),
			    b0get(), sizeof(struct container_s), QUANTUM);
			if (cont == NULL) {
				return NULL;
			}
			ret = &cont->ctl_arena;
			ret->astats = &cont->astats;
		} else {
			ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
			    sizeof(ctl_arena_t), QUANTUM);
			if (ret == NULL) {
				return NULL;
			}
		}
		ret->arena_ind = (unsigned)i;
		/* Register the new entry for subsequent lookups. */
		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
	}

	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
	return ret;
}
1016
/* Compat lookup of an already-initialized ctl_arena_t; must not be NULL. */
static ctl_arena_t *
arenas_i(size_t i) {
	ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
	assert(ret != NULL);
	return ret;
}
1023
1024 static void
ctl_arena_clear(ctl_arena_t * ctl_arena)1025 ctl_arena_clear(ctl_arena_t *ctl_arena) {
1026 ctl_arena->nthreads = 0;
1027 ctl_arena->dss = dss_prec_names[dss_prec_limit];
1028 ctl_arena->dirty_decay_ms = -1;
1029 ctl_arena->muzzy_decay_ms = -1;
1030 ctl_arena->pactive = 0;
1031 ctl_arena->pdirty = 0;
1032 ctl_arena->pmuzzy = 0;
1033 if (config_stats) {
1034 memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
1035 ctl_arena->astats->allocated_small = 0;
1036 ctl_arena->astats->nmalloc_small = 0;
1037 ctl_arena->astats->ndalloc_small = 0;
1038 ctl_arena->astats->nrequests_small = 0;
1039 ctl_arena->astats->nfills_small = 0;
1040 ctl_arena->astats->nflushes_small = 0;
1041 memset(ctl_arena->astats->bstats, 0, SC_NBINS *
1042 sizeof(bin_stats_data_t));
1043 memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
1044 sizeof(arena_stats_large_t));
1045 memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
1046 sizeof(pac_estats_t));
1047 memset(&ctl_arena->astats->hpastats, 0,
1048 sizeof(hpa_shard_stats_t));
1049 memset(&ctl_arena->astats->secstats, 0,
1050 sizeof(sec_stats_t));
1051 }
1052 }
1053
/*
 * Pull arena's live stats into ctl_arena.  With stats enabled this merges the
 * full stats set and then derives the small-size-class aggregates from the
 * per-bin counters; otherwise only the basic fields are read.
 */
static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy,
		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
		    ctl_arena->astats->lstats, ctl_arena->astats->estats,
		    &ctl_arena->astats->hpastats, &ctl_arena->astats->secstats);

		/* Aggregate per-bin counters into the small-class totals. */
		for (i = 0; i < SC_NBINS; i++) {
			bin_stats_t *bstats =
			    &ctl_arena->astats->bstats[i].stats_data;
			ctl_arena->astats->allocated_small += bstats->curregs *
			    sz_index2size(i);
			ctl_arena->astats->nmalloc_small += bstats->nmalloc;
			ctl_arena->astats->ndalloc_small += bstats->ndalloc;
			ctl_arena->astats->nrequests_small += bstats->nrequests;
			ctl_arena->astats->nfills_small += bstats->nfills;
			ctl_arena->astats->nflushes_small += bstats->nflushes;
		}
	} else {
		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy);
	}
}
1085
/*
 * Merge ctl_arena's stats into ctl_sdarena (the "summed or destroyed"
 * accumulator).  When destroyed is true, the merged arena has been torn down:
 * its "current" gauges (pactive, curregs, curslabs, curlextents, ...) must
 * already be zero and are asserted rather than accumulated; monotonic
 * counters are always carried over.
 */
static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
		assert(ctl_arena->pmuzzy == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		if (!destroyed) {
			sdstats->astats.mapped += astats->astats.mapped;
			sdstats->astats.pa_shard_stats.pac_stats.retained
			    += astats->astats.pa_shard_stats.pac_stats.retained;
			sdstats->astats.pa_shard_stats.edata_avail
			    += astats->astats.pa_shard_stats.edata_avail;
		}

		/* Decay counters are monotonic; merge even when destroyed. */
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);

		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);

		/* Merge mutex profiling data for each arena mutex. */
#define OP(mtx) malloc_mutex_prof_merge(				\
		    &(sdstats->astats.mutex_prof_data[			\
		        arena_prof_mutex_##mtx]),			\
		    &(astats->astats.mutex_prof_data[			\
		        arena_prof_mutex_##mtx]));
		MUTEX_PROF_ARENA_MUTEXES
#undef OP
		if (!destroyed) {
			sdstats->astats.base += astats->astats.base;
			sdstats->astats.resident += astats->astats.resident;
			sdstats->astats.metadata_thp += astats->astats.metadata_thp;
			ctl_accum_atomic_zu(&sdstats->astats.internal,
			    &astats->astats.internal);
		} else {
			assert(atomic_load_zu(
			    &astats->astats.internal, ATOMIC_RELAXED) == 0);
		}

		if (!destroyed) {
			sdstats->allocated_small += astats->allocated_small;
		} else {
			assert(astats->allocated_small == 0);
		}
		sdstats->nmalloc_small += astats->nmalloc_small;
		sdstats->ndalloc_small += astats->ndalloc_small;
		sdstats->nrequests_small += astats->nrequests_small;
		sdstats->nfills_small += astats->nfills_small;
		sdstats->nflushes_small += astats->nflushes_small;

		if (!destroyed) {
			sdstats->astats.allocated_large +=
			    astats->astats.allocated_large;
		} else {
			assert(astats->astats.allocated_large == 0);
		}
		sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
		sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
		sdstats->astats.nrequests_large
		    += astats->astats.nrequests_large;
		sdstats->astats.nflushes_large += astats->astats.nflushes_large;
		ctl_accum_atomic_zu(
		    &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
		    &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);

		sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
		sdstats->astats.tcache_stashed_bytes +=
		    astats->astats.tcache_stashed_bytes;

		/* Uptime is taken from arena 0 rather than accumulated. */
		if (ctl_arena->arena_ind == 0) {
			sdstats->astats.uptime = astats->astats.uptime;
		}

		/* Merge bin stats. */
		for (i = 0; i < SC_NBINS; i++) {
			bin_stats_t *bstats = &astats->bstats[i].stats_data;
			bin_stats_t *merged = &sdstats->bstats[i].stats_data;
			merged->nmalloc += bstats->nmalloc;
			merged->ndalloc += bstats->ndalloc;
			merged->nrequests += bstats->nrequests;
			if (!destroyed) {
				merged->curregs += bstats->curregs;
			} else {
				assert(bstats->curregs == 0);
			}
			merged->nfills += bstats->nfills;
			merged->nflushes += bstats->nflushes;
			merged->nslabs += bstats->nslabs;
			merged->reslabs += bstats->reslabs;
			if (!destroyed) {
				merged->curslabs += bstats->curslabs;
				merged->nonfull_slabs += bstats->nonfull_slabs;
			} else {
				assert(bstats->curslabs == 0);
				assert(bstats->nonfull_slabs == 0);
			}
			malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
			    &astats->bstats[i].mutex_data);
		}

		/* Merge stats for large allocations. */
		for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
			ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
			ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
			ctl_accum_locked_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
				    astats->lstats[i].curlextents;
			} else {
				assert(astats->lstats[i].curlextents == 0);
			}
		}

		/* Merge extents stats. */
		for (i = 0; i < SC_NPSIZES; i++) {
			sdstats->estats[i].ndirty += astats->estats[i].ndirty;
			sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy;
			sdstats->estats[i].nretained
			    += astats->estats[i].nretained;
			sdstats->estats[i].dirty_bytes
			    += astats->estats[i].dirty_bytes;
			sdstats->estats[i].muzzy_bytes
			    += astats->estats[i].muzzy_bytes;
			sdstats->estats[i].retained_bytes
			    += astats->estats[i].retained_bytes;
		}

		/* Merge HPA stats. */
		hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats);
		sec_stats_accum(&sdstats->secstats, &astats->secstats);
	}
}
1249
/*
 * Re-read arena i's stats into its ctl_arena_t and fold them into
 * ctl_sdarena (the merged or destroyed accumulator).
 */
static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
	ctl_arena_t *ctl_arena = arenas_i(i);

	ctl_arena_clear(ctl_arena);
	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}
1260
/*
 * Create a new arena, preferring to recycle the index of a previously
 * destroyed arena.  Returns the new arena index, or UINT_MAX on failure.
 */
static unsigned
ctl_arena_init(tsd_t *tsd, const arena_config_t *config) {
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	/* Reuse a destroyed arena's slot if one is available. */
	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else {
		arena_ind = ctl_arenas->narenas;
	}

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
		return UINT_MAX;
	}

	/* Initialize new arena. */
	if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) {
		return UINT_MAX;
	}

	/* Only grow narenas when a fresh (non-recycled) index was used. */
	if (arena_ind == ctl_arenas->narenas) {
		ctl_arenas->narenas++;
	}

	return arena_ind;
}
1290
/*
 * Read background-thread stats into ctl_stats, zeroing them when background
 * threads are unavailable or the read fails.
 */
static void
ctl_background_thread_stats_read(tsdn_t *tsdn) {
	background_thread_stats_t *stats = &ctl_stats->background_thread;
	if (!have_background_thread ||
	    background_thread_stats_read(tsdn, stats)) {
		memset(stats, 0, sizeof(background_thread_stats_t));
		nstime_init_zero(&stats->run_interval);
	}
	/* Expose the per-bg-thread max counters via the mutex prof table. */
	malloc_mutex_prof_copy(
	    &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
	    &stats->max_counter_per_bg_thd);
}
1303
/*
 * Refresh all cached ctl stats: clear the merged accumulator, re-read every
 * initialized arena, recompute the process-wide totals and global mutex prof
 * data, then bump the epoch.  Caller must hold ctl_mtx.
 */
static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		/* Derive process-wide totals from the merged arena stats. */
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    ctl_sarena->astats->astats.allocated_large;
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = ctl_sarena->astats->astats.base +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
		    ATOMIC_RELAXED);
		ctl_stats->resident = ctl_sarena->astats->astats.resident;
		ctl_stats->metadata_thp =
		    ctl_sarena->astats->astats.metadata_thp;
		ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
		ctl_stats->retained = ctl_sarena->astats->astats
		    .pa_shard_stats.pac_stats.retained;

		ctl_background_thread_stats_read(tsdn);

		/* Snapshot prof data while briefly holding each mutex. */
#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx)				\
    malloc_mutex_lock(tsdn, &mtx);					\
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx);	\
    malloc_mutex_unlock(tsdn, &mtx);

		if (config_prof && opt_prof) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof, bt2gctx_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_thds_data, tdatas_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_dump, prof_dump_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_recent_alloc,
			    prof_recent_alloc_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_recent_dump,
			    prof_recent_dump_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_stats, prof_stats_mtx);
		}
		if (have_background_thread) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_background_thread,
			    background_thread_lock);
		} else {
			memset(&ctl_stats->mutex_prof_data[
			    global_prof_mutex_background_thread], 0,
			    sizeof(mutex_prof_data_t));
		}
		/* We own ctl mutex already. */
		malloc_mutex_prof_read(tsdn,
		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
		    &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
	}
	ctl_arenas->epoch++;
}
1385
/*
 * One-time lazy initialization of the ctl subsystem: allocate ctl_arenas and
 * (with stats) ctl_stats, pre-create ctl data for every supported arena index
 * including the merged and destroyed pseudo-arenas, and take an initial stats
 * snapshot.  Returns true on OOM.
 */
static bool
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}

		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		}

		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}
1461
/*
 * Translate a dotted name (relative to starting_node) into a MIB.  On entry
 * *depthp is the capacity of mibp; on success it is set to the number of MIB
 * components actually consumed.  *ending_nodep (if non-NULL) receives the
 * final node reached.  Returns ENOENT when any component fails to match.
 */
static int
ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node,
    const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp,
    size_t *depthp) {
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = starting_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					mibp[i] = j;
					break;
				}
			}
			/* node unchanged means no child name matched. */
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			mibp[i] = (size_t)index;
		}

		/* Reached the end? */
		if (node->ctl != NULL || *dot == '\0') {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}
	if (ending_nodep != NULL) {
		*ending_nodep = node;
	}

	ret = 0;
label_return:
	return ret;
}
1552
1553 int
ctl_byname(tsd_t * tsd,const char * name,void * oldp,size_t * oldlenp,void * newp,size_t newlen)1554 ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
1555 void *newp, size_t newlen) {
1556 int ret;
1557 size_t depth;
1558 size_t mib[CTL_MAX_DEPTH];
1559 const ctl_named_node_t *node;
1560
1561 if (!ctl_initialized && ctl_init(tsd)) {
1562 ret = EAGAIN;
1563 goto label_return;
1564 }
1565
1566 depth = CTL_MAX_DEPTH;
1567 ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib,
1568 &depth);
1569 if (ret != 0) {
1570 goto label_return;
1571 }
1572
1573 if (node != NULL && node->ctl) {
1574 ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
1575 } else {
1576 /* The name refers to a partial path through the ctl tree. */
1577 ret = ENOENT;
1578 }
1579
1580 label_return:
1581 return(ret);
1582 }
1583
1584 int
ctl_nametomib(tsd_t * tsd,const char * name,size_t * mibp,size_t * miblenp)1585 ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
1586 int ret;
1587
1588 if (!ctl_initialized && ctl_init(tsd)) {
1589 ret = EAGAIN;
1590 goto label_return;
1591 }
1592
1593 ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp,
1594 miblenp);
1595 label_return:
1596 return(ret);
1597 }
1598
/*
 * Walk the ctl tree along a fully numeric MIB, storing the node reached in
 * *ending_nodep.  Returns ENOENT when a component is out of range or an
 * indexed lookup fails.
 */
static int
ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep,
    const size_t *mib, size_t miblen) {
	int ret;

	const ctl_named_node_t *node = super_root_node;
	for (size_t i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}
	assert(ending_nodep != NULL);
	*ending_nodep = node;
	ret = 0;

label_return:
	return(ret);
}
1634
1635 int
ctl_bymib(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)1636 ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1637 size_t *oldlenp, void *newp, size_t newlen) {
1638 int ret;
1639 const ctl_named_node_t *node;
1640
1641 if (!ctl_initialized && ctl_init(tsd)) {
1642 ret = EAGAIN;
1643 goto label_return;
1644 }
1645
1646 ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
1647 if (ret != 0) {
1648 goto label_return;
1649 }
1650
1651 /* Call the ctl function. */
1652 if (node && node->ctl) {
1653 ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
1654 } else {
1655 /* Partial MIB. */
1656 ret = ENOENT;
1657 }
1658
1659 label_return:
1660 return(ret);
1661 }
1662
/*
 * Extend a MIB prefix of length miblen by resolving the remaining dotted
 * name relative to the node the prefix denotes.  On success *miblenp holds
 * the total resulting MIB length.
 */
int
ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
    size_t *miblenp) {
	int ret;
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
	if (ret != 0) {
		goto label_return;
	}
	/* The prefix must denote an interior (non-terminal) node. */
	if (node == NULL || node->ctl != NULL) {
		ret = ENOENT;
		goto label_return;
	}

	assert(miblenp != NULL);
	assert(*miblenp >= miblen);
	/* Lend ctl_lookup() the space past the prefix, then restore. */
	*miblenp -= miblen;
	ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen,
	    miblenp);
	*miblenp += miblen;
label_return:
	return(ret);
}
1692
/*
 * Combined bymib + byname: extend the MIB prefix with the remaining dotted
 * name, then invoke the resulting terminal node's ctl function.
 */
int
ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
    size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
	if (ret != 0) {
		goto label_return;
	}
	/* The prefix must denote an interior (non-terminal) node. */
	if (node == NULL || node->ctl != NULL) {
		ret = ENOENT;
		goto label_return;
	}

	assert(miblenp != NULL);
	assert(*miblenp >= miblen);
	*miblenp -= miblen;
	/*
	 * The same node supplies the starting node and stores the ending node.
	 */
	ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen,
	    miblenp);
	*miblenp += miblen;
	if (ret != 0) {
		goto label_return;
	}

	if (node != NULL && node->ctl) {
		ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp,
		    newlen);
	} else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return(ret);
}
1737
1738 bool
ctl_boot(void)1739 ctl_boot(void) {
1740 if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
1741 malloc_mutex_rank_exclusive)) {
1742 return true;
1743 }
1744
1745 ctl_initialized = false;
1746
1747 return false;
1748 }
1749
/* Acquire ctl_mtx before fork so the child inherits a consistent state. */
void
ctl_prefork(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}

/* Release ctl_mtx in the parent after fork. */
void
ctl_postfork_parent(tsdn_t *tsdn) {
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

/* Reinitialize/release ctl_mtx in the child after fork. */
void
ctl_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}

/* Debug helper: assert that the caller owns ctl_mtx. */
void
ctl_mtx_assert_held(tsdn_t *tsdn) {
	malloc_mutex_assert_owner(tsdn, &ctl_mtx);
}
1769
1770 /******************************************************************************/
1771 /* *_ctl() functions. */
1772
/*
 * The following helper macros run inside *_ctl() handlers; they rely on the
 * surrounding scope providing oldp/oldlenp/newp/newlen, an int ret, and a
 * label_return label.  Each enforces which access modes a node supports,
 * setting ret and jumping to label_return on violation.
 */

/* Read-only node: fail with EPERM if the caller supplied a new value. */
#define READONLY() do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/* Write-only node: fail with EPERM if the caller requested the old value. */
#define WRITEONLY() do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/* Can read or write, but not both. */
#define READ_XOR_WRITE() do {						\
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
	    newlen != 0)) {						\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/* Can neither read nor write. */
#define NEITHER_READ_NOR_WRITE() do {					\
	if (oldp != NULL || oldlenp != NULL || newp != NULL ||		\
	    newlen != 0) {						\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)
1804
/*
 * Verify that the caller supplied a correctly-sized output buffer; fail
 * with EINVAL otherwise, reporting a length of zero when possible.
 * Guarding the *oldlenp store is required: the condition is also true
 * when oldlenp itself is NULL, and storing through it would be a NULL
 * pointer dereference (reachable via e.g. mallctl("tcache.create", NULL,
 * NULL, ...)).
 */
#define VERIFY_READ(t) do {						\
	if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) {	\
		if (oldlenp != NULL) {					\
			*oldlenp = 0;					\
		}							\
		ret = EINVAL;						\
		goto label_return;					\
	}								\
} while (0)
1813
/*
 * Copy the old value out when the caller requested a read.  On a size
 * mismatch, copy as many bytes as fit, report the copied length via
 * *oldlenp, and fail with EINVAL.
 */
#define READ(v, t) do {							\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&(v), copylen);		\
			*oldlenp = copylen;				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		*(t *)oldp = (v);					\
	}								\
} while (0)

/*
 * Copy the new value into v when one was supplied; its length must match
 * sizeof(t) exactly (EINVAL otherwise).  A NULL newp is a no-op.
 */
#define WRITE(v, t) do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		(v) = *(t *)newp;					\
	}								\
} while (0)

/* Like WRITE, but the new value is mandatory rather than optional. */
#define ASSURED_WRITE(v, t) do {					\
	if (newp == NULL || newlen != sizeof(t)) {			\
		ret = EINVAL;						\
		goto label_return;					\
	}								\
	(v) = *(t *)newp;						\
} while (0)

/*
 * Extract MIB component i as an unsigned index; EFAULT if the size_t
 * value cannot be represented as unsigned.
 */
#define MIB_UNSIGNED(v, i) do {						\
	if (mib[i] > UINT_MAX) {					\
		ret = EFAULT;						\
		goto label_return;					\
	}								\
	v = (unsigned)mib[i];						\
} while (0)
1853
1854 /*
1855 * There's a lot of code duplication in the following macros due to limitations
1856 * in how nested cpp macros are expanded.
1857 */
1858 #define CTL_RO_CLGEN(c, l, n, v, t) \
1859 static int \
1860 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1861 size_t *oldlenp, void *newp, size_t newlen) { \
1862 int ret; \
1863 t oldval; \
1864 \
1865 if (!(c)) { \
1866 return ENOENT; \
1867 } \
1868 if (l) { \
1869 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1870 } \
1871 READONLY(); \
1872 oldval = (v); \
1873 READ(oldval, t); \
1874 \
1875 ret = 0; \
1876 label_return: \
1877 if (l) { \
1878 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1879 } \
1880 return ret; \
1881 }
1882
1883 #define CTL_RO_CGEN(c, n, v, t) \
1884 static int \
1885 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1886 void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
1887 int ret; \
1888 t oldval; \
1889 \
1890 if (!(c)) { \
1891 return ENOENT; \
1892 } \
1893 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1894 READONLY(); \
1895 oldval = (v); \
1896 READ(oldval, t); \
1897 \
1898 ret = 0; \
1899 label_return: \
1900 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1901 return ret; \
1902 }
1903
1904 #define CTL_RO_GEN(n, v, t) \
1905 static int \
1906 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1907 size_t *oldlenp, void *newp, size_t newlen) { \
1908 int ret; \
1909 t oldval; \
1910 \
1911 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1912 READONLY(); \
1913 oldval = (v); \
1914 READ(oldval, t); \
1915 \
1916 ret = 0; \
1917 label_return: \
1918 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1919 return ret; \
1920 }
1921
1922 /*
1923 * ctl_mtx is not acquired, under the assumption that no pertinent data will
1924 * mutate during the call.
1925 */
1926 #define CTL_RO_NL_CGEN(c, n, v, t) \
1927 static int \
1928 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1929 void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
1930 int ret; \
1931 t oldval; \
1932 \
1933 if (!(c)) { \
1934 return ENOENT; \
1935 } \
1936 READONLY(); \
1937 oldval = (v); \
1938 READ(oldval, t); \
1939 \
1940 ret = 0; \
1941 label_return: \
1942 return ret; \
1943 }
1944
1945 #define CTL_RO_NL_GEN(n, v, t) \
1946 static int \
1947 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1948 void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
1949 int ret; \
1950 t oldval; \
1951 \
1952 READONLY(); \
1953 oldval = (v); \
1954 READ(oldval, t); \
1955 \
1956 ret = 0; \
1957 label_return: \
1958 return ret; \
1959 }
1960
1961 #define CTL_RO_CONFIG_GEN(n, t) \
1962 static int \
1963 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1964 void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
1965 int ret; \
1966 t oldval; \
1967 \
1968 READONLY(); \
1969 oldval = n; \
1970 READ(oldval, t); \
1971 \
1972 ret = 0; \
1973 label_return: \
1974 return ret; \
1975 }
1976
1977 /******************************************************************************/
1978
/* "version": the JEMALLOC_VERSION string baked in at build time. */
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
1980
1981 static int
1982 epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1983 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1984 int ret;
1985 UNUSED uint64_t newval;
1986
1987 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
1988 WRITE(newval, uint64_t);
1989 if (newp != NULL) {
1990 ctl_refresh(tsd_tsdn(tsd));
1991 }
1992 READ(ctl_arenas->epoch, uint64_t);
1993
1994 ret = 0;
1995 label_return:
1996 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
1997 return ret;
1998 }
1999
/*
 * "background_thread": read or toggle global background-thread operation.
 * A read reports whether background threads are enabled; a write of a
 * different value enables or disables them.  ENOENT when the feature is
 * compiled out.
 */
static int
background_thread_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	/* Lock order: ctl_mtx before background_thread_lock. */
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		oldval = background_thread_enabled();
		READ(oldval, bool);
	} else {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		/* The caller always sees the pre-toggle state. */
		oldval = background_thread_enabled();
		READ(oldval, bool);

		bool newval = *(bool *)newp;
		if (newval == oldval) {
			/* No state change requested; nothing to do. */
			ret = 0;
			goto label_return;
		}

		background_thread_enabled_set(tsd_tsdn(tsd), newval);
		if (newval) {
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}
2051
/*
 * "max_background_threads": read or set the cap on background threads,
 * bounded above by opt_max_background_threads.  Changing the cap while
 * background threads are running restarts them under the new limit.
 */
static int
max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	size_t oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	/* Lock order: ctl_mtx before background_thread_lock. */
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		oldval = max_background_threads;
		READ(oldval, size_t);
	} else {
		if (newlen != sizeof(size_t)) {
			ret = EINVAL;
			goto label_return;
		}
		/* Report the pre-update cap. */
		oldval = max_background_threads;
		READ(oldval, size_t);

		size_t newval = *(size_t *)newp;
		if (newval == oldval) {
			ret = 0;
			goto label_return;
		}
		if (newval > opt_max_background_threads) {
			ret = EINVAL;
			goto label_return;
		}

		if (background_thread_enabled()) {
			/* Disable/re-enable so the pool is rebuilt under
			 * the new cap. */
			background_thread_enabled_set(tsd_tsdn(tsd), false);
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
			max_background_threads = newval;
			background_thread_enabled_set(tsd_tsdn(tsd), true);
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			max_background_threads = newval;
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}
2110
2111 /******************************************************************************/
2112
/* "config.*": compile-time configuration flags, constant for the process. */
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
2125
2126 /******************************************************************************/
2127
/*
 * "opt.*": read-only views of option values that are fixed once
 * malloc_conf processing completes, hence no locking (CTL_RO_NL_*).
 */
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)

/* HPA options. */
CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
    opt_hpa_opts.hugification_threshold, size_t)
CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
    uint64_t)

/*
 * This will have to change before we publicly document this option; fxp_t and
 * its representation are internal implementation details.
 */
CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)

/* HPA SEC options */
CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush,
    size_t)
CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra,
    size_t)

CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
    const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
    const char *)
CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new,
    opt_experimental_infallible_new, bool)

/* Tcache tuning options. */
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t)
CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min,
    unsigned)
CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max,
    unsigned)
CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned)
CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t)
CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t)
CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t)
CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div,
    unsigned)
CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div,
    unsigned)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
    size_t)

/* Profiling options (ENOENT unless built with --enable-prof). */
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max,
    opt_prof_recent_alloc_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
    bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
    prof_time_res_mode_names[opt_prof_time_res], const char *)
CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
    opt_lg_san_uaf_align, ssize_t)
CTL_RO_NL_GEN(opt_zero_realloc,
    zero_realloc_mode_names[opt_zero_realloc_action], const char *)
2221
2222 /******************************************************************************/
2223
/*
 * "thread.arena": read or switch the arena the calling thread allocates
 * from.  A write migrates the thread (and its tcache) to the requested
 * arena, initializing that arena on demand.
 */
static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	arena_t *oldarena;
	unsigned newind, oldind;

	oldarena = arena_choose(tsd, NULL);
	if (oldarena == NULL) {
		return EAGAIN;
	}
	/* Default the new index to the current one so a pure read is a no-op. */
	newind = oldind = arena_ind_get(oldarena);
	WRITE(newind, unsigned);
	READ(oldind, unsigned);

	if (newind != oldind) {
		arena_t *newarena;

		if (newind >= narenas_total_get()) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		if (have_percpu_arena &&
		    PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
			if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
				/*
				 * If perCPU arena is enabled, thread_arena
				 * control is not allowed for the auto arena
				 * range.
				 */
				ret = EPERM;
				goto label_return;
			}
		}

		/* Initialize arena if necessary. */
		newarena = arena_get(tsd_tsdn(tsd), newind, true);
		if (newarena == NULL) {
			ret = EAGAIN;
			goto label_return;
		}
		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldarena, newarena);
		if (tcache_available(tsd)) {
			tcache_arena_reassociate(tsd_tsdn(tsd),
			    tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
			    newarena);
		}
	}

	ret = 0;
label_return:
	return ret;
}
2280
/* Per-thread allocation/deallocation byte counters, and pointers to them. */
CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
2285
2286 static int
2287 thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
2288 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2289 size_t newlen) {
2290 int ret;
2291 bool oldval;
2292
2293 oldval = tcache_enabled_get(tsd);
2294 if (newp != NULL) {
2295 if (newlen != sizeof(bool)) {
2296 ret = EINVAL;
2297 goto label_return;
2298 }
2299 tcache_enabled_set(tsd, *(bool *)newp);
2300 }
2301 READ(oldval, bool);
2302
2303 ret = 0;
2304 label_return:
2305 return ret;
2306 }
2307
/*
 * "thread.tcache.flush": release everything cached in the calling
 * thread's tcache.  Takes no input and produces no output.
 */
static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;

	/*
	 * Checked before argument validation so a missing tcache reports
	 * EFAULT even for malformed calls.
	 */
	if (!tcache_available(tsd)) {
		ret = EFAULT;
		goto label_return;
	}

	NEITHER_READ_NOR_WRITE();

	tcache_flush(tsd);

	ret = 0;
label_return:
	return ret;
}
2327
2328 static int
thread_peak_read_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2329 thread_peak_read_ctl(tsd_t *tsd, const size_t *mib,
2330 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2331 size_t newlen) {
2332 int ret;
2333 if (!config_stats) {
2334 return ENOENT;
2335 }
2336 READONLY();
2337 peak_event_update(tsd);
2338 uint64_t result = peak_event_max(tsd);
2339 READ(result, uint64_t);
2340 ret = 0;
2341 label_return:
2342 return ret;
2343 }
2344
2345 static int
thread_peak_reset_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2346 thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib,
2347 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2348 size_t newlen) {
2349 int ret;
2350 if (!config_stats) {
2351 return ENOENT;
2352 }
2353 NEITHER_READ_NOR_WRITE();
2354 peak_event_zero(tsd);
2355 ret = 0;
2356 label_return:
2357 return ret;
2358 }
2359
/*
 * "thread.prof.name": read or set the calling thread's profiling name.
 * Reading and writing in the same call is rejected.
 */
static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;

	if (!config_prof || !opt_prof) {
		return ENOENT;
	}

	READ_XOR_WRITE();

	if (newp != NULL) {
		if (newlen != sizeof(const char *)) {
			ret = EINVAL;
			goto label_return;
		}

		/* prof_thread_name_set() returns an errno-style code. */
		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
		    0) {
			goto label_return;
		}
	} else {
		const char *oldname = prof_thread_name_get(tsd);
		READ(oldname, const char *);
	}

	ret = 0;
label_return:
	return ret;
}
2391
/*
 * "thread.prof.active": read or toggle per-thread profiling activity.
 * Reads report false when profiling is compiled in but not enabled at
 * run time; writes additionally require opt_prof.
 */
static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	oldval = opt_prof ? prof_thread_active_get(tsd) : false;
	if (newp != NULL) {
		if (!opt_prof) {
			ret = ENOENT;
			goto label_return;
		}
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		if (prof_thread_active_set(tsd, *(bool *)newp)) {
			ret = EAGAIN;
			goto label_return;
		}
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}
2424
/*
 * "thread.idle": hint that the calling thread expects to be idle for a
 * while.  Flushes the thread's tcache and, when arenas outnumber CPUs
 * 2:1, decays the thread's arena.  Takes no input, produces no output.
 */
static int
thread_idle_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;

	NEITHER_READ_NOR_WRITE();

	if (tcache_available(tsd)) {
		tcache_flush(tsd);
	}
	/*
	 * This heuristic is perhaps not the most well-considered. But it
	 * matches the only idling policy we have experience with in the status
	 * quo. Over time we should investigate more principled approaches.
	 */
	if (opt_narenas > ncpus * 2) {
		arena_t *arena = arena_choose(tsd, NULL);
		if (arena != NULL) {
			arena_decay(tsd_tsdn(tsd), arena, false, true);
		}
		/*
		 * The missing arena case is not actually an error; a thread
		 * might be idle before it associates itself to one. This is
		 * unusual, but not wrong.
		 */
	}

	ret = 0;
label_return:
	return ret;
}
2457
2458 /******************************************************************************/
2459
2460 static int
tcache_create_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2461 tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2462 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2463 int ret;
2464 unsigned tcache_ind;
2465
2466 READONLY();
2467 VERIFY_READ(unsigned);
2468 if (tcaches_create(tsd, b0get(), &tcache_ind)) {
2469 ret = EFAULT;
2470 goto label_return;
2471 }
2472 READ(tcache_ind, unsigned);
2473
2474 ret = 0;
2475 label_return:
2476 return ret;
2477 }
2478
2479 static int
tcache_flush_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2480 tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2481 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2482 int ret;
2483 unsigned tcache_ind;
2484
2485 WRITEONLY();
2486 ASSURED_WRITE(tcache_ind, unsigned);
2487 tcaches_flush(tsd, tcache_ind);
2488
2489 ret = 0;
2490 label_return:
2491 return ret;
2492 }
2493
2494 static int
tcache_destroy_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2495 tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2496 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2497 int ret;
2498 unsigned tcache_ind;
2499
2500 WRITEONLY();
2501 ASSURED_WRITE(tcache_ind, unsigned);
2502 tcaches_destroy(tsd, tcache_ind);
2503
2504 ret = 0;
2505 label_return:
2506 return ret;
2507 }
2508
2509 /******************************************************************************/
2510
2511 static int
arena_i_initialized_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2512 arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2513 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2514 int ret;
2515 tsdn_t *tsdn = tsd_tsdn(tsd);
2516 unsigned arena_ind;
2517 bool initialized;
2518
2519 READONLY();
2520 MIB_UNSIGNED(arena_ind, 1);
2521
2522 malloc_mutex_lock(tsdn, &ctl_mtx);
2523 initialized = arenas_i(arena_ind)->initialized;
2524 malloc_mutex_unlock(tsdn, &ctl_mtx);
2525
2526 READ(initialized, bool);
2527
2528 ret = 0;
2529 label_return:
2530 return ret;
2531 }
2532
/*
 * Decay (or, when all is true, fully purge) the arena at arena_ind, or
 * every arena when arena_ind is MALLCTL_ARENAS_ALL.  ctl_mtx is held
 * only long enough to snapshot the arena pointer(s); the decay work
 * itself runs unlocked.
 */
static void
arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
	malloc_mutex_lock(tsdn, &ctl_mtx);
	{
		unsigned narenas = ctl_arenas->narenas;

		/*
		 * Access via index narenas is deprecated, and scheduled for
		 * removal in 6.0.0.
		 */
		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
			unsigned i;
			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

			for (i = 0; i < narenas; i++) {
				tarenas[i] = arena_get(tsdn, i, false);
			}

			/*
			 * No further need to hold ctl_mtx, since narenas and
			 * tarenas contain everything needed below.
			 */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			for (i = 0; i < narenas; i++) {
				if (tarenas[i] != NULL) {
					arena_decay(tsdn, tarenas[i], false,
					    all);
				}
			}
		} else {
			arena_t *tarena;

			assert(arena_ind < narenas);

			tarena = arena_get(tsdn, arena_ind, false);

			/* No further need to hold ctl_mtx. */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			if (tarena != NULL) {
				arena_decay(tsdn, tarena, false, all);
			}
		}
	}
}
2579
2580 static int
arena_i_decay_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2581 arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2582 size_t *oldlenp, void *newp, size_t newlen) {
2583 int ret;
2584 unsigned arena_ind;
2585
2586 NEITHER_READ_NOR_WRITE();
2587 MIB_UNSIGNED(arena_ind, 1);
2588 arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
2589
2590 ret = 0;
2591 label_return:
2592 return ret;
2593 }
2594
2595 static int
arena_i_purge_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2596 arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2597 size_t *oldlenp, void *newp, size_t newlen) {
2598 int ret;
2599 unsigned arena_ind;
2600
2601 NEITHER_READ_NOR_WRITE();
2602 MIB_UNSIGNED(arena_ind, 1);
2603 arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
2604
2605 ret = 0;
2606 label_return:
2607 return ret;
2608 }
2609
/*
 * Shared validation for arena.<i>.{reset,destroy}: the node accepts
 * neither input nor output, and the target must be an existing,
 * explicitly-created (non-auto) arena.  On success, *arena_ind and
 * *arena identify the target.
 */
static int
arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
    arena_t **arena) {
	int ret;

	NEITHER_READ_NOR_WRITE();
	MIB_UNSIGNED(*arena_ind, 1);

	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
	if (*arena == NULL || arena_is_auto(*arena)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}
2629
/*
 * Pause the arena's background thread ahead of a reset/destroy.
 * NOTE: when have_background_thread, this acquires background_thread_lock
 * and leaves it HELD; the paired arena_reset_finish_background_thread()
 * call releases it.
 */
static void
arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
	/* Temporarily disable the background thread during arena reset. */
	if (have_background_thread) {
		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
		if (background_thread_enabled()) {
			background_thread_info_t *info =
			    background_thread_info_get(arena_ind);
			assert(info->state == background_thread_started);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_paused;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
}
2645
/*
 * Resume the arena's background thread after a reset/destroy, and drop
 * background_thread_lock acquired by
 * arena_reset_prepare_background_thread().
 */
static void
arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
	if (have_background_thread) {
		if (background_thread_enabled()) {
			background_thread_info_t *info =
			    background_thread_info_get(arena_ind);
			assert(info->state == background_thread_paused);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_started;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	}
}
2660
2661 static int
arena_i_reset_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2662 arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2663 size_t *oldlenp, void *newp, size_t newlen) {
2664 int ret;
2665 unsigned arena_ind;
2666 arena_t *arena;
2667
2668 ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
2669 newp, newlen, &arena_ind, &arena);
2670 if (ret != 0) {
2671 return ret;
2672 }
2673
2674 arena_reset_prepare_background_thread(tsd, arena_ind);
2675 arena_reset(tsd, arena);
2676 arena_reset_finish_background_thread(tsd, arena_ind);
2677
2678 return ret;
2679 }
2680
/*
 * "arena.<i>.destroy": fully destroy an explicitly-created arena.  The
 * arena must have no attached threads.  Its stats are merged into the
 * MALLCTL_ARENAS_DESTROYED pseudo-arena and its index is queued for
 * reuse by arenas.create.
 */
static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;
	ctl_arena_t *ctl_darena, *ctl_arena;

	/* ctl_mtx protects the ctl_arenas bookkeeping updated below. */
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		goto label_return;
	}

	/* Refuse to destroy an arena that still has threads attached. */
	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
	    true) != 0) {
		ret = EFAULT;
		goto label_return;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	/* Merge stats after resetting and purging arena. */
	arena_reset(tsd, arena);
	arena_decay(tsd_tsdn(tsd), arena, false, true);
	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
	ctl_darena->initialized = true;
	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
	/* Destroy arena. */
	arena_destroy(tsd, arena);
	ctl_arena = arenas_i(arena_ind);
	ctl_arena->initialized = false;
	/* Record arena index for later recycling via arenas.create. */
	ql_elm_new(ctl_arena, destroyed_link);
	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
	arena_reset_finish_background_thread(tsd, arena_ind);

	assert(ret == 0);
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}
2725
/*
 * "arena.<i>.dss": read or set the dss (sbrk) precedence, either for a
 * single arena or process-wide via MALLCTL_ARENAS_ALL.  A written string
 * must match an entry of dss_prec_names.  The reported value is the
 * precedence in effect before any update.
 */
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *dss = NULL;
	unsigned arena_ind;
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(dss, const char *);
	MIB_UNSIGNED(arena_ind, 1);
	if (dss != NULL) {
		int i;
		bool match = false;

		/* Translate the precedence name into a dss_prec_t value. */
		for (i = 0; i < dss_prec_limit; i++) {
			if (strcmp(dss_prec_names[i], dss) == 0) {
				dss_prec = i;
				match = true;
				break;
			}
		}

		if (!match) {
			ret = EINVAL;
			goto label_return;
		}
	}

	/*
	 * Access via index narenas is deprecated, and scheduled for removal in
	 * 6.0.0.
	 */
	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
	    ctl_arenas->narenas) {
		if (dss_prec != dss_prec_limit &&
		    extent_dss_prec_set(dss_prec)) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = extent_dss_prec_get();
	} else {
		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL || (dss_prec != dss_prec_limit &&
		    arena_dss_prec_set(arena, dss_prec))) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = arena_dss_prec_get(arena);
	}

	/* Report the pre-update precedence by name. */
	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
2786
/*
 * "arena.<i>.oversize_threshold": read and/or update the arena's
 * oversize_threshold setting.  The value is stored with relaxed atomics,
 * so no ctl_mtx is taken; read and write may both occur in one call.
 */
static int
arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	unsigned arena_ind;
	MIB_UNSIGNED(arena_ind, 1);

	arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
	if (arena == NULL) {
		ret = EFAULT;
		goto label_return;
	}

	if (oldp != NULL && oldlenp != NULL) {
		size_t oldval = atomic_load_zu(
		    &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
		READ(oldval, size_t);
	}
	if (newp != NULL) {
		if (newlen != sizeof(size_t)) {
			ret = EINVAL;
			goto label_return;
		}
		atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
		    *(size_t *)newp, ATOMIC_RELAXED);
	}
	ret = 0;
label_return:
	return ret;
}
2818
2819 static int
arena_i_decay_ms_ctl_impl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen,bool dirty)2820 arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
2821 void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
2822 int ret;
2823 unsigned arena_ind;
2824 arena_t *arena;
2825
2826 MIB_UNSIGNED(arena_ind, 1);
2827 arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2828 if (arena == NULL) {
2829 ret = EFAULT;
2830 goto label_return;
2831 }
2832 extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
2833
2834 if (oldp != NULL && oldlenp != NULL) {
2835 size_t oldval = arena_decay_ms_get(arena, state);
2836 READ(oldval, ssize_t);
2837 }
2838 if (newp != NULL) {
2839 if (newlen != sizeof(ssize_t)) {
2840 ret = EINVAL;
2841 goto label_return;
2842 }
2843 if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
2844 /*
2845 * By default the huge arena purges eagerly. If it is
2846 * set to non-zero decay time afterwards, background
2847 * thread might be needed.
2848 */
2849 if (background_thread_create(tsd, arena_ind)) {
2850 ret = EFAULT;
2851 goto label_return;
2852 }
2853 }
2854
2855 if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
2856 *(ssize_t *)newp)) {
2857 ret = EFAULT;
2858 goto label_return;
2859 }
2860 }
2861
2862 ret = 0;
2863 label_return:
2864 return ret;
2865 }
2866
2867 static int
arena_i_dirty_decay_ms_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2868 arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2869 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2870 return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2871 newlen, true);
2872 }
2873
2874 static int
arena_i_muzzy_decay_ms_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)2875 arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2876 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2877 return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2878 newlen, false);
2879 }
2880
/*
 * "arena.<i>.extent_hooks": read and/or replace an arena's extent hooks.
 * If the arena does not exist yet (index below narenas_auto), writing hooks
 * initializes the arena as a side effect.  Serialized under ctl_mtx.
 */
static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get()) {
		extent_hooks_t *old_extent_hooks;
		arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL) {
			/* Auto arenas not yet created report default hooks. */
			if (arena_ind >= narenas_auto) {
				ret = EFAULT;
				goto label_return;
			}
			old_extent_hooks =
			    (extent_hooks_t *)&ehooks_default_extent_hooks;
			READ(old_extent_hooks, extent_hooks_t *);
			if (newp != NULL) {
				/* Initialize a new arena as a side effect. */
				extent_hooks_t *new_extent_hooks
				    JEMALLOC_CC_SILENCE_INIT(NULL);
				WRITE(new_extent_hooks, extent_hooks_t *);
				arena_config_t config = arena_config_default;
				config.extent_hooks = new_extent_hooks;

				arena = arena_init(tsd_tsdn(tsd), arena_ind,
				    &config);
				if (arena == NULL) {
					ret = EFAULT;
					goto label_return;
				}
			}
		} else {
			if (newp != NULL) {
				/* Swap hooks; report the previous ones. */
				extent_hooks_t *new_extent_hooks
				    JEMALLOC_CC_SILENCE_INIT(NULL);
				WRITE(new_extent_hooks, extent_hooks_t *);
				old_extent_hooks = arena_set_extent_hooks(tsd,
				    arena, new_extent_hooks);
				READ(old_extent_hooks, extent_hooks_t *);
			} else {
				/* Read-only: report current hooks. */
				old_extent_hooks =
				    ehooks_get_extent_hooks_ptr(
				    arena_get_ehooks(arena));
				READ(old_extent_hooks, extent_hooks_t *);
			}
		}
	} else {
		/* Index beyond all allocated arena slots. */
		ret = EFAULT;
		goto label_return;
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
2940
/*
 * "arena.<i>.retain_grow_limit": read and/or set the limit on extent growth
 * for an arena.  ENOENT unless the retain option is enabled.
 */
static int
arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	if (!opt_retain) {
		/* Only relevant when retain is enabled. */
		return ENOENT;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
		size_t old_limit, new_limit;
		if (newp != NULL) {
			WRITE(new_limit, size_t);
		}
		/* Fetches old limit and, when newp != NULL, installs new. */
		bool err = arena_retain_grow_limit_get_set(tsd, arena,
		    &old_limit, newp != NULL ? &new_limit : NULL);
		if (!err) {
			READ(old_limit, size_t);
			ret = 0;
		} else {
			ret = EFAULT;
		}
	} else {
		ret = EFAULT;
	}
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
2977
2978 static const ctl_named_node_t *
arena_i_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t i)2979 arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2980 size_t i) {
2981 const ctl_named_node_t *ret;
2982
2983 malloc_mutex_lock(tsdn, &ctl_mtx);
2984 switch (i) {
2985 case MALLCTL_ARENAS_ALL:
2986 case MALLCTL_ARENAS_DESTROYED:
2987 break;
2988 default:
2989 if (i > ctl_arenas->narenas) {
2990 ret = NULL;
2991 goto label_return;
2992 }
2993 break;
2994 }
2995
2996 ret = super_arena_i_node;
2997 label_return:
2998 malloc_mutex_unlock(tsdn, &ctl_mtx);
2999 return ret;
3000 }
3001
3002 /******************************************************************************/
3003
/* "arenas.narenas": read-only count of arenas, snapshotted under ctl_mtx. */
static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned narenas;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	narenas = ctl_arenas->narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
3020
3021 static int
arenas_decay_ms_ctl_impl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen,bool dirty)3022 arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
3023 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
3024 size_t newlen, bool dirty) {
3025 int ret;
3026
3027 if (oldp != NULL && oldlenp != NULL) {
3028 size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
3029 arena_muzzy_decay_ms_default_get());
3030 READ(oldval, ssize_t);
3031 }
3032 if (newp != NULL) {
3033 if (newlen != sizeof(ssize_t)) {
3034 ret = EINVAL;
3035 goto label_return;
3036 }
3037 if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
3038 : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
3039 ret = EFAULT;
3040 goto label_return;
3041 }
3042 }
3043
3044 ret = 0;
3045 label_return:
3046 return ret;
3047 }
3048
3049 static int
arenas_dirty_decay_ms_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)3050 arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3051 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3052 return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
3053 newlen, true);
3054 }
3055
3056 static int
arenas_muzzy_decay_ms_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)3057 arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3058 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3059 return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
3060 newlen, false);
3061 }
3062
/* Read-only, lock-free mallctls for global and per-bin size parameters. */
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
3072 static const ctl_named_node_t *
3073 arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
3074 size_t miblen, size_t i) {
3075 if (i > SC_NBINS) {
3076 return NULL;
3077 }
3078 return super_arenas_bin_i_node;
3079 }
3080
/* Large size classes: count, and per-class size (offset past the bins). */
CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
    size_t)
3084 static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t i)3085 arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
3086 size_t miblen, size_t i) {
3087 if (i > SC_NSIZES - SC_NBINS) {
3088 return NULL;
3089 }
3090 return super_arenas_lextent_i_node;
3091 }
3092
/*
 * "arenas.create": create a new arena (optionally with caller-supplied extent
 * hooks) and return its index.  Serialized under ctl_mtx.
 */
static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);

	/* Validate the output buffer before doing any work. */
	VERIFY_READ(unsigned);
	arena_config_t config = arena_config_default;
	WRITE(config.extent_hooks, extent_hooks_t *);
	/* UINT_MAX signals that arena creation failed. */
	if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
3115
/*
 * "experimental.arenas_create_ext": like "arenas.create", but the writer
 * supplies a full arena_config_t rather than just extent hooks.
 */
static int
experimental_arenas_create_ext_ctl(tsd_t *tsd,
    const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);

	arena_config_t config = arena_config_default;
	/* Validate the output buffer before doing any work. */
	VERIFY_READ(unsigned);
	WRITE(config, arena_config_t);

	/* UINT_MAX signals that arena creation failed. */
	if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(arena_ind, unsigned);
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
3139
/*
 * "arenas.lookup": given a pointer (written via newp), report the index of
 * the arena that owns it.  ret starts at EINVAL so the early goto paths
 * (unknown pointer / no owning arena) report invalid input.
 */
static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	unsigned arena_ind;
	void *ptr;
	edata_t *edata;
	arena_t *arena;

	ptr = NULL;
	ret = EINVAL;
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(ptr, void *);
	/* Map the pointer to its extent metadata. */
	edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
	if (edata == NULL) {
		goto label_return;
	}

	arena = arena_get_from_edata(edata);
	if (arena == NULL) {
		goto label_return;
	}

	arena_ind = arena_ind_get(arena);
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
3172
3173 /******************************************************************************/
3174
/*
 * "prof.thread_active_init": read and/or set the default prof_active state
 * inherited by newly created threads.  ENOENT when profiling is compiled
 * out, or when writing while opt_prof is disabled.
 */
static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (!opt_prof) {
			ret = ENOENT;
			goto label_return;
		}
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		/* Setter returns the previous value. */
		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
		    *(bool *)newp);
	} else {
		/* Read-only; report false when profiling is disabled. */
		oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) :
		    false;
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}
3207
3208 static int
prof_active_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)3209 prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3210 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3211 int ret;
3212 bool oldval;
3213
3214 if (!config_prof) {
3215 ret = ENOENT;
3216 goto label_return;
3217 }
3218
3219 if (newp != NULL) {
3220 if (newlen != sizeof(bool)) {
3221 ret = EINVAL;
3222 goto label_return;
3223 }
3224 bool val = *(bool *)newp;
3225 if (!opt_prof) {
3226 if (val) {
3227 ret = ENOENT;
3228 goto label_return;
3229 } else {
3230 /* No change needed (already off). */
3231 oldval = false;
3232 }
3233 } else {
3234 oldval = prof_active_set(tsd_tsdn(tsd), val);
3235 }
3236 } else {
3237 oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false;
3238 }
3239 READ(oldval, bool);
3240
3241 ret = 0;
3242 label_return:
3243 return ret;
3244 }
3245
/*
 * "prof.dump": write-only trigger for a manual heap profile dump; an
 * optional filename may be supplied via newp (NULL uses the default naming).
 */
static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *filename = NULL;

	if (!config_prof || !opt_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(filename, const char *);

	/* prof_mdump() returns true on failure. */
	if (prof_mdump(tsd, filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}
3268
/*
 * "prof.gdump": read and/or set whether a profile is dumped every time the
 * total virtual memory exceeds the previous maximum.
 */
static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (!opt_prof) {
			ret = ENOENT;
			goto label_return;
		}
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		/* Setter returns the previous value. */
		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
	} else {
		/* Read-only; report false when profiling is disabled. */
		oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false;
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}
3298
/*
 * "prof.prefix": write-only; set the filename prefix used for profile
 * dumps.  Serialized under ctl_mtx.
 */
static int
prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *prefix = NULL;

	if (!config_prof || !opt_prof) {
		return ENOENT;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITEONLY();
	WRITE(prefix, const char *);

	/* prof_prefix_set() returns true on failure. */
	ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? EFAULT : 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
3318
/*
 * "prof.reset": write-only; discard accumulated profile data and optionally
 * change lg_prof_sample (defaults to the current value when not written).
 */
static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	size_t lg_sample = lg_prof_sample;

	if (!config_prof || !opt_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(lg_sample, size_t);
	/* Clamp to 63: 2^lg_sample must fit in a uint64_t sample counter. */
	if (lg_sample >= (sizeof(uint64_t) << 3)) {
		lg_sample = (sizeof(uint64_t) << 3) - 1;
	}

	prof_reset(tsd, lg_sample);

	ret = 0;
label_return:
	return ret;
}
3341
/* Read-only profiling parameters (present only when config_prof). */
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
3344
/*
 * "prof.log_start": write-only; begin logging profile events, optionally to
 * a caller-supplied filename (NULL uses the default).
 */
static int
prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	const char *filename = NULL;

	if (!config_prof || !opt_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(filename, const char *);

	/* prof_log_start() returns true on failure. */
	if (prof_log_start(tsd_tsdn(tsd), filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}
3368
3369 static int
prof_log_stop_ctl(tsd_t * tsd,const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)3370 prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
3371 size_t *oldlenp, void *newp, size_t newlen) {
3372 if (!config_prof || !opt_prof) {
3373 return ENOENT;
3374 }
3375
3376 if (prof_log_stop(tsd_tsdn(tsd))) {
3377 return EFAULT;
3378 }
3379
3380 return 0;
3381 }
3382
/*
 * "experimental.hooks.prof_backtrace": read and/or replace the backtrace
 * hook.  Unlike the dump hook below, a NULL replacement is rejected —
 * profiling always needs a backtrace callback.
 */
static int
experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	/* Must be a read, a write, or both. */
	if (oldp == NULL && newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	if (oldp != NULL) {
		prof_backtrace_hook_t old_hook =
		    prof_backtrace_hook_get();
		READ(old_hook, prof_backtrace_hook_t);
	}
	if (newp != NULL) {
		if (!opt_prof) {
			ret = ENOENT;
			goto label_return;
		}
		prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
		WRITE(new_hook, prof_backtrace_hook_t);
		if (new_hook == NULL) {
			ret = EINVAL;
			goto label_return;
		}
		prof_backtrace_hook_set(new_hook);
	}
	ret = 0;
label_return:
	return ret;
}
3414
/*
 * "experimental.hooks.prof_dump": read and/or replace the dump hook.  A NULL
 * replacement is accepted here (clears the hook).
 */
static int
experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	/* Must be a read, a write, or both. */
	if (oldp == NULL && newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	if (oldp != NULL) {
		prof_dump_hook_t old_hook =
		    prof_dump_hook_get();
		READ(old_hook, prof_dump_hook_t);
	}
	if (newp != NULL) {
		if (!opt_prof) {
			ret = ENOENT;
			goto label_return;
		}
		prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
		WRITE(new_hook, prof_dump_hook_t);
		prof_dump_hook_set(new_hook);
	}
	ret = 0;
label_return:
	return ret;
}
3442
/* For integration test purpose only. No plan to move out of experimental. */
static int
experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	/* Write-only: installs the abort hook used by safety checks. */
	WRITEONLY();
	if (newp != NULL) {
		if (newlen != sizeof(safety_check_abort_hook_t)) {
			ret = EINVAL;
			goto label_return;
		}
		safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL);
		WRITE(hook, safety_check_abort_hook_t);
		safety_check_set_abort(hook);
	}
	ret = 0;
label_return:
	return ret;
}
3463
3464 /******************************************************************************/
3465
/* Global memory-accounting stats (snapshotted into ctl_stats by refresh). */
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)

/* Background-thread activity counters. */
CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
    ctl_stats->background_thread.num_threads, size_t)
CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
    ctl_stats->background_thread.num_runs, uint64_t)
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
    nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)

CTL_RO_CGEN(config_stats, stats_zero_reallocs,
    atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t)

/* Per-arena snapshot stats ("stats.arenas.<i>.*"). */
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_uptime,
    nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    arenas_i(mib[2])->astats->astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
    arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
    arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
3501
/* Dirty-page decay counters for "stats.arenas.<i>.*". */
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
    uint64_t)

/* Muzzy-page decay counters. */
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
    uint64_t)

/* Miscellaneous per-arena byte counts. */
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
    arenas_i(mib[2])->astats->astats.base,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
    arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
    arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
    arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
    arenas_i(mib[2])->astats->astats.resident,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
    ATOMIC_RELAXED), size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes,
    arenas_i(mib[2])->astats->secstats.bytes, size_t)

/* Small (bin-backed) allocation aggregates. */
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
    arenas_i(mib[2])->astats->nfills_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
    arenas_i(mib[2])->astats->nflushes_small, uint64_t)
/* Large allocation aggregates. */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t)
/*
 * Note: "nmalloc_large" here instead of "nfills" in the read. This is
 * intentional (large has no batch fill).
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
    arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t)
3579
/* Lock profiling related APIs below. */
/*
 * Generates the seven per-mutex profiling readers (ops, waits, spins, owner
 * switches, total/max wait time, max waiting threads) for mutex `n`, reading
 * from the snapshot expression `l`.
 */
#define RO_MUTEX_CTL_GEN(n, l)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_ops,				\
    l.n_lock_ops, uint64_t)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_wait,				\
    l.n_wait_times, uint64_t)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq,			\
    l.n_spin_acquired, uint64_t)					\
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch,			\
    l.n_owner_switches, uint64_t)					\
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time,			\
    nstime_ns(&l.tot_wait_time), uint64_t)				\
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time,			\
    nstime_ns(&l.max_wait_time), uint64_t)				\
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds,			\
    l.max_n_thds, uint32_t)

/* Global mutexes. */
#define OP(mtx)								\
    RO_MUTEX_CTL_GEN(mutexes_##mtx,					\
        ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx,		\
    arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* tcache bin mutex */
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
    arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
#undef RO_MUTEX_CTL_GEN
3614
/* Resets all mutex stats, including global, arena and bin mutexes. */
static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	if (!config_stats) {
		return ENOENT;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);

/* Reset one mutex's profiling data while holding the mutex itself. */
#define MUTEX_PROF_RESET(mtx)						\
    malloc_mutex_lock(tsdn, &mtx);					\
    malloc_mutex_prof_data_reset(tsdn, &mtx);				\
    malloc_mutex_unlock(tsdn, &mtx);

	/* Global mutexes: ctl and prof. */
	MUTEX_PROF_RESET(ctl_mtx);
	if (have_background_thread) {
		MUTEX_PROF_RESET(background_thread_lock);
	}
	if (config_prof && opt_prof) {
		MUTEX_PROF_RESET(bt2gctx_mtx);
		MUTEX_PROF_RESET(tdatas_mtx);
		MUTEX_PROF_RESET(prof_dump_mtx);
		MUTEX_PROF_RESET(prof_recent_alloc_mtx);
		MUTEX_PROF_RESET(prof_recent_dump_mtx);
		MUTEX_PROF_RESET(prof_stats_mtx);
	}

	/* Per arena mutexes. */
	unsigned n = narenas_total_get();

	for (unsigned i = 0; i < n; i++) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			/* Skip arena slots that were never initialized. */
			continue;
		}
		MUTEX_PROF_RESET(arena->large_mtx);
		MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
		MUTEX_PROF_RESET(arena->base->mtx);

		/* Every shard of every bin. */
		for (szind_t j = 0; j < SC_NBINS; j++) {
			for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
				bin_t *bin = arena_get_bin(arena, j, k);
				MUTEX_PROF_RESET(bin->lock);
			}
		}
	}
#undef MUTEX_PROF_RESET
	return 0;
}
3673
/* Per-bin stats ("stats.arenas.<i>.bins.<j>.*"), from the bstats snapshot. */
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t)
3694
3695 static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t j)3696 stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
3697 size_t miblen, size_t j) {
3698 if (j > SC_NBINS) {
3699 return NULL;
3700 }
3701 return super_stats_arenas_i_bins_j_node;
3702 }
3703
/* Per-large-class stats ("stats.arenas.<i>.lextents.<j>.*"). */
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
3715
3716 static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t j)3717 stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
3718 size_t miblen, size_t j) {
3719 if (j > SC_NSIZES - SC_NBINS) {
3720 return NULL;
3721 }
3722 return super_stats_arenas_i_lextents_j_node;
3723 }
3724
/*
 * Read-only ctl leaves "stats.arenas.<i>.extents.<j>.*": per-page-size-class
 * extent counts and byte totals (dirty/muzzy/retained), from the ctl
 * snapshot's estats.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
    arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
    arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
    arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
    arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
    arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
    arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t);
3737
3738 static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t j)3739 stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
3740 size_t miblen, size_t j) {
3741 if (j >= SC_NPSIZES) {
3742 return NULL;
3743 }
3744 return super_stats_arenas_i_extents_j_node;
3745 }
3746
/*
 * Read-only ctl leaves "stats.arenas.<i>.hpa_shard.*": HPA shard purge and
 * hugification event counters from the ctl snapshot.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
    arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
    arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
    arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
    arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t);
3755
/*
 * Read-only ctl leaves for HPA pageslab-set stats.  The per-leaf names pair
 * "_nonhuge" with array index [0] and "_huge" with index [1]; nonfull slab
 * leaves additionally index by page size class via mib[5].
 */
/* Full, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs,
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t);

/* Full, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs,
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t);

/* Empty, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs,
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t);

/* Empty, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs,
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t);

/* Nonfull, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs,
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive,
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty,
    size_t);

/* Nonfull, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs,
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive,
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty,
    size_t);
3813
3814 static const ctl_named_node_t *
stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t j)3815 stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib,
3816 size_t miblen, size_t j) {
3817 if (j >= PSSET_NPSIZES) {
3818 return NULL;
3819 }
3820 return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
3821 }
3822
3823 static bool
ctl_arenas_i_verify(size_t i)3824 ctl_arenas_i_verify(size_t i) {
3825 size_t a = arenas_i2a_impl(i, true, true);
3826 if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
3827 return true;
3828 }
3829
3830 return false;
3831 }
3832
3833 static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t i)3834 stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
3835 size_t miblen, size_t i) {
3836 const ctl_named_node_t *ret;
3837
3838 malloc_mutex_lock(tsdn, &ctl_mtx);
3839 if (ctl_arenas_i_verify(i)) {
3840 ret = NULL;
3841 goto label_return;
3842 }
3843
3844 ret = super_stats_arenas_i_node;
3845 label_return:
3846 malloc_mutex_unlock(tsdn, &ctl_mtx);
3847 return ret;
3848 }
3849
/*
 * Ctl handler for "experimental.hooks.install": copy a caller-supplied
 * hooks_t from newp, register it, and return the resulting opaque handle
 * through oldp.  Both the read and write buffers are mandatory.
 */
static int
experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	if (oldp == NULL || oldlenp == NULL|| newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	/*
	 * Note: this is a *private* struct.  This is an experimental interface;
	 * forcing the user to know the jemalloc internals well enough to
	 * extract the ABI hopefully ensures nobody gets too comfortable with
	 * this API, which can change at a moment's notice.
	 */
	hooks_t hooks;
	/* NOTE(review): WRITE() appears to jump to label_return on a size
	 * mismatch, leaving ret set — confirm against the ctl macros. */
	WRITE(hooks, hooks_t);
	void *handle = hook_install(tsd_tsdn(tsd), &hooks);
	if (handle == NULL) {
		/* Installation slots exhausted; caller may retry later. */
		ret = EAGAIN;
		goto label_return;
	}
	READ(handle, void *);

	ret = 0;
label_return:
	return ret;
}
3877
/*
 * Ctl handler for "experimental.hooks.remove": write-only; takes the
 * opaque handle previously returned by "experimental.hooks.install" via
 * newp and unregisters it.  A NULL handle is rejected with EINVAL.
 */
static int
experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	WRITEONLY();
	void *handle = NULL;
	WRITE(handle, void *);
	if (handle == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	hook_remove(tsd_tsdn(tsd), handle);
	ret = 0;
label_return:
	return ret;
}
3894
/*
 * Ctl handler for "experimental.thread.activity_callback": read and/or
 * replace the calling thread's activity callback thunk.  The previous
 * thunk is returned through oldp (if requested) before any new value from
 * newp is installed.  Requires stats support.
 */
static int
experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!config_stats) {
		return ENOENT;
	}

	activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
	READ(t_old, activity_callback_thunk_t);

	if (newp != NULL) {
		/*
		 * This initialization is unnecessary.  If it's omitted, though,
		 * clang gets confused and warns on the subsequent use of t_new.
		 */
		activity_callback_thunk_t t_new = {NULL, NULL};
		WRITE(t_new, activity_callback_thunk_t);
		tsd_activity_callback_thunk_set(tsd, t_new);
	}
	ret = 0;
label_return:
	return ret;
}
3920
3921 /*
3922 * Output six memory utilization entries for an input pointer, the first one of
3923 * type (void *) and the remaining five of type size_t, describing the following
3924 * (in the same order):
3925 *
3926 * (a) memory address of the extent a potential reallocation would go into,
3927 * == the five fields below describe about the extent the pointer resides in ==
3928 * (b) number of free regions in the extent,
3929 * (c) number of regions in the extent,
3930 * (d) size of the extent in terms of bytes,
3931 * (e) total number of free regions in the bin the extent belongs to, and
3932 * (f) total number of regions in the bin the extent belongs to.
3933 *
3934 * Note that "(e)" and "(f)" are only available when stats are enabled;
3935 * otherwise their values are undefined.
3936 *
3937 * This API is mainly intended for small class allocations, where extents are
3938 * used as slab. Note that if the bin the extent belongs to is completely
3939 * full, "(a)" will be NULL.
3940 *
3941 * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
3942 * will be zero (if stats are enabled; otherwise undefined). The other three
3943 * fields will be properly set though the values are trivial: "(b)" will be 0,
3944 * "(c)" will be 1, and "(d)" will be the usable size.
3945 *
3946 * The input pointer and size are respectively passed in by newp and newlen,
3947 * and the output fields and size are respectively oldp and *oldlenp.
3948 *
3949 * It can be beneficial to define the following macros to make it easier to
3950 * access the output:
3951 *
3952 * #define SLABCUR_READ(out) (*(void **)out)
3953 * #define COUNTS(out) ((size_t *)((void **)out + 1))
3954 * #define NFREE_READ(out) COUNTS(out)[0]
3955 * #define NREGS_READ(out) COUNTS(out)[1]
3956 * #define SIZE_READ(out) COUNTS(out)[2]
3957 * #define BIN_NFREE_READ(out) COUNTS(out)[3]
3958 * #define BIN_NREGS_READ(out) COUNTS(out)[4]
3959 *
3960 * and then write e.g. NFREE_READ(oldp) to fetch the output. See the unit test
3961 * test_query in test/unit/extent_util.c for an example.
3962 *
3963 * For a typical defragmentation workflow making use of this API for
3964 * understanding the fragmentation level, please refer to the comment for
3965 * experimental_utilization_batch_query_ctl.
3966 *
3967 * It's up to the application how to determine the significance of
3968 * fragmentation relying on the outputs returned. Possible choices are:
3969 *
3970 * (a) if extent utilization ratio is below certain threshold,
3971 * (b) if extent memory consumption is above certain threshold,
3972 * (c) if extent utilization ratio is significantly below bin utilization ratio,
3973 * (d) if input pointer deviates a lot from potential reallocation address, or
3974 * (e) some selection/combination of the above.
3975 *
3976 * The caller needs to make sure that the input/output arguments are valid,
3977 * in particular, that the size of the output is correct, i.e.:
3978 *
3979 * *oldlenp = sizeof(void *) + sizeof(size_t) * 5
3980 *
3981 * Otherwise, the function immediately returns EINVAL without touching anything.
3982 *
3983 * In the rare case where there's no associated extent found for the input
 * pointer, the function zeros out all output fields and returns.  Please refer
3985 * to the comment for experimental_utilization_batch_query_ctl to understand the
3986 * motivation from C++.
3987 */
/*
 * Ctl handler for "experimental.utilization.query".  The input pointer
 * arrives via newp; the six output fields are written into the
 * caller-provided oldp buffer, whose size must exactly match
 * inspect_extent_util_stats_verbose_t.  See the long comment above for the
 * full output contract.
 */
static int
experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	/* The struct layout must match the documented (void *) + 5 size_t. */
	assert(sizeof(inspect_extent_util_stats_verbose_t)
	    == sizeof(void *) + sizeof(size_t) * 5);

	if (oldp == NULL || oldlenp == NULL
	    || *oldlenp != sizeof(inspect_extent_util_stats_verbose_t)
	    || newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}

	void *ptr = NULL;
	WRITE(ptr, void *);
	/* Write results directly into the caller's buffer. */
	inspect_extent_util_stats_verbose_t *util_stats
	    = (inspect_extent_util_stats_verbose_t *)oldp;
	inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
	    &util_stats->nfree, &util_stats->nregs, &util_stats->size,
	    &util_stats->bin_nfree, &util_stats->bin_nregs,
	    &util_stats->slabcur_addr);
	ret = 0;

label_return:
	return ret;
}
4016
4017 /*
4018 * Given an input array of pointers, output three memory utilization entries of
4019 * type size_t for each input pointer about the extent it resides in:
4020 *
4021 * (a) number of free regions in the extent,
4022 * (b) number of regions in the extent, and
4023 * (c) size of the extent in terms of bytes.
4024 *
4025 * This API is mainly intended for small class allocations, where extents are
4026 * used as slab. In case of large class allocations, the outputs are trivial:
4027 * "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable size.
4028 *
 * Note that multiple input pointers may reside on the same extent so the output
4030 * fields may contain duplicates.
4031 *
4032 * The format of the input/output looks like:
4033 *
4034 * input[0]: 1st_pointer_to_query | output[0]: 1st_extent_n_free_regions
4035 * | output[1]: 1st_extent_n_regions
4036 * | output[2]: 1st_extent_size
4037 * input[1]: 2nd_pointer_to_query | output[3]: 2nd_extent_n_free_regions
4038 * | output[4]: 2nd_extent_n_regions
4039 * | output[5]: 2nd_extent_size
4040 * ... | ...
4041 *
4042 * The input array and size are respectively passed in by newp and newlen, and
4043 * the output array and size are respectively oldp and *oldlenp.
4044 *
4045 * It can be beneficial to define the following macros to make it easier to
4046 * access the output:
4047 *
4048 * #define NFREE_READ(out, i) out[(i) * 3]
4049 * #define NREGS_READ(out, i) out[(i) * 3 + 1]
4050 * #define SIZE_READ(out, i) out[(i) * 3 + 2]
4051 *
4052 * and then write e.g. NFREE_READ(oldp, i) to fetch the output. See the unit
4053 * test test_batch in test/unit/extent_util.c for a concrete example.
4054 *
4055 * A typical workflow would be composed of the following steps:
4056 *
4057 * (1) flush tcache: mallctl("thread.tcache.flush", ...)
4058 * (2) initialize input array of pointers to query fragmentation
4059 * (3) allocate output array to hold utilization statistics
4060 * (4) query utilization: mallctl("experimental.utilization.batch_query", ...)
4061 * (5) (optional) decide if it's worthwhile to defragment; otherwise stop here
4062 * (6) disable tcache: mallctl("thread.tcache.enabled", ...)
4063 * (7) defragment allocations with significant fragmentation, e.g.:
4064 * for each allocation {
4065 * if it's fragmented {
4066 * malloc(...);
4067 * memcpy(...);
4068 * free(...);
4069 * }
4070 * }
4071 * (8) enable tcache: mallctl("thread.tcache.enabled", ...)
4072 *
4073 * The application can determine the significance of fragmentation themselves
4074 * relying on the statistics returned, both at the overall level i.e. step "(5)"
4075 * and at individual allocation level i.e. within step "(7)". Possible choices
4076 * are:
4077 *
4078 * (a) whether memory utilization ratio is below certain threshold,
4079 * (b) whether memory consumption is above certain threshold, or
4080 * (c) some combination of the two.
4081 *
4082 * The caller needs to make sure that the input/output arrays are valid and
4083 * their sizes are proper as well as matched, meaning:
4084 *
4085 * (a) newlen = n_pointers * sizeof(const void *)
4086 * (b) *oldlenp = n_pointers * sizeof(size_t) * 3
4087 * (c) n_pointers > 0
4088 *
4089 * Otherwise, the function immediately returns EINVAL without touching anything.
4090 *
4091 * In the rare case where there's no associated extent found for some pointers,
4092 * rather than immediately terminating the computation and raising an error,
4093 * the function simply zeros out the corresponding output fields and continues
4094 * the computation until all input pointers are handled. The motivations of
4095 * such a design are as follows:
4096 *
4097 * (a) The function always either processes nothing or processes everything, and
4098 * never leaves the output half touched and half untouched.
4099 *
4100 * (b) It facilitates usage needs especially common in C++. A vast variety of
4101 * C++ objects are instantiated with multiple dynamic memory allocations. For
4102 * example, std::string and std::vector typically use at least two allocations,
4103 * one for the metadata and one for the actual content. Other types may use
4104 * even more allocations. When inquiring about utilization statistics, the
4105 * caller often wants to examine into all such allocations, especially internal
4106 * one(s), rather than just the topmost one. The issue comes when some
4107 * implementations do certain optimizations to reduce/aggregate some internal
4108 * allocations, e.g. putting short strings directly into the metadata, and such
4109 * decisions are not known to the caller. Therefore, we permit pointers to
4110 * memory usages that may not be returned by previous malloc calls, and we
4111 * provide the caller a convenient way to identify such cases.
4112 */
/*
 * Ctl handler for "experimental.utilization.batch_query".  newp carries an
 * array of pointers; for each one, three size_t fields are written into
 * the oldp array.  Input and output sizes must agree (newlen a multiple of
 * a pointer, *oldlenp exactly three size_t per pointer).  See the long
 * comment above for the full contract.
 */
static int
experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	/* The struct layout must match the documented 3 size_t per entry. */
	assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3);

	/* len is the number of input pointers; newlen == 0 makes len 0 and is
	 * rejected below, so no division hazard. */
	const size_t len = newlen / sizeof(const void *);
	if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
	    || newlen != len * sizeof(const void *)
	    || *oldlenp != len * sizeof(inspect_extent_util_stats_t)) {
		ret = EINVAL;
		goto label_return;
	}

	void **ptrs = (void **)newp;
	inspect_extent_util_stats_t *util_stats =
	    (inspect_extent_util_stats_t *)oldp;
	size_t i;
	/* All-or-nothing: every entry is written, unknown pointers included
	 * (their fields are zeroed by the callee per the comment above). */
	for (i = 0; i < len; ++i) {
		inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
		    &util_stats[i].nfree, &util_stats[i].nregs,
		    &util_stats[i].size);
	}
	ret = 0;

label_return:
	return ret;
}
4142
4143 static const ctl_named_node_t *
experimental_arenas_i_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t i)4144 experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
4145 size_t miblen, size_t i) {
4146 const ctl_named_node_t *ret;
4147
4148 malloc_mutex_lock(tsdn, &ctl_mtx);
4149 if (ctl_arenas_i_verify(i)) {
4150 ret = NULL;
4151 goto label_return;
4152 }
4153 ret = super_experimental_arenas_i_node;
4154 label_return:
4155 malloc_mutex_unlock(tsdn, &ctl_mtx);
4156 return ret;
4157 }
4158
/*
 * Ctl handler for "experimental.arenas.<i>.pactivep": read-only; returns a
 * pointer to the arena's live nactive counter through oldp so callers can
 * poll it without further mallctl calls.  Only supported when the atomics
 * backend stores the counter as a plain word (see the #if below); EFAULT
 * otherwise, and for invalid/uninitialized arenas.
 */
static int
experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	if (!config_stats) {
		return ENOENT;
	}
	if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) {
		return EINVAL;
	}

	unsigned arena_ind;
	arena_t *arena;
	int ret;
	size_t *pactivep;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	MIB_UNSIGNED(arena_ind, 2);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
    defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
		/* Expose the underlying counter for fast read. */
		pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
		READ(pactivep, size_t *);
		ret = 0;
#else
		/* Atomic repr is not directly addressable as size_t here. */
		ret = EFAULT;
#endif
	} else {
		ret = EFAULT;
	}
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
4195
/*
 * Ctl handler for "experimental.prof_recent.alloc_max": read and/or write
 * the cap on recorded recent profiled allocations.  Writes below -1 are
 * rejected (-1 itself is accepted; presumably "unlimited" — confirm
 * against prof_recent).  The pre-write value is returned through oldp.
 * Requires profiling compiled in and enabled.
 */
static int
experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!(config_prof && opt_prof)) {
		ret = ENOENT;
		goto label_return;
	}

	ssize_t old_max;
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		ssize_t max = *(ssize_t *)newp;
		if (max < -1) {
			ret = EINVAL;
			goto label_return;
		}
		/* Swap in the new limit and capture the old one. */
		old_max = prof_recent_alloc_max_ctl_write(tsd, max);
	} else {
		old_max = prof_recent_alloc_max_ctl_read();
	}
	READ(old_max, ssize_t);

	ret = 0;

label_return:
	return ret;
}
4228
/*
 * Input packet for "experimental.prof_recent.alloc_dump": a write callback
 * plus its opaque argument, passed together through newp.
 */
typedef struct write_cb_packet_s write_cb_packet_t;
struct write_cb_packet_s {
	write_cb_t *write_cb;	/* Sink for the dump output. */
	void *cbopaque;		/* Forwarded untouched to write_cb. */
};
4234
/*
 * Ctl handler for "experimental.prof_recent.alloc_dump": write-only; the
 * caller supplies a write_cb_packet_t via newp, and the recent-allocation
 * record is streamed through its callback.  Requires profiling compiled in
 * and enabled.
 */
static int
experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!(config_prof && opt_prof)) {
		ret = ENOENT;
		goto label_return;
	}

	/* The packet layout is two pointers; keep the ABI assumption checked. */
	assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2);

	WRITEONLY();
	write_cb_packet_t write_cb_packet;
	ASSURED_WRITE(write_cb_packet, write_cb_packet_t);

	prof_recent_alloc_dump(tsd, write_cb_packet.write_cb,
	    write_cb_packet.cbopaque);

	ret = 0;

label_return:
	return ret;
}
4259
/*
 * Input packet for "experimental.batch_alloc", passed through newp:
 * describes the destination array and the allocation request.
 */
typedef struct batch_alloc_packet_s batch_alloc_packet_t;
struct batch_alloc_packet_s {
	void **ptrs;	/* Output array for the allocated pointers. */
	size_t num;	/* Number of allocations requested. */
	size_t size;	/* Size of each allocation. */
	int flags;	/* MALLOCX_*-style flags (see batch_alloc). */
};
4267
/*
 * Ctl handler for "experimental.batch_alloc": reads a batch_alloc_packet_t
 * from newp, performs the batch allocation, and returns the number of
 * pointers actually filled through oldp (a size_t).
 */
static int
experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	VERIFY_READ(size_t);

	batch_alloc_packet_t batch_alloc_packet;
	ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t);
	/* filled may be < num if allocation fails partway. */
	size_t filled = batch_alloc(batch_alloc_packet.ptrs,
	    batch_alloc_packet.num, batch_alloc_packet.size,
	    batch_alloc_packet.flags);
	READ(filled, size_t);

	ret = 0;

label_return:
	return ret;
}
4287
/*
 * Ctl handler for "prof.stats.bins.<i>.live": read-only; returns the live
 * profiled-allocation stats for small size class i (mib[3]).  Requires
 * profiling stats to be compiled in and enabled.
 */
static int
prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned binind;
	prof_stats_t stats;

	if (!(config_prof && opt_prof && opt_prof_stats)) {
		ret = ENOENT;
		goto label_return;
	}

	READONLY();
	MIB_UNSIGNED(binind, 3);
	if (binind >= SC_NBINS) {
		ret = EINVAL;
		goto label_return;
	}
	prof_stats_get_live(tsd, (szind_t)binind, &stats);
	READ(stats, prof_stats_t);

	ret = 0;
label_return:
	return ret;
}
4313
/*
 * Ctl handler for "prof.stats.bins.<i>.accum": read-only; returns the
 * accumulated profiled-allocation stats for small size class i (mib[3]).
 * Requires profiling stats to be compiled in and enabled.
 */
static int
prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned binind;
	prof_stats_t stats;

	if (!(config_prof && opt_prof && opt_prof_stats)) {
		ret = ENOENT;
		goto label_return;
	}

	READONLY();
	MIB_UNSIGNED(binind, 3);
	if (binind >= SC_NBINS) {
		ret = EINVAL;
		goto label_return;
	}
	prof_stats_get_accum(tsd, (szind_t)binind, &stats);
	READ(stats, prof_stats_t);

	ret = 0;
label_return:
	return ret;
}
4339
4340 static const ctl_named_node_t *
prof_stats_bins_i_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t i)4341 prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
4342 size_t i) {
4343 if (!(config_prof && opt_prof && opt_prof_stats)) {
4344 return NULL;
4345 }
4346 if (i >= SC_NBINS) {
4347 return NULL;
4348 }
4349 return super_prof_stats_bins_i_node;
4350 }
4351
/*
 * Ctl handler for "prof.stats.lextents.<i>.live": read-only; returns the
 * live profiled-allocation stats for large size class i (mib[3], offset
 * by SC_NBINS into the global size class index space).  Requires
 * profiling stats to be compiled in and enabled.
 */
static int
prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned lextent_ind;
	prof_stats_t stats;

	if (!(config_prof && opt_prof && opt_prof_stats)) {
		ret = ENOENT;
		goto label_return;
	}

	READONLY();
	MIB_UNSIGNED(lextent_ind, 3);
	if (lextent_ind >= SC_NSIZES - SC_NBINS) {
		ret = EINVAL;
		goto label_return;
	}
	prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
	READ(stats, prof_stats_t);

	ret = 0;
label_return:
	return ret;
}
4377
/*
 * Ctl handler for "prof.stats.lextents.<i>.accum": read-only; returns the
 * accumulated profiled-allocation stats for large size class i (mib[3],
 * offset by SC_NBINS into the global size class index space).  Requires
 * profiling stats to be compiled in and enabled.
 */
static int
prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned lextent_ind;
	prof_stats_t stats;

	if (!(config_prof && opt_prof && opt_prof_stats)) {
		ret = ENOENT;
		goto label_return;
	}

	READONLY();
	MIB_UNSIGNED(lextent_ind, 3);
	if (lextent_ind >= SC_NSIZES - SC_NBINS) {
		ret = EINVAL;
		goto label_return;
	}
	prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
	READ(stats, prof_stats_t);

	ret = 0;
label_return:
	return ret;
}
4403
4404 static const ctl_named_node_t *
prof_stats_lextents_i_index(tsdn_t * tsdn,const size_t * mib,size_t miblen,size_t i)4405 prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
4406 size_t i) {
4407 if (!(config_prof && opt_prof && opt_prof_stats)) {
4408 return NULL;
4409 }
4410 if (i >= SC_NSIZES - SC_NBINS) {
4411 return NULL;
4412 }
4413 return super_prof_stats_lextents_i_node;
4414 }
4415