#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"
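
/*
 * Return the id of the CPU the calling thread is currently running on;
 * requires percpu arena support.
 */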
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
	assert(have_percpu_arena);
#if defined(_WIN32)
	return GetCurrentProcessorNumber();
#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
	return (malloc_cpuid_t)sched_getcpu();
#else
	not_reached();
	return -1;
#endif
}

/* Return the chosen arena index based on the current CPU. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

	malloc_cpuid_t cpuid = malloc_getcpu();
	assert(cpuid >= 0);

	unsigned arena_ind;
	if ((opt_percpu_arena == percpu_arena) ||
	    ((unsigned)cpuid < ncpus / 2)) {
		arena_ind = cpuid;
	} else {
		assert(opt_percpu_arena == per_phycpu_arena);
		/* Hyper threads on the same physical CPU share an arena. */
		arena_ind = cpuid - ncpus / 2;
	}
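	/*
	 * E.g. with ncpus == 8 under per_phycpu_arena, cpuids 0..3 and
	 * 4..7 both map to arenas 0..3, so hyperthread siblings (0, 4),
	 * (1, 5), etc. share an arena; this assumes the common
	 * enumeration in which sibling i pairs with i + ncpus / 2.
	 */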

	return arena_ind;
}

/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
	if (mode == per_phycpu_arena && ncpus > 1) {
		if (ncpus % 2) {
			/* An odd ncpus likely indicates a misconfiguration. */
			return ncpus / 2 + 1;
		}
		return ncpus / 2;
	} else {
		return ncpus;
	}
}
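
/*
 * Return the arena at index ind; if it is uninitialized and
 * init_if_missing is true, initialize it with the default config.
 */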
static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
	arena_t *ret;

	assert(ind < MALLOCX_ARENA_LIMIT);

	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
	if (unlikely(ret == NULL)) {
		if (init_if_missing) {
			ret = arena_init(tsdn, ind, &arena_config_default);
		}
	}
	return ret;
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
	/*
	 * The thread-specific auto tcache might be unavailable if: 1) tcache
	 * initialization is in progress, or 2) it has been disabled through
	 * the thread.tcache.enabled mallctl or config options. This check
	 * covers all cases.
	 */
	if (likely(tsd_tcache_enabled_get(tsd))) {
		/* Associated arena == NULL implies tcache init in progress. */
		if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
			tcache_assert_initialized(tsd_tcachep_get(tsd));
		}
		return true;
	}

	return false;
}
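
/* Return the thread's auto tcache, or NULL if it is not available. */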
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcachep_get(tsd);
}
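
/* Return the thread's tcache_slow_t, or NULL if the tcache is unavailable. */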
JEMALLOC_ALWAYS_INLINE tcache_slow_t *
tcache_slow_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcache_slowp_get(tsd);
}
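
/*
 * Enter a reentrancy section, e.g. before calling external code that
 * may itself call back into jemalloc.
 */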
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context. Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
	tsd_pre_reentrancy_raw(tsd);
}
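
/* Exit the reentrancy section entered via pre_reentrancy(). */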
static inline void
post_reentrancy(tsd_t *tsd) {
	tsd_post_reentrancy_raw(tsd);
}

#endif /* JEMALLOC_INTERNAL_INLINES_A_H */