/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdlib.h>
#include <pthread.h>
#include <link.h>

#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
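/*
 * Note: these records are not allocated separately; when a stack is cached
 * by _thr_stack_free() below, the record is carved out of the high end of
 * the cached stack's own memory.
 */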

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);

/*
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   | (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    |  USRSTACK - _thr_stack_initial    | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
static char *last_stack = NULL;

/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
 */
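/*
 * For illustration only (the page size here is hypothetical): with a 4 KiB
 * _thr_page_size, round_up(70000) returns 73728 (18 pages), while an
 * already page-aligned size such as 65536 comes back unchanged.
 */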
static inline size_t
round_up(size_t size)
{
	return (roundup2(size, _thr_page_size));
}

void
_thr_stack_fix_protection(struct pthread *thrd)
{

	mprotect((char *)thrd->attr.stackaddr_attr +
	    round_up(thrd->attr.guardsize_attr),
	    round_up(thrd->attr.stacksize_attr),
	    _rtld_get_stack_prot());
}

static void
singlethread_map_stacks_exec(void)
{
	char *usrstack;
	size_t stacksz;

	if (!__thr_get_main_stack_base(&usrstack) ||
	    !__thr_get_main_stack_lim(&stacksz))
		return;
	mprotect(usrstack - stacksz, stacksz, _rtld_get_stack_prot());
}

void
__thr_map_stacks_exec(void)
{
	struct pthread *curthread, *thrd;
	struct stack *st;

	if (!_thr_is_inited()) {
		singlethread_map_stacks_exec();
		return;
	}
	curthread = _get_curthread();
	THREAD_LIST_RDLOCK(curthread);
	LIST_FOREACH(st, &mstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	LIST_FOREACH(st, &dstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	TAILQ_FOREACH(thrd, &_thread_gc_list, gcle)
		_thr_stack_fix_protection(thrd);
	TAILQ_FOREACH(thrd, &_thread_list, tle)
		_thr_stack_fix_protection(thrd);
	THREAD_LIST_UNLOCK(curthread);
}

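/*
 * Usage sketch (illustrative only; the real callers live elsewhere in
 * libthr, and new_thread and the EAGAIN return are hypothetical).  A thread
 * creator fills in a pthread_attr, asks this allocator for a stack, and
 * later returns the stack to the cache while holding the thread list lock,
 * as _thr_stack_free() requires:
 *
 *	if (_thr_stack_alloc(&new_thread->attr) != 0)
 *		return (EAGAIN);
 *	...
 *	THREAD_LIST_WRLOCK(curthread);
 *	_thr_stack_free(&new_thread->attr);
 *	THREAD_LIST_UNLOCK(curthread);
 */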
int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct pthread *curthread = _get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_WRLOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stacksize) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	}
	else {
		/*
		 * Allocate a stack from or below usrstack, depending
		 * on the LIBPTHREAD_BIGSTACK_MAIN env variable.
		 */
		if (last_stack == NULL)
			last_stack = _usrstack - _thr_stack_initial -
			    _thr_guard_default;

		/* Allocate a new stack. */
		stackaddr = last_stack - stacksize - guardsize;

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack -= (stacksize + guardsize);
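
		/*
		 * Purely illustrative (the sizes are hypothetical): with a
		 * 1 MiB stack and a 4 KiB guard, the new mapping starts
		 * 0x101000 bytes below last_stack, and last_stack moves down
		 * by that same amount whether or not the mmap() below
		 * succeeds.
		 */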

		/* Release the lock before mmap'ing it. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together, and split guard
		 * page from allocated space:
		 */
		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
		    _rtld_get_stack_prot(), MAP_STACK,
		    -1, 0)) != MAP_FAILED &&
		    (guardsize == 0 ||
		    mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
			stackaddr += guardsize;
		} else {
			if (stackaddr != MAP_FAILED)
				munmap(stackaddr, stacksize + guardsize);
			stackaddr = NULL;
		}
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
		spare_stack = (struct stack *)
		    ((char *)attr->stackaddr_attr +
		    attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}