1 /*-
2 * Copyright (c) 2014-2015 The FreeBSD Foundation
3 *
4 * Portions of this software were developed by Andrew Turner
5 * under sponsorship from the FreeBSD Foundation.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/types.h>
30
31 #include <machine/sysarch.h>
32
33 #include <stdlib.h>
34
35 #include "debug.h"
36 #include "rtld.h"
37 #include "rtld_printf.h"
38
39 /*
40 * This is not the correct prototype, but we only need it for
41 * a function pointer to a simple asm function.
42 */
43 void *_rtld_tlsdesc_static(void *);
44 void *_rtld_tlsdesc_undef(void *);
45 void *_rtld_tlsdesc_dynamic(void *);
46
47 bool
arch_digest_dynamic(struct Struct_Obj_Entry * obj,const Elf_Dyn * dynp)48 arch_digest_dynamic(struct Struct_Obj_Entry *obj, const Elf_Dyn *dynp)
49 {
50 if (dynp->d_tag == DT_AARCH64_VARIANT_PCS) {
51 obj->variant_pcs = true;
52 return (true);
53 }
54
55 return (false);
56 }
57
/*
 * Digest a note from an object's note segment.  Only the GNU property
 * note (NT_GNU_PROPERTY_TYPE_0) is acted on: when it carries the
 * AArch64 BTI feature bit, ask the kernel to guard the object's whole
 * mapping.  Returns true iff the note was recognized and handled.
 */
bool
arch_digest_note(struct Struct_Obj_Entry *obj __unused, const Elf_Note *note)
{
	const char *note_name;
	const uint32_t *note_data;

	/* The name payload starts immediately after the fixed header. */
	note_name = (const char *)(note + 1);
	/* Only handle GNU notes */
	if (note->n_namesz != sizeof(ELF_NOTE_GNU) ||
	    strncmp(note_name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) != 0)
		return (false);

	/* Only handle GNU property notes */
	if (note->n_type != NT_GNU_PROPERTY_TYPE_0)
		return (false);

	/*
	 * note_data[0] - Type
	 * note_data[1] - Length
	 * note_data[2] - Data
	 * note_data[3] - Padding?
	 *
	 * NOTE(review): only the first property in the descriptor is
	 * examined, and note_data assumes n_namesz leaves the payload
	 * 4-byte aligned — TODO confirm this holds for all producers.
	 */
	note_data = (const uint32_t *)(note_name + note->n_namesz);

	/* Only handle AArch64 feature notes */
	if (note_data[0] != GNU_PROPERTY_AARCH64_FEATURE_1_AND)
		return (false);

	/* We expect at least 4 bytes of data */
	if (note_data[1] < 4)
		return (false);

	/* TODO: Only guard if HWCAP2_BTI is set */
	if ((note_data[2] & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) != 0) {
		struct arm64_guard_page_args guard;

		/* Guard the object's entire mapped range. */
		guard.addr = (uintptr_t)obj->mapbase;
		guard.len = obj->mapsize;

		sysarch(ARM64_GUARD_PAGE, &guard);
	}

	return (true);
}
102
103 void
init_pltgot(Obj_Entry * obj)104 init_pltgot(Obj_Entry *obj)
105 {
106
107 if (obj->pltgot != NULL) {
108 obj->pltgot[1] = (Elf_Addr) obj;
109 obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
110 }
111 }
112
/*
 * Process R_AARCH64_COPY relocations: for each, find the defining
 * instance of the symbol in an object loaded after the main program
 * and copy its initial contents into the main program's data segment.
 * Returns 0 on success, -1 (with an rtld error set) on failure.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Obj_Entry *srcobj, *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *srcsym;
	const Elf_Sym *dstsym;
	const void *srcaddr;
	const char *name;
	void *dstaddr;
	SymLook req;
	size_t size;
	int res;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((const char *)dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		if (ELF_R_TYPE(rela->r_info) != R_AARCH64_COPY)
			continue;

		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;

		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		/*
		 * Scan objects after the main program; srcsym/defobj are
		 * only assigned on a successful lookup (before break), so
		 * they are valid whenever srcobj != NULL below.
		 */
		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		    srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}
		if (srcobj == NULL) {
			_rtld_error("Undefined symbol \"%s\" referenced from "
			    "COPY relocation in %s", name, dstobj->path);
			return (-1);
		}

		/* Copy the definition's initial value into the program. */
		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
	}

	return (0);
}
169
/* Argument block handed to _rtld_tlsdesc_dynamic by reloc_tlsdesc(). */
struct tls_data {
	Elf_Addr dtv_gen;	/* tls_dtv_generation at allocation time */
	int tls_index;		/* module index of the defining object */
	Elf_Addr tls_offs;	/* offset of the variable within its block */
};
175
176 static struct tls_data *
reloc_tlsdesc_alloc(int tlsindex,Elf_Addr tlsoffs)177 reloc_tlsdesc_alloc(int tlsindex, Elf_Addr tlsoffs)
178 {
179 struct tls_data *tlsdesc;
180
181 tlsdesc = xmalloc(sizeof(struct tls_data));
182 tlsdesc->dtv_gen = tls_dtv_generation;
183 tlsdesc->tls_index = tlsindex;
184 tlsdesc->tls_offs = tlsoffs;
185
186 return (tlsdesc);
187 }
188
/*
 * In-memory layout of a TLS descriptor slot: a resolver function
 * pointer followed by one argument word whose meaning depends on which
 * resolver reloc_tlsdesc() installed.
 */
struct tlsdesc_entry {
	void *(*func)(void *);
	union {
		Elf_Ssize addend;	/* _rtld_tlsdesc_undef */
		Elf_Size offset;	/* _rtld_tlsdesc_static */
		struct tls_data *data;	/* _rtld_tlsdesc_dynamic */
	};
};
197
/*
 * Fill in a TLS descriptor for an R_AARCH64_TLSDESC relocation,
 * choosing one of the three asm resolver stubs depending on whether
 * the variable is undefined-weak, in the static TLS block, or must be
 * resolved dynamically.  Dies if a referenced symbol cannot be found.
 */
static void
reloc_tlsdesc(const Obj_Entry *obj, const Elf_Rela *rela,
    struct tlsdesc_entry *where, int flags, RtldLockState *lockstate)
{
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr offs;

	offs = 0;
	if (ELF_R_SYM(rela->r_info) != 0) {
		/* Symbolic reference; resolve the definition first. */
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
		    NULL, lockstate);
		if (def == NULL)
			rtld_die();
		offs = def->st_value;
		/* From here on, obj is the object that defines the TLS. */
		obj = defobj;
		if (def->st_shndx == SHN_UNDEF) {
			/* Weak undefined thread variable */
			where->func = _rtld_tlsdesc_undef;
			where->addend = rela->r_addend;
			return;
		}
	}
	offs += rela->r_addend;

	if (obj->tlsoffset != 0) {
		/* Variable is in initially allocated TLS segment */
		where->func = _rtld_tlsdesc_static;
		where->offset = obj->tlsoffset + offs;
	} else {
		/* TLS offset is unknown at load time, use dynamic resolving */
		where->func = _rtld_tlsdesc_dynamic;
		where->data = reloc_tlsdesc_alloc(obj->tlsindex, offs);
	}
}
233
234 /*
235 * Process the PLT relocations.
236 */
int
reloc_plt(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def, *sym;
	bool lazy;

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			lazy = true;
			if (obj->variant_pcs) {
				sym = &obj->symtab[ELF_R_SYM(rela->r_info)];
				/*
				 * Variant PCS functions don't follow the
				 * standard register convention. Because of
				 * this we can't use lazy relocation and
				 * need to set the target address.
				 */
				if ((sym->st_other & STO_AARCH64_VARIANT_PCS) !=
				    0)
					lazy = false;
			}
			if (lazy) {
				/*
				 * Lazy binding: the slot holds an
				 * image-relative value; make it absolute.
				 */
				*where += (Elf_Addr)obj->relocbase;
			} else {
				/* Bind now (variant PCS). */
				def = find_symdef(ELF_R_SYM(rela->r_info), obj,
				    &defobj, SYMLOOK_IN_PLT | flags, NULL,
				    lockstate);
				if (def == NULL)
					return (-1);
				if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC){
					/* Deferred to reloc_gnu_ifunc(). */
					obj->gnu_ifunc = true;
					continue;
				}
				target = (Elf_Addr)(defobj->relocbase +
				    def->st_value);
				/*
				 * Ignore ld_bind_not as it requires lazy
				 * binding
				 */
				*where = target;
			}
			break;
		case R_AARCH64_TLSDESC:
			reloc_tlsdesc(obj, rela, (struct tlsdesc_entry *)where,
			    SYMLOOK_IN_PLT | flags, lockstate);
			break;
		case R_AARCH64_IRELATIVE:
			/* Deferred to reloc_iresolve(). */
			obj->irelative = true;
			break;
		case R_AARCH64_NONE:
			break;
		default:
			_rtld_error("Unknown relocation type %u in PLT",
			    (unsigned int)ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}
307
308 /*
309 * LD_BIND_NOW was set - force relocation for all jump slots
310 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;

	/* Nothing to do if the slots were already fully bound. */
	if (obj->jmpslots_done)
		return (0);

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		/*
		 * Only jump slots are bound here; the other PLT
		 * relocation types were handled by reloc_plt().
		 */
		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				/* Deferred to reloc_gnu_ifunc(). */
				obj->gnu_ifunc = true;
				continue;
			}
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
			break;
		}
	}
	obj->jmpslots_done = true;

	return (0);
}
348
/*
 * Resolve one R_AARCH64_IRELATIVE relocation.  The addend is the
 * image-relative address of the resolver function; the value it
 * returns is stored into the relocated slot.  The bind lock is dropped
 * around the call since the resolver is arbitrary user code.
 */
static void
reloc_iresolve_one(Obj_Entry *obj, const Elf_Rela *rela,
    RtldLockState *lockstate)
{
	Elf_Addr *where, target, *ptr;

	ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
	where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	lock_release(rtld_bind_lock, lockstate);
	target = call_ifunc_resolver(ptr);
	wlock_acquire(rtld_bind_lock, lockstate);
	*where = target;
}
362
363 int
reloc_iresolve(Obj_Entry * obj,struct Struct_RtldLockState * lockstate)364 reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
365 {
366 const Elf_Rela *relalim;
367 const Elf_Rela *rela;
368
369 if (!obj->irelative)
370 return (0);
371 obj->irelative = false;
372 relalim = (const Elf_Rela *)((const char *)obj->pltrela +
373 obj->pltrelasize);
374 for (rela = obj->pltrela; rela < relalim; rela++) {
375 if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
376 reloc_iresolve_one(obj, rela, lockstate);
377 }
378 return (0);
379 }
380
381 int
reloc_iresolve_nonplt(Obj_Entry * obj,struct Struct_RtldLockState * lockstate)382 reloc_iresolve_nonplt(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
383 {
384 const Elf_Rela *relalim;
385 const Elf_Rela *rela;
386
387 if (!obj->irelative_nonplt)
388 return (0);
389 obj->irelative_nonplt = false;
390 relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
391 for (rela = obj->rela; rela < relalim; rela++) {
392 if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
393 reloc_iresolve_one(obj, rela, lockstate);
394 }
395 return (0);
396 }
397
/*
 * Bind jump slots whose targets are STT_GNU_IFUNC symbols; these were
 * skipped earlier by reloc_plt()/reloc_jmpslots(), which set
 * obj->gnu_ifunc.  Returns 0 on success, -1 on lookup failure.
 */
int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	Elf_Addr *where, target;
	const Elf_Sym *def;
	const Obj_Entry *defobj;

	if (!obj->gnu_ifunc)
		return (0);
	relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_AARCH64_JUMP_SLOT) {
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
			    SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
				continue;
			/*
			 * Drop the bind lock while running the resolver:
			 * it is arbitrary user code.
			 */
			lock_release(rtld_bind_lock, lockstate);
			target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
			wlock_acquire(rtld_bind_lock, lockstate);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
		}
	}
	obj->gnu_ifunc = false;
	return (0);
}
430
431 Elf_Addr
reloc_jmpslot(Elf_Addr * where,Elf_Addr target,const Obj_Entry * defobj __unused,const Obj_Entry * obj __unused,const Elf_Rel * rel)432 reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
433 const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
434 const Elf_Rel *rel)
435 {
436
437 assert(ELF_R_TYPE(rel->r_info) == R_AARCH64_JUMP_SLOT ||
438 ELF_R_TYPE(rel->r_info) == R_AARCH64_IRELATIVE);
439
440 if (*where != target && !ld_bind_not)
441 *where = target;
442 return (target);
443 }
444
/*
 * Hardware-capability block filled in by ifunc_init() from the aux
 * vector; _size lets consumers know how much of it is valid.
 * NOTE(review): presumably handed to STT_GNU_IFUNC resolvers alongside
 * the _IFUNC_ARG_HWCAP-tagged hwcap word — confirm in the MI rtld code.
 */
__ifunc_arg_t ifunc_arg = {
	._size = sizeof(__ifunc_arg_t)
};
448
449 void
ifunc_init(Elf_Auxinfo * aux_info[__min_size (AT_COUNT)])450 ifunc_init(Elf_Auxinfo *aux_info[__min_size(AT_COUNT)])
451 {
452 ifunc_arg._hwcap = aux_info[AT_HWCAP] != NULL ?
453 (aux_info[AT_HWCAP]->a_un.a_val | _IFUNC_ARG_HWCAP) : 0;
454 ifunc_arg._hwcap2 = aux_info[AT_HWCAP2] != NULL ?
455 aux_info[AT_HWCAP2]->a_un.a_val : 0;
456 ifunc_arg._hwcap3 = aux_info[AT_HWCAP3] != NULL ?
457 aux_info[AT_HWCAP3]->a_un.a_val : 0;
458 ifunc_arg._hwcap4 = aux_info[AT_HWCAP4] != NULL ?
459 aux_info[AT_HWCAP4]->a_un.a_val : 0;
460 }
461
462 /*
463 * Process non-PLT relocations
464 */
465 int
reloc_non_plt(Obj_Entry * obj,Obj_Entry * obj_rtld,int flags,RtldLockState * lockstate)466 reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
467 RtldLockState *lockstate)
468 {
469 const Obj_Entry *defobj;
470 const Elf_Rela *relalim;
471 const Elf_Rela *rela;
472 const Elf_Sym *def;
473 SymCache *cache;
474 Elf_Addr *where, symval;
475
476 /*
477 * The dynamic loader may be called from a thread, we have
478 * limited amounts of stack available so we cannot use alloca().
479 */
480 if (obj == obj_rtld)
481 cache = NULL;
482 else
483 cache = calloc(obj->dynsymcount, sizeof(SymCache));
484 /* No need to check for NULL here */
485
486 relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
487 for (rela = obj->rela; rela < relalim; rela++) {
488 /*
489 * First, resolve symbol for relocations which
490 * reference symbols.
491 */
492 switch (ELF_R_TYPE(rela->r_info)) {
493 case R_AARCH64_ABS64:
494 case R_AARCH64_GLOB_DAT:
495 case R_AARCH64_TLS_TPREL64:
496 case R_AARCH64_TLS_DTPREL64:
497 case R_AARCH64_TLS_DTPMOD64:
498 def = find_symdef(ELF_R_SYM(rela->r_info), obj,
499 &defobj, flags, cache, lockstate);
500 if (def == NULL)
501 return (-1);
502 /*
503 * If symbol is IFUNC, only perform relocation
504 * when caller allowed it by passing
505 * SYMLOOK_IFUNC flag. Skip the relocations
506 * otherwise.
507 *
508 * Also error out in case IFUNC relocations
509 * are specified for TLS, which cannot be
510 * usefully interpreted.
511 */
512 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
513 switch (ELF_R_TYPE(rela->r_info)) {
514 case R_AARCH64_ABS64:
515 case R_AARCH64_GLOB_DAT:
516 if ((flags & SYMLOOK_IFUNC) == 0) {
517 obj->non_plt_gnu_ifunc = true;
518 continue;
519 }
520 symval = (Elf_Addr)rtld_resolve_ifunc(
521 defobj, def);
522 break;
523 default:
524 _rtld_error("%s: IFUNC for TLS reloc",
525 obj->path);
526 return (-1);
527 }
528 } else {
529 if ((flags & SYMLOOK_IFUNC) != 0)
530 continue;
531 symval = (Elf_Addr)defobj->relocbase +
532 def->st_value;
533 }
534 break;
535 default:
536 if ((flags & SYMLOOK_IFUNC) != 0)
537 continue;
538 }
539
540 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
541
542 switch (ELF_R_TYPE(rela->r_info)) {
543 case R_AARCH64_ABS64:
544 case R_AARCH64_GLOB_DAT:
545 *where = symval + rela->r_addend;
546 break;
547 case R_AARCH64_COPY:
548 /*
549 * These are deferred until all other relocations have
550 * been done. All we do here is make sure that the
551 * COPY relocation is not in a shared library. They
552 * are allowed only in executable files.
553 */
554 if (!obj->mainprog) {
555 _rtld_error("%s: Unexpected R_AARCH64_COPY "
556 "relocation in shared library", obj->path);
557 return (-1);
558 }
559 break;
560 case R_AARCH64_TLSDESC:
561 reloc_tlsdesc(obj, rela, (struct tlsdesc_entry *)where,
562 flags, lockstate);
563 break;
564 case R_AARCH64_TLS_TPREL64:
565 /*
566 * We lazily allocate offsets for static TLS as we
567 * see the first relocation that references the
568 * TLS block. This allows us to support (small
569 * amounts of) static TLS in dynamically loaded
570 * modules. If we run out of space, we generate an
571 * error.
572 */
573 if (!defobj->tls_static) {
574 if (!allocate_tls_offset(
575 __DECONST(Obj_Entry *, defobj))) {
576 _rtld_error(
577 "%s: No space available for static "
578 "Thread Local Storage", obj->path);
579 return (-1);
580 }
581 }
582 *where = def->st_value + rela->r_addend +
583 defobj->tlsoffset;
584 break;
585
586 /*
587 * !!! BEWARE !!!
588 * ARM ELF ABI defines TLS_DTPMOD64 as 1029, and TLS_DTPREL64
589 * as 1028. But actual bfd linker and the glibc RTLD linker
590 * treats TLS_DTPMOD64 as 1028 and TLS_DTPREL64 1029.
591 */
592 case R_AARCH64_TLS_DTPREL64: /* efectively is TLS_DTPMOD64 */
593 *where += (Elf_Addr)defobj->tlsindex;
594 break;
595 case R_AARCH64_TLS_DTPMOD64: /* efectively is TLS_DTPREL64 */
596 *where += (Elf_Addr)(def->st_value + rela->r_addend);
597 break;
598 case R_AARCH64_RELATIVE:
599 *where = (Elf_Addr)(obj->relocbase + rela->r_addend);
600 break;
601 case R_AARCH64_NONE:
602 break;
603 case R_AARCH64_IRELATIVE:
604 obj->irelative_nonplt = true;
605 break;
606 default:
607 rtld_printf("%s: Unhandled relocation %lu\n",
608 obj->path, ELF_R_TYPE(rela->r_info));
609 return (-1);
610 }
611 }
612
613 return (0);
614 }
615
616 void
allocate_initial_tls(Obj_Entry * objs)617 allocate_initial_tls(Obj_Entry *objs)
618 {
619
620 /*
621 * Fix the size of the static TLS block by using the maximum
622 * offset allocated so far and adding a bit for dynamic modules to
623 * use.
624 */
625 tls_static_space = tls_last_offset + tls_last_size +
626 ld_static_tls_extra;
627
628 _tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
629 }
630
631 void *
__tls_get_addr(tls_index * ti)632 __tls_get_addr(tls_index* ti)
633 {
634 return (tls_get_addr_common(_tcb_get(), ti->ti_module, ti->ti_offset));
635 }
636