/*
 * Common bootstrapping code to transition from 16-bit to 32-bit code, and to
 * transition from 32-bit to 64-bit code (x86-64 only)
 */
#include "apic-defs.h"
#include "smp.h"

/* Per-CPU region stride (stack + per-CPU data); PER_CPU_SIZE comes from smp.h. */
per_cpu_size = PER_CPU_SIZE

#include "desc.h"

/* EFI provides its own SIPI sequence to handle relocation. */
#ifndef CONFIG_EFI
.code16
.globl rm_trampoline
rm_trampoline:

/*
 * Store SIPI vector code at the beginning of trampoline.  APs start executing
 * here in real mode.  Set CR0.PE to enter protected mode, load a 32-bit GDT,
 * and far-jump to the 32-bit AP entry point.  The LGDT operand is expressed
 * relative to sipi_entry — presumably because the trampoline is copied to the
 * SIPI vector page at runtime (confirm against the SMP setup code).
 */
sipi_entry:
	mov %cr0, %eax
	or $1, %eax			/* CR0.PE = 1: enable protected mode */
	mov %eax, %cr0
	lgdtl ap_rm_gdt_descr - sipi_entry
	ljmpl $KERNEL_CS, $ap_start32	/* serializing far jump loads 32-bit CS */
sipi_end:

/*
 * GDT descriptor handed to the APs while still in real mode.  On x86-64 it
 * statically points at gdt32 (defined elsewhere); on i386 it is zero here and
 * presumably patched at runtime before any SIPI is sent — verify in the caller.
 */
.globl ap_rm_gdt_descr
ap_rm_gdt_descr:
#ifdef __i386__
	.word 0
	.long 0
#else
	.word gdt32_end - gdt32 - 1	/* limit = size - 1, per LGDT convention */
	.long gdt32
#endif

.globl rm_trampoline_end
rm_trampoline_end:
#endif

/* The 32-bit => 64-bit trampoline is x86-64 only. */
#ifdef __x86_64__
.code32

/*
 * EFI builds with "-shared -fPIC" and so cannot directly reference any absolute
 * address.  In 64-bit mode, RIP-relative addressing neatly solves the problem,
 * but 32-bit code doesn't have that luxury.  Make a dummy CALL to get RIP into
 * a GPR in order to emulate RIP-relative for 32-bit transition code.
 */
.macro load_absolute_addr, addr, reg
#ifdef CONFIG_EFI
	call 1f				/* pushes the address of label 1 */
1:
	pop \reg			/* \reg = runtime address of 1b */
	add \addr - 1b, \reg		/* add link-time offset (addr - 1b) */
#else
	mov \addr, \reg			/* non-EFI: absolute addressing is fine */
#endif
.endm

MSR_GS_BASE = 0xc0000101

/*
 * Point MSR_GS_BASE at the bottom of this CPU's per-CPU region, i.e. the
 * current stack top minus per_cpu_size.  WRMSR takes the 64-bit value in
 * EDX:EAX; the high half is zero for these identity-mapped low addresses.
 * Clobbers EAX, ECX, EDX.
 */
.macro setup_percpu_area
	lea -per_cpu_size(%esp), %eax
	mov $0, %edx
	mov $MSR_GS_BASE, %ecx
	wrmsr
.endm

/*
 * Load all data segment registers with KERNEL_DS.  Writing %gs clobbers the
 * hidden GS base, so MSR_GS_BASE is read into EDX:EAX first and written back
 * afterwards.  Clobbers EAX, EBX, ECX, EDX.
 */
.macro setup_segments
	mov $MSR_GS_BASE, %ecx
	rdmsr				/* save GS base before reloading %gs */

	mov $KERNEL_DS, %bx
	mov %bx, %ds
	mov %bx, %es
	mov %bx, %fs
	mov %bx, %gs
	mov %bx, %ss

	/* restore MSR_GS_BASE */
	wrmsr
.endm

/*
 * Enable IA-32e (long) mode: load the 64-bit GDT, reset CR4 and set CR4.PAE,
 * install the page tables in CR3, set EFER.LME, then set CR0.PG (and PE).
 * This is the architecturally required ordering — PAE and LME must be set
 * before enabling paging.  On return, long mode is active but the CPU still
 * executes 32-bit code (compatibility mode) until the caller far-transfers to
 * a 64-bit code segment.  Clobbers EAX, EBX, ECX, EDX.
 */
prepare_64:
	load_absolute_addr $gdt_descr, %edx
	lgdtl (%edx)

	setup_segments

	xor %eax, %eax			/* start from a clean CR4 */
	mov %eax, %cr4

enter_long_mode:
	mov %cr4, %eax
	bts $5, %eax // pae		/* CR4.PAE, mandatory for long mode */
	mov %eax, %cr4

	/* Note, EFI doesn't yet support 5-level paging. */
#ifdef CONFIG_EFI
	load_absolute_addr $ptl4, %eax
#else
	mov pt_root, %eax		/* page-table root prepared by the BSP */
#endif
	mov %eax, %cr3

efer = 0xc0000080
	mov $efer, %ecx
	rdmsr
	bts $8, %eax			/* EFER.LME: enable long mode */
	wrmsr

	mov %cr0, %eax
	bts $0, %eax			/* CR0.PE (already set, kept for safety) */
	bts $31, %eax			/* CR0.PG: paging on => long mode active */
	mov %eax, %cr0
	ret

/*
 * 32-bit AP entry point, reached from the SIPI trampoline.  Atomically carves
 * a per_cpu_size stack out of the shared smp_stacktop, sets up the per-CPU
 * area and long mode, then far-returns into the 64-bit entry ap_start64.
 */
.globl ap_start32
ap_start32:
	setup_segments

	load_absolute_addr $smp_stacktop, %edx
	mov $-per_cpu_size, %esp
	lock xaddl %esp, (%edx)		/* esp = old stacktop; stacktop -= size */

	setup_percpu_area
	call prepare_64

	/* Emulate a far jump: push 64-bit CS and target, then far-return. */
	load_absolute_addr $ap_start64, %edx
	pushl $KERNEL_CS
	pushl %edx
	lretl
#endif