xref: /qemu/target/i386/hvf/x86_task.c (revision 6701d81d74b3fbc7afd73a18d1c82602a811e409)
// This software is licensed under the terms of the GNU General Public
// License version 2, as published by the Free Software Foundation, and
// may be copied, distributed, and modified under those terms.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"

#include "sysemu/hvf.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/ioport.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#include "qemu/main-loop.h"
#include "strings.h"
#include "sysemu/accel.h"
#include "sysemu/sysemu.h"
#include "target/i386/cpu.h"
// TODO: task switch handling
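/*
 * Snapshot the vCPU's volatile register and selector state into a 32-bit
 * TSS image, mirroring what the CPU saves on a hardware task switch.
 */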
static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    /* CR3 and the LDT selector are intentionally not saved */
    tss->eip = EIP(env);
    tss->eflags = EFLAGS(env);
    tss->eax = EAX(env);
    tss->ecx = ECX(env);
    tss->edx = EDX(env);
    tss->ebx = EBX(env);
    tss->esp = ESP(env);
    tss->ebp = EBP(env);
    tss->esi = ESI(env);
    tss->edi = EDI(env);

    tss->es = vmx_read_segment_selector(cpu, R_ES).sel;
    tss->cs = vmx_read_segment_selector(cpu, R_CS).sel;
    tss->ss = vmx_read_segment_selector(cpu, R_SS).sel;
    tss->ds = vmx_read_segment_selector(cpu, R_DS).sel;
    tss->fs = vmx_read_segment_selector(cpu, R_FS).sel;
    tss->gs = vmx_read_segment_selector(cpu, R_GS).sel;
}

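/*
 * Load the vCPU's register and selector state from a 32-bit TSS image,
 * making the incoming task's context current.
 */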
static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);

    RIP(env) = tss->eip;
    EFLAGS(env) = tss->eflags | 2; /* bit 1 of EFLAGS is reserved, always 1 */

    /* General purpose registers */
    RAX(env) = tss->eax;
    RCX(env) = tss->ecx;
    RDX(env) = tss->edx;
    RBX(env) = tss->ebx;
    RSP(env) = tss->esp;
    RBP(env) = tss->ebp;
    RSI(env) = tss->esi;
    RDI(env) = tss->edi;

    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);

#if 0
    /* Disabled: reload the full segment descriptors instead of just the
     * selectors written above. */
    load_segment(cpu, R_LDTR, tss->ldt);
    load_segment(cpu, R_ES, tss->es);
    load_segment(cpu, R_CS, tss->cs);
    load_segment(cpu, R_SS, tss->ss);
    load_segment(cpu, R_DS, tss->ds);
    load_segment(cpu, R_FS, tss->fs);
    load_segment(cpu, R_GS, tss->gs);
#endif
}

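/*
 * Perform the memory side of a 32-bit task switch: write the outgoing
 * task's dynamic state back to the old TSS, read the new TSS, link it to
 * the old task when nesting, and load the new state into the vCPU.
 */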
static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel,
                          x68_segment_selector old_tss_sel,
                          uint64_t old_tss_base,
                          struct x86_segment_descriptor *new_desc)
{
    struct x86_tss_segment32 tss_seg;
    uint32_t new_tss_base = x86_segment_base(new_desc);
    uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
    uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);

    vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
    save_state_to_tss32(cpu, &tss_seg);

    /* Only the dynamic fields (eip up to, but not including, the LDT
     * selector) are written back to the outgoing TSS. */
    vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip,
                  ldt_sel_offset - eip_offset);
    vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));

    if (old_tss_sel.sel != 0xffff) {
        /* Nested task: link the new TSS back to the outgoing one. */
        tss_seg.prev_tss = old_tss_sel.sel;

        vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss,
                      sizeof(tss_seg.prev_tss));
    }
    load_state_from_tss32(cpu, &tss_seg);
    return 0;
}

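/*
 * Entry point for a VM exit caused by a task switch. Only switches that
 * arrive through an IDT gate (hardware exception, interrupt, or NMI) are
 * emulated; anything else is skipped by advancing RIP past the
 * triggering instruction.
 */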
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
                            int reason, bool gate_valid, uint8_t gate,
                            uint64_t gate_type)
{
    uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
    if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
                        gate_type != VMCS_INTR_T_HWINTR &&
                        gate_type != VMCS_INTR_T_NMI)) {
        /* Not a gate-initiated switch: just step over the instruction. */
        int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
        macvm_set_rip(cpu, rip + ins_len);
        return;
    }

    /* Fetch the guest register state before emulating the switch. */
    load_regs(cpu);

    struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
    int ret;
    x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
    uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
    uint32_t desc_limit;
    struct x86_call_gate task_gate_desc;
    struct vmx_segment vmx_seg;

    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);

    if (reason == TSR_IDT_GATE && gate_valid) {
        int dpl;

        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);

        dpl = task_gate_desc.dpl;
        x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
        if (tss_sel.rpl > dpl || cs.rpl > dpl) {
            /* TODO: should raise #GP; fault injection not implemented yet */
            /* DPRINTF("emulate_gp"); */
        }
    }

    desc_limit = x86_segment_limit(&next_tss_desc);
    /* The new TSS must be present and large enough: at least 0x67 bytes
     * for a 32-bit TSS (type bit 3 set), 0x2b bytes for a 16-bit one. */
    if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
        desc_limit < 0x2b)) {
        VM_PANIC("emulate_ts");
    }

    if (reason == TSR_IRET || reason == TSR_JMP) {
        curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
        x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
    }

    if (reason == TSR_IRET) {
        EFLAGS(env) &= ~RFLAGS_NT;
    }

    if (reason != TSR_CALL && reason != TSR_IDT_GATE) {
        /* No back link: mark the old selector invalid. */
        old_tss_sel.sel = 0xffff;
    }

    if (reason != TSR_IRET) {
        next_tss_desc.type |= (1 << 1); /* set busy flag */
        x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    }

    if (next_tss_desc.type & 8) {
        /* Type bit 3 set: 32-bit TSS */
        ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base,
                             &next_tss_desc);
    } else {
        /* 16-bit task switching is not implemented yet */
        /* ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base,
                                &next_tss_desc); */
        VM_PANIC("task_switch_16");
    }

    /* The CPU sets CR0.TS on every task switch. */
    macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
    x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
    vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);

    /* Write the updated register state back to the vCPU. */
    store_regs(cpu);

    /* Invalidate stale translations and flush the cached vCPU state. */
    hv_vcpu_invalidate_tlb(cpu->hvf_fd);
    hv_vcpu_flush(cpu->hvf_fd);
}