/*
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/tswap.h"

/*
 * Serialize all guest extended state (x87/SSE legacy area, xsave header,
 * and every compiled-in extended component) into an XSAVE-format buffer.
 * The buffer is zeroed first, so components that are absent on this vcpu
 * (size/offset of 0 in x86_ext_save_areas) are simply left blank.
 */
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
{
    CPUX86State *env = &cpu->env;
    const ExtSaveArea *area, *sibling;
    X86LegacyXSaveArea *legacy;
    X86XSaveHeader *header;
    uint16_t fsw, ftw;
    int r;

    memset(buf, 0, buflen);

    /* Legacy FP/SSE region, immediately followed by the xsave header. */
    area = &x86_ext_save_areas[XSTATE_FP_BIT];
    legacy = buf + area->offset;
    header = buf + area->offset + sizeof(*legacy);

    /* Fold the x87 stack top back into bits 13:11 of the status word. */
    fsw = (env->fpus & ~(7 << 11)) | ((env->fpstt & 7) << 11);
    /* Abridged tag word: one "valid" bit per register. */
    ftw = 0;
    for (r = 0; r < 8; ++r) {
        ftw |= (!env->fptags[r]) << r;
    }
    legacy->fcw = env->fpuc;
    legacy->fsw = fsw;
    legacy->ftw = ftw;
    legacy->fpop = env->fpop;
    legacy->fpip = env->fpip;
    legacy->fpdp = env->fpdp;
    memcpy(&legacy->fpregs, env->fpregs, sizeof(env->fpregs));
    legacy->mxcsr = env->mxcsr;

    /* Low 128 bits of each vector register live in the legacy area. */
    for (r = 0; r < CPU_NB_REGS; r++) {
        uint8_t *dst = legacy->xmm_regs[r];

        stq_p(dst, env->xmm_regs[r].ZMM_Q(0));
        stq_p(dst + 8, env->xmm_regs[r].ZMM_Q(1));
    }

    header->xstate_bv = env->xstate_bv;

    /* AVX: upper 128 bits of YMM0..YMMn. */
    area = &x86_ext_save_areas[XSTATE_YMM_BIT];
    if (area->size && area->offset) {
        XSaveAVX *avx = buf + area->offset;

        for (r = 0; r < CPU_NB_REGS; r++) {
            uint8_t *dst = avx->ymmh[r];

            stq_p(dst, env->xmm_regs[r].ZMM_Q(2));
            stq_p(dst + 8, env->xmm_regs[r].ZMM_Q(3));
        }
    }

    /* MPX: bound registers plus the BNDCFGU/BNDSTATUS pair. */
    area = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
    if (area->size && area->offset) {
        XSaveBNDREG *bndreg;
        XSaveBNDCSR *bndcsr;

        sibling = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
        assert(sibling->size);
        assert(sibling->offset);

        bndreg = buf + area->offset;
        bndcsr = buf + sibling->offset;

        memcpy(&bndreg->bnd_regs, env->bnd_regs, sizeof(env->bnd_regs));
        bndcsr->bndcsr = env->bndcs_regs;
    }

    /* AVX-512: opmask registers and ZMM upper halves. */
    area = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
    if (area->size && area->offset) {
        XSaveOpmask *opmask;
        XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
        XSaveHi16_ZMM *hi16_zmm;
#endif

        sibling = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
        assert(sibling->size);
        assert(sibling->offset);

        opmask = buf + area->offset;
        zmm_hi256 = buf + sibling->offset;

        memcpy(&opmask->opmask_regs, env->opmask_regs,
               sizeof(env->opmask_regs));

        /* Bits 511:256 of ZMM0..ZMMn. */
        for (r = 0; r < CPU_NB_REGS; r++) {
            uint8_t *dst = zmm_hi256->zmm_hi256[r];

            stq_p(dst, env->xmm_regs[r].ZMM_Q(4));
            stq_p(dst + 8, env->xmm_regs[r].ZMM_Q(5));
            stq_p(dst + 16, env->xmm_regs[r].ZMM_Q(6));
            stq_p(dst + 24, env->xmm_regs[r].ZMM_Q(7));
        }

#ifdef TARGET_X86_64
        /* ZMM16..ZMM31 exist only in 64-bit mode. */
        sibling = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
        assert(sibling->size);
        assert(sibling->offset);

        hi16_zmm = buf + sibling->offset;

        memcpy(&hi16_zmm->hi16_zmm, &env->xmm_regs[16],
               16 * sizeof(env->xmm_regs[16]));
#endif
    }

#ifdef TARGET_X86_64
    area = &x86_ext_save_areas[XSTATE_PKRU_BIT];
    if (area->size && area->offset) {
        XSavePKRU *pkru = buf + area->offset;

        memcpy(pkru, &env->pkru, sizeof(env->pkru));
    }

    area = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
    if (area->size && area->offset) {
        XSaveXTILECFG *tilecfg = buf + area->offset;

        memcpy(tilecfg, &env->xtilecfg, sizeof(env->xtilecfg));
    }

    /*
     * AMX tile data is large; only copy it when the caller's buffer is
     * actually big enough to contain that component.
     */
    area = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
    if (area->size && area->offset && buflen >= area->size + area->offset) {
        XSaveXTILEDATA *tiledata = buf + area->offset;

        memcpy(tiledata, &env->xtiledata, sizeof(env->xtiledata));
    }
#endif
}
146
/*
 * Restore all guest extended state from an XSAVE-format buffer: the
 * legacy x87/SSE area, the xsave header, and every compiled-in extended
 * component whose descriptor in x86_ext_save_areas is populated.
 * Inverse of x86_cpu_xsave_all_areas().
 */
void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
{
    CPUX86State *env = &cpu->env;
    const ExtSaveArea *area, *sibling, *hi16;
    const X86LegacyXSaveArea *legacy;
    const X86XSaveHeader *header;
    uint16_t fsw, ftw;
    int r;

    /* Legacy FP/SSE region, immediately followed by the xsave header. */
    area = &x86_ext_save_areas[XSTATE_FP_BIT];
    legacy = buf + area->offset;
    header = buf + area->offset + sizeof(*legacy);

    fsw = legacy->fsw;
    ftw = legacy->ftw;
    env->fpuc = legacy->fcw;
    env->fpop = legacy->fpop;
    /* The x87 stack top is encoded in bits 13:11 of the status word. */
    env->fpstt = (fsw >> 11) & 7;
    env->fpus = fsw;
    /* Abridged tag word: a set bit means the register is valid. */
    for (r = 0; r < 8; ++r) {
        env->fptags[r] = !((ftw >> r) & 1);
    }
    env->fpip = legacy->fpip;
    env->fpdp = legacy->fpdp;
    env->mxcsr = legacy->mxcsr;
    memcpy(env->fpregs, &legacy->fpregs, sizeof(env->fpregs));

    /* Low 128 bits of each vector register live in the legacy area. */
    for (r = 0; r < CPU_NB_REGS; r++) {
        const uint8_t *src = legacy->xmm_regs[r];

        env->xmm_regs[r].ZMM_Q(0) = ldq_p(src);
        env->xmm_regs[r].ZMM_Q(1) = ldq_p(src + 8);
    }

    env->xstate_bv = header->xstate_bv;

    /* AVX: upper 128 bits of YMM0..YMMn. */
    area = &x86_ext_save_areas[XSTATE_YMM_BIT];
    if (area->size && area->offset) {
        const XSaveAVX *avx = buf + area->offset;

        for (r = 0; r < CPU_NB_REGS; r++) {
            const uint8_t *src = avx->ymmh[r];

            env->xmm_regs[r].ZMM_Q(2) = ldq_p(src);
            env->xmm_regs[r].ZMM_Q(3) = ldq_p(src + 8);
        }
    }

    /* MPX: bound registers plus the BNDCFGU/BNDSTATUS pair. */
    area = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
    if (area->size && area->offset) {
        const XSaveBNDREG *bndreg;
        const XSaveBNDCSR *bndcsr;

        sibling = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
        assert(sibling->size);
        assert(sibling->offset);

        bndreg = buf + area->offset;
        bndcsr = buf + sibling->offset;

        memcpy(env->bnd_regs, &bndreg->bnd_regs, sizeof(env->bnd_regs));
        env->bndcs_regs = bndcsr->bndcsr;
    }

    /* AVX-512: opmask registers and ZMM upper halves. */
    area = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
    if (area->size && area->offset) {
        const XSaveOpmask *opmask;
        const XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
        const XSaveHi16_ZMM *hi16_zmm;
#endif

        sibling = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
        assert(sibling->size);
        assert(sibling->offset);

        /*
         * NOTE(review): unlike the save path, the Hi16_ZMM descriptor is
         * asserted even outside TARGET_X86_64 here — kept as-is.
         */
        hi16 = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
        assert(hi16->size);
        assert(hi16->offset);

        opmask = buf + area->offset;
        zmm_hi256 = buf + sibling->offset;
#ifdef TARGET_X86_64
        hi16_zmm = buf + hi16->offset;
#endif

        memcpy(env->opmask_regs, &opmask->opmask_regs,
               sizeof(env->opmask_regs));

        /* Bits 511:256 of ZMM0..ZMMn. */
        for (r = 0; r < CPU_NB_REGS; r++) {
            const uint8_t *src = zmm_hi256->zmm_hi256[r];

            env->xmm_regs[r].ZMM_Q(4) = ldq_p(src);
            env->xmm_regs[r].ZMM_Q(5) = ldq_p(src + 8);
            env->xmm_regs[r].ZMM_Q(6) = ldq_p(src + 16);
            env->xmm_regs[r].ZMM_Q(7) = ldq_p(src + 24);
        }

#ifdef TARGET_X86_64
        /* ZMM16..ZMM31 exist only in 64-bit mode. */
        memcpy(&env->xmm_regs[16], &hi16_zmm->hi16_zmm,
               16 * sizeof(env->xmm_regs[16]));
#endif
    }

#ifdef TARGET_X86_64
    area = &x86_ext_save_areas[XSTATE_PKRU_BIT];
    if (area->size && area->offset) {
        const XSavePKRU *pkru = buf + area->offset;

        memcpy(&env->pkru, pkru, sizeof(env->pkru));
    }

    area = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
    if (area->size && area->offset) {
        const XSaveXTILECFG *tilecfg = buf + area->offset;

        memcpy(&env->xtilecfg, tilecfg, sizeof(env->xtilecfg));
    }

    /*
     * AMX tile data is large; only restore it when the caller's buffer
     * is actually big enough to contain that component.
     */
    area = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
    if (area->size && area->offset && buflen >= area->size + area->offset) {
        const XSaveXTILEDATA *tiledata = buf + area->offset;

        memcpy(&env->xtiledata, tiledata, sizeof(env->xtiledata));
    }
#endif
}
281