// SPDX-License-Identifier: GPL-2.0

#define _GNU_SOURCE

#include <elf.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

#include <asm/prctl.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <sys/wait.h>

#include "helpers.h"
#include "xstate.h"

/*
 * The userspace xstate test suite is designed to be generic and operates
 * with randomized xstate data. However, some states require special handling:
 *
 * - PKRU and XTILECFG need specific adjustments, such as modifying the
 *   randomization behavior or using fixed values.
 * - PKRU also already has a dedicated test suite in
 *   tools/testing/selftests/mm.
 * - Legacy states (FP and SSE) are excluded, as they are not considered
 *   part of the extended states (xstates) and their usage is already deeply
 *   integrated into user-space libraries.
 */
#define XFEATURE_MASK_TEST_SUPPORTED \
	((1 << XFEATURE_YMM) | \
	 (1 << XFEATURE_OPMASK) | \
	 (1 << XFEATURE_ZMM_Hi256) | \
	 (1 << XFEATURE_Hi16_ZMM) | \
	 (1 << XFEATURE_XTILEDATA) | \
	 (1 << XFEATURE_APX))

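/* Read an extended control register; index 0 returns XCR0, the user xfeature mask. */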
static inline uint64_t xgetbv(uint32_t index)
{
	uint32_t eax, edx;

	asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index));
	return eax + ((uint64_t)edx << 32);
}

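/* XSTATE_BV is the first 8 bytes of the XSAVE header; it records which components the buffer carries. */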
static inline uint64_t get_xstatebv(struct xsave_buffer *xbuf)
{
	return *(uint64_t *)(&xbuf->header);
}

static struct xstate_info xstate;

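/*
 * Per-thread bookkeeping for the context-switch test. The threads form a
 * ring: each one sleeps on its own mutex and wakes the thread in 'next',
 * so at most one thread runs at a time on the shared CPU.
 */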
struct futex_info {
	unsigned int iterations;
	struct futex_info *next;
	pthread_mutex_t mutex;
	pthread_t thread;
	bool valid;
	int nr;
};

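/* Fill the buffer with random data for the state under test and XRSTOR it into the registers. */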
static inline void load_rand_xstate(struct xstate_info *xstate, struct xsave_buffer *xbuf)
{
	clear_xstate_header(xbuf);
	set_xstatebv(xbuf, xstate->mask);
	set_rand_data(xstate, xbuf);
	xrstor(xbuf, xstate->mask);
}

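/* XRSTOR with the feature bit clear in XSTATE_BV puts the component back into its init state. */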
static inline void load_init_xstate(struct xstate_info *xstate, struct xsave_buffer *xbuf)
{
	clear_xstate_header(xbuf);
	xrstor(xbuf, xstate->mask);
}

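/* Copy only the component under test between two XSAVE buffers. */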
static inline void copy_xstate(struct xsave_buffer *xbuf_dst, struct xsave_buffer *xbuf_src)
{
	memcpy(&xbuf_dst->bytes[xstate.xbuf_offset],
	       &xbuf_src->bytes[xstate.xbuf_offset],
	       xstate.size);
}

static inline bool validate_xstate_same(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
{
	int ret;

	ret = memcmp(&xbuf1->bytes[xstate.xbuf_offset],
		     &xbuf2->bytes[xstate.xbuf_offset],
		     xstate.size);
	return ret == 0;
}

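/* XSAVE the live registers into a scratch buffer and compare the component under test with xbuf1. */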
static inline bool validate_xregs_same(struct xsave_buffer *xbuf1)
{
	struct xsave_buffer *xbuf2;
	bool ret;

	xbuf2 = alloc_xbuf();
	if (!xbuf2)
		ksft_exit_fail_msg("failed to allocate XSAVE buffer\n");

	xsave(xbuf2, xstate.mask);
	ret = validate_xstate_same(xbuf1, xbuf2);

	free(xbuf2);
	return ret;
}

/* Context switching test */

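/*
 * Thread body: load random state, then on each wake-up verify that the
 * live registers still match the recorded buffer, load fresh random
 * data, and wake the next thread in the ring.
 */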
static void *check_xstate(void *info)
{
	struct futex_info *finfo = (struct futex_info *)info;
	struct xsave_buffer *xbuf;
	int i;

	xbuf = alloc_xbuf();
	if (!xbuf)
		ksft_exit_fail_msg("unable to allocate XSAVE buffer\n");

	/*
	 * Load random data into 'xbuf' and then restore it to the xstate
	 * registers.
	 */
	load_rand_xstate(&xstate, xbuf);
	finfo->valid = true;

	for (i = 0; i < finfo->iterations; i++) {
		pthread_mutex_lock(&finfo->mutex);

		/*
		 * Ensure the register values have not diverged from the
		 * recorded buffer, then load a new random value. Once
		 * validation has failed, skip any further validation.
		 */
		if (finfo->valid) {
			finfo->valid = validate_xregs_same(xbuf);
			load_rand_xstate(&xstate, xbuf);
		}

		/*
		 * The last thread's last unlock will be for thread 0's
		 * mutex. However, thread 0 will have already exited the
		 * loop and the mutex will already be unlocked.
		 *
		 * Because this is not an ERRORCHECK mutex, that
		 * inconsistency will be silently ignored.
		 */
		pthread_mutex_unlock(&finfo->next->mutex);
	}

	free(xbuf);
	return finfo;
}

static void create_threads(uint32_t num_threads, uint32_t iterations, struct futex_info *finfo)
{
	int i;

	for (i = 0; i < num_threads; i++) {
		int next_nr;

		finfo[i].nr = i;
		finfo[i].iterations = iterations;

		/*
		 * Thread 'i' will wait on this mutex to be unlocked.
		 * Lock it immediately after initialization:
		 */
		pthread_mutex_init(&finfo[i].mutex, NULL);
		pthread_mutex_lock(&finfo[i].mutex);

		next_nr = (i + 1) % num_threads;
		finfo[i].next = &finfo[next_nr];

		if (pthread_create(&finfo[i].thread, NULL, check_xstate, &finfo[i]))
			ksft_exit_fail_msg("pthread_create() failed\n");
	}
}

static bool checkout_threads(uint32_t num_threads, struct futex_info *finfo)
{
	void *thread_retval;
	bool valid = true;
	int err, i;

	for (i = 0; i < num_threads; i++) {
		err = pthread_join(finfo[i].thread, &thread_retval);
		if (err)
			ksft_exit_fail_msg("pthread_join() failed for thread %d err: %d\n", i, err);

		if (thread_retval != &finfo[i]) {
			ksft_exit_fail_msg("unexpected thread retval for thread %d: %p\n",
					   i, thread_retval);
		}

		valid &= finfo[i].valid;
	}

	return valid;
}

static void affinitize_cpu0(void)
{
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);

	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
		ksft_exit_fail_msg("sched_setaffinity to CPU 0 failed\n");
}

static void test_context_switch(uint32_t num_threads, uint32_t iterations)
{
	struct futex_info *finfo;

	/* Affinitize to one CPU to force context switches */
	affinitize_cpu0();

	printf("[RUN]\t%s: check context switches, %d iterations, %d threads.\n",
	       xstate.name, iterations, num_threads);

	finfo = malloc(sizeof(*finfo) * num_threads);
	if (!finfo)
		ksft_exit_fail_msg("unable to allocate memory\n");

	create_threads(num_threads, iterations, finfo);

	/*
	 * This thread wakes up thread 0
	 * Thread 0 will wake up 1
	 * Thread 1 will wake up 2
	 * ...
	 * The last thread will wake up 0
	 *
	 * This will repeat for the configured
	 * number of iterations.
	 */
	pthread_mutex_unlock(&finfo[0].mutex);

	/* Wait for all the threads to finish: */
	if (checkout_threads(num_threads, finfo))
		printf("[OK]\tNo state corruption was found.\n");
	else
		printf("[FAIL]\tThe context switching test failed.\n");

	free(finfo);
}

/*
 * Ptrace test for the ABI format as described in arch/x86/include/asm/user.h
 */

/*
 * Make sure the ptracee has the expanded kernel buffer on the first use.
 * Then, initialize the state before performing the state injection from
 * the ptracer. For non-dynamic states, this is benign.
 */
static inline void ptracee_touch_xstate(void)
{
	struct xsave_buffer *xbuf;

	xbuf = alloc_xbuf();
	if (!xbuf)
		ksft_exit_fail_msg("unable to allocate XSAVE buffer\n");

	load_rand_xstate(&xstate, xbuf);
	load_init_xstate(&xstate, xbuf);

	free(xbuf);
}

/*
 * The ptracer injects randomized xstate data. It also reads the state
 * before and after the injection, which exercises the kernel's state
 * copy functions.
 */
static void ptracer_inject_xstate(pid_t target)
{
	uint32_t xbuf_size = get_xbuf_size();
	struct xsave_buffer *xbuf1, *xbuf2;
	struct iovec iov;

	/*
	 * Allocate two buffers: ptrace() reads and writes through the
	 * first one, while the second keeps a reference copy of the
	 * injected data.
	 */
	xbuf1 = alloc_xbuf();
	xbuf2 = alloc_xbuf();
	if (!xbuf1 || !xbuf2)
		ksft_exit_fail_msg("unable to allocate XSAVE buffer\n");

	iov.iov_base = xbuf1;
	iov.iov_len = xbuf_size;

	if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
		ksft_exit_fail_msg("PTRACE_GETREGSET failed\n");

	printf("[RUN]\t%s: inject xstate via ptrace().\n", xstate.name);

	load_rand_xstate(&xstate, xbuf1);
	copy_xstate(xbuf2, xbuf1);

	if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
		ksft_exit_fail_msg("PTRACE_SETREGSET failed\n");

	if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
		ksft_exit_fail_msg("PTRACE_GETREGSET failed\n");

	/*
	 * In the ptrace ABI (see arch/x86/include/asm/user.h), the first
	 * 8 bytes of the SW reserved area carry the xfeature mask, which
	 * should match XCR0.
	 */
	if (*(uint64_t *)get_fpx_sw_bytes(xbuf1) == xgetbv(0))
		printf("[OK]\t'xfeatures' in SW reserved area was correctly written\n");
	else
		printf("[FAIL]\t'xfeatures' in SW reserved area was not correctly written\n");

	if (validate_xstate_same(xbuf2, xbuf1))
		printf("[OK]\txstate was correctly updated.\n");
	else
		printf("[FAIL]\txstate was not correctly updated.\n");

	free(xbuf1);
	free(xbuf2);
}

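/*
 * Fork a child that traces itself and stops with SIGTRAP; inject state
 * while it is stopped, then detach and reap it.
 */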
static void test_ptrace(void)
{
	pid_t child;
	int status;

	child = fork();
	if (child < 0) {
		ksft_exit_fail_msg("fork() failed\n");
	} else if (!child) {
		if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
			ksft_exit_fail_msg("PTRACE_TRACEME failed\n");

		ptracee_touch_xstate();

		raise(SIGTRAP);
		_exit(0);
	}

	do {
		wait(&status);
	} while (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP);

	ptracer_inject_xstate(child);

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	wait(&status);
	if (!WIFEXITED(status) || WEXITSTATUS(status))
		ksft_exit_fail_msg("ptracee exit error\n");
}

/*
 * Test signal delivery for ABI compatibility.
 * See the ABI format: arch/x86/include/uapi/asm/sigcontext.h
 */

/*
 * Avoid using printf() in signal handlers as it is not
 * async-signal-safe.
 */
#define SIGNAL_BUF_LEN 1000
static char signal_message_buffer[SIGNAL_BUF_LEN];
static void sig_print(char *msg)
{
	int left = SIGNAL_BUF_LEN - strlen(signal_message_buffer) - 1;

	strncat(signal_message_buffer, msg, left);
}

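/*
 * Shared between test_signal() and the signal handler: it holds the
 * values the handler validates against, and afterwards the new values
 * the handler plants for the post-sigreturn check.
 */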
static struct xsave_buffer *stashed_xbuf;

static void validate_sigfpstate(int sig, siginfo_t *si, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;
	void *xbuf = ctx->uc_mcontext.fpregs;
	struct _fpx_sw_bytes *sw_bytes;
	uint32_t magic2;

	/* Reset the signal message buffer: */
	signal_message_buffer[0] = '\0';

	sw_bytes = get_fpx_sw_bytes(xbuf);
	if (sw_bytes->magic1 == FP_XSTATE_MAGIC1)
		sig_print("[OK]\t'magic1' is valid\n");
	else
		sig_print("[FAIL]\t'magic1' is not valid\n");

	if (get_fpx_sw_bytes_features(xbuf) & xstate.mask)
		sig_print("[OK]\t'xfeatures' in SW reserved area is valid\n");
	else
		sig_print("[FAIL]\t'xfeatures' in SW reserved area is not valid\n");

	if (get_xstatebv(xbuf) & xstate.mask)
		sig_print("[OK]\t'xfeatures' in XSAVE header is valid\n");
	else
		sig_print("[FAIL]\t'xfeatures' in XSAVE header is not valid\n");

	if (validate_xstate_same(stashed_xbuf, xbuf))
		sig_print("[OK]\txstate delivery was successful\n");
	else
		sig_print("[FAIL]\txstate delivery was not successful\n");

	magic2 = *(uint32_t *)(xbuf + sw_bytes->xstate_size);
	if (magic2 == FP_XSTATE_MAGIC2)
		sig_print("[OK]\t'magic2' is valid\n");
	else
		sig_print("[FAIL]\t'magic2' is not valid\n");

	/* Plant new random data for the post-sigreturn check: */
	set_rand_data(&xstate, xbuf);
	copy_xstate(stashed_xbuf, xbuf);
}

static void test_signal(void)
{
	bool valid_xstate;

	/*
	 * The signal handler will access this to verify xstate context
	 * preservation.
	 */
	stashed_xbuf = alloc_xbuf();
	if (!stashed_xbuf)
		ksft_exit_fail_msg("unable to allocate XSAVE buffer\n");

	printf("[RUN]\t%s: load xstate and raise SIGUSR1\n", xstate.name);

	sethandler(SIGUSR1, validate_sigfpstate, 0);

	load_rand_xstate(&xstate, stashed_xbuf);

	raise(SIGUSR1);

	/*
	 * Record the test result immediately, deferring printf() so that
	 * it cannot perturb the xstate being checked.
	 */
	valid_xstate = validate_xregs_same(stashed_xbuf);
	printf("%s", signal_message_buffer);

	printf("[RUN]\t%s: load new xstate from sighandler and check it after sigreturn\n",
	       xstate.name);

	if (valid_xstate)
		printf("[OK]\txstate was restored correctly\n");
	else
		printf("[FAIL]\txstate restoration failed\n");

	clearhandler(SIGUSR1);
	free(stashed_xbuf);
}

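/*
 * Entry point: run the context-switch, ptrace, and signal tests for one
 * xfeature. A typical caller would be a per-feature selftest binary; a
 * minimal sketch (assuming an AVX-based test):
 *
 *	int main(void)
 *	{
 *		test_xstate(XFEATURE_YMM);
 *		return 0;
 *	}
 */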
void test_xstate(uint32_t feature_num)
{
	const unsigned int ctxtsw_num_threads = 5, ctxtsw_iterations = 10;
	unsigned long features;
	long rc;

	if (!(XFEATURE_MASK_TEST_SUPPORTED & (1UL << feature_num))) {
		ksft_print_msg("The xstate test does not fully support component %u yet.\n",
			       feature_num);
		return;
	}

	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features);
	if (rc || !(features & (1UL << feature_num))) {
		ksft_print_msg("The kernel does not support feature number: %u\n", feature_num);
		return;
	}

	xstate = get_xstate_info(feature_num);
	if (!xstate.size || !xstate.xbuf_offset) {
		ksft_exit_fail_msg("invalid state size/offset (%d/%d)\n",
				   xstate.size, xstate.xbuf_offset);
	}

	test_context_switch(ctxtsw_num_threads, ctxtsw_iterations);
	test_ptrace();
	test_signal();
}