xref: /linux/tools/testing/selftests/arm64/fp/fp-ptrace.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2023 ARM Limited.
4  * Original author: Mark Brown <broonie@kernel.org>
5  */
6 
7 #define _GNU_SOURCE
8 
9 #include <errno.h>
10 #include <stdbool.h>
11 #include <stddef.h>
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <unistd.h>
16 
17 #include <sys/auxv.h>
18 #include <sys/prctl.h>
19 #include <sys/ptrace.h>
20 #include <sys/types.h>
21 #include <sys/uio.h>
22 #include <sys/wait.h>
23 
24 #include <linux/kernel.h>
25 
26 #include <asm/sigcontext.h>
27 #include <asm/sve_context.h>
28 #include <asm/ptrace.h>
29 
30 #include "../../kselftest.h"
31 
32 #include "fp-ptrace.h"
33 
34 #include <linux/bits.h>
35 
36 #define FPMR_LSCALE2_MASK                               GENMASK(37, 32)
37 #define FPMR_NSCALE_MASK                                GENMASK(31, 24)
38 #define FPMR_LSCALE_MASK                                GENMASK(22, 16)
39 #define FPMR_OSC_MASK                                   GENMASK(15, 15)
40 #define FPMR_OSM_MASK                                   GENMASK(14, 14)
41 
42 /* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
43 #ifndef NT_ARM_SVE
44 #define NT_ARM_SVE 0x405
45 #endif
46 
47 #ifndef NT_ARM_SSVE
48 #define NT_ARM_SSVE 0x40b
49 #endif
50 
51 #ifndef NT_ARM_ZA
52 #define NT_ARM_ZA 0x40c
53 #endif
54 
55 #ifndef NT_ARM_ZT
56 #define NT_ARM_ZT 0x40d
57 #endif
58 
59 #ifndef NT_ARM_FPMR
60 #define NT_ARM_FPMR 0x40e
61 #endif
62 
/* Vector size used to dimension the state buffers, in 128 bit quadwords */
#define ARCH_VQ_MAX 256

/* VL 128..2048 in powers of 2 */
#define MAX_NUM_VLS 5

/*
 * FPMR bits we can set without doing feature checks to see if values
 * are valid.
 */
#define FPMR_SAFE_BITS (FPMR_LSCALE2_MASK | FPMR_NSCALE_MASK | \
			FPMR_LSCALE_MASK | FPMR_OSC_MASK | FPMR_OSM_MASK)

/* Number of FPSIMD V registers */
#define NUM_FPR 32
/*
 * Register state buffers, sized for the maximum supported vector
 * length: *_in is the state the child loads, *_expected is what we
 * expect to read back after the test's ptrace writes and *_out is
 * what the child actually saved.
 */
__uint128_t v_in[NUM_FPR];
__uint128_t v_expected[NUM_FPR];
__uint128_t v_out[NUM_FPR];

char z_in[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];
char z_expected[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];
char z_out[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];

char p_in[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];
char p_expected[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];
char p_out[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];

char ffr_in[__SVE_PREG_SIZE(ARCH_VQ_MAX)];
char ffr_expected[__SVE_PREG_SIZE(ARCH_VQ_MAX)];
char ffr_out[__SVE_PREG_SIZE(ARCH_VQ_MAX)];

char za_in[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];
char za_expected[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];
char za_out[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];

char zt_in[ZT_SIG_REG_BYTES];
char zt_expected[ZT_SIG_REG_BYTES];
char zt_out[ZT_SIG_REG_BYTES];

uint64_t fpmr_in, fpmr_expected, fpmr_out;

/* Vector lengths reported back by the child */
uint64_t sve_vl_out;
uint64_t sme_vl_out;
uint64_t svcr_in, svcr_expected, svcr_out;

/*
 * Implemented outside this file; given HAVE_* flags for the supported
 * register sets.  NOTE(review): presumably loads the *_in values into
 * the live registers, stops for the parent, then saves live state to
 * the *_out buffers — confirm against the implementation.
 */
void load_and_save(int flags);

/* Set by handle_alarm() when our watchdog alarm fires */
static bool got_alarm;
109 
/* SIGALRM handler: just record that the timeout fired (see run_parent()) */
static void handle_alarm(int sig, siginfo_t *info, void *context)
{
	got_alarm = true;
}
114 
#ifdef CONFIG_CPU_BIG_ENDIAN
/*
 * Convert a native endian 128 bit value to little endian by swapping
 * the two 64 bit halves and byte swapping each half.
 */
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
/* Little endian host: values are already in the in-memory format */
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

/* The swap (or identity) conversion is its own inverse */
#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
131 
sve_supported(void)132 static bool sve_supported(void)
133 {
134 	return getauxval(AT_HWCAP) & HWCAP_SVE;
135 }
136 
sme_supported(void)137 static bool sme_supported(void)
138 {
139 	return getauxval(AT_HWCAP2) & HWCAP2_SME;
140 }
141 
sme2_supported(void)142 static bool sme2_supported(void)
143 {
144 	return getauxval(AT_HWCAP2) & HWCAP2_SME2;
145 }
146 
fa64_supported(void)147 static bool fa64_supported(void)
148 {
149 	return getauxval(AT_HWCAP2) & HWCAP2_SME_FA64;
150 }
151 
fpmr_supported(void)152 static bool fpmr_supported(void)
153 {
154 	return getauxval(AT_HWCAP2) & HWCAP2_FPMR;
155 }
156 
/*
 * Compare a register buffer we read back against the expected values,
 * logging a diagnostic on mismatch.  As a debugging aid we also note
 * when the data read back was entirely zero.  Returns true on match.
 */
static bool compare_buffer(const char *name, void *out,
			   void *expected, size_t size)
{
	void *tmp;

	if (memcmp(out, expected, size) == 0)
		return true;

	ksft_print_msg("Mismatch in %s\n", name);

	/* Did we just get zeros back? */
	tmp = calloc(1, size);	/* calloc() zero fills for us */
	if (!tmp) {
		/* %zu: size is a size_t, %lu is wrong on ILP32 */
		ksft_print_msg("OOM allocating %zu bytes for %s\n",
			       size, name);
		ksft_exit_fail();
	}

	if (memcmp(out, tmp, size) == 0)
		ksft_print_msg("%s is zero\n", name);

	free(tmp);

	return false;
}
183 
/*
 * Register environment for one test run: the vector lengths and SVCR
 * value the child starts with (*_in) and the values we expect to
 * observe after the test's ptrace modifications (*_expected).
 */
struct test_config {
	int sve_vl_in;
	int sve_vl_expected;
	int sme_vl_in;
	int sme_vl_expected;
	int svcr_in;
	int svcr_expected;
};
192 
/*
 * A test case: a name for reporting, a predicate saying if the
 * configuration is usable on this system, and hooks to adjust the
 * expected values and perform ptrace writes on the stopped child.
 */
struct test_definition {
	const char *name;
	bool sve_vl_change;	/* NOTE(review): appears to flag tests that change the SVE VL; users not visible in this chunk */
	bool (*supported)(struct test_config *config);
	void (*set_expected_values)(struct test_config *config);
	void (*modify_values)(pid_t child, struct test_config *test_config);
};
200 
vl_in(struct test_config * config)201 static int vl_in(struct test_config *config)
202 {
203 	int vl;
204 
205 	if (config->svcr_in & SVCR_SM)
206 		vl = config->sme_vl_in;
207 	else
208 		vl = config->sve_vl_in;
209 
210 	return vl;
211 }
212 
vl_expected(struct test_config * config)213 static int vl_expected(struct test_config *config)
214 {
215 	int vl;
216 
217 	if (config->svcr_expected & SVCR_SM)
218 		vl = config->sme_vl_expected;
219 	else
220 		vl = config->sve_vl_expected;
221 
222 	return vl;
223 }
224 
/*
 * Body of the traced child: request tracing, configure the vector
 * lengths, then hand over to load_and_save() which loads the *_in
 * values and stops for the parent to inspect and modify us.
 */
static void run_child(struct test_config *config)
{
	int ret, flags;

	/* Let the parent attach to us */
	ret = ptrace(PTRACE_TRACEME, 0, 0, 0);
	if (ret < 0)
		ksft_exit_fail_msg("PTRACE_TRACEME failed: %s (%d)\n",
				   strerror(errno), errno);

	/* VL setup; prctl() returns the VL actually set on success */
	if (sve_supported()) {
		ret = prctl(PR_SVE_SET_VL, config->sve_vl_in);
		if (ret != config->sve_vl_in) {
			ksft_print_msg("Failed to set SVE VL %d: %d\n",
				       config->sve_vl_in, ret);
		}
	}

	if (sme_supported()) {
		ret = prctl(PR_SME_SET_VL, config->sme_vl_in);
		if (ret != config->sme_vl_in) {
			ksft_print_msg("Failed to set SME VL %d: %d\n",
				       config->sme_vl_in, ret);
		}
	}

	/*
	 * Load values and wait for the parent; the flags tell
	 * load_and_save() which register sets exist on this system.
	 */
	flags = 0;
	if (sve_supported())
		flags |= HAVE_SVE;
	if (sme_supported())
		flags |= HAVE_SME;
	if (sme2_supported())
		flags |= HAVE_SME2;
	if (fa64_supported())
		flags |= HAVE_FA64;
	if (fpmr_supported())
		flags |= HAVE_FPMR;

	load_and_save(flags);

	exit(0);
}
269 
/*
 * Read one register buffer from the child via process_vm_readv(),
 * logging failures and short reads.  Use ssize_t throughout: the
 * original int truncated both the size_t iov_len and the ssize_t
 * return on large buffers.
 */
static void read_one_child_regs(pid_t child, char *name,
				struct iovec *iov_parent,
				struct iovec *iov_child)
{
	ssize_t len = iov_parent->iov_len;
	ssize_t ret;

	ret = process_vm_readv(child, iov_parent, 1, iov_child, 1, 0);
	if (ret == -1)
		ksft_print_msg("%s read failed: %s (%d)\n",
			       name, strerror(errno), errno);
	else if (ret != len)
		ksft_print_msg("Short read of %s: %zd\n", name, ret);
}
284 
read_child_regs(pid_t child)285 static void read_child_regs(pid_t child)
286 {
287 	struct iovec iov_parent, iov_child;
288 
289 	/*
290 	 * Since the child fork()ed from us the buffer addresses are
291 	 * the same in parent and child.
292 	 */
293 	iov_parent.iov_base = &v_out;
294 	iov_parent.iov_len = sizeof(v_out);
295 	iov_child.iov_base = &v_out;
296 	iov_child.iov_len = sizeof(v_out);
297 	read_one_child_regs(child, "FPSIMD", &iov_parent, &iov_child);
298 
299 	if (sve_supported() || sme_supported()) {
300 		iov_parent.iov_base = &sve_vl_out;
301 		iov_parent.iov_len = sizeof(sve_vl_out);
302 		iov_child.iov_base = &sve_vl_out;
303 		iov_child.iov_len = sizeof(sve_vl_out);
304 		read_one_child_regs(child, "SVE VL", &iov_parent, &iov_child);
305 
306 		iov_parent.iov_base = &z_out;
307 		iov_parent.iov_len = sizeof(z_out);
308 		iov_child.iov_base = &z_out;
309 		iov_child.iov_len = sizeof(z_out);
310 		read_one_child_regs(child, "Z", &iov_parent, &iov_child);
311 
312 		iov_parent.iov_base = &p_out;
313 		iov_parent.iov_len = sizeof(p_out);
314 		iov_child.iov_base = &p_out;
315 		iov_child.iov_len = sizeof(p_out);
316 		read_one_child_regs(child, "P", &iov_parent, &iov_child);
317 
318 		iov_parent.iov_base = &ffr_out;
319 		iov_parent.iov_len = sizeof(ffr_out);
320 		iov_child.iov_base = &ffr_out;
321 		iov_child.iov_len = sizeof(ffr_out);
322 		read_one_child_regs(child, "FFR", &iov_parent, &iov_child);
323 	}
324 
325 	if (sme_supported()) {
326 		iov_parent.iov_base = &sme_vl_out;
327 		iov_parent.iov_len = sizeof(sme_vl_out);
328 		iov_child.iov_base = &sme_vl_out;
329 		iov_child.iov_len = sizeof(sme_vl_out);
330 		read_one_child_regs(child, "SME VL", &iov_parent, &iov_child);
331 
332 		iov_parent.iov_base = &svcr_out;
333 		iov_parent.iov_len = sizeof(svcr_out);
334 		iov_child.iov_base = &svcr_out;
335 		iov_child.iov_len = sizeof(svcr_out);
336 		read_one_child_regs(child, "SVCR", &iov_parent, &iov_child);
337 
338 		iov_parent.iov_base = &za_out;
339 		iov_parent.iov_len = sizeof(za_out);
340 		iov_child.iov_base = &za_out;
341 		iov_child.iov_len = sizeof(za_out);
342 		read_one_child_regs(child, "ZA", &iov_parent, &iov_child);
343 	}
344 
345 	if (sme2_supported()) {
346 		iov_parent.iov_base = &zt_out;
347 		iov_parent.iov_len = sizeof(zt_out);
348 		iov_child.iov_base = &zt_out;
349 		iov_child.iov_len = sizeof(zt_out);
350 		read_one_child_regs(child, "ZT", &iov_parent, &iov_child);
351 	}
352 
353 	if (fpmr_supported()) {
354 		iov_parent.iov_base = &fpmr_out;
355 		iov_parent.iov_len = sizeof(fpmr_out);
356 		iov_child.iov_base = &fpmr_out;
357 		iov_child.iov_len = sizeof(fpmr_out);
358 		read_one_child_regs(child, "FPMR", &iov_parent, &iov_child);
359 	}
360 }
361 
/*
 * The child is stopped on a BRK instruction; advance the PC past it
 * and resume the child with restart_type (PTRACE_CONT or
 * PTRACE_DETACH).  Returns false if any ptrace operation failed.
 */
static bool continue_breakpoint(pid_t child,
				enum __ptrace_request restart_type)
{
	struct user_pt_regs pt_regs;
	struct iovec iov;
	int ret;

	/* Get PC */
	iov.iov_base = &pt_regs;
	iov.iov_len = sizeof(pt_regs);
	ret = ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov);
	if (ret < 0) {
		ksft_print_msg("Failed to get PC: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	/* Skip over the BRK; all AArch64 instructions are 4 bytes */
	pt_regs.pc += 4;
	ret = ptrace(PTRACE_SETREGSET, child, NT_PRSTATUS, &iov);
	if (ret < 0) {
		ksft_print_msg("Failed to skip BRK: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	/* Restart */
	ret = ptrace(restart_type, child, 0, 0);
	if (ret < 0) {
		ksft_print_msg("Failed to restart child: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	return true;
}
398 
check_ptrace_values_sve(pid_t child,struct test_config * config)399 static bool check_ptrace_values_sve(pid_t child, struct test_config *config)
400 {
401 	struct user_sve_header *sve;
402 	struct user_fpsimd_state *fpsimd;
403 	struct iovec iov;
404 	int ret, vq;
405 	bool pass = true;
406 
407 	if (!sve_supported())
408 		return true;
409 
410 	vq = __sve_vq_from_vl(config->sve_vl_in);
411 
412 	iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
413 	iov.iov_base = malloc(iov.iov_len);
414 	if (!iov.iov_base) {
415 		ksft_print_msg("OOM allocating %lu byte SVE buffer\n",
416 			       iov.iov_len);
417 		return false;
418 	}
419 
420 	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_SVE, &iov);
421 	if (ret != 0) {
422 		ksft_print_msg("Failed to read initial SVE: %s (%d)\n",
423 			       strerror(errno), errno);
424 		pass = false;
425 		goto out;
426 	}
427 
428 	sve = iov.iov_base;
429 
430 	if (sve->vl != config->sve_vl_in) {
431 		ksft_print_msg("Mismatch in initial SVE VL: %d != %d\n",
432 			       sve->vl, config->sve_vl_in);
433 		pass = false;
434 	}
435 
436 	/* If we are in streaming mode we should just read FPSIMD */
437 	if ((config->svcr_in & SVCR_SM) && (sve->flags & SVE_PT_REGS_SVE)) {
438 		ksft_print_msg("NT_ARM_SVE reports SVE with PSTATE.SM\n");
439 		pass = false;
440 	}
441 
442 	if (svcr_in & SVCR_SM) {
443 		if (sve->size != sizeof(sve)) {
444 			ksft_print_msg("NT_ARM_SVE reports data with PSTATE.SM\n");
445 			pass = false;
446 		}
447 	} else {
448 		if (sve->size != SVE_PT_SIZE(vq, sve->flags)) {
449 			ksft_print_msg("Mismatch in SVE header size: %d != %lu\n",
450 				       sve->size, SVE_PT_SIZE(vq, sve->flags));
451 			pass = false;
452 		}
453 	}
454 
455 	/* The registers might be in completely different formats! */
456 	if (sve->flags & SVE_PT_REGS_SVE) {
457 		if (!compare_buffer("initial SVE Z",
458 				    iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
459 				    z_in, SVE_PT_SVE_ZREGS_SIZE(vq)))
460 			pass = false;
461 
462 		if (!compare_buffer("initial SVE P",
463 				    iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
464 				    p_in, SVE_PT_SVE_PREGS_SIZE(vq)))
465 			pass = false;
466 
467 		if (!compare_buffer("initial SVE FFR",
468 				    iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
469 				    ffr_in, SVE_PT_SVE_PREG_SIZE(vq)))
470 			pass = false;
471 	} else {
472 		fpsimd = iov.iov_base + SVE_PT_FPSIMD_OFFSET;
473 		if (!compare_buffer("initial V via SVE", &fpsimd->vregs[0],
474 				    v_in, sizeof(v_in)))
475 			pass = false;
476 	}
477 
478 out:
479 	free(iov.iov_base);
480 	return pass;
481 }
482 
check_ptrace_values_ssve(pid_t child,struct test_config * config)483 static bool check_ptrace_values_ssve(pid_t child, struct test_config *config)
484 {
485 	struct user_sve_header *sve;
486 	struct user_fpsimd_state *fpsimd;
487 	struct iovec iov;
488 	int ret, vq;
489 	bool pass = true;
490 
491 	if (!sme_supported())
492 		return true;
493 
494 	vq = __sve_vq_from_vl(config->sme_vl_in);
495 
496 	iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
497 	iov.iov_base = malloc(iov.iov_len);
498 	if (!iov.iov_base) {
499 		ksft_print_msg("OOM allocating %lu byte SSVE buffer\n",
500 			       iov.iov_len);
501 		return false;
502 	}
503 
504 	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_SSVE, &iov);
505 	if (ret != 0) {
506 		ksft_print_msg("Failed to read initial SSVE: %s (%d)\n",
507 			       strerror(errno), errno);
508 		pass = false;
509 		goto out;
510 	}
511 
512 	sve = iov.iov_base;
513 
514 	if (sve->vl != config->sme_vl_in) {
515 		ksft_print_msg("Mismatch in initial SSVE VL: %d != %d\n",
516 			       sve->vl, config->sme_vl_in);
517 		pass = false;
518 	}
519 
520 	if ((config->svcr_in & SVCR_SM) && !(sve->flags & SVE_PT_REGS_SVE)) {
521 		ksft_print_msg("NT_ARM_SSVE reports FPSIMD with PSTATE.SM\n");
522 		pass = false;
523 	}
524 
525 	if (!(svcr_in & SVCR_SM)) {
526 		if (sve->size != sizeof(sve)) {
527 			ksft_print_msg("NT_ARM_SSVE reports data without PSTATE.SM\n");
528 			pass = false;
529 		}
530 	} else {
531 		if (sve->size != SVE_PT_SIZE(vq, sve->flags)) {
532 			ksft_print_msg("Mismatch in SSVE header size: %d != %lu\n",
533 				       sve->size, SVE_PT_SIZE(vq, sve->flags));
534 			pass = false;
535 		}
536 	}
537 
538 	/* The registers might be in completely different formats! */
539 	if (sve->flags & SVE_PT_REGS_SVE) {
540 		if (!compare_buffer("initial SSVE Z",
541 				    iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
542 				    z_in, SVE_PT_SVE_ZREGS_SIZE(vq)))
543 			pass = false;
544 
545 		if (!compare_buffer("initial SSVE P",
546 				    iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
547 				    p_in, SVE_PT_SVE_PREGS_SIZE(vq)))
548 			pass = false;
549 
550 		if (!compare_buffer("initial SSVE FFR",
551 				    iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
552 				    ffr_in, SVE_PT_SVE_PREG_SIZE(vq)))
553 			pass = false;
554 	} else {
555 		fpsimd = iov.iov_base + SVE_PT_FPSIMD_OFFSET;
556 		if (!compare_buffer("initial V via SSVE",
557 				    &fpsimd->vregs[0], v_in, sizeof(v_in)))
558 			pass = false;
559 	}
560 
561 out:
562 	free(iov.iov_base);
563 	return pass;
564 }
565 
/*
 * Check the NT_ARM_ZA regset against the values the child loaded.
 * The ZA data is only present when PSTATE.ZA is set, otherwise only
 * the header should be returned.
 */
static bool check_ptrace_values_za(pid_t child, struct test_config *config)
{
	struct user_za_header *za;
	struct iovec iov;
	int ret, vq;
	bool pass = true;

	if (!sme_supported())
		return true;

	vq = __sve_vq_from_vl(config->sme_vl_in);

	iov.iov_len = ZA_SIG_CONTEXT_SIZE(vq);
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base) {
		ksft_print_msg("OOM allocating %lu byte ZA buffer\n",
			       iov.iov_len);
		return false;
	}

	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_ZA, &iov);
	if (ret != 0) {
		ksft_print_msg("Failed to read initial ZA: %s (%d)\n",
			       strerror(errno), errno);
		pass = false;
		goto out;
	}

	za = iov.iov_base;

	if (za->vl != config->sme_vl_in) {
		ksft_print_msg("Mismatch in initial SME VL: %d != %d\n",
			       za->vl, config->sme_vl_in);
		pass = false;
	}

	/* If PSTATE.ZA is not set we should just read the header */
	if (config->svcr_in & SVCR_ZA) {
		if (za->size != ZA_PT_SIZE(vq)) {
			ksft_print_msg("Unexpected ZA ptrace read size: %d != %lu\n",
				       za->size, ZA_PT_SIZE(vq));
			pass = false;
		}

		if (!compare_buffer("initial ZA",
				    iov.iov_base + ZA_PT_ZA_OFFSET,
				    za_in, ZA_PT_ZA_SIZE(vq)))
			pass = false;
	} else {
		if (za->size != sizeof(*za)) {
			ksft_print_msg("Unexpected ZA ptrace read size: %d != %lu\n",
				       za->size, sizeof(*za));
			pass = false;
		}
	}

out:
	free(iov.iov_base);
	return pass;
}
626 
check_ptrace_values_zt(pid_t child,struct test_config * config)627 static bool check_ptrace_values_zt(pid_t child, struct test_config *config)
628 {
629 	uint8_t buf[512];
630 	struct iovec iov;
631 	int ret;
632 
633 	if (!sme2_supported())
634 		return true;
635 
636 	iov.iov_base = &buf;
637 	iov.iov_len = ZT_SIG_REG_BYTES;
638 	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_ZT, &iov);
639 	if (ret != 0) {
640 		ksft_print_msg("Failed to read initial ZT: %s (%d)\n",
641 			       strerror(errno), errno);
642 		return false;
643 	}
644 
645 	return compare_buffer("initial ZT", buf, zt_in, ZT_SIG_REG_BYTES);
646 }
647 
check_ptrace_values_fpmr(pid_t child,struct test_config * config)648 static bool check_ptrace_values_fpmr(pid_t child, struct test_config *config)
649 {
650 	uint64_t val;
651 	struct iovec iov;
652 	int ret;
653 
654 	if (!fpmr_supported())
655 		return true;
656 
657 	iov.iov_base = &val;
658 	iov.iov_len = sizeof(val);
659 	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_FPMR, &iov);
660 	if (ret != 0) {
661 		ksft_print_msg("Failed to read initial FPMR: %s (%d)\n",
662 			       strerror(errno), errno);
663 		return false;
664 	}
665 
666 	return compare_buffer("initial FPMR", &val, &fpmr_in, sizeof(val));
667 }
668 
check_ptrace_values(pid_t child,struct test_config * config)669 static bool check_ptrace_values(pid_t child, struct test_config *config)
670 {
671 	bool pass = true;
672 	struct user_fpsimd_state fpsimd;
673 	struct iovec iov;
674 	int ret;
675 
676 	iov.iov_base = &fpsimd;
677 	iov.iov_len = sizeof(fpsimd);
678 	ret = ptrace(PTRACE_GETREGSET, child, NT_PRFPREG, &iov);
679 	if (ret == 0) {
680 		if (!compare_buffer("initial V", &fpsimd.vregs, v_in,
681 				    sizeof(v_in))) {
682 			pass = false;
683 		}
684 	} else {
685 		ksft_print_msg("Failed to read initial V: %s (%d)\n",
686 			       strerror(errno), errno);
687 		pass = false;
688 	}
689 
690 	if (!check_ptrace_values_sve(child, config))
691 		pass = false;
692 
693 	if (!check_ptrace_values_ssve(child, config))
694 		pass = false;
695 
696 	if (!check_ptrace_values_za(child, config))
697 		pass = false;
698 
699 	if (!check_ptrace_values_zt(child, config))
700 		pass = false;
701 
702 	if (!check_ptrace_values_fpmr(child, config))
703 		pass = false;
704 
705 	return pass;
706 }
707 
run_parent(pid_t child,struct test_definition * test,struct test_config * config)708 static bool run_parent(pid_t child, struct test_definition *test,
709 		       struct test_config *config)
710 {
711 	int wait_status, ret;
712 	pid_t pid;
713 	bool pass;
714 
715 	/* Initial attach */
716 	while (1) {
717 		pid = waitpid(child, &wait_status, 0);
718 		if (pid < 0) {
719 			if (errno == EINTR)
720 				continue;
721 			ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
722 					   strerror(errno), errno);
723 		}
724 
725 		if (pid == child)
726 			break;
727 	}
728 
729 	if (WIFEXITED(wait_status)) {
730 		ksft_print_msg("Child exited loading values with status %d\n",
731 			       WEXITSTATUS(wait_status));
732 		pass = false;
733 		goto out;
734 	}
735 
736 	if (WIFSIGNALED(wait_status)) {
737 		ksft_print_msg("Child died from signal %d loading values\n",
738 			       WTERMSIG(wait_status));
739 		pass = false;
740 		goto out;
741 	}
742 
743 	/* Read initial values via ptrace */
744 	pass = check_ptrace_values(child, config);
745 
746 	/* Do whatever writes we want to do */
747 	if (test->modify_values)
748 		test->modify_values(child, config);
749 
750 	if (!continue_breakpoint(child, PTRACE_CONT))
751 		goto cleanup;
752 
753 	while (1) {
754 		pid = waitpid(child, &wait_status, 0);
755 		if (pid < 0) {
756 			if (errno == EINTR)
757 				continue;
758 			ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
759 					   strerror(errno), errno);
760 		}
761 
762 		if (pid == child)
763 			break;
764 	}
765 
766 	if (WIFEXITED(wait_status)) {
767 		ksft_print_msg("Child exited saving values with status %d\n",
768 			       WEXITSTATUS(wait_status));
769 		pass = false;
770 		goto out;
771 	}
772 
773 	if (WIFSIGNALED(wait_status)) {
774 		ksft_print_msg("Child died from signal %d saving values\n",
775 			       WTERMSIG(wait_status));
776 		pass = false;
777 		goto out;
778 	}
779 
780 	/* See what happened as a result */
781 	read_child_regs(child);
782 
783 	if (!continue_breakpoint(child, PTRACE_DETACH))
784 		goto cleanup;
785 
786 	/* The child should exit cleanly */
787 	got_alarm = false;
788 	alarm(1);
789 	while (1) {
790 		if (got_alarm) {
791 			ksft_print_msg("Wait for child timed out\n");
792 			goto cleanup;
793 		}
794 
795 		pid = waitpid(child, &wait_status, 0);
796 		if (pid < 0) {
797 			if (errno == EINTR)
798 				continue;
799 			ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
800 					   strerror(errno), errno);
801 		}
802 
803 		if (pid == child)
804 			break;
805 	}
806 	alarm(0);
807 
808 	if (got_alarm) {
809 		ksft_print_msg("Timed out waiting for child\n");
810 		pass = false;
811 		goto cleanup;
812 	}
813 
814 	if (pid == child && WIFSIGNALED(wait_status)) {
815 		ksft_print_msg("Child died from signal %d cleaning up\n",
816 			       WTERMSIG(wait_status));
817 		pass = false;
818 		goto out;
819 	}
820 
821 	if (pid == child && WIFEXITED(wait_status)) {
822 		if (WEXITSTATUS(wait_status) != 0) {
823 			ksft_print_msg("Child exited with error %d\n",
824 				       WEXITSTATUS(wait_status));
825 			pass = false;
826 		}
827 	} else {
828 		ksft_print_msg("Child did not exit cleanly\n");
829 		pass = false;
830 		goto cleanup;
831 	}
832 
833 	goto out;
834 
835 cleanup:
836 	ret = kill(child, SIGKILL);
837 	if (ret != 0) {
838 		ksft_print_msg("kill() failed: %s (%d)\n",
839 			       strerror(errno), errno);
840 		return false;
841 	}
842 
843 	while (1) {
844 		pid = waitpid(child, &wait_status, 0);
845 		if (pid < 0) {
846 			if (errno == EINTR)
847 				continue;
848 			ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
849 					   strerror(errno), errno);
850 		}
851 
852 		if (pid == child)
853 			break;
854 	}
855 
856 out:
857 	return pass;
858 }
859 
/*
 * Fill size bytes of buf with pseudo-random data, working in 32 bit
 * units since random() produces at most 32 bits per call regardless
 * of the size of long.  Any trailing bytes beyond a whole number of
 * words are left untouched.
 */
static void fill_random(void *buf, size_t size)
{
	uint32_t *words = buf;
	size_t nwords = size / sizeof(uint32_t);
	size_t i;

	for (i = 0; i < nwords; i++)
		words[i] = random();
}
869 
/* Generate a random architecturally valid FFR value for the given VQ */
static void fill_random_ffr(void *buf, size_t vq)
{
	uint8_t *lbuf = buf;
	int bits, i;

	/*
	 * Only values with a continuous set of 0..n bits set are
	 * valid for FFR, set all bits then clear a random number of
	 * high bits.
	 */
	memset(buf, 0, __SVE_FFR_SIZE(vq));

	/* Number of low bits to leave set, 0 .. FFR bits - 1 */
	bits = random() % (__SVE_FFR_SIZE(vq) * 8);
	for (i = 0; i < bits / 8; i++)
		lbuf[i] = 0xff;
	/* Partial byte at the boundary carries the remaining low bits */
	if (bits / 8 != __SVE_FFR_SIZE(vq))
		lbuf[i] = (1 << (bits % 8)) - 1;
}
888 
/*
 * Mirror the FPSIMD V register values into the low 128 bits of the
 * corresponding SVE Z registers in the in-memory (little endian)
 * format, since the architecture aliases V onto the bottom of Z.
 */
static void fpsimd_to_sve(__uint128_t *v, char *z, int vl)
{
	int vq = __sve_vq_from_vl(vl);
	int i;
	__uint128_t *p;

	/* No vector extension: nothing to mirror */
	if (!vl)
		return;

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		p = (__uint128_t *)&z[__SVE_ZREG_OFFSET(vq, i)];
		*p = arm64_cpu_to_le128(v[i]);
	}
}
903 
/*
 * Generate the *_in register values the child will load, derive the
 * matching *_expected values (accounting for state that should read
 * as zero, e.g. ZA with PSTATE.ZA clear) and zero the *_out buffers
 * the child will save into.
 */
static void set_initial_values(struct test_config *config)
{
	int vq = __sve_vq_from_vl(vl_in(config));
	int sme_vq = __sve_vq_from_vl(config->sme_vl_in);

	svcr_in = config->svcr_in;
	svcr_expected = config->svcr_expected;
	svcr_out = 0;

	fill_random(&v_in, sizeof(v_in));
	memcpy(v_expected, v_in, sizeof(v_in));
	memset(v_out, 0, sizeof(v_out));

	/* Changes will be handled in the test case */
	if (sve_supported() || (config->svcr_in & SVCR_SM)) {
		/* The low 128 bits of Z are shared with the V registers */
		fill_random(&z_in, __SVE_ZREGS_SIZE(vq));
		fpsimd_to_sve(v_in, z_in, vl_in(config));
		memcpy(z_expected, z_in, __SVE_ZREGS_SIZE(vq));
		memset(z_out, 0, sizeof(z_out));

		fill_random(&p_in, __SVE_PREGS_SIZE(vq));
		memcpy(p_expected, p_in, __SVE_PREGS_SIZE(vq));
		memset(p_out, 0, sizeof(p_out));

		/* Without FA64, FFR is not accessible in streaming mode */
		if ((config->svcr_in & SVCR_SM) && !fa64_supported())
			memset(ffr_in, 0, __SVE_PREG_SIZE(vq));
		else
			fill_random_ffr(&ffr_in, vq);
		memcpy(ffr_expected, ffr_in, __SVE_PREG_SIZE(vq));
		memset(ffr_out, 0, __SVE_PREG_SIZE(vq));
	}

	/* ZA only holds data while PSTATE.ZA is set */
	if (config->svcr_in & SVCR_ZA)
		fill_random(za_in, ZA_SIG_REGS_SIZE(sme_vq));
	else
		memset(za_in, 0, ZA_SIG_REGS_SIZE(sme_vq));
	if (config->svcr_expected & SVCR_ZA)
		memcpy(za_expected, za_in, ZA_SIG_REGS_SIZE(sme_vq));
	else
		memset(za_expected, 0, ZA_SIG_REGS_SIZE(sme_vq));
	if (sme_supported())
		memset(za_out, 0, sizeof(za_out));

	/* ZT0 follows the same PSTATE.ZA rules as ZA */
	if (sme2_supported()) {
		if (config->svcr_in & SVCR_ZA)
			fill_random(zt_in, ZT_SIG_REG_BYTES);
		else
			memset(zt_in, 0, ZT_SIG_REG_BYTES);
		if (config->svcr_expected & SVCR_ZA)
			memcpy(zt_expected, zt_in, ZT_SIG_REG_BYTES);
		else
			memset(zt_expected, 0, ZT_SIG_REG_BYTES);
		memset(zt_out, 0, sizeof(zt_out));
	}

	if (fpmr_supported()) {
		/* Restrict to bits that are safe to set blindly */
		fill_random(&fpmr_in, sizeof(fpmr_in));
		fpmr_in &= FPMR_SAFE_BITS;
		fpmr_expected = fpmr_in;
	} else {
		fpmr_in = 0;
		fpmr_expected = 0;
		fpmr_out = 0;
	}
}
970 
/*
 * Compare the register state the child saved after the test against
 * the expected values, logging every mismatch.  Returns true if all
 * the state matched.
 */
static bool check_memory_values(struct test_config *config)
{
	bool pass = true;
	int vq, sme_vq;

	if (!compare_buffer("saved V", v_out, v_expected, sizeof(v_out)))
		pass = false;

	/* Sizes are based on the VLs we expect after the test */
	vq = __sve_vq_from_vl(vl_expected(config));
	sme_vq = __sve_vq_from_vl(config->sme_vl_expected);

	if (svcr_out != svcr_expected) {
		ksft_print_msg("Mismatch in saved SVCR %lx != %lx\n",
			       svcr_out, svcr_expected);
		pass = false;
	}

	if (sve_vl_out != config->sve_vl_expected) {
		ksft_print_msg("Mismatch in SVE VL: %ld != %d\n",
			       sve_vl_out, config->sve_vl_expected);
		pass = false;
	}

	if (sme_vl_out != config->sme_vl_expected) {
		ksft_print_msg("Mismatch in SME VL: %ld != %d\n",
			       sme_vl_out, config->sme_vl_expected);
		pass = false;
	}

	if (!compare_buffer("saved Z", z_out, z_expected,
			    __SVE_ZREGS_SIZE(vq)))
		pass = false;

	if (!compare_buffer("saved P", p_out, p_expected,
			    __SVE_PREGS_SIZE(vq)))
		pass = false;

	if (!compare_buffer("saved FFR", ffr_out, ffr_expected,
			    __SVE_PREG_SIZE(vq)))
		pass = false;

	if (!compare_buffer("saved ZA", za_out, za_expected,
			    ZA_PT_ZA_SIZE(sme_vq)))
		pass = false;

	if (!compare_buffer("saved ZT", zt_out, zt_expected, ZT_SIG_REG_BYTES))
		pass = false;

	if (fpmr_out != fpmr_expected) {
		ksft_print_msg("Mismatch in saved FPMR: %lx != %lx\n",
			       fpmr_out, fpmr_expected);
		pass = false;
	}

	return pass;
}
1027 
sve_sme_same(struct test_config * config)1028 static bool sve_sme_same(struct test_config *config)
1029 {
1030 	if (config->sve_vl_in != config->sve_vl_expected)
1031 		return false;
1032 
1033 	if (config->sme_vl_in != config->sme_vl_expected)
1034 		return false;
1035 
1036 	if (config->svcr_in != config->svcr_expected)
1037 		return false;
1038 
1039 	return true;
1040 }
1041 
sve_write_supported(struct test_config * config)1042 static bool sve_write_supported(struct test_config *config)
1043 {
1044 	if (!sve_supported() && !sme_supported())
1045 		return false;
1046 
1047 	if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA))
1048 		return false;
1049 
1050 	if (config->svcr_expected & SVCR_SM) {
1051 		if (config->sve_vl_in != config->sve_vl_expected) {
1052 			return false;
1053 		}
1054 
1055 		/* Changing the SME VL disables ZA */
1056 		if ((config->svcr_expected & SVCR_ZA) &&
1057 		    (config->sme_vl_in != config->sme_vl_expected)) {
1058 			return false;
1059 		}
1060 	} else {
1061 		if (config->sme_vl_in != config->sme_vl_expected) {
1062 			return false;
1063 		}
1064 
1065 		if (!sve_supported())
1066 			return false;
1067 	}
1068 
1069 	return true;
1070 }
1071 
sve_write_fpsimd_supported(struct test_config * config)1072 static bool sve_write_fpsimd_supported(struct test_config *config)
1073 {
1074 	if (!sve_supported())
1075 		return false;
1076 
1077 	if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA))
1078 		return false;
1079 
1080 	if (config->svcr_expected & SVCR_SM)
1081 		return false;
1082 
1083 	if (config->sme_vl_in != config->sme_vl_expected)
1084 		return false;
1085 
1086 	return true;
1087 }
1088 
/*
 * Expected state after an FPSIMD write: new random V values, with the
 * SVE state flushed to zero apart from the low bits of Z which alias
 * the V registers.
 */
static void fpsimd_write_expected(struct test_config *config)
{
	int vl;

	fill_random(&v_expected, sizeof(v_expected));

	/* The SVE registers are flushed by a FPSIMD write */
	vl = vl_expected(config);

	memset(z_expected, 0, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl)));
	memset(p_expected, 0, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl)));
	memset(ffr_expected, 0, __SVE_PREG_SIZE(__sve_vq_from_vl(vl)));

	/* The low 128 bits of each Z register alias the V registers */
	fpsimd_to_sve(v_expected, z_expected, vl);
}
1104 
fpsimd_write(pid_t child,struct test_config * test_config)1105 static void fpsimd_write(pid_t child, struct test_config *test_config)
1106 {
1107 	struct user_fpsimd_state fpsimd;
1108 	struct iovec iov;
1109 	int ret;
1110 
1111 	memset(&fpsimd, 0, sizeof(fpsimd));
1112 	memcpy(&fpsimd.vregs, v_expected, sizeof(v_expected));
1113 
1114 	iov.iov_base = &fpsimd;
1115 	iov.iov_len = sizeof(fpsimd);
1116 	ret = ptrace(PTRACE_SETREGSET, child, NT_PRFPREG, &iov);
1117 	if (ret == -1)
1118 		ksft_print_msg("FPSIMD set failed: (%s) %d\n",
1119 			       strerror(errno), errno);
1120 }
1121 
/*
 * FPMR writes need FPMR hardware support and a configuration that
 * doesn't change any vector lengths or SVCR state.
 */
static bool fpmr_write_supported(struct test_config *config)
{
	return fpmr_supported() && sve_sme_same(config);
}
1132 
fpmr_write_expected(struct test_config * config)1133 static void fpmr_write_expected(struct test_config *config)
1134 {
1135 	fill_random(&fpmr_expected, sizeof(fpmr_expected));
1136 	fpmr_expected &= FPMR_SAFE_BITS;
1137 }
1138 
fpmr_write(pid_t child,struct test_config * config)1139 static void fpmr_write(pid_t child, struct test_config *config)
1140 {
1141 	struct iovec iov;
1142 	int ret;
1143 
1144 	iov.iov_len = sizeof(fpmr_expected);
1145 	iov.iov_base = &fpmr_expected;
1146 	ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_FPMR, &iov);
1147 	if (ret != 0)
1148 		ksft_print_msg("Failed to write FPMR: %s (%d)\n",
1149 			       strerror(errno), errno);
1150 }
1151 
/*
 * Generate the register state we expect to read back after an SVE
 * regset write: random Z, P and FFR contents for the expected VL, with
 * the low bits of Z mirrored into the V registers.  Does nothing if no
 * vector length is expected.
 */
static void sve_write_expected(struct test_config *config)
{
	int vl = vl_expected(config);
	int sme_vq = __sve_vq_from_vl(config->sme_vl_expected);

	/* No expected vector state, leave expected values untouched */
	if (!vl)
		return;

	fill_random(z_expected, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl)));
	fill_random(p_expected, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl)));

	/*
	 * In streaming mode without FA64 FFR is not accessible and is
	 * expected to read as zero; note this checks the file-scope
	 * svcr_expected, not config->svcr_expected.
	 */
	if ((svcr_expected & SVCR_SM) && !fa64_supported())
		memset(ffr_expected, 0, __SVE_PREG_SIZE(sme_vq));
	else
		fill_random_ffr(ffr_expected, __sve_vq_from_vl(vl));

	/* Share the low bits of Z with V */
	fill_random(&v_expected, sizeof(v_expected));
	fpsimd_to_sve(v_expected, z_expected, vl);

	/* Changing the SME VL flushes ZA and ZT */
	if (config->sme_vl_in != config->sme_vl_expected) {
		memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
		memset(zt_expected, 0, sizeof(zt_expected));
	}
}
1177 
/*
 * Write the expected Z/P/FFR values to the child in full SVE format,
 * using NT_ARM_SSVE when streaming mode is expected and NT_ARM_SVE
 * otherwise.  Builds a user_sve_header followed by the register data
 * at the layout offsets for the expected VL.
 */
static void sve_write_sve(pid_t child, struct test_config *config)
{
	struct user_sve_header *sve;
	struct iovec iov;
	int ret, vl, vq, regset;

	vl = vl_expected(config);
	vq = __sve_vq_from_vl(vl);

	/* Nothing to write if no vector state is expected */
	if (!vl)
		return;

	iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base) {
		ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
			       iov.iov_len);
		return;
	}
	memset(iov.iov_base, 0, iov.iov_len);

	/* Header describes the size, format and VL of the payload */
	sve = iov.iov_base;
	sve->size = iov.iov_len;
	sve->flags = SVE_PT_REGS_SVE;
	sve->vl = vl;

	/* Copy the registers in at their layout offsets for this VQ */
	memcpy(iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
	       z_expected, SVE_PT_SVE_ZREGS_SIZE(vq));
	memcpy(iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
	       p_expected, SVE_PT_SVE_PREGS_SIZE(vq));
	memcpy(iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
	       ffr_expected, SVE_PT_SVE_PREG_SIZE(vq));

	/* Streaming mode state is written via the SSVE regset */
	if (svcr_expected & SVCR_SM)
		regset = NT_ARM_SSVE;
	else
		regset = NT_ARM_SVE;

	ret = ptrace(PTRACE_SETREGSET, child, regset, &iov);
	if (ret != 0)
		ksft_print_msg("Failed to write SVE: %s (%d)\n",
			       strerror(errno), errno);

	free(iov.iov_base);
}
1223 
sve_write_fpsimd(pid_t child,struct test_config * config)1224 static void sve_write_fpsimd(pid_t child, struct test_config *config)
1225 {
1226 	struct user_sve_header *sve;
1227 	struct user_fpsimd_state *fpsimd;
1228 	struct iovec iov;
1229 	int ret, vl, vq;
1230 
1231 	vl = vl_expected(config);
1232 	vq = __sve_vq_from_vl(vl);
1233 
1234 	if (!vl)
1235 		return;
1236 
1237 	iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq,
1238 							  SVE_PT_REGS_FPSIMD);
1239 	iov.iov_base = malloc(iov.iov_len);
1240 	if (!iov.iov_base) {
1241 		ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
1242 			       iov.iov_len);
1243 		return;
1244 	}
1245 	memset(iov.iov_base, 0, iov.iov_len);
1246 
1247 	sve = iov.iov_base;
1248 	sve->size = iov.iov_len;
1249 	sve->flags = SVE_PT_REGS_FPSIMD;
1250 	sve->vl = vl;
1251 
1252 	fpsimd = iov.iov_base + SVE_PT_REGS_OFFSET;
1253 	memcpy(&fpsimd->vregs, v_expected, sizeof(v_expected));
1254 
1255 	ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_SVE, &iov);
1256 	if (ret != 0)
1257 		ksft_print_msg("Failed to write SVE: %s (%d)\n",
1258 			       strerror(errno), errno);
1259 
1260 	free(iov.iov_base);
1261 }
1262 
za_write_supported(struct test_config * config)1263 static bool za_write_supported(struct test_config *config)
1264 {
1265 	if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM))
1266 		return false;
1267 
1268 	return true;
1269 }
1270 
/*
 * Generate the state we expect after a ZA write: random ZA contents if
 * ZA is expected to be enabled, otherwise zeroed ZA and ZT.  A write
 * which changes the SME VL additionally flushes the SVE state.
 */
static void za_write_expected(struct test_config *config)
{
	int sme_vq, sve_vq;

	sme_vq = __sve_vq_from_vl(config->sme_vl_expected);

	if (config->svcr_expected & SVCR_ZA) {
		fill_random(za_expected, ZA_PT_ZA_SIZE(sme_vq));
	} else {
		/* With ZA disabled both ZA and ZT read as zero */
		memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
		memset(zt_expected, 0, sizeof(zt_expected));
	}

	/* Changing the SME VL flushes ZT, SVE state */
	if (config->sme_vl_in != config->sme_vl_expected) {
		sve_vq = __sve_vq_from_vl(vl_expected(config));
		memset(z_expected, 0, __SVE_ZREGS_SIZE(sve_vq));
		memset(p_expected, 0, __SVE_PREGS_SIZE(sve_vq));
		memset(ffr_expected, 0, __SVE_PREG_SIZE(sve_vq));
		memset(zt_expected, 0, sizeof(zt_expected));

		/* The flushed Z registers still share their low bits with V */
		fpsimd_to_sve(v_expected, z_expected, vl_expected(config));
	}
}
1295 
/*
 * Write ZA state to the child via NT_ARM_ZA.  If ZA is expected to be
 * enabled the payload carries the full ZA data; otherwise only the
 * header is written, which disables ZA.
 */
static void za_write(pid_t child, struct test_config *config)
{
	struct user_za_header *za;
	struct iovec iov;
	int ret, vq;

	vq = __sve_vq_from_vl(config->sme_vl_expected);

	/* Header-only writes disable ZA, full-size writes set it */
	if (config->svcr_expected & SVCR_ZA)
		iov.iov_len = ZA_PT_SIZE(vq);
	else
		iov.iov_len = sizeof(*za);
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base) {
		ksft_print_msg("Failed allocating %lu byte ZA write buffer\n",
			       iov.iov_len);
		return;
	}
	memset(iov.iov_base, 0, iov.iov_len);

	za = iov.iov_base;
	za->size = iov.iov_len;
	za->vl = config->sme_vl_expected;
	if (config->svcr_expected & SVCR_ZA)
		memcpy(iov.iov_base + ZA_PT_ZA_OFFSET, za_expected,
		       ZA_PT_ZA_SIZE(vq));

	ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_ZA, &iov);
	if (ret != 0)
		ksft_print_msg("Failed to write ZA: %s (%d)\n",
			       strerror(errno), errno);

	free(iov.iov_base);
}
1330 
zt_write_supported(struct test_config * config)1331 static bool zt_write_supported(struct test_config *config)
1332 {
1333 	if (!sme2_supported())
1334 		return false;
1335 	if (config->sme_vl_in != config->sme_vl_expected)
1336 		return false;
1337 	if (!(config->svcr_expected & SVCR_ZA))
1338 		return false;
1339 	if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM))
1340 		return false;
1341 
1342 	return true;
1343 }
1344 
zt_write_expected(struct test_config * config)1345 static void zt_write_expected(struct test_config *config)
1346 {
1347 	int sme_vq;
1348 
1349 	sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
1350 
1351 	if (config->svcr_expected & SVCR_ZA) {
1352 		fill_random(zt_expected, sizeof(zt_expected));
1353 	} else {
1354 		memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
1355 		memset(zt_expected, 0, sizeof(zt_expected));
1356 	}
1357 }
1358 
zt_write(pid_t child,struct test_config * config)1359 static void zt_write(pid_t child, struct test_config *config)
1360 {
1361 	struct iovec iov;
1362 	int ret;
1363 
1364 	iov.iov_len = ZT_SIG_REG_BYTES;
1365 	iov.iov_base = zt_expected;
1366 	ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_ZT, &iov);
1367 	if (ret != 0)
1368 		ksft_print_msg("Failed to write ZT: %s (%d)\n",
1369 			       strerror(errno), errno);
1370 }
1371 
1372 /* Actually run a test */
/*
 * Run one test case for one configuration: build a descriptive name,
 * skip if the configuration is unsupported, set up initial and
 * expected values, then fork a child whose register state the parent
 * modifies and verifies via ptrace.
 */
static void run_test(struct test_definition *test, struct test_config *config)
{
	pid_t child;
	char name[1024];
	bool pass;

	/* Include whichever VL/SVCR parameters the hardware supports */
	if (sve_supported() && sme_supported())
		snprintf(name, sizeof(name), "%s, SVE %d->%d, SME %d/%x->%d/%x",
			 test->name,
			 config->sve_vl_in, config->sve_vl_expected,
			 config->sme_vl_in, config->svcr_in,
			 config->sme_vl_expected, config->svcr_expected);
	else if (sve_supported())
		snprintf(name, sizeof(name), "%s, SVE %d->%d", test->name,
			 config->sve_vl_in, config->sve_vl_expected);
	else if (sme_supported())
		snprintf(name, sizeof(name), "%s, SME %d/%x->%d/%x",
			 test->name,
			 config->sme_vl_in, config->svcr_in,
			 config->sme_vl_expected, config->svcr_expected);
	else
		snprintf(name, sizeof(name), "%s", test->name);

	if (test->supported && !test->supported(config)) {
		ksft_test_result_skip("%s\n", name);
		return;
	}

	set_initial_values(config);

	if (test->set_expected_values)
		test->set_expected_values(config);

	child = fork();
	if (child < 0)
		ksft_exit_fail_msg("fork() failed: %s (%d)\n",
				   strerror(errno), errno);
	/* run_child() never returns */
	if (child == 0)
		run_child(config);

	/* The test fails if either the registers or memory mismatch */
	pass = run_parent(child, test, config);
	if (!check_memory_values(config))
		pass = false;

	ksft_test_result(pass, "%s\n", name);
}
1420 
run_tests(struct test_definition defs[],int count,struct test_config * config)1421 static void run_tests(struct test_definition defs[], int count,
1422 		      struct test_config *config)
1423 {
1424 	int i;
1425 
1426 	for (i = 0; i < count; i++)
1427 		run_test(&defs[i], config);
1428 }
1429 
/* Tests run for every configuration, needing only FPSIMD/FPMR state */
static struct test_definition base_test_defs[] = {
	{
		.name = "No writes",
		.supported = sve_sme_same,
	},
	{
		.name = "FPSIMD write",
		.supported = sve_sme_same,
		.set_expected_values = fpsimd_write_expected,
		.modify_values = fpsimd_write,
	},
	{
		.name = "FPMR write",
		.supported = fpmr_write_supported,
		.set_expected_values = fpmr_write_expected,
		.modify_values = fpmr_write,
	},
};
1448 
/* Tests which write via the SVE regsets */
static struct test_definition sve_test_defs[] = {
	{
		.name = "SVE write",
		.supported = sve_write_supported,
		.set_expected_values = sve_write_expected,
		.modify_values = sve_write_sve,
	},
	{
		.name = "SVE write FPSIMD format",
		.supported = sve_write_fpsimd_supported,
		.set_expected_values = fpsimd_write_expected,
		.modify_values = sve_write_fpsimd,
	},
};
1463 
/* Tests which write ZA state, only run when SME is supported */
static struct test_definition za_test_defs[] = {
	{
		.name = "ZA write",
		.supported = za_write_supported,
		.set_expected_values = za_write_expected,
		.modify_values = za_write,
	},
};
1472 
/* Tests which write ZT state, only run when SME2 is supported */
static struct test_definition zt_test_defs[] = {
	{
		.name = "ZT write",
		.supported = zt_write_supported,
		.set_expected_values = zt_write_expected,
		.modify_values = zt_write,
	},
};
1481 
/* Vector lengths to test, discovered at runtime by probe_vls() */
static int sve_vls[MAX_NUM_VLS], sme_vls[MAX_NUM_VLS];
static int sve_vl_count, sme_vl_count;
1484 
/*
 * Discover the supported vector lengths by attempting to set each
 * possible VL from the largest downwards with the given prctl and
 * recording what the kernel actually gave us.  To bound the number of
 * test combinations only the first (largest) and last (smallest) VLs
 * are kept when more than two are found.
 */
static void probe_vls(const char *name, int vls[], int *vl_count, int set_vl)
{
	unsigned int vq;
	int vl;

	*vl_count = 0;

	for (vq = ARCH_VQ_MAX; vq > 0; vq /= 2) {
		vl = prctl(set_vl, vq * 16);
		if (vl == -1)
			ksft_exit_fail_msg("SET_VL failed: %s (%d)\n",
					   strerror(errno), errno);

		vl &= PR_SVE_VL_LEN_MASK;

		/* The kernel clamped us to a VL we already saw, done */
		if (*vl_count && (vl == vls[*vl_count - 1]))
			break;

		/* Continue probing downwards from the VL we actually got */
		vq = sve_vq_from_vl(vl);

		vls[*vl_count] = vl;
		*vl_count += 1;
	}

	if (*vl_count > 2) {
		/* Just use the minimum and maximum */
		vls[1] = vls[*vl_count - 1];
		ksft_print_msg("%d %s VLs, using %d and %d\n",
			       *vl_count, name, vls[0], vls[1]);
		*vl_count = 2;
	} else {
		ksft_print_msg("%d %s VLs\n", *vl_count, name);
	}
}
1519 
/* All testable initial/expected SVCR (SM and ZA bit) combinations */
static struct {
	int svcr_in, svcr_expected;
} svcr_combinations[] = {
	{ .svcr_in = 0, .svcr_expected = 0, },
	{ .svcr_in = 0, .svcr_expected = SVCR_SM, },
	{ .svcr_in = 0, .svcr_expected = SVCR_ZA, },
	/* Can't enable both SM and ZA with a single ptrace write */

	{ .svcr_in = SVCR_SM, .svcr_expected = 0, },
	{ .svcr_in = SVCR_SM, .svcr_expected = SVCR_SM, },
	{ .svcr_in = SVCR_SM, .svcr_expected = SVCR_ZA, },
	{ .svcr_in = SVCR_SM, .svcr_expected = SVCR_SM | SVCR_ZA, },

	{ .svcr_in = SVCR_ZA, .svcr_expected = 0, },
	{ .svcr_in = SVCR_ZA, .svcr_expected = SVCR_SM, },
	{ .svcr_in = SVCR_ZA, .svcr_expected = SVCR_ZA, },
	{ .svcr_in = SVCR_ZA, .svcr_expected = SVCR_SM | SVCR_ZA, },

	{ .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = 0, },
	{ .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_SM, },
	{ .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_ZA, },
	{ .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_SM | SVCR_ZA, },
};
1543 
run_sve_tests(void)1544 static void run_sve_tests(void)
1545 {
1546 	struct test_config test_config;
1547 	int i, j;
1548 
1549 	if (!sve_supported())
1550 		return;
1551 
1552 	test_config.sme_vl_in = sme_vls[0];
1553 	test_config.sme_vl_expected = sme_vls[0];
1554 	test_config.svcr_in = 0;
1555 	test_config.svcr_expected = 0;
1556 
1557 	for (i = 0; i < sve_vl_count; i++) {
1558 		test_config.sve_vl_in = sve_vls[i];
1559 
1560 		for (j = 0; j < sve_vl_count; j++) {
1561 			test_config.sve_vl_expected = sve_vls[j];
1562 
1563 			run_tests(base_test_defs,
1564 				  ARRAY_SIZE(base_test_defs),
1565 				  &test_config);
1566 			if (sve_supported())
1567 				run_tests(sve_test_defs,
1568 					  ARRAY_SIZE(sve_test_defs),
1569 					  &test_config);
1570 		}
1571 	}
1572 
1573 }
1574 
/*
 * Run every test set for every combination of initial and expected SME
 * VL and every SVCR combination, with SVE VL held constant.
 */
static void run_sme_tests(void)
{
	struct test_config test_config;
	int i, j, k;

	if (!sme_supported())
		return;

	/* SVE VL is held at the default for all SME runs */
	test_config.sve_vl_in = sve_vls[0];
	test_config.sve_vl_expected = sve_vls[0];

	/*
	 * Every SME VL/SVCR combination
	 */
	for (i = 0; i < sme_vl_count; i++) {
		test_config.sme_vl_in = sme_vls[i];

		for (j = 0; j < sme_vl_count; j++) {
			test_config.sme_vl_expected = sme_vls[j];

			for (k = 0; k < ARRAY_SIZE(svcr_combinations); k++) {
				test_config.svcr_in = svcr_combinations[k].svcr_in;
				test_config.svcr_expected = svcr_combinations[k].svcr_expected;

				run_tests(base_test_defs,
					  ARRAY_SIZE(base_test_defs),
					  &test_config);
				run_tests(sve_test_defs,
					  ARRAY_SIZE(sve_test_defs),
					  &test_config);
				run_tests(za_test_defs,
					  ARRAY_SIZE(za_test_defs),
					  &test_config);

				/* ZT tests additionally need SME2 */
				if (sme2_supported())
					run_tests(zt_test_defs,
						  ARRAY_SIZE(zt_test_defs),
						  &test_config);
			}
		}
	}
}
1617 
/*
 * Probe hardware support and vector lengths, compute the kselftest
 * plan, install signal handling, then run each applicable test set.
 */
int main(void)
{
	struct test_config test_config;
	struct sigaction sa;
	int tests, ret, tmp;

	/* Seed the random register values, varying per run */
	srandom(getpid());

	ksft_print_header();

	/* Plan: base + SVE tests for each SVE VL in/out combination */
	if (sve_supported()) {
		probe_vls("SVE", sve_vls, &sve_vl_count, PR_SVE_SET_VL);

		tests = ARRAY_SIZE(base_test_defs) +
			ARRAY_SIZE(sve_test_defs);
		tests *= sve_vl_count * sve_vl_count;
	} else {
		/* Only run the FPSIMD tests */
		sve_vl_count = 1;
		tests = ARRAY_SIZE(base_test_defs);
	}

	/* Plan: all sets for each SME VL in/out and SVCR combination */
	if (sme_supported()) {
		probe_vls("SME", sme_vls, &sme_vl_count, PR_SME_SET_VL);

		tmp = ARRAY_SIZE(base_test_defs) + ARRAY_SIZE(sve_test_defs)
			+ ARRAY_SIZE(za_test_defs);

		if (sme2_supported())
			tmp += ARRAY_SIZE(zt_test_defs);

		tmp *= sme_vl_count * sme_vl_count;
		tmp *= ARRAY_SIZE(svcr_combinations);
		tests += tmp;
	} else {
		sme_vl_count = 1;
	}

	if (sme2_supported())
		ksft_print_msg("SME2 supported\n");

	if (fa64_supported())
		ksft_print_msg("FA64 supported\n");

	if (fpmr_supported())
		ksft_print_msg("FPMR supported\n");

	ksft_set_plan(tests);

	/* Get signal handlers ready before we start any children */
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handle_alarm;
	sa.sa_flags = SA_RESTART | SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	ret = sigaction(SIGALRM, &sa, NULL);
	if (ret < 0)
		ksft_print_msg("Failed to install SIGALRM handler: %s (%d)\n",
			       strerror(errno), errno);

	/*
	 * Run the test set if there is no SVE or SME, with those we
	 * have to pick a VL for each run.
	 */
	if (!sve_supported() && !sme_supported()) {
		test_config.sve_vl_in = 0;
		test_config.sve_vl_expected = 0;
		test_config.sme_vl_in = 0;
		test_config.sme_vl_expected = 0;
		test_config.svcr_in = 0;
		test_config.svcr_expected = 0;

		run_tests(base_test_defs, ARRAY_SIZE(base_test_defs),
			  &test_config);
	}

	run_sve_tests();
	run_sme_tests();

	/* Reports results and exits with the appropriate status */
	ksft_finished();
}
1698