// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2021, 2022
 *
 * Specification exception test.
 * Tests that specification exceptions occur when expected.
 *
 * Can be extended by adding triggers to spec_ex_triggers, see comments below.
 */
#include <stdlib.h>
#include <libcflat.h>
#include <bitops.h>
#include <asm/interrupt.h>

/* toggled to signal occurrence of invalid psw fixup */
static bool invalid_psw_expected;
static struct psw expected_psw;
static struct psw invalid_psw;
static struct psw fixup_psw;

/*
 * The standard program exception handler cannot deal with invalid old PSWs,
 * especially not invalid instruction addresses, as in that case one cannot
 * find the instruction following the faulting one from the old PSW.
 * The PSW to return to is set by load_psw.
 */
static void fixup_invalid_psw(struct stack_frame_int *stack)
{
	assert_msg(invalid_psw_expected,
		   "Unexpected invalid PSW during program interrupt fixup: %#lx %#lx",
		   lowcore.pgm_old_psw.mask, lowcore.pgm_old_psw.addr);
	/* signal occurrence of invalid psw fixup */
	invalid_psw_expected = false;
	invalid_psw = lowcore.pgm_old_psw;
	lowcore.pgm_old_psw = fixup_psw;
}

/*
 * Load a possibly invalid PSW, but set up fixup_psw beforehand,
 * so that fixup_invalid_psw() can bring us back onto the right track.
 * Also acts as a compiler barrier, so none is required in
 * expect_invalid_psw()/check_invalid_psw().
 */
static void load_psw(struct psw psw)
{
	uint64_t scratch;

	/*
	 * The fixup psw is the current psw with the instruction address replaced
	 * by the address of the nop following the instruction loading the new psw.
	 */
	fixup_psw.mask = extract_psw_mask();
	asm volatile ( "larl	%[scratch],0f\n"
		"	stg	%[scratch],%[fixup_addr]\n"
		"	lpswe	%[psw]\n"
		"0:	nop\n"
		: [scratch] "=&d" (scratch),
		  [fixup_addr] "=&T" (fixup_psw.addr)
		: [psw] "Q" (psw)
		: "cc", "memory"
	);
}

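/*
 * Same as load_psw(), but load a short (64-bit) PSW with lpsw
 * instead of lpswe.
 */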
static void load_short_psw(struct short_psw psw)
{
	uint64_t scratch;

	fixup_psw.mask = extract_psw_mask();
	asm volatile ( "larl	%[scratch],0f\n"
		"	stg	%[scratch],%[fixup_addr]\n"
		"	lpsw	%[psw]\n"
		"0:	nop\n"
		: [scratch] "=&d" (scratch),
		  [fixup_addr] "=&T" (fixup_psw.addr)
		: [psw] "Q" (psw)
		: "cc", "memory"
	);
}

static void expect_invalid_psw(struct psw psw)
{
	expected_psw = psw;
	invalid_psw_expected = true;
}

static int check_invalid_psw(void)
{
	/* Since the fixup sets this to false, we check for false here. */
	if (!invalid_psw_expected) {
		if (expected_psw.mask == invalid_psw.mask &&
		    expected_psw.addr == invalid_psw.addr)
			return 0;
		report_fail("Wrong invalid PSW");
	} else {
		report_fail("Expected exception due to invalid PSW");
	}
	return 1;
}

/* For normal PSWs, bit 12 has to be 0 for the PSW to be valid. */
static int psw_bit_12_is_1(void)
{
	struct psw invalid = {
		.mask = BIT(63 - 12),
		.addr = 0x00000000deadbeee
	};

	expect_invalid_psw(invalid);
	load_psw(invalid);
	return check_invalid_psw();
}

/* A short PSW needs to have bit 12 set to be valid. */
static int short_psw_bit_12_is_0(void)
{
	struct psw invalid = {
		.mask = BIT(63 - 12),
		.addr = 0x00000000deadbeee
	};
	struct short_psw short_invalid = {
		.mask = 0x0,
		.addr = 0xdeadbeee
	};

	expect_invalid_psw(invalid);
	load_short_psw(short_invalid);
	/*
	 * lpsw may, but does not have to, check bit 12 before loading the
	 * new PSW, so unlike with lpswe we cannot check for the expected
	 * invalid PSW here.
	 */
	return 0;
}

static int bad_alignment(void)
{
	uint32_t words[5] __attribute__((aligned(16)));
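	/* words is quadword aligned, so &words[1] is word aligned but not quadword aligned */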
	uint32_t (*bad_aligned)[4] = (uint32_t (*)[4])&words[1];

	/* LOAD PAIR FROM QUADWORD (LPQ) requires quadword alignment */
	asm volatile ("lpq %%r6,%[bad]"
		      : : [bad] "T" (*bad_aligned)
		      : "%r6", "%r7"
	);
	return 0;
}

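/*
 * LPQ requires an even-odd register pair; encoding the odd register 7 as the
 * first operand is expected to trigger a specification exception.
 */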
static int not_even(void)
{
	uint64_t quad[2] __attribute__((aligned(16))) = {0};

	asm volatile (".insn	rxy,0xe3000000008f,%%r7,%[quad]" /* lpq %%r7,%[quad] */
		      : : [quad] "T" (quad)
		      : "%r7", "%r8"
	);
	return 0;
}

/*
 * Harness for specification exception testing.
 * func only triggers the exception; reporting is taken care of automatically.
 */
struct spec_ex_trigger {
	const char *name;
	int (*func)(void);
	void (*fixup)(struct stack_frame_int *stack);
};

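/*
 * To add a new test, implement a trigger function along the lines of those
 * above (return 0 unless the trigger has already reported a failure itself)
 * and append an entry to spec_ex_triggers below, for example (hypothetical):
 *	{ "my_trigger", &my_trigger, NULL },
 */
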
/* List of all tests to execute */
static const struct spec_ex_trigger spec_ex_triggers[] = {
	{ "psw_bit_12_is_1", &psw_bit_12_is_1, &fixup_invalid_psw },
	{ "short_psw_bit_12_is_0", &short_psw_bit_12_is_0, &fixup_invalid_psw },
	{ "bad_alignment", &bad_alignment, NULL },
	{ "not_even", &not_even, NULL },
	{ NULL, NULL, NULL },
};

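/*
 * Set up the program interrupt expectation, run the trigger and, unless the
 * trigger already reported a failure itself, check that a specification
 * exception was recognized.
 */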
static void test_spec_ex(const struct spec_ex_trigger *trigger)
{
	int rc;

	expect_pgm_int();
	register_pgm_cleanup_func(trigger->fixup);
	rc = trigger->func();
	register_pgm_cleanup_func(NULL);
	/* The test failed and the trigger has reported it itself, nothing more to do here */
	if (rc)
		return;
	check_pgm_int_code(PGM_INT_CODE_SPECIFICATION);
}

int main(int argc, char **argv)
{
	unsigned int i;

	report_prefix_push("specification exception");
	for (i = 0; spec_ex_triggers[i].name; i++) {
		report_prefix_push(spec_ex_triggers[i].name);
		test_spec_ex(&spec_ex_triggers[i]);
		report_prefix_pop();
	}
	report_prefix_pop();

	return report_summary();
}