xref: /kvm-unit-tests/lib/s390x/asm/arch_def.h (revision 086985a39ccb9b7b3da910d3a23eb764f0b76423)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (c) 2017 Red Hat Inc
4  *
5  * Authors:
6  *  David Hildenbrand <david@redhat.com>
7  */
8 #ifndef _ASMS390X_ARCH_DEF_H_
9 #define _ASMS390X_ARCH_DEF_H_
10 
/*
 * Layout of a function-call stack frame as used by the helpers in this
 * library; back_chain points at the caller's frame so a backtrace can
 * walk the chain (NOTE(review): presumably NULL-terminated at the
 * outermost frame, per the s390x ELF ABI — confirm against start-up code).
 */
struct stack_frame {
	struct stack_frame *back_chain;
	uint64_t reserved;
	/* GRs 2 - 5 */
	uint64_t argument_area[4];
	/* GRs 6 - 15 */
	uint64_t grs[10];
	/* FPRs 0, 2, 4, 6 */
	int64_t  fprs[4];
};
21 
/*
 * Extended stack frame used by interrupt entry code: in addition to the
 * regular frame head it saves the complete register state (all GRs, the
 * FP control, all FPRs and all CRs) of the interrupted context.
 */
struct stack_frame_int {
	struct stack_frame *back_chain;
	uint64_t reserved;
	/*
	 * The GRs are offset compatible with struct stack_frame so we
	 * can easily fetch GR14 for backtraces.
	 */
	/* GRs 2 - 15 */
	uint64_t grs0[14];
	/* GRs 0 and 1 */
	uint64_t grs1[2];
	uint32_t reserved1;
	uint32_t fpc;		/* floating-point control register */
	uint64_t fprs[16];	/* floating-point registers 0 - 15 */
	uint64_t crs[16];	/* control registers 0 - 15 */
};
38 
/* 16-byte z/Architecture PSW: control/mask bits plus instruction address */
struct psw {
	uint64_t	mask;	/* see the PSW_MASK_* definitions below */
	uint64_t	addr;	/* instruction address */
};
43 
/* Address-space selector values (PSW address-space control) */
#define AS_PRIM				0
#define AS_ACCR				1
#define AS_SECN				2
#define AS_HOME				3

/* Bits of the PSW mask word (struct psw.mask) */
#define PSW_MASK_DAT			0x0400000000000000UL
#define PSW_MASK_IO			0x0200000000000000UL
#define PSW_MASK_EXT			0x0100000000000000UL
#define PSW_MASK_KEY			0x00F0000000000000UL
#define PSW_MASK_WAIT			0x0002000000000000UL
#define PSW_MASK_PSTATE			0x0001000000000000UL
#define PSW_MASK_EA			0x0000000100000000UL
#define PSW_MASK_BA			0x0000000080000000UL
#define PSW_MASK_64			(PSW_MASK_BA | PSW_MASK_EA)

/*
 * Control-register bit numbers for use with ctl_set_bit()/ctl_clear_bit().
 * Architecture documentation numbers CR bits from the MSB (bit 0) down,
 * while the helpers below shift from the LSB — hence the (63 - n) form.
 */
#define CTL0_LOW_ADDR_PROT			(63 - 35)
#define CTL0_EDAT				(63 - 40)
#define CTL0_FETCH_PROTECTION_OVERRIDE		(63 - 38)
#define CTL0_STORAGE_PROTECTION_OVERRIDE	(63 - 39)
#define CTL0_IEP				(63 - 43)
#define CTL0_AFP				(63 - 45)
#define CTL0_VECTOR				(63 - 46)
#define CTL0_EMERGENCY_SIGNAL			(63 - 49)
#define CTL0_EXTERNAL_CALL			(63 - 50)
#define CTL0_CLOCK_COMPARATOR			(63 - 52)
#define CTL0_SERVICE_SIGNAL			(63 - 54)
#define CR0_EXTM_MASK			0x0000000000006200UL /* Combined external masks */

#define CTL2_GUARDED_STORAGE		(63 - 59)
73 
74 struct lowcore {
75 	uint8_t		pad_0x0000[0x0080 - 0x0000];	/* 0x0000 */
76 	uint32_t	ext_int_param;			/* 0x0080 */
77 	uint16_t	cpu_addr;			/* 0x0084 */
78 	uint16_t	ext_int_code;			/* 0x0086 */
79 	uint16_t	svc_int_id;			/* 0x0088 */
80 	uint16_t	svc_int_code;			/* 0x008a */
81 	uint16_t	pgm_int_id;			/* 0x008c */
82 	uint16_t	pgm_int_code;			/* 0x008e */
83 	uint32_t	dxc_vxc;			/* 0x0090 */
84 	uint16_t	mon_class_nb;			/* 0x0094 */
85 	uint8_t		per_code;			/* 0x0096 */
86 	uint8_t		per_atmid;			/* 0x0097 */
87 	uint64_t	per_addr;			/* 0x0098 */
88 	uint8_t		exc_acc_id;			/* 0x00a0 */
89 	uint8_t		per_acc_id;			/* 0x00a1 */
90 	uint8_t		op_acc_id;			/* 0x00a2 */
91 	uint8_t		arch_mode_id;			/* 0x00a3 */
92 	uint8_t		pad_0x00a4[0x00a8 - 0x00a4];	/* 0x00a4 */
93 	uint64_t	trans_exc_id;			/* 0x00a8 */
94 	uint64_t	mon_code;			/* 0x00b0 */
95 	uint32_t	subsys_id_word;			/* 0x00b8 */
96 	uint32_t	io_int_param;			/* 0x00bc */
97 	uint32_t	io_int_word;			/* 0x00c0 */
98 	uint8_t		pad_0x00c4[0x00c8 - 0x00c4];	/* 0x00c4 */
99 	uint32_t	stfl;				/* 0x00c8 */
100 	uint8_t		pad_0x00cc[0x00e8 - 0x00cc];	/* 0x00cc */
101 	uint64_t	mcck_int_code;			/* 0x00e8 */
102 	uint8_t		pad_0x00f0[0x00f4 - 0x00f0];	/* 0x00f0 */
103 	uint32_t	ext_damage_code;		/* 0x00f4 */
104 	uint64_t	failing_storage_addr;		/* 0x00f8 */
105 	uint64_t	emon_ca_origin;			/* 0x0100 */
106 	uint32_t	emon_ca_size;			/* 0x0108 */
107 	uint32_t	emon_exc_count;			/* 0x010c */
108 	uint64_t	breaking_event_addr;		/* 0x0110 */
109 	uint8_t		pad_0x0118[0x0120 - 0x0118];	/* 0x0118 */
110 	struct psw	restart_old_psw;		/* 0x0120 */
111 	struct psw	ext_old_psw;			/* 0x0130 */
112 	struct psw	svc_old_psw;			/* 0x0140 */
113 	struct psw	pgm_old_psw;			/* 0x0150 */
114 	struct psw	mcck_old_psw;			/* 0x0160 */
115 	struct psw	io_old_psw;			/* 0x0170 */
116 	uint8_t		pad_0x0180[0x01a0 - 0x0180];	/* 0x0180 */
117 	struct psw	restart_new_psw;		/* 0x01a0 */
118 	struct psw	ext_new_psw;			/* 0x01b0 */
119 	struct psw	svc_new_psw;			/* 0x01c0 */
120 	struct psw	pgm_new_psw;			/* 0x01d0 */
121 	struct psw	mcck_new_psw;			/* 0x01e0 */
122 	struct psw	io_new_psw;			/* 0x01f0 */
123 	/* sw definition: save area for registers in interrupt handlers */
124 	uint64_t	sw_int_grs[16];			/* 0x0200 */
125 	uint8_t		pad_0x0280[0x0308 - 0x0280];	/* 0x0280 */
126 	uint64_t	sw_int_crs[16];			/* 0x0308 */
127 	struct psw	sw_int_psw;			/* 0x0388 */
128 	uint8_t		pad_0x0310[0x11b0 - 0x0398];	/* 0x0398 */
129 	uint64_t	mcck_ext_sa_addr;		/* 0x11b0 */
130 	uint8_t		pad_0x11b8[0x1200 - 0x11b8];	/* 0x11b8 */
131 	uint64_t	fprs_sa[16];			/* 0x1200 */
132 	uint64_t	grs_sa[16];			/* 0x1280 */
133 	struct psw	psw_sa;				/* 0x1300 */
134 	uint8_t		pad_0x1310[0x1318 - 0x1310];	/* 0x1310 */
135 	uint32_t	prefix_sa;			/* 0x1318 */
136 	uint32_t	fpc_sa;				/* 0x131c */
137 	uint8_t		pad_0x1320[0x1324 - 0x1320];	/* 0x1320 */
138 	uint32_t	tod_pr_sa;			/* 0x1324 */
139 	uint64_t	cputm_sa;			/* 0x1328 */
140 	uint64_t	cc_sa;				/* 0x1330 */
141 	uint8_t		pad_0x1338[0x1340 - 0x1338];	/* 0x1338 */
142 	uint32_t	ars_sa[16];			/* 0x1340 */
143 	uint64_t	crs_sa[16];			/* 0x1380 */
144 	uint8_t		pad_0x1400[0x1800 - 0x1400];	/* 0x1400 */
145 	uint8_t		pgm_int_tdb[0x1900 - 0x1800];	/* 0x1800 */
146 } __attribute__ ((__packed__));
147 _Static_assert(sizeof(struct lowcore) == 0x1900, "Lowcore size");
148 
149 extern struct lowcore lowcore;
150 
151 #define PGM_INT_CODE_OPERATION			0x01
152 #define PGM_INT_CODE_PRIVILEGED_OPERATION	0x02
153 #define PGM_INT_CODE_EXECUTE			0x03
154 #define PGM_INT_CODE_PROTECTION			0x04
155 #define PGM_INT_CODE_ADDRESSING			0x05
156 #define PGM_INT_CODE_SPECIFICATION		0x06
157 #define PGM_INT_CODE_DATA			0x07
158 #define PGM_INT_CODE_FIXED_POINT_OVERFLOW	0x08
159 #define PGM_INT_CODE_FIXED_POINT_DIVIDE		0x09
160 #define PGM_INT_CODE_DECIMAL_OVERFLOW		0x0a
161 #define PGM_INT_CODE_DECIMAL_DIVIDE		0x0b
162 #define PGM_INT_CODE_HFP_EXPONENT_OVERFLOW	0x0c
163 #define PGM_INT_CODE_HFP_EXPONENT_UNDERFLOW	0x0d
164 #define PGM_INT_CODE_HFP_SIGNIFICANCE		0x0e
165 #define PGM_INT_CODE_HFP_DIVIDE			0x0f
166 #define PGM_INT_CODE_SEGMENT_TRANSLATION	0x10
167 #define PGM_INT_CODE_PAGE_TRANSLATION		0x11
168 #define PGM_INT_CODE_TRANSLATION_SPEC		0x12
169 #define PGM_INT_CODE_SPECIAL_OPERATION		0x13
170 #define PGM_INT_CODE_OPERAND			0x15
171 #define PGM_INT_CODE_TRACE_TABLE		0x16
172 #define PGM_INT_CODE_VECTOR_PROCESSING		0x1b
173 #define PGM_INT_CODE_SPACE_SWITCH_EVENT		0x1c
174 #define PGM_INT_CODE_HFP_SQUARE_ROOT		0x1d
175 #define PGM_INT_CODE_PC_TRANSLATION_SPEC	0x1f
176 #define PGM_INT_CODE_AFX_TRANSLATION		0x20
177 #define PGM_INT_CODE_ASX_TRANSLATION		0x21
178 #define PGM_INT_CODE_LX_TRANSLATION		0x22
179 #define PGM_INT_CODE_EX_TRANSLATION		0x23
180 #define PGM_INT_CODE_PRIMARY_AUTHORITY		0x24
181 #define PGM_INT_CODE_SECONDARY_AUTHORITY	0x25
182 #define PGM_INT_CODE_LFX_TRANSLATION		0x26
183 #define PGM_INT_CODE_LSX_TRANSLATION		0x27
184 #define PGM_INT_CODE_ALET_SPECIFICATION		0x28
185 #define PGM_INT_CODE_ALEN_TRANSLATION		0x29
186 #define PGM_INT_CODE_ALE_SEQUENCE		0x2a
187 #define PGM_INT_CODE_ASTE_VALIDITY		0x2b
188 #define PGM_INT_CODE_ASTE_SEQUENCE		0x2c
189 #define PGM_INT_CODE_EXTENDED_AUTHORITY		0x2d
190 #define PGM_INT_CODE_LSTE_SEQUENCE		0x2e
191 #define PGM_INT_CODE_ASTE_INSTANCE		0x2f
192 #define PGM_INT_CODE_STACK_FULL			0x30
193 #define PGM_INT_CODE_STACK_EMPTY		0x31
194 #define PGM_INT_CODE_STACK_SPECIFICATION	0x32
195 #define PGM_INT_CODE_STACK_TYPE			0x33
196 #define PGM_INT_CODE_STACK_OPERATION		0x34
197 #define PGM_INT_CODE_ASCE_TYPE			0x38
198 #define PGM_INT_CODE_REGION_FIRST_TRANS		0x39
199 #define PGM_INT_CODE_REGION_SECOND_TRANS	0x3a
200 #define PGM_INT_CODE_REGION_THIRD_TRANS		0x3b
201 #define PGM_INT_CODE_SECURE_STOR_ACCESS		0x3d
202 #define PGM_INT_CODE_NON_SECURE_STOR_ACCESS	0x3e
203 #define PGM_INT_CODE_SECURE_STOR_VIOLATION	0x3f
204 #define PGM_INT_CODE_MONITOR_EVENT		0x40
205 #define PGM_INT_CODE_PER			0x80
206 #define PGM_INT_CODE_CRYPTO_OPERATION		0x119
207 #define PGM_INT_CODE_TX_ABORTED_EVENT		0x200
208 
/*
 * CPU identification as stored by the STIDP instruction (see stidp()).
 * NOTE(review): the field order relies on the compiler allocating
 * bitfields from the most significant bit, as GCC does on big-endian
 * s390x — this layout is not portable to little-endian hosts.
 */
struct cpuid {
	uint64_t version : 8;
	uint64_t id : 24;
	uint64_t type : 16;
	uint64_t format : 1;
	uint64_t reserved : 15;
};
216 
217 #define SVC_LEAVE_PSTATE 1
218 
219 static inline unsigned short stap(void)
220 {
221 	unsigned short cpu_address;
222 
223 	asm volatile("stap %0" : "=Q" (cpu_address));
224 	return cpu_address;
225 }
226 
227 static inline uint64_t stidp(void)
228 {
229 	uint64_t cpuid;
230 
231 	asm volatile("stidp %0" : "=Q" (cpuid));
232 
233 	return cpuid;
234 }
235 
/* Condition codes produced by TEST PROTECTION, as returned by tprot() */
enum tprot_permission {
	TPROT_READ_WRITE = 0,		/* fetching and storing permitted */
	TPROT_READ = 1,			/* fetching permitted, storing not */
	TPROT_RW_PROTECTED = 2,		/* neither fetching nor storing permitted */
	TPROT_TRANSL_UNAVAIL = 3,	/* translation not available */
};
242 
/**
 * tprot - test the access permission of a storage address via TEST PROTECTION
 * @addr: the (virtual) address to test
 * @access_key: the storage access key to test with
 *
 * Returns the instruction's condition code, mapped onto enum
 * tprot_permission.  The key is shifted left by 4 because TPROT takes
 * the access key in bits 4-7 of its second-operand address.
 */
static inline enum tprot_permission tprot(unsigned long addr, char access_key)
{
	int cc;

	asm volatile(
		"	tprot	0(%1),0(%2)\n"
		"	ipm	%0\n"		/* insert the CC into %0 ... */
		"	srl	%0,28\n"	/* ... and shift it down to bits 0-1 */
		: "=d" (cc) : "a" (addr), "a" (access_key << 4) : "cc");
	return (enum tprot_permission)cc;
}
254 
/**
 * lctlg - load a 64-bit value into a control register (LOAD CONTROL)
 * @cr: the control register number; must be a compile-time constant
 *      because of the "i" constraint
 * @value: the value to load
 */
static inline void lctlg(int cr, uint64_t value)
{
	asm volatile(
		"	lctlg	%1,%1,%0\n"
		: : "Q" (value), "i" (cr));
}
261 
/**
 * stctg - read the 64-bit value of a control register (STORE CONTROL)
 * @cr: the control register number; must be a compile-time constant
 *      because of the "i" constraint
 *
 * Returns the current contents of control register @cr.
 */
static inline uint64_t stctg(int cr)
{
	uint64_t value;

	asm volatile(
		"	stctg	%1,%1,%0\n"
		: "=Q" (value) : "i" (cr) : "memory");
	return value;
}
271 
272 static inline void ctl_set_bit(int cr, unsigned int bit)
273 {
274         uint64_t reg;
275 
276 	reg = stctg(cr);
277 	reg |= 1UL << bit;
278 	lctlg(cr, reg);
279 }
280 
281 static inline void ctl_clear_bit(int cr, unsigned int bit)
282 {
283         uint64_t reg;
284 
285 	reg = stctg(cr);
286 	reg &= ~(1UL << bit);
287 	lctlg(cr, reg);
288 }
289 
290 static inline uint64_t extract_psw_mask(void)
291 {
292 	uint32_t mask_upper = 0, mask_lower = 0;
293 
294 	asm volatile(
295 		"	epsw	%0,%1\n"
296 		: "=r" (mask_upper), "=a" (mask_lower));
297 
298 	return (uint64_t) mask_upper << 32 | mask_lower;
299 }
300 
/**
 * load_psw_mask - replace the current PSW mask
 * @mask: the new PSW mask
 *
 * Builds a PSW on the stack with the given mask, patches its address
 * field to the local label 0 and loads it with LPSWE, so execution
 * continues right behind the LPSWE with the new mask in effect.
 */
static inline void load_psw_mask(uint64_t mask)
{
	struct psw psw = {
		.mask = mask,
		.addr = 0,	/* filled in below with the address of label 0 */
	};
	uint64_t tmp = 0;

	asm volatile(
		"	larl	%0,0f\n"	/* tmp = address of label 0 */
		"	stg	%0,8(%1)\n"	/* psw.addr = tmp */
		"	lpswe	0(%1)\n"	/* load the new PSW */
		"0:\n"
		: "+r" (tmp) :  "a" (&psw) : "memory", "cc" );
}
316 
/**
 * psw_mask_clear_bits - clears bits from the current PSW mask
 * @clear: bitmask of bits that will be cleared
 */
static inline void psw_mask_clear_bits(uint64_t clear)
{
	uint64_t mask = extract_psw_mask();

	load_psw_mask(mask & ~clear);
}
325 
/**
 * psw_mask_set_bits - sets bits on the current PSW mask
 * @set: bitmask of bits that will be set
 */
static inline void psw_mask_set_bits(uint64_t set)
{
	uint64_t mask = extract_psw_mask();

	load_psw_mask(mask | set);
}
334 
/**
 * psw_mask_clear_and_set_bits - clears and sets bits on the current PSW mask
 * @clear: bitmask of bits that will be cleared
 * @set: bitmask of bits that will be set
 *
 * The bits in the @clear mask are cleared first, then the bits in the
 * @set mask are set, before the combined result is loaded.
 */
static inline void psw_mask_clear_and_set_bits(uint64_t clear, uint64_t set)
{
	uint64_t mask = extract_psw_mask();

	mask &= ~clear;
	mask |= set;
	load_psw_mask(mask);
}
347 
/**
 * enable_dat - enable the DAT bit in the current PSW
 *
 * With DAT (dynamic address translation) on, addresses are translated
 * through the page tables set up elsewhere.
 */
static inline void enable_dat(void)
{
	psw_mask_set_bits(PSW_MASK_DAT);
}
355 
/**
 * disable_dat - disable the DAT bit in the current PSW
 *
 * With DAT off, the CPU uses real addresses again.
 */
static inline void disable_dat(void)
{
	psw_mask_clear_bits(PSW_MASK_DAT);
}
363 
364 static inline void wait_for_interrupt(uint64_t irq_mask)
365 {
366 	uint64_t psw_mask = extract_psw_mask();
367 
368 	load_psw_mask(psw_mask | irq_mask | PSW_MASK_WAIT);
369 	/*
370 	 * After being woken and having processed the interrupt, let's restore
371 	 * the PSW mask.
372 	 */
373 	load_psw_mask(psw_mask);
374 }
375 
/* enter_pstate - drop from supervisor state into the problem state */
static inline void enter_pstate(void)
{
	psw_mask_set_bits(PSW_MASK_PSTATE);
}
380 
/*
 * leave_pstate - return from problem state to supervisor state.
 * NOTE(review): issues SVC SVC_LEAVE_PSTATE; presumably the library's
 * SVC interrupt handler clears PSW_MASK_PSTATE — verify in the
 * interrupt code.
 */
static inline void leave_pstate(void)
{
	asm volatile("	svc %0\n" : : "i" (SVC_LEAVE_PSTATE));
}
385 
/**
 * stsi - STORE SYSTEM INFORMATION
 * @addr: buffer that receives the requested system-information block
 * @fc: function code (information level)
 * @sel1: selector 1
 * @sel2: selector 2
 *
 * Returns the condition code of the STSI instruction (0 on success).
 * STSI takes its operands implicitly: fc/sel1 in GR0, sel2 in GR1 —
 * hence the register-pinned variables.
 */
static inline int stsi(void *addr, int fc, int sel1, int sel2)
{
	register int r0 asm("0") = (fc << 28) | sel1;
	register int r1 asm("1") = sel2;
	int cc;

	asm volatile(
		"stsi	0(%3)\n"
		"ipm	%[cc]\n"
		"srl	%[cc],28\n"
		: "+d" (r0), [cc] "=d" (cc)
		: "d" (r1), "a" (addr)
		: "cc", "memory");
	return cc;
}
401 
/**
 * stsi_get_fc - query the current configuration level
 *
 * Executes STSI with function code 0 (GR0 and GR1 zeroed), which stores
 * the current level into the high bits of GR0; returns that level
 * (GR0 >> 28).  Asserts that the instruction completed with cc 0.
 */
static inline unsigned long stsi_get_fc(void)
{
	register unsigned long r0 asm("0") = 0;
	register unsigned long r1 asm("1") = 0;
	int cc;

	asm volatile("stsi	0\n"
		     "ipm	%[cc]\n"
		     "srl	%[cc],28\n"
		     : "+d" (r0), [cc] "=d" (cc)
		     : "d" (r1)
		     : "cc", "memory");
	assert(!cc);
	return r0 >> 28;
}
417 
/**
 * servc - issue a SERVICE CALL (SCLP) request
 * @command: the SCLP command word
 * @sccb: address of the SCLP command control block
 *
 * The instruction is emitted via .insn because the assembler may not
 * know the servc mnemonic.  Returns the condition code.
 */
static inline int servc(uint32_t command, unsigned long sccb)
{
	int cc;

	asm volatile(
		"       .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"       ipm     %0\n"
		"       srl     %0,28"
		: "=&d" (cc) : "d" (command), "a" (sccb)
		: "cc", "memory");
	return cc;
}
430 
/* set_prefix - set the prefix register (SET PREFIX), relocating lowcore */
static inline void set_prefix(uint32_t new_prefix)
{
	asm volatile("	spx %0" : : "Q" (new_prefix) : "memory");
}
435 
436 static inline uint32_t get_prefix(void)
437 {
438 	uint32_t current_prefix;
439 
440 	asm volatile("	stpx %0" : "=Q" (current_prefix));
441 	return current_prefix;
442 }
443 
444 #endif
445