xref: /kvm-unit-tests/lib/s390x/asm/arch_def.h (revision 588887078688358e111e4582ccc23e548f7ad1a6)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (c) 2017 Red Hat Inc
4  *
5  * Authors:
6  *  David Hildenbrand <david@redhat.com>
7  */
8 #ifndef _ASMS390X_ARCH_DEF_H_
9 #define _ASMS390X_ARCH_DEF_H_
10 
11 #include <util.h>
12 
/*
 * Stack frame layout as established by the function prologue: back
 * chain pointer followed by the register save areas.  Offsets must
 * match what the compiler-generated code stores, so the field order
 * and sizes are fixed.
 */
struct stack_frame {
	struct stack_frame *back_chain;	/* caller's frame; NULL terminates backtraces */
	uint64_t reserved;
	/* GRs 2 - 5 */
	uint64_t argument_area[4];
	/* GRs 6 - 15 */
	uint64_t grs[10];
	/* FPRs 0, 2, 4, 6 */
	int64_t  fprs[4];
};
23 
/*
 * Stack frame used by the interrupt entry code: saves all GRs, the
 * FPC, all FPRs and all CRs so the handler can inspect/modify the
 * interrupted context.
 */
struct stack_frame_int {
	struct stack_frame *back_chain;
	uint64_t reserved;
	/*
	 * The GRs are offset compatible with struct stack_frame so we
	 * can easily fetch GR14 for backtraces.
	 */
	/* GRs 2 - 15 */
	uint64_t grs0[14];
	/* GRs 0 and 1 */
	uint64_t grs1[2];
	uint32_t reserved1;
	uint32_t fpc;		/* floating point control register */
	uint64_t fprs[16];
	uint64_t crs[16];	/* control registers 0 - 15 */
};
40 
/*
 * 128-bit z/Architecture PSW (program status word): a 64-bit mask
 * followed by a 64-bit instruction address.  The anonymous union
 * overlays the mask with named bit fields; the declaration order
 * relies on the MSB-first bitfield layout used on s390x.  The bit
 * positions correspond to the PSW_MASK_* constants below.
 */
struct psw {
	union {
		uint64_t	mask;
		struct {
			uint64_t reserved00:1;
			uint64_t per:1;		/* PER event mask */
			uint64_t reserved02:3;
			uint64_t dat:1;		/* dynamic address translation */
			uint64_t io:1;		/* I/O interruption mask */
			uint64_t ext:1;		/* external interruption mask */
			uint64_t key:4;		/* PSW access key */
			uint64_t reserved12:1;
			uint64_t mchk:1;	/* machine-check mask */
			uint64_t wait:1;	/* wait state */
			uint64_t pstate:1;	/* problem (user) state */
			uint64_t as:2;		/* address-space control, see enum address_space */
			uint64_t cc:2;		/* condition code */
			uint64_t prg_mask:4;	/* program mask */
			uint64_t reserved24:7;
			uint64_t ea:1;		/* extended addressing */
			uint64_t ba:1;		/* basic addressing */
			uint64_t reserved33:31;
		};
	};
	uint64_t	addr;
};
static_assert(sizeof(struct psw) == 16);

/* Convenience constructor for a struct psw from a mask and an address. */
#define PSW(m, a) ((struct psw){ .mask = (m), .addr = (uint64_t)(a) })
70 
/* Short (ESA-mode, 64-bit) PSW format: 32-bit mask plus 32-bit address. */
struct short_psw {
	uint32_t	mask;
	uint32_t	addr;
};
75 
/* Software per-CPU state; reachable via THIS_CPU / lowcore.this_cpu. */
struct cpu {
	struct lowcore *lowcore;	/* this CPU's prefix area */
	uint64_t *stack;
	/* optional callbacks run by the program/external interrupt paths */
	void (*pgm_cleanup_func)(struct stack_frame_int *);
	void (*ext_cleanup_func)(struct stack_frame_int *);
	uint16_t addr;			/* CPU address as stored by stap() */
	uint16_t idx;			/* logical index of this CPU */
	bool active;
	bool pgm_int_expected;		/* a program interrupt is anticipated */
	bool ext_int_expected;		/* an external interrupt is anticipated */
	bool in_interrupt_handler;
};
88 
/* DAT address-space-control values, matching the 2-bit psw.as field. */
enum address_space {
	AS_PRIM = 0,	/* primary */
	AS_ACCR = 1,	/* access register */
	AS_SECN = 2,	/* secondary */
	AS_HOME = 3	/* home */
};
95 
/*
 * Bit masks for the 64-bit PSW mask; each corresponds to a field of
 * the bitfield overlay in struct psw.
 */
#define PSW_MASK_DAT			0x0400000000000000UL
#define PSW_MASK_HOME			0x0000C00000000000UL	/* as == AS_HOME */
#define PSW_MASK_IO			0x0200000000000000UL
#define PSW_MASK_EXT			0x0100000000000000UL
#define PSW_MASK_KEY			0x00F0000000000000UL
#define PSW_MASK_WAIT			0x0002000000000000UL
#define PSW_MASK_PSTATE			0x0001000000000000UL
#define PSW_MASK_EA			0x0000000100000000UL
#define PSW_MASK_BA			0x0000000080000000UL
/* EA + BA: 64-bit (z/Architecture) addressing mode */
#define PSW_MASK_64			(PSW_MASK_BA | PSW_MASK_EA)
106 
/*
 * Control register bit numbers.  They are written as (63 - n) to
 * convert the MSB-0 bit numbering used by the architecture
 * documentation into a shift count usable with ctl_set_bit() /
 * ctl_clear_bit().
 */
#define CTL0_TRANSACT_EX_CTL			(63 -  8)
#define CTL0_LOW_ADDR_PROT			(63 - 35)
#define CTL0_EDAT				(63 - 40)
#define CTL0_FETCH_PROTECTION_OVERRIDE		(63 - 38)
#define CTL0_STORAGE_PROTECTION_OVERRIDE	(63 - 39)
#define CTL0_IEP				(63 - 43)
#define CTL0_AFP				(63 - 45)
#define CTL0_VECTOR				(63 - 46)
#define CTL0_EMERGENCY_SIGNAL			(63 - 49)
#define CTL0_EXTERNAL_CALL			(63 - 50)
#define CTL0_CLOCK_COMPARATOR			(63 - 52)
#define CTL0_CPU_TIMER				(63 - 53)
#define CTL0_SERVICE_SIGNAL			(63 - 54)
#define CR0_EXTM_MASK			0x0000000000006200UL /* Combined external masks */

#define CTL2_GUARDED_STORAGE		(63 - 59)
123 
/* The prefix area spans two pages. */
#define LC_SIZE	(2 * PAGE_SIZE)
/*
 * Layout of the prefix page ("lowcore"): the architected interruption
 * parameters, old/new PSWs and machine save areas, plus a few
 * software-defined fields (marked "sw definition").  All offsets are
 * fixed; the trailing comments give each field's offset and the
 * static_assert below checks the total size.
 */
struct lowcore {
	uint8_t		pad_0x0000[0x0080 - 0x0000];	/* 0x0000 */
	uint32_t	ext_int_param;			/* 0x0080 */
	uint16_t	cpu_addr;			/* 0x0084 */
	uint16_t	ext_int_code;			/* 0x0086 */
	uint16_t	svc_int_id;			/* 0x0088 */
	uint16_t	svc_int_code;			/* 0x008a */
	uint16_t	pgm_int_id;			/* 0x008c */
	uint16_t	pgm_int_code;			/* 0x008e */
	uint32_t	dxc_vxc;			/* 0x0090 */
	uint16_t	mon_class_nb;			/* 0x0094 */
	uint8_t		per_code;			/* 0x0096 */
	uint8_t		per_atmid;			/* 0x0097 */
	uint64_t	per_addr;			/* 0x0098 */
	uint8_t		exc_acc_id;			/* 0x00a0 */
	uint8_t		per_acc_id;			/* 0x00a1 */
	uint8_t		op_acc_id;			/* 0x00a2 */
	uint8_t		arch_mode_id;			/* 0x00a3 */
	uint8_t		pad_0x00a4[0x00a8 - 0x00a4];	/* 0x00a4 */
	uint64_t	trans_exc_id;			/* 0x00a8 */
	uint64_t	mon_code;			/* 0x00b0 */
	uint32_t	subsys_id_word;			/* 0x00b8 */
	uint32_t	io_int_param;			/* 0x00bc */
	uint32_t	io_int_word;			/* 0x00c0 */
	uint8_t		pad_0x00c4[0x00c8 - 0x00c4];	/* 0x00c4 */
	uint32_t	stfl;				/* 0x00c8 */
	uint8_t		pad_0x00cc[0x00e8 - 0x00cc];	/* 0x00cc */
	uint64_t	mcck_int_code;			/* 0x00e8 */
	uint8_t		pad_0x00f0[0x00f4 - 0x00f0];	/* 0x00f0 */
	uint32_t	ext_damage_code;		/* 0x00f4 */
	uint64_t	failing_storage_addr;		/* 0x00f8 */
	uint64_t	emon_ca_origin;			/* 0x0100 */
	uint32_t	emon_ca_size;			/* 0x0108 */
	uint32_t	emon_exc_count;			/* 0x010c */
	uint64_t	breaking_event_addr;		/* 0x0110 */
	uint8_t		pad_0x0118[0x0120 - 0x0118];	/* 0x0118 */
	struct psw	restart_old_psw;		/* 0x0120 */
	struct psw	ext_old_psw;			/* 0x0130 */
	struct psw	svc_old_psw;			/* 0x0140 */
	struct psw	pgm_old_psw;			/* 0x0150 */
	struct psw	mcck_old_psw;			/* 0x0160 */
	struct psw	io_old_psw;			/* 0x0170 */
	uint8_t		pad_0x0180[0x01a0 - 0x0180];	/* 0x0180 */
	struct psw	restart_new_psw;		/* 0x01a0 */
	struct psw	ext_new_psw;			/* 0x01b0 */
	struct psw	svc_new_psw;			/* 0x01c0 */
	struct psw	pgm_new_psw;			/* 0x01d0 */
	struct psw	mcck_new_psw;			/* 0x01e0 */
	struct psw	io_new_psw;			/* 0x01f0 */
	/* sw definition: save area for registers in interrupt handlers */
	uint64_t	sw_int_grs[16];			/* 0x0200 */
	uint8_t		pad_0x0280[0x0308 - 0x0280];	/* 0x0280 */
	uint64_t	sw_int_crs[16];			/* 0x0308 */
	struct psw	sw_int_psw;			/* 0x0388 */
	struct cpu	*this_cpu;			/* 0x0398 */
	uint8_t		pad_0x03a0[0x11b0 - 0x03a0];	/* 0x03a0 */
	uint64_t	mcck_ext_sa_addr;		/* 0x11b0 */
	uint8_t		pad_0x11b8[0x1200 - 0x11b8];	/* 0x11b8 */
	uint64_t	fprs_sa[16];			/* 0x1200 */
	uint64_t	grs_sa[16];			/* 0x1280 */
	struct psw	psw_sa;				/* 0x1300 */
	uint8_t		pad_0x1310[0x1318 - 0x1310];	/* 0x1310 */
	uint32_t	prefix_sa;			/* 0x1318 */
	uint32_t	fpc_sa;				/* 0x131c */
	uint8_t		pad_0x1320[0x1324 - 0x1320];	/* 0x1320 */
	uint32_t	tod_pr_sa;			/* 0x1324 */
	uint64_t	cputm_sa;			/* 0x1328 */
	uint64_t	cc_sa;				/* 0x1330 */
	uint8_t		pad_0x1338[0x1340 - 0x1338];	/* 0x1338 */
	uint32_t	ars_sa[16];			/* 0x1340 */
	uint64_t	crs_sa[16];			/* 0x1380 */
	uint8_t		pad_0x1400[0x1800 - 0x1400];	/* 0x1400 */
	uint8_t		pgm_int_tdb[0x1900 - 0x1800];	/* 0x1800 */
} __attribute__ ((__packed__));
static_assert(sizeof(struct lowcore) == 0x1900);

/* Defined elsewhere (startup code / linker); the lowcore at prefix 0. */
extern struct lowcore lowcore;

/* Per-CPU data of the currently running CPU. */
#define THIS_CPU (lowcore.this_cpu)
204 
/*
 * Program interruption codes, as found in lowcore.pgm_int_code after a
 * program interrupt.
 */
#define PGM_INT_CODE_OPERATION			0x01
#define PGM_INT_CODE_PRIVILEGED_OPERATION	0x02
#define PGM_INT_CODE_EXECUTE			0x03
#define PGM_INT_CODE_PROTECTION			0x04
#define PGM_INT_CODE_ADDRESSING			0x05
#define PGM_INT_CODE_SPECIFICATION		0x06
#define PGM_INT_CODE_DATA			0x07
#define PGM_INT_CODE_FIXED_POINT_OVERFLOW	0x08
#define PGM_INT_CODE_FIXED_POINT_DIVIDE		0x09
#define PGM_INT_CODE_DECIMAL_OVERFLOW		0x0a
#define PGM_INT_CODE_DECIMAL_DIVIDE		0x0b
#define PGM_INT_CODE_HFP_EXPONENT_OVERFLOW	0x0c
#define PGM_INT_CODE_HFP_EXPONENT_UNDERFLOW	0x0d
#define PGM_INT_CODE_HFP_SIGNIFICANCE		0x0e
#define PGM_INT_CODE_HFP_DIVIDE			0x0f
#define PGM_INT_CODE_SEGMENT_TRANSLATION	0x10
#define PGM_INT_CODE_PAGE_TRANSLATION		0x11
#define PGM_INT_CODE_TRANSLATION_SPEC		0x12
#define PGM_INT_CODE_SPECIAL_OPERATION		0x13
#define PGM_INT_CODE_OPERAND			0x15
#define PGM_INT_CODE_TRACE_TABLE		0x16
#define PGM_INT_CODE_VECTOR_PROCESSING		0x1b
#define PGM_INT_CODE_SPACE_SWITCH_EVENT		0x1c
#define PGM_INT_CODE_HFP_SQUARE_ROOT		0x1d
#define PGM_INT_CODE_PC_TRANSLATION_SPEC	0x1f
#define PGM_INT_CODE_AFX_TRANSLATION		0x20
#define PGM_INT_CODE_ASX_TRANSLATION		0x21
#define PGM_INT_CODE_LX_TRANSLATION		0x22
#define PGM_INT_CODE_EX_TRANSLATION		0x23
#define PGM_INT_CODE_PRIMARY_AUTHORITY		0x24
#define PGM_INT_CODE_SECONDARY_AUTHORITY	0x25
#define PGM_INT_CODE_LFX_TRANSLATION		0x26
#define PGM_INT_CODE_LSX_TRANSLATION		0x27
#define PGM_INT_CODE_ALET_SPECIFICATION		0x28
#define PGM_INT_CODE_ALEN_TRANSLATION		0x29
#define PGM_INT_CODE_ALE_SEQUENCE		0x2a
#define PGM_INT_CODE_ASTE_VALIDITY		0x2b
#define PGM_INT_CODE_ASTE_SEQUENCE		0x2c
#define PGM_INT_CODE_EXTENDED_AUTHORITY		0x2d
#define PGM_INT_CODE_LSTE_SEQUENCE		0x2e
#define PGM_INT_CODE_ASTE_INSTANCE		0x2f
#define PGM_INT_CODE_STACK_FULL			0x30
#define PGM_INT_CODE_STACK_EMPTY		0x31
#define PGM_INT_CODE_STACK_SPECIFICATION	0x32
#define PGM_INT_CODE_STACK_TYPE			0x33
#define PGM_INT_CODE_STACK_OPERATION		0x34
#define PGM_INT_CODE_ASCE_TYPE			0x38
#define PGM_INT_CODE_REGION_FIRST_TRANS		0x39
#define PGM_INT_CODE_REGION_SECOND_TRANS	0x3a
#define PGM_INT_CODE_REGION_THIRD_TRANS		0x3b
#define PGM_INT_CODE_SECURE_STOR_ACCESS		0x3d
#define PGM_INT_CODE_NON_SECURE_STOR_ACCESS	0x3e
#define PGM_INT_CODE_SECURE_STOR_VIOLATION	0x3f
#define PGM_INT_CODE_MONITOR_EVENT		0x40
/* 0x80 is ORed into the code when a PER event is indicated concurrently */
#define PGM_INT_CODE_PER			0x80
#define PGM_INT_CODE_CRYPTO_OPERATION		0x119
#define PGM_INT_CODE_TX_ABORTED_EVENT		0x200
262 
/*
 * Bit layout of the 8-byte CPU ID; presumably matches the value stored
 * by stidp() below — NOTE(review): relies on MSB-first bitfield layout.
 */
struct cpuid {
	uint64_t version : 8;
	uint64_t id : 24;
	uint64_t type : 16;	/* machine type */
	uint64_t format : 1;
	uint64_t reserved : 15;
};

/* SVC number used by leave_pstate() to request supervisor state. */
#define SVC_LEAVE_PSTATE 1
272 
/*
 * stap - return the CPU address of the executing CPU, as stored by the
 * STORE CPU ADDRESS (stap) instruction.
 */
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=Q" (cpu_address));
	return cpu_address;
}
280 
/*
 * stidp - return the 8-byte CPU identification stored by the
 * STORE CPU ID (stidp) instruction; see struct cpuid for the layout.
 */
static inline uint64_t stidp(void)
{
	uint64_t cpuid;

	asm volatile("stidp %0" : "=Q" (cpuid));

	return cpuid;
}
289 
/* Condition codes returned by tprot(), mapped to their meaning. */
enum tprot_permission {
	TPROT_READ_WRITE = 0,	/* fetch and store permitted */
	TPROT_READ = 1,		/* fetch permitted, store protected */
	TPROT_RW_PROTECTED = 2,	/* fetch and store protected */
	TPROT_TRANSL_UNAVAIL = 3,	/* translation not available */
};
296 
/*
 * tprot - TEST PROTECTION: probe the accessibility of @addr with
 * @access_key (shifted into the key position of the second operand).
 * The resulting condition code is extracted via ipm/srl and returned
 * as an enum tprot_permission.
 */
static inline enum tprot_permission tprot(unsigned long addr, char access_key)
{
	int cc;

	asm volatile(
		"	tprot	0(%1),0(%2)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "a" (addr), "a" (access_key << 4) : "cc");
	return (enum tprot_permission)cc;
}
308 
/*
 * lctlg - load the 64-bit @value into control register @cr.
 * @cr must be a compile-time constant (it is encoded into the
 * instruction via the "i" constraint).
 */
static inline void lctlg(int cr, uint64_t value)
{
	asm volatile(
		"	lctlg	%1,%1,%0\n"
		: : "Q" (value), "i" (cr));
}
315 
/*
 * stctg - return the 64-bit contents of control register @cr.
 * @cr must be a compile-time constant (encoded via the "i" constraint).
 */
static inline uint64_t stctg(int cr)
{
	uint64_t value;

	asm volatile(
		"	stctg	%1,%1,%0\n"
		: "=Q" (value) : "i" (cr) : "memory");
	return value;
}
325 
326 static inline void ctl_set_bit(int cr, unsigned int bit)
327 {
328         uint64_t reg;
329 
330 	reg = stctg(cr);
331 	reg |= 1UL << bit;
332 	lctlg(cr, reg);
333 }
334 
335 static inline void ctl_clear_bit(int cr, unsigned int bit)
336 {
337         uint64_t reg;
338 
339 	reg = stctg(cr);
340 	reg &= ~(1UL << bit);
341 	lctlg(cr, reg);
342 }
343 
/*
 * extract_psw_mask - return the current 64-bit PSW mask.
 * EXTRACT PSW (epsw) stores the upper and lower halves of the mask
 * into two 32-bit registers, which are recombined here.
 */
static inline uint64_t extract_psw_mask(void)
{
	uint32_t mask_upper = 0, mask_lower = 0;

	asm volatile(
		"	epsw	%0,%1\n"
		: "=r" (mask_upper), "=a" (mask_lower));

	return (uint64_t) mask_upper << 32 | mask_lower;
}

/* Build a struct psw with the current PSW mask and the given address. */
#define PSW_WITH_CUR_MASK(addr) PSW(extract_psw_mask(), (addr))
356 
/*
 * load_psw_mask - replace the current PSW mask with @mask.
 *
 * A PSW is built on the stack with the new mask; its address field is
 * then patched (larl/stg) to point at the local label 0 so that the
 * lpswe resumes execution right after itself with @mask in effect.
 */
static inline void load_psw_mask(uint64_t mask)
{
	struct psw psw = {
		.mask = mask,
		.addr = 0,	/* filled in by the larl/stg below */
	};
	uint64_t tmp = 0;

	asm volatile(
		"	larl	%0,0f\n"
		"	stg	%0,8(%1)\n"
		"	lpswe	0(%1)\n"
		"0:\n"
		: "+r" (tmp) :  "a" (&psw) : "memory", "cc" );
}
372 
/*
 * disabled_wait - put the CPU into a disabled wait state.
 * @message: value placed in the PSW address field so the reason for
 *	     the wait can be inspected from outside the guest.
 *
 * Does not return (all interruptions are masked off).
 */
static inline void disabled_wait(uint64_t message)
{
	struct psw psw = {
		.mask = PSW_MASK_WAIT,  /* Disabled wait */
		.addr = message,
	};

	asm volatile("  lpswe 0(%0)\n" : : "a" (&psw) : "memory", "cc");
}
382 
/**
 * psw_mask_clear_bits - clears bits from the current PSW mask
 * @clear: bitmask of bits that will be cleared
 */
static inline void psw_mask_clear_bits(uint64_t clear)
{
	uint64_t new_mask = extract_psw_mask() & ~clear;

	load_psw_mask(new_mask);
}
391 
/**
 * psw_mask_set_bits - sets bits on the current PSW mask
 * @set: bitmask of bits that will be set
 */
static inline void psw_mask_set_bits(uint64_t set)
{
	uint64_t new_mask = extract_psw_mask() | set;

	load_psw_mask(new_mask);
}
400 
/**
 * psw_mask_clear_and_set_bits - clears and sets bits on the current PSW mask
 * @clear: bitmask of bits that will be cleared
 * @set: bitmask of bits that will be set
 *
 * The @clear bits are removed first, then the @set bits are applied,
 * so a bit present in both masks ends up set.
 */
static inline void psw_mask_clear_and_set_bits(uint64_t clear, uint64_t set)
{
	uint64_t mask = extract_psw_mask();

	mask &= ~clear;
	mask |= set;
	load_psw_mask(mask);
}
413 
414 /**
415  * enable_dat - enable the DAT bit in the current PSW
416  */
417 static inline void enable_dat(void)
418 {
419 	psw_mask_set_bits(PSW_MASK_DAT);
420 }
421 
422 /**
423  * disable_dat - disable the DAT bit in the current PSW
424  */
425 static inline void disable_dat(void)
426 {
427 	psw_mask_clear_bits(PSW_MASK_DAT);
428 }
429 
430 static inline void wait_for_interrupt(uint64_t irq_mask)
431 {
432 	uint64_t psw_mask = extract_psw_mask();
433 
434 	load_psw_mask(psw_mask | irq_mask | PSW_MASK_WAIT);
435 	/*
436 	 * After being woken and having processed the interrupt, let's restore
437 	 * the PSW mask.
438 	 */
439 	load_psw_mask(psw_mask);
440 }
441 
442 static inline void enter_pstate(void)
443 {
444 	psw_mask_set_bits(PSW_MASK_PSTATE);
445 }
446 
/*
 * leave_pstate - return to supervisor state by issuing SVC
 * SVC_LEAVE_PSTATE; presumably handled by the library's SVC interrupt
 * handler (not visible in this file).
 */
static inline void leave_pstate(void)
{
	asm volatile("	svc %0\n" : : "i" (SVC_LEAVE_PSTATE));
}
451 
/*
 * stsi - STORE SYSTEM INFORMATION
 * @addr: address the information block is stored to
 * @fc: function code (placed in the high nibble of GR0 together with
 *	@sel1, as required by the instruction)
 * @sel1: selector 1
 * @sel2: selector 2 (placed in GR1)
 *
 * GR0/GR1 are bound via local register variables because the
 * instruction implicitly uses those registers.  Returns the condition
 * code.
 */
static inline int stsi(void *addr, int fc, int sel1, int sel2)
{
	register int r0 asm("0") = (fc << 28) | sel1;
	register int r1 asm("1") = sel2;
	int cc;

	asm volatile(
		"stsi	0(%3)\n"
		"ipm	%[cc]\n"
		"srl	%[cc],28\n"
		: "+d" (r0), [cc] "=d" (cc)
		: "d" (r1), "a" (addr)
		: "cc", "memory");
	return cc;
}
467 
/*
 * stsi_get_fc - query the current STSI function code.
 *
 * Executes STSI with GR0 == 0, which makes the instruction store the
 * current function code back into GR0 instead of storing an
 * information block; the code is extracted from the returned register
 * value.  Asserts that the condition code is zero.
 */
static inline unsigned long stsi_get_fc(void)
{
	register unsigned long r0 asm("0") = 0;
	register unsigned long r1 asm("1") = 0;
	int cc;

	asm volatile("stsi	0\n"
		     "ipm	%[cc]\n"
		     "srl	%[cc],28\n"
		     : "+d" (r0), [cc] "=d" (cc)
		     : "d" (r1)
		     : "cc", "memory");
	assert(!cc);
	return r0 >> 28;
}
483 
/*
 * servc - SERVICE CALL (SCLP)
 * @command: SCLP command word
 * @sccb: address of the SCCB (service call control block)
 *
 * The instruction is emitted via .insn since assemblers may not know
 * the servc mnemonic.  Returns the condition code.
 */
static inline int servc(uint32_t command, unsigned long sccb)
{
	int cc;

	asm volatile(
		"       .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"       ipm     %0\n"
		"       srl     %0,28"
		: "=&d" (cc) : "d" (command), "a" (sccb)
		: "cc", "memory");
	return cc;
}
496 
/* set_prefix - SET PREFIX: relocate this CPU's lowcore to @new_prefix. */
static inline void set_prefix(uint32_t new_prefix)
{
	asm volatile("	spx %0" : : "Q" (new_prefix) : "memory");
}
501 
/* get_prefix - STORE PREFIX: return this CPU's current prefix value. */
static inline uint32_t get_prefix(void)
{
	uint32_t current_prefix;

	asm volatile("	stpx %0" : "=Q" (current_prefix));
	return current_prefix;
}
509 
/*
 * diag44 - issue DIAGNOSE 0x44; semantics are hypervisor-defined
 * (conventionally a voluntary time-slice yield — NOTE(review): confirm
 * against the hypervisor in use).
 */
static inline void diag44(void)
{
	asm volatile("diag	0,0,0x44\n");
}
514 
/*
 * diag500 - issue DIAGNOSE 0x500 with @val loaded into GR2 (hence the
 * "r2" clobber); semantics are hypervisor-defined — NOTE(review):
 * confirm the expected GR2 contents against the hypervisor interface.
 */
static inline void diag500(uint64_t val)
{
	asm volatile(
		"lgr	2,%[val]\n"
		"diag	0,0,0x500\n"
		:
		: [val] "d"(val)
		: "r2"
	);
}
525 
526 #endif
527