/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 */
8 #ifndef _ASMS390X_ARCH_DEF_H_
9 #define _ASMS390X_ARCH_DEF_H_
10
/*
 * Standard s390x stack frame as laid out by the calling convention:
 * a back chain pointer followed by the register save areas. Offsets of
 * the GR save area line up with struct stack_frame_int below so that
 * GR14 (the return address) can be fetched the same way from either.
 */
struct stack_frame {
	struct stack_frame *back_chain;	/* previous frame, NULL at the outermost frame */
	uint64_t reserved;
	/* GRs 2 - 5 */
	uint64_t argument_area[4];
	/* GRs 6 - 15 */
	uint64_t grs[10];
	/* FPRs 0, 2, 4, 6 */
	int64_t fprs[4];
};
21
/*
 * Extended stack frame built by the interrupt entry code: saves all
 * GRs, the FP state and the control registers so handlers can inspect
 * and modify the interrupted context.
 */
struct stack_frame_int {
	struct stack_frame *back_chain;
	uint64_t reserved;
	/*
	 * The GRs are offset compatible with struct stack_frame so we
	 * can easily fetch GR14 for backtraces.
	 */
	/* GRs 2 - 15 */
	uint64_t grs0[14];
	/* GRs 0 and 1 */
	uint64_t grs1[2];
	uint32_t reserved1;
	uint32_t fpc;		/* floating-point control register */
	uint64_t fprs[16];
	uint64_t crs[16];	/* control registers 0 - 15 */
};
38
/*
 * Program status word (PSW). The 64-bit mask can be accessed either as
 * a raw value or through the named bitfields; the field declaration
 * order corresponds to the architected bit positions, most significant
 * bit first (matching the PSW_MASK_* constants below).
 */
struct psw {
	union {
		uint64_t mask;
		struct {
			uint64_t reserved00:1;
			uint64_t per:1;		/* PER event mask */
			uint64_t reserved02:3;
			uint64_t dat:1;		/* dynamic address translation */
			uint64_t io:1;		/* I/O interrupt mask */
			uint64_t ext:1;		/* external interrupt mask */
			uint64_t key:4;		/* PSW access key */
			uint64_t reserved12:1;
			uint64_t mchk:1;	/* machine-check mask */
			uint64_t wait:1;	/* wait state */
			uint64_t pstate:1;	/* problem state (user mode) */
			uint64_t as:2;		/* address-space control, see enum address_space */
			uint64_t cc:2;		/* condition code */
			uint64_t prg_mask:4;	/* program mask */
			uint64_t reserved24:7;
			uint64_t ea:1;		/* extended addressing mode */
			uint64_t ba:1;		/* basic addressing mode */
			uint64_t reserved33:31;
		};
	};
	uint64_t addr;	/* instruction address */
};
_Static_assert(sizeof(struct psw) == 16, "PSW size");

/* Construct a struct psw from a mask and an address-like value */
#define PSW(m, a) ((struct psw){ .mask = (m), .addr = (uint64_t)(a) })
68
/* Short (ESA-format) 8-byte PSW: 32-bit mask plus 31-bit address */
struct short_psw {
	uint32_t mask;
	uint32_t addr;
};
73
/* Per-CPU bookkeeping maintained by the test library */
struct cpu {
	struct lowcore *lowcore;	/* this CPU's prefix (lowcore) page */
	uint64_t *stack;
	/* optional callbacks run by the program/external interrupt handlers */
	void (*pgm_cleanup_func)(struct stack_frame_int *);
	void (*ext_cleanup_func)(struct stack_frame_int *);
	uint16_t addr;			/* CPU address, as stored by stap() */
	uint16_t idx;			/* presumably the index into the library's CPU array -- TODO confirm */
	bool active;
	bool pgm_int_expected;		/* the test anticipates the next program interrupt */
	bool ext_int_expected;		/* the test anticipates the next external interrupt */
	bool in_interrupt_handler;
};
86
/* Values of the PSW address-space control (AS) field */
enum address_space {
	AS_PRIM = 0,	/* primary */
	AS_ACCR = 1,	/* access register */
	AS_SECN = 2,	/* secondary */
	AS_HOME = 3	/* home */
};
93
/*
 * Bit masks for the 64-bit PSW mask word; they correspond to the
 * bitfields in struct psw above.
 */
#define PSW_MASK_DAT			0x0400000000000000UL
#define PSW_MASK_HOME			0x0000C00000000000UL
#define PSW_MASK_IO			0x0200000000000000UL
#define PSW_MASK_EXT			0x0100000000000000UL
#define PSW_MASK_KEY			0x00F0000000000000UL
#define PSW_MASK_WAIT			0x0002000000000000UL
#define PSW_MASK_PSTATE			0x0001000000000000UL
#define PSW_MASK_EA			0x0000000100000000UL
#define PSW_MASK_BA			0x0000000080000000UL
/* 64-bit addressing mode: both EA and BA set */
#define PSW_MASK_64			(PSW_MASK_BA | PSW_MASK_EA)
104
/*
 * Control register bit numbers, expressed as shift amounts for use
 * with ctl_set_bit()/ctl_clear_bit(). The architecture numbers CR
 * bits MSB-first (bit 0 is the most significant), hence (63 - bit).
 */
#define CTL0_TRANSACT_EX_CTL			(63 - 8)
#define CTL0_LOW_ADDR_PROT			(63 - 35)
#define CTL0_EDAT				(63 - 40)
#define CTL0_FETCH_PROTECTION_OVERRIDE		(63 - 38)
#define CTL0_STORAGE_PROTECTION_OVERRIDE	(63 - 39)
#define CTL0_IEP				(63 - 43)
#define CTL0_AFP				(63 - 45)
#define CTL0_VECTOR				(63 - 46)
#define CTL0_EMERGENCY_SIGNAL			(63 - 49)
#define CTL0_EXTERNAL_CALL			(63 - 50)
#define CTL0_CLOCK_COMPARATOR			(63 - 52)
#define CTL0_CPU_TIMER				(63 - 53)
#define CTL0_SERVICE_SIGNAL			(63 - 54)
#define CR0_EXTM_MASK			0x0000000000006200UL /* Combined external masks */

#define CTL2_GUARDED_STORAGE		(63 - 59)
121
#define LC_SIZE	(2 * PAGE_SIZE)
/*
 * The s390x lowcore (prefix area): the architected assigned storage
 * locations at absolute address 0 of each CPU (relocated per CPU via
 * set_prefix()). Offsets are given in the trailing comments; the
 * layout is fixed by the architecture, so explicit padding and the
 * packed attribute keep the fields at their exact offsets, checked by
 * the size assertion below. Fields marked "sw definition" are used by
 * this library rather than by the hardware.
 */
struct lowcore {
	uint8_t		pad_0x0000[0x0080 - 0x0000];	/* 0x0000 */
	uint32_t	ext_int_param;			/* 0x0080 */
	uint16_t	cpu_addr;			/* 0x0084 */
	uint16_t	ext_int_code;			/* 0x0086 */
	uint16_t	svc_int_id;			/* 0x0088 */
	uint16_t	svc_int_code;			/* 0x008a */
	uint16_t	pgm_int_id;			/* 0x008c */
	uint16_t	pgm_int_code;			/* 0x008e */
	uint32_t	dxc_vxc;			/* 0x0090 */
	uint16_t	mon_class_nb;			/* 0x0094 */
	uint8_t		per_code;			/* 0x0096 */
	uint8_t		per_atmid;			/* 0x0097 */
	uint64_t	per_addr;			/* 0x0098 */
	uint8_t		exc_acc_id;			/* 0x00a0 */
	uint8_t		per_acc_id;			/* 0x00a1 */
	uint8_t		op_acc_id;			/* 0x00a2 */
	uint8_t		arch_mode_id;			/* 0x00a3 */
	uint8_t		pad_0x00a4[0x00a8 - 0x00a4];	/* 0x00a4 */
	uint64_t	trans_exc_id;			/* 0x00a8 */
	uint64_t	mon_code;			/* 0x00b0 */
	uint32_t	subsys_id_word;			/* 0x00b8 */
	uint32_t	io_int_param;			/* 0x00bc */
	uint32_t	io_int_word;			/* 0x00c0 */
	uint8_t		pad_0x00c4[0x00c8 - 0x00c4];	/* 0x00c4 */
	uint32_t	stfl;				/* 0x00c8 */
	uint8_t		pad_0x00cc[0x00e8 - 0x00cc];	/* 0x00cc */
	uint64_t	mcck_int_code;			/* 0x00e8 */
	uint8_t		pad_0x00f0[0x00f4 - 0x00f0];	/* 0x00f0 */
	uint32_t	ext_damage_code;		/* 0x00f4 */
	uint64_t	failing_storage_addr;		/* 0x00f8 */
	uint64_t	emon_ca_origin;			/* 0x0100 */
	uint32_t	emon_ca_size;			/* 0x0108 */
	uint32_t	emon_exc_count;			/* 0x010c */
	uint64_t	breaking_event_addr;		/* 0x0110 */
	uint8_t		pad_0x0118[0x0120 - 0x0118];	/* 0x0118 */
	/* old PSWs: saved by the CPU when the respective interrupt occurs */
	struct psw	restart_old_psw;		/* 0x0120 */
	struct psw	ext_old_psw;			/* 0x0130 */
	struct psw	svc_old_psw;			/* 0x0140 */
	struct psw	pgm_old_psw;			/* 0x0150 */
	struct psw	mcck_old_psw;			/* 0x0160 */
	struct psw	io_old_psw;			/* 0x0170 */
	uint8_t		pad_0x0180[0x01a0 - 0x0180];	/* 0x0180 */
	/* new PSWs: loaded by the CPU to enter the respective handler */
	struct psw	restart_new_psw;		/* 0x01a0 */
	struct psw	ext_new_psw;			/* 0x01b0 */
	struct psw	svc_new_psw;			/* 0x01c0 */
	struct psw	pgm_new_psw;			/* 0x01d0 */
	struct psw	mcck_new_psw;			/* 0x01e0 */
	struct psw	io_new_psw;			/* 0x01f0 */
	/* sw definition: save area for registers in interrupt handlers */
	uint64_t	sw_int_grs[16];			/* 0x0200 */
	uint8_t		pad_0x0280[0x0308 - 0x0280];	/* 0x0280 */
	uint64_t	sw_int_crs[16];			/* 0x0308 */
	struct psw	sw_int_psw;			/* 0x0388 */
	struct cpu	*this_cpu;			/* 0x0398 */
	uint8_t		pad_0x03a0[0x11b0 - 0x03a0];	/* 0x03a0 */
	uint64_t	mcck_ext_sa_addr;		/* 0x11b0 */
	uint8_t		pad_0x11b8[0x1200 - 0x11b8];	/* 0x11b8 */
	/* machine-check / store-status save areas */
	uint64_t	fprs_sa[16];			/* 0x1200 */
	uint64_t	grs_sa[16];			/* 0x1280 */
	struct psw	psw_sa;				/* 0x1300 */
	uint8_t		pad_0x1310[0x1318 - 0x1310];	/* 0x1310 */
	uint32_t	prefix_sa;			/* 0x1318 */
	uint32_t	fpc_sa;				/* 0x131c */
	uint8_t		pad_0x1320[0x1324 - 0x1320];	/* 0x1320 */
	uint32_t	tod_pr_sa;			/* 0x1324 */
	uint64_t	cputm_sa;			/* 0x1328 */
	uint64_t	cc_sa;				/* 0x1330 */
	uint8_t		pad_0x1338[0x1340 - 0x1338];	/* 0x1338 */
	uint32_t	ars_sa[16];			/* 0x1340 */
	uint64_t	crs_sa[16];			/* 0x1380 */
	uint8_t		pad_0x1400[0x1800 - 0x1400];	/* 0x1400 */
	uint8_t		pgm_int_tdb[0x1900 - 0x1800];	/* 0x1800 */
} __attribute__ ((__packed__));
_Static_assert(sizeof(struct lowcore) == 0x1900, "Lowcore size");
198
/* The lowcore of the boot CPU; defined outside this header */
extern struct lowcore lowcore;

/* Current CPU's struct cpu, stashed in the lowcore by the library */
#define THIS_CPU (lowcore.this_cpu)
202
/*
 * Program-interruption codes, as stored into lowcore.pgm_int_code when
 * a program interrupt occurs. PGM_INT_CODE_PER (0x80) is ORed into the
 * base code when a PER event is reported concurrently.
 */
#define PGM_INT_CODE_OPERATION			0x01
#define PGM_INT_CODE_PRIVILEGED_OPERATION	0x02
#define PGM_INT_CODE_EXECUTE			0x03
#define PGM_INT_CODE_PROTECTION			0x04
#define PGM_INT_CODE_ADDRESSING			0x05
#define PGM_INT_CODE_SPECIFICATION		0x06
#define PGM_INT_CODE_DATA			0x07
#define PGM_INT_CODE_FIXED_POINT_OVERFLOW	0x08
#define PGM_INT_CODE_FIXED_POINT_DIVIDE		0x09
#define PGM_INT_CODE_DECIMAL_OVERFLOW		0x0a
#define PGM_INT_CODE_DECIMAL_DIVIDE		0x0b
#define PGM_INT_CODE_HFP_EXPONENT_OVERFLOW	0x0c
#define PGM_INT_CODE_HFP_EXPONENT_UNDERFLOW	0x0d
#define PGM_INT_CODE_HFP_SIGNIFICANCE		0x0e
#define PGM_INT_CODE_HFP_DIVIDE			0x0f
#define PGM_INT_CODE_SEGMENT_TRANSLATION	0x10
#define PGM_INT_CODE_PAGE_TRANSLATION		0x11
#define PGM_INT_CODE_TRANSLATION_SPEC		0x12
#define PGM_INT_CODE_SPECIAL_OPERATION		0x13
#define PGM_INT_CODE_OPERAND			0x15
#define PGM_INT_CODE_TRACE_TABLE		0x16
#define PGM_INT_CODE_VECTOR_PROCESSING		0x1b
#define PGM_INT_CODE_SPACE_SWITCH_EVENT		0x1c
#define PGM_INT_CODE_HFP_SQUARE_ROOT		0x1d
#define PGM_INT_CODE_PC_TRANSLATION_SPEC	0x1f
#define PGM_INT_CODE_AFX_TRANSLATION		0x20
#define PGM_INT_CODE_ASX_TRANSLATION		0x21
#define PGM_INT_CODE_LX_TRANSLATION		0x22
#define PGM_INT_CODE_EX_TRANSLATION		0x23
#define PGM_INT_CODE_PRIMARY_AUTHORITY		0x24
#define PGM_INT_CODE_SECONDARY_AUTHORITY	0x25
#define PGM_INT_CODE_LFX_TRANSLATION		0x26
#define PGM_INT_CODE_LSX_TRANSLATION		0x27
#define PGM_INT_CODE_ALET_SPECIFICATION		0x28
#define PGM_INT_CODE_ALEN_TRANSLATION		0x29
#define PGM_INT_CODE_ALE_SEQUENCE		0x2a
#define PGM_INT_CODE_ASTE_VALIDITY		0x2b
#define PGM_INT_CODE_ASTE_SEQUENCE		0x2c
#define PGM_INT_CODE_EXTENDED_AUTHORITY		0x2d
#define PGM_INT_CODE_LSTE_SEQUENCE		0x2e
#define PGM_INT_CODE_ASTE_INSTANCE		0x2f
#define PGM_INT_CODE_STACK_FULL			0x30
#define PGM_INT_CODE_STACK_EMPTY		0x31
#define PGM_INT_CODE_STACK_SPECIFICATION	0x32
#define PGM_INT_CODE_STACK_TYPE			0x33
#define PGM_INT_CODE_STACK_OPERATION		0x34
#define PGM_INT_CODE_ASCE_TYPE			0x38
#define PGM_INT_CODE_REGION_FIRST_TRANS		0x39
#define PGM_INT_CODE_REGION_SECOND_TRANS	0x3a
#define PGM_INT_CODE_REGION_THIRD_TRANS		0x3b
#define PGM_INT_CODE_SECURE_STOR_ACCESS		0x3d
#define PGM_INT_CODE_NON_SECURE_STOR_ACCESS	0x3e
#define PGM_INT_CODE_SECURE_STOR_VIOLATION	0x3f
#define PGM_INT_CODE_MONITOR_EVENT		0x40
#define PGM_INT_CODE_PER			0x80
#define PGM_INT_CODE_CRYPTO_OPERATION		0x119
#define PGM_INT_CODE_TX_ABORTED_EVENT		0x200
260
/*
 * Decomposition of the 64-bit CPU ID as returned by stidp(); field
 * order corresponds to the architected bit positions, MSB first.
 */
struct cpuid {
	uint64_t version : 8;
	uint64_t id : 24;
	uint64_t type : 16;	/* machine type */
	uint64_t format : 1;
	uint64_t reserved : 15;
};
268
/* SVC number issued by leave_pstate() to return to supervisor state */
#define SVC_LEAVE_PSTATE	1
270
/* STORE CPU ADDRESS: return the CPU address of the executing CPU */
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=Q" (cpu_address));
	return cpu_address;
}
278
/*
 * STORE CPU ID: return the raw 64-bit CPU identifier.
 * See struct cpuid for the field layout.
 */
static inline uint64_t stidp(void)
{
	uint64_t cpuid;

	asm volatile("stidp %0" : "=Q" (cpuid));

	return cpuid;
}
287
/* Condition codes returned by tprot(), named after their meaning */
enum tprot_permission {
	TPROT_READ_WRITE = 0,		/* fetch and store permitted */
	TPROT_READ = 1,			/* fetch permitted, store protected */
	TPROT_RW_PROTECTED = 2,		/* fetch and store protected */
	TPROT_TRANSL_UNAVAIL = 3,	/* translation not available */
};
294
/*
 * TEST PROTECTION: probe the access permission of @addr under
 * @access_key. The key goes into bits 0-3 of the second-operand
 * address, hence the shift by 4. The instruction's condition code is
 * extracted via ipm/srl and returned as an enum tprot_permission.
 */
static inline enum tprot_permission tprot(unsigned long addr, char access_key)
{
	int cc;

	asm volatile(
		"	tprot	0(%1),0(%2)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "a" (addr), "a" (access_key << 4) : "cc");
	return (enum tprot_permission)cc;
}
306
/*
 * LOAD CONTROL: load @value into control register @cr.
 * @cr must be a compile-time constant (it is encoded into the
 * instruction via the "i" constraint).
 */
static inline void lctlg(int cr, uint64_t value)
{
	asm volatile(
		"	lctlg	%1,%1,%0\n"
		: : "Q" (value), "i" (cr));
}
313
/*
 * STORE CONTROL: return the contents of control register @cr.
 * @cr must be a compile-time constant (encoded via the "i" constraint).
 */
static inline uint64_t stctg(int cr)
{
	uint64_t value;

	asm volatile(
		"	stctg	%1,%1,%0\n"
		: "=Q" (value) : "i" (cr) : "memory");
	return value;
}
323
/*
 * Set bit @bit (a shift amount, e.g. one of the CTL0_* constants) in
 * control register @cr via a read-modify-write of the register.
 */
static inline void ctl_set_bit(int cr, unsigned int bit)
{
	lctlg(cr, stctg(cr) | (1UL << bit));
}
332
/*
 * Clear bit @bit (a shift amount, e.g. one of the CTL0_* constants) in
 * control register @cr via a read-modify-write of the register.
 */
static inline void ctl_clear_bit(int cr, unsigned int bit)
{
	lctlg(cr, stctg(cr) & ~(1UL << bit));
}
341
/*
 * EXTRACT PSW: return the current 64-bit PSW mask. epsw stores the
 * upper and lower 32 bits into two registers, which are recombined
 * here.
 */
static inline uint64_t extract_psw_mask(void)
{
	uint32_t mask_upper = 0, mask_lower = 0;

	asm volatile(
		"	epsw	%0,%1\n"
		: "=r" (mask_upper), "=a" (mask_lower));

	return (uint64_t) mask_upper << 32 | mask_lower;
}

/* Build a struct psw with the current PSW mask and the given address */
#define PSW_WITH_CUR_MASK(addr) PSW(extract_psw_mask(), (addr))
354
/*
 * Replace the current PSW mask with @mask while continuing execution
 * at the next instruction: the address of local label 0 is stored into
 * the address half of the on-stack PSW before it is loaded via lpswe.
 */
static inline void load_psw_mask(uint64_t mask)
{
	struct psw psw = {
		.mask = mask,
		.addr = 0,
	};
	uint64_t tmp = 0;

	asm volatile(
		"	larl	%0,0f\n"
		"	stg	%0,8(%1)\n"
		"	lpswe	0(%1)\n"
		"0:\n"
		: "+r" (tmp) :  "a" (&psw) : "memory", "cc" );
}
370
/*
 * Stop this CPU in a disabled-wait PSW (all interrupts masked off).
 * @message is placed in the PSW address field so it is visible to an
 * observer inspecting the stopped CPU's PSW. Does not return.
 */
static inline void disabled_wait(uint64_t message)
{
	struct psw psw = {
		.mask = PSW_MASK_WAIT, /* Disabled wait */
		.addr = message,
	};

	asm volatile("	lpswe	0(%0)\n" : : "a" (&psw) : "memory", "cc");
}
380
/**
 * psw_mask_clear_bits - clears bits from the current PSW mask
 * @clear: bitmask of bits that will be cleared
 */
static inline void psw_mask_clear_bits(uint64_t clear)
{
	uint64_t mask = extract_psw_mask();

	mask &= ~clear;
	load_psw_mask(mask);
}
389
/**
 * psw_mask_set_bits - sets bits on the current PSW mask
 * @set: bitmask of bits that will be set
 */
static inline void psw_mask_set_bits(uint64_t set)
{
	uint64_t mask = extract_psw_mask();

	mask |= set;
	load_psw_mask(mask);
}
398
/**
 * psw_mask_clear_and_set_bits - clears and sets bits on the current PSW mask
 * @clear: bitmask of bits that will be cleared
 * @set: bitmask of bits that will be set
 *
 * The bits in the @clear mask will be cleared, then the bits in the @set mask
 * will be set.
 */
static inline void psw_mask_clear_and_set_bits(uint64_t clear, uint64_t set)
{
	uint64_t mask = extract_psw_mask();

	mask &= ~clear;
	mask |= set;
	load_psw_mask(mask);
}
411
412 /**
413 * enable_dat - enable the DAT bit in the current PSW
414 */
enable_dat(void)415 static inline void enable_dat(void)
416 {
417 psw_mask_set_bits(PSW_MASK_DAT);
418 }
419
420 /**
421 * disable_dat - disable the DAT bit in the current PSW
422 */
disable_dat(void)423 static inline void disable_dat(void)
424 {
425 psw_mask_clear_bits(PSW_MASK_DAT);
426 }
427
wait_for_interrupt(uint64_t irq_mask)428 static inline void wait_for_interrupt(uint64_t irq_mask)
429 {
430 uint64_t psw_mask = extract_psw_mask();
431
432 load_psw_mask(psw_mask | irq_mask | PSW_MASK_WAIT);
433 /*
434 * After being woken and having processed the interrupt, let's restore
435 * the PSW mask.
436 */
437 load_psw_mask(psw_mask);
438 }
439
enter_pstate(void)440 static inline void enter_pstate(void)
441 {
442 psw_mask_set_bits(PSW_MASK_PSTATE);
443 }
444
/*
 * Return from problem state to supervisor state by issuing the
 * SVC_LEAVE_PSTATE supervisor call; presumably the library's SVC
 * handler performs the actual privilege change -- see the interrupt
 * handling code.
 */
static inline void leave_pstate(void)
{
	asm volatile("	svc	%0\n" : : "i" (SVC_LEAVE_PSTATE));
}
449
/*
 * STORE SYSTEM INFORMATION: store the SYSIB selected by @fc/@sel1/@sel2
 * into the buffer at @addr. The function code and selector 1 go into
 * GR0, selector 2 into GR1, as required by the instruction.
 * Returns the condition code (0 on success).
 */
static inline int stsi(void *addr, int fc, int sel1, int sel2)
{
	register int r0 asm("0") = (fc << 28) | sel1;
	register int r1 asm("1") = sel2;
	int cc;

	asm volatile(
		"stsi	0(%3)\n"
		"ipm	%[cc]\n"
		"srl	%[cc],28\n"
		: "+d" (r0), [cc] "=d" (cc)
		: "d" (r1), "a" (addr)
		: "cc", "memory");
	return cc;
}
465
/*
 * Query the current configuration level: stsi with GR0 = 0 places the
 * highest supported function code into bits 32-35 of GR0 (hence the
 * final shift). The condition code is asserted to be 0.
 */
static inline unsigned long stsi_get_fc(void)
{
	register unsigned long r0 asm("0") = 0;
	register unsigned long r1 asm("1") = 0;
	int cc;

	asm volatile("stsi	0\n"
		     "ipm	%[cc]\n"
		     "srl	%[cc],28\n"
		     : "+d" (r0), [cc] "=d" (cc)
		     : "d" (r1)
		     : "cc", "memory");
	assert(!cc);
	return r0 >> 28;
}
481
/*
 * SERVICE CALL (SCLP): issue @command with the SCCB at @sccb. The
 * instruction is emitted via .insn since not all assemblers know the
 * servc mnemonic. Returns the condition code.
 */
static inline int servc(uint32_t command, unsigned long sccb)
{
	int cc;

	asm volatile(
		"       .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"       ipm     %0\n"
		"       srl     %0,28"
		: "=&d" (cc) : "d" (command), "a" (sccb)
		: "cc", "memory");
	return cc;
}
494
/* SET PREFIX: relocate this CPU's lowcore to @new_prefix */
static inline void set_prefix(uint32_t new_prefix)
{
	asm volatile("	spx %0" : : "Q" (new_prefix) : "memory");
}
499
/* STORE PREFIX: return this CPU's current prefix (lowcore) address */
static inline uint32_t get_prefix(void)
{
	uint32_t current_prefix;

	asm volatile("	stpx %0" : "=Q" (current_prefix));
	return current_prefix;
}
507
/*
 * Issue DIAGNOSE 0x44 -- presumably a voluntary time-slice yield to
 * the hypervisor; confirm against the hypervisor's diagnose-code
 * documentation.
 */
static inline void diag44(void)
{
	asm volatile("diag	0,0,0x44\n");
}
512
/*
 * Issue DIAGNOSE 0x500 with @val in GR2 (hence the explicit lgr and
 * the "r2" clobber). NOTE(review): looks like a KVM-specific hypercall
 * interface -- verify against the hypervisor documentation.
 */
static inline void diag500(uint64_t val)
{
	asm volatile(
		"lgr	2,%[val]\n"
		"diag	0,0,0x500\n"
		:
		: [val] "d"(val)
		: "r2"
	);
}
523
524 #endif
525