// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fault.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static DEFINE_STATIC_KEY_FALSE(have_store_indication);

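/*
 * With the store-indication facility (test_facility(75)) installed,
 * the TEID reliably reports whether the faulting access was a store;
 * see fault_is_write().
 */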
static int __init fault_init(void)
{
	if (test_facility(75))
		static_branch_enable(&have_store_indication);
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (teid.as == PSW_BITS_AS_SECONDARY)
		return USER_FAULT;
	/* Access register mode, not used in the kernel */
	if (teid.as == PSW_BITS_AS_ACCREG)
		return USER_FAULT;
	/* Home space -> access via kernel ASCE */
	return KERNEL_FAULT;
}

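/*
 * The TEID contains the failing address without the byte offset;
 * convert the page index back to a full address.
 */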
static unsigned long get_fault_address(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	return teid.addr * PAGE_SIZE;
}

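/*
 * Report whether the faulting access was a store. Without store
 * indication the access type is unknown and a fetch is assumed.
 */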
static __always_inline bool fault_is_write(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	if (static_branch_likely(&have_store_indication))
		return teid.fsi == TEID_FSI_STORE;
	return false;
}

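/*
 * Walk the page tables for @address under @asce and print the entry
 * found at each translation level. The walk ends early at an invalid
 * or large entry; an unreadable entry is reported as "BAD".
 */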
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R1:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R2:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R3:%016lx ", entry);
		if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("S:%016lx ", entry);
		if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (get_kernel_nofault(entry, table))
		goto bad;
	pr_cont("P:%016lx ", entry);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 get_fault_address(regs), teid.val);
	pr_alert("Fault in ");
	switch (teid.as) {
	case PSW_BITS_AS_HOME:
		pr_cont("home space ");
		break;
	case PSW_BITS_AS_SECONDARY:
		pr_cont("secondary space ");
		break;
	case PSW_BITS_AS_ACCREG:
		pr_cont("access register ");
		break;
	case PSW_BITS_AS_PRIMARY:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce.val;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *)S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce.val;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, get_fault_address(regs));
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!__ratelimit(&rs))
		return;
	pr_alert("User process fault: interruption code %04x ilc:%d ",
		 regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	pr_cont("\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

static void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
}

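/*
 * Handle a fault that could not be resolved: deliver SIGSEGV for user
 * mode faults; for kernel mode faults try exception table fixup and
 * KFENCE handling before dying with an Oops. Must be called without
 * the mmap_lock held.
 */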
static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (user_mode(regs)) {
		if (WARN_ON_ONCE(!si_code))
			si_code = SEGV_MAPERR;
		return do_sigsegv(regs, si_code);
	}
	if (fixup_exception(regs))
		return;
	fault_type = get_fault_type(regs);
	if (fault_type == KERNEL_FAULT) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	if (fault_type == KERNEL_FAULT)
		pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
	else
		pr_alert("Unable to handle kernel paging request in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

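/* Same as handle_fault_error_nolock(), but drops the mmap_lock first. */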
static void handle_fault_error(struct pt_regs *regs, int si_code)
{
	struct mm_struct *mm = current->mm;

	mmap_read_unlock(mm);
	handle_fault_error_nolock(regs, si_code);
}

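/* Deliver SIGBUS with the failing address to the current task. */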
static void do_sigbus(struct pt_regs *regs)
{
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *	04	Protection	    -> Write-Protection (suppression)
 *	10	Segment translation -> Not present	(nullification)
 *	11	Page translation    -> Not present	(nullification)
 *	3b	Region third trans. -> Not present	(nullification)
 */
static void do_exception(struct pt_regs *regs, int access)
{
	struct vm_area_struct *vma;
	unsigned long address;
	struct mm_struct *mm;
	enum fault_type type;
	unsigned int flags;
	struct gmap *gmap;
	vm_fault_t fault;
	bool is_write;

	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);
	if (kprobe_page_fault(regs, 14))
		return;
	mm = current->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		return handle_fault_error_nolock(regs, 0);
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			return handle_fault_error_nolock(regs, 0);
		break;
	}
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
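	/*
	 * User-mode faults try the lockless per-VMA fault path first;
	 * any obstacle (no VMA, wrong access rights, retry needed)
	 * falls back to the classic mmap_lock path below.
	 */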
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		if (unlikely(fault & VM_FAULT_ERROR))
			goto error;
		return;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
lock_mmap:
	mmap_read_lock(mm);
	gmap = NULL;
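	/*
	 * For guest (gmap) faults the guest address must first be
	 * translated to the corresponding host process address.
	 */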
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *)S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
retry:
	vma = find_vma(mm, address);
	if (!vma)
		return handle_fault_error(regs, SEGV_MAPERR);
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return handle_fault_error(regs, SEGV_MAPERR);
		vma = expand_stack(mm, address);
		if (!vma)
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
	}
	if (unlikely(!(vma->vm_flags & access)))
		return handle_fault_error(regs, SEGV_ACCERR);
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			mmap_read_unlock(mm);
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto gmap;
		}
		return;
	}
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mmap_read_unlock(mm);
		goto error;
	}
	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
		    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set,
			 * mmap_lock has not been released
			 */
			current->thread.gmap_pfault = 1;
			return handle_fault_error(regs, 0);
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			mmap_read_unlock(mm);
			goto error;
		}
	}
	mmap_read_unlock(mm);
	return;
error:
	if (fault & VM_FAULT_OOM) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			pagefault_out_of_memory();
	} else if (fault & VM_FAULT_SIGSEGV) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigsegv(regs, SEGV_MAPERR);
	} else if (fault & VM_FAULT_SIGBUS) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigbus(regs);
	} else {
		BUG();
	}
}

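/*
 * Entry point for protection exceptions (interruption code 0x04);
 * low-address protection and instruction-execution protection are
 * handled as special cases before the generic fault handling.
 */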
void do_protection_exception(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!teid.b61)) {
		if (user_mode(regs)) {
			/* Low-address protection in user mode: cannot happen */
			die(regs, "Low-address protection");
		}
		/*
		 * Low-address protection in kernel mode means
		 * NULL pointer write access in kernel mode.
		 */
		return handle_fault_error_nolock(regs, 0);
	}
	if (unlikely(MACHINE_HAS_NX && teid.b56)) {
		regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
		return handle_fault_error_nolock(regs, SEGV_ACCERR);
	}
	do_exception(regs, VM_WRITE);
}
NOKPROBE_SYMBOL(do_protection_exception);

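/*
 * Entry point for translation (DAT) exceptions. The access type is
 * unknown here, so any of read/write/execute may have faulted.
 */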
void do_dat_exception(struct pt_regs *regs)
{
	do_exception(regs, VM_ACCESS_FLAGS);
}
NOKPROBE_SYMBOL(do_dat_exception);

#if IS_ENABLED(CONFIG_PGSTE)

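/*
 * Handle secure storage access exceptions (PGM 0x3d): a page owned by
 * a protected guest was accessed from the host. Try to make the page
 * accessible again; failure results in SIGSEGV or a kernel BUG().
 */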
void do_secure_storage_access(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long addr = get_fault_address(regs);
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 indicates if the address is valid, if it is not the
	 * kernel should be stopped or SIGSEGV should be sent to the
	 * process. Bit 61 is not reliable without the misc UV feature,
	 * therefore this needs to be checked too.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}
		/*
		 * The kernel should never run into this case and
		 * there is no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}
	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr))
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma)
			return handle_fault_error(regs, SEGV_MAPERR);
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		unreachable();
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

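/*
 * A protected guest accessed a page that is currently not secure;
 * convert the page to secure so the guest access can be handled.
 */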
void do_non_secure_storage_access(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
		return handle_fault_error_nolock(regs, SEGV_MAPERR);
	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
			    current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */