Lines Matching full:fault
6 * Derived from "arch/i386/mm/fault.c"
109 * 5. T1 : enters fault handler, takes mmap_lock, etc... in bad_access_pkey()
137 vm_fault_t fault) in do_sigbus() argument
144 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { in do_sigbus()
147 pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", in do_sigbus()
150 if (fault & VM_FAULT_HWPOISON_LARGE) in do_sigbus()
151 lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); in do_sigbus()
152 if (fault & VM_FAULT_HWPOISON) in do_sigbus()
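The do_sigbus() fragments above are the memory-failure path: a poisoned hugepage records the granule size from its hstate index, a normally poisoned page uses PAGE_SHIFT, and the task is killed with a machine-check SIGBUS. A minimal sketch of how those checks fit together, assuming the usual force_sig_mceerr()/force_sig_fault() delivery and a plain "return SIGBUS for kernel mode" early exit (a reconstruction around the matched lines, not the verbatim source):

/* Kernel context assumed: <linux/mm.h>, <linux/sched/signal.h>, <linux/hugetlb.h>. */
static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;	/* let the caller handle kernel-mode faults */

	if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0;	/* log2 size of the poisoned region */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
		       current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}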
165 vm_fault_t fault) in mm_fault_error() argument
168 * Kernel page fault interrupted by SIGKILL. We have no reason to in mm_fault_error()
175 if (fault & VM_FAULT_OOM) { in mm_fault_error()
178 * made us unable to handle the page fault gracefully. in mm_fault_error()
184 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| in mm_fault_error()
186 return do_sigbus(regs, addr, fault); in mm_fault_error()
187 else if (fault & VM_FAULT_SIGSEGV) in mm_fault_error()
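mm_fault_error() dispatches the VM_FAULT_* error bits returned by handle_mm_fault(). A sketch of that dispatch under these assumptions: a pending SIGKILL on a kernel fault simply returns the signal number, and the SIGSEGV case goes through a bad_area_nosemaphore()-style helper (that helper name is not shown in the matches):

static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing; the fatal signal is handled on the way out.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or something else happened that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON |
		     VM_FAULT_HWPOISON_LARGE))
		return do_sigbus(regs, addr, fault);
	else if (fault & VM_FAULT_SIGSEGV)
		return bad_area_nosemaphore(regs, addr);	/* assumed helper: deliver SIGSEGV */

	return 0;
}

Returning a signal number instead of raising it directly matches the interface comment further down in this listing: 0 means handled, non-zero is a kernel fault the caller has to deal with.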
195 /* Is this a bad kernel fault ? */
209 // Kernel exec fault is always bad in bad_kernel_fault()
220 // Kernel fault on kernel address is bad in bad_kernel_fault()
224 // Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad in bad_kernel_fault()
228 // Read/write fault in a valid region (the exception table search passed in bad_kernel_fault()
233 // What's left? Kernel fault on user in well defined regions (extable in bad_kernel_fault()
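bad_kernel_fault() turns the matched comments into a yes/no decision: kernel execute faults are always fatal, so are faults on kernel addresses, and faults on user addresses are only tolerated inside accessor regions covered by the exception table and not blocked by KUAP. A sketch under those assumptions (the 0x400 instruction-storage trap number and the bad_kuap_fault() helper are Book3S details assumed here):

static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	/* Kernel exec fault is always bad. */
	if (TRAP(regs) == 0x400)
		return true;

	/* Kernel fault on a kernel address is bad. */
	if (address >= TASK_SIZE)
		return true;

	/* Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad. */
	if (!search_exception_tables(regs->nip))
		return true;

	/*
	 * Read/write fault in a valid region (the exception table search
	 * passed above), but blocked by KUAP, can never succeed.
	 */
	if (bad_kuap_fault(regs, address, is_write))
		return true;

	/* What's left? Kernel fault on user in well defined regions (extable matched). */
	return false;
}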
244 * faults just to hit a pkey fault as soon as we fill in a in access_pkey_error()
282 * fault path, handle_mm_fault() also does the same check. To avoid in access_error()
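access_pkey_error() and access_error() are the per-VMA permission checks done before handle_mm_fault(); as the matched comment notes, the core fault path repeats the check, so the arch-level test mainly exists to fail fast with the right signal. A rough sketch of the non-pkey check, assuming plain vm_flags semantics and ignoring the legacy "exec from a readable mapping" special case:

static bool access_error(bool is_write, bool is_exec,
			 struct vm_area_struct *vma)
{
	if (is_exec)
		return !(vma->vm_flags & VM_EXEC);

	if (is_write)
		return !(vma->vm_flags & VM_WRITE);

	/* A read needs the region to be readable in some form. */
	return !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC));
}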
327 * fault instead of DSISR_PROTFAULT. in sanity_check_fault()
336 * sync between D/I cache via fault. But that is handled via low level in sanity_check_fault()
337 * hash fault code (hash_page_do_lazy_icache()) and we should not reach in sanity_check_fault()
346 * set_pte_at while taking the noexec/prot fault. Hence this is WARN_ON in sanity_check_fault()
349 * For radix, we can get prot fault for autonuma case, because radix in sanity_check_fault()
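The sanity_check_fault() comments argue that a protection fault (DSISR_PROTFAULT) on a plain read should not reach this point on hash MMUs, because lazy D/I-cache sync is resolved in the low-level hash fault code, while radix can hit it legitimately for autonuma. The check therefore reduces to a warning, roughly like this tail-of-function sketch (the radix_enabled()/is_write early-out is an assumption):

	/* Radix (autonuma) and write faults can legitimately see PROTFAULT. */
	if (radix_enabled() || is_write)
		return true;

	/* A PROTFAULT on a hash-MMU read fault is unexpected: warn once. */
	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
	return true;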
382 * for a data fault, SRR1 for an instruction fault. For 400-family processors
383 * the error_code parameter is ESR for a data fault, 0 for an instruction
384 * fault.
386 * - DSISR for a non-SLB data access fault,
387 * - SRR1 & 0x08000000 for a non-SLB instruction access fault
388 * - 0 any SLB fault.
390 * The return value is 0 if the fault was handled, or the signal
391 * number if this is a kernel fault that can't be handled here.
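On Book3S the interface described above means error_code is the DSISR for data faults and SRR1 for instruction faults, so the access type can be derived roughly as below (DSISR_ISSTORE and the 0x400 trap number are Book3S assumptions; 400-family parts use ESR bits instead):

	int is_exec  = TRAP(regs) == 0x400;		/* instruction storage interrupt */
	int is_write = error_code & DSISR_ISSTORE;	/* store bit of the DSISR */
	int is_user  = user_mode(regs);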
402 vm_fault_t fault, major = 0; in __do_page_fault() local
420 * The kernel should never take an execute fault nor should it in __do_page_fault()
421 * take a page fault to a kernel address or a page fault to a user in __do_page_fault()
429 * in a region with pagefaults disabled then we must not take the fault in __do_page_fault()
433 printk_ratelimited(KERN_ERR "Page fault in user mode" in __do_page_fault()
448 * can result in fault, which will cause a deadlock when called with in __do_page_fault()
461 * erroneous fault occurring in a code path which already holds mmap_lock in __do_page_fault()
462 * we will deadlock attempting to validate the fault against the in __do_page_fault()
510 * If for any reason at all we couldn't handle the fault, in __do_page_fault()
512 * the fault. in __do_page_fault()
514 fault = handle_mm_fault(vma, address, flags, regs); in __do_page_fault()
516 major |= fault & VM_FAULT_MAJOR; in __do_page_fault()
518 if (fault_signal_pending(fault, regs)) in __do_page_fault()
525 if (unlikely(fault & VM_FAULT_RETRY)) { in __do_page_fault()
534 if (unlikely(fault & VM_FAULT_ERROR)) in __do_page_fault()
535 return mm_fault_error(regs, address, fault); in __do_page_fault()
538 * Major/minor page fault accounting. in __do_page_fault()
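Taken together, the __do_page_fault() fragments describe the standard mmap_lock / handle_mm_fault() retry loop. A condensed sketch of that core, with the kernel-address and VMA/access validation elided and the return values for the signal-pending case assumed:

static int __do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault, major = 0;

	/* ... kernel-address, pagefault_disabled() and bad_kernel_fault() checks ... */

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	/* ... validate the VMA and access permissions (access_error()) ... */

	/*
	 * If for any reason at all we couldn't handle the fault, make sure we
	 * exit gracefully rather than endlessly redo the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	major |= fault & VM_FAULT_MAJOR;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

	/* On VM_FAULT_RETRY the mmap_lock has already been dropped for us. */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting proper is done by the core MM
	 * because regs was passed to handle_mm_fault(); `major` is kept only
	 * for platform-specific accounting (details vary by kernel version).
	 */
	return 0;
}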
567 /* Are we prepared to handle this fault? */ in bad_page_fault()
594 pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n", in bad_page_fault()
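bad_page_fault() is the terminal path for kernel faults that cannot be resolved: if an exception-table entry covers the faulting instruction, the fixup address is installed and execution resumes; otherwise the message matched above is printed and the kernel oopses. A sketch, assuming a (regs, address, sig) style signature and a single default message (the real code selects the message by trap type):

static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault? */
	entry = search_exception_tables(regs->nip);
	if (entry) {
		/* Yes: resume at the exception-table fixup address. */
		regs->nip = extable_fixup(entry);
		return;
	}

	/* Kernel has accessed a bad area with no fixup: report and oops. */
	pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
		 address);
	die("Kernel access of bad area", regs, sig);
}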