Lines matching the full-text search query "if"
54 if (unlikely(tsk->pid < 2)) { in force_sig_info_fault()
77 if (address >= PAGE_OFFSET) in SYSCALL_DEFINE2()
113 if (!pgd_present(*pgd_k)) in vmalloc_sync_one()
118 if (!pud_present(*pud_k)) in vmalloc_sync_one()
123 if (!pmd_present(*pmd_k)) in vmalloc_sync_one()
125 if (!pmd_present(*pmd)) { in vmalloc_sync_one()
142 if (!(address >= VMALLOC_START && address < VMALLOC_END)) in vmalloc_fault()
150 if (!pmd_k) in vmalloc_fault()
152 if (pmd_huge(*pmd_k)) in vmalloc_fault()
155 if (!pte_present(*pte_k)) in vmalloc_fault()
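The vmalloc_sync_one() and vmalloc_fault() matches above are all one pattern: walk the kernel reference page table one level at a time and bail out at the first level that is not present, so an entry is only copied into (or used from) the faulting task's tables after a fully-present walk. Below is a minimal stand-alone sketch of that early-out walk, using tiny hypothetical structs in place of the kernel's pgd/pud/pmd/pte types; the pmd_huge() shortcut at file line 152 is ignored here.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical one-pointer-per-level stand-ins for the real page tables;
 * a NULL link (or a clear valid flag) means "not present" at that level. */
struct pte { int valid; };
struct pmd { struct pte *pte; };
struct pud { struct pmd *pmd; };
struct pgd { struct pud *pud; };

/* Early-out walk in the style of vmalloc_sync_one()/vmalloc_fault(). */
static struct pte *walk(struct pgd *pgd)
{
    if (!pgd || !pgd->pud)              /* !pgd_present(*pgd_k) */
        return NULL;
    if (!pgd->pud->pmd)                 /* !pud_present(*pud_k) */
        return NULL;
    if (!pgd->pud->pmd->pte)            /* !pmd_present(*pmd_k) */
        return NULL;
    if (!pgd->pud->pmd->pte->valid)     /* !pte_present(*pte_k) */
        return NULL;
    return pgd->pud->pmd->pte;
}

int main(void)
{
    struct pte pte = { .valid = 1 };
    struct pmd pmd = { .pte = &pte };
    struct pud pud = { .pmd = &pmd };
    struct pgd pgd = { .pud = &pud };

    printf("mapped: %s\n", walk(&pgd) ? "yes" : "no");
    return 0;
}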
163 if (pte_migrating(*pte)) { in wait_for_migration()
173 if (++retries > bound) in wait_for_migration()
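The two wait_for_migration() matches show the retry pattern used when a PTE is marked as migrating: spin until the bit clears, but only up to a bound. A minimal user-space sketch of that bounded busy-wait follows, with a hypothetical pte_t layout and migrating-bit position standing in for the real kernel definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's pte_t and its migrating bit. */
typedef struct { uint64_t val; } pte_t;
#define _PAGE_MIGRATING (1ULL << 3)     /* assumed bit position */

static bool pte_migrating(const volatile pte_t *pte)
{
    return pte->val & _PAGE_MIGRATING;
}

/* Spin until another CPU finishes migrating the page and clears the bit,
 * but give up after a bound so a stuck migration cannot hang this CPU
 * forever (the real code complains loudly in that case). */
static bool wait_for_migration(const volatile pte_t *pte, unsigned long bound)
{
    unsigned long retries = 0;

    while (pte_migrating(pte)) {
        if (++retries > bound)
            return false;
    }
    return true;
}

int main(void)
{
    pte_t pte = { .val = 0 };           /* not migrating: returns at once */

    printf("%s\n", wait_for_migration(&pte, 1000000) ? "done" : "timed out");
    return 0;
}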
201 * here to see if the PTE seems valid, and if so we retry it.
217 if (pgd_addr_invalid(address)) in handle_migrating_pte()
222 if (!pud || !pud_present(*pud)) in handle_migrating_pte()
225 if (!pmd || !pmd_present(*pmd)) in handle_migrating_pte()
230 if (pte_migrating(pteval)) { in handle_migrating_pte()
235 if (!is_kernel_mode || !pte_present(pteval)) in handle_migrating_pte()
237 if (fault_num == INT_ITLB_MISS) { in handle_migrating_pte()
238 if (pte_exec(pteval)) in handle_migrating_pte()
240 } else if (write) { in handle_migrating_pte()
241 if (pte_write(pteval)) in handle_migrating_pte()
244 if (pte_read(pteval)) in handle_migrating_pte()
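The pte_exec()/pte_write()/pte_read() matches inside handle_migrating_pte() encode a small decision: which permission bit the installed PTE must carry for this particular fault to be retryable without further work. A sketch of that check under assumed bit values (the real accessors and the INT_ITLB_MISS number differ):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical PTE permission bits and fault number, illustration only. */
#define PTE_EXEC  (1u << 0)
#define PTE_WRITE (1u << 1)
#define PTE_READ  (1u << 2)
#define INT_ITLB_MISS 7

/* Mirrors the decision in handle_migrating_pte(): an instruction-TLB miss
 * needs the exec bit, a write fault needs the write bit, anything else
 * needs the read bit. */
static bool access_ok_for_fault(unsigned pteval, int fault_num, bool write)
{
    if (fault_num == INT_ITLB_MISS)
        return pteval & PTE_EXEC;
    if (write)
        return pteval & PTE_WRITE;
    return pteval & PTE_READ;
}

int main(void)
{
    printf("%d\n", access_ok_for_fault(PTE_READ | PTE_WRITE, 0, true));   /* 1 */
    printf("%d\n", access_ok_for_fault(PTE_READ, INT_ITLB_MISS, false));  /* 0 */
    return 0;
}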
254 * It returns true if the fault was successfully handled.
272 if (!is_page_fault) in handle_page_fault()
280 * Check to see if we might be overwriting the stack, and bail in handle_page_fault()
281 * out if so. The page fault code is a relatively likely in handle_page_fault()
286 if (stack_offset < THREAD_SIZE / 8) { in handle_page_fault()
297 * see homecache.c. If we find a migrating PTE, we wait until in handle_page_fault()
304 if (handle_migrating_pte(pgd, fault_num, address, in handle_page_fault()
322 if (unlikely(address >= TASK_SIZE && in handle_page_fault()
324 if (is_kernel_mode && is_page_fault && in handle_page_fault()
328 * Don't take the mm semaphore here. If we fixup a prefetch in handle_page_fault()
337 * If we're trying to touch user-space addresses, we must in handle_page_fault()
346 * If we're in an interrupt, have no user context or are running in an in handle_page_fault()
349 if (in_atomic() || !mm) { in handle_page_fault()
366 * Attempt to lock the address space, if we cannot we then validate the in handle_page_fault()
367 * source. If this is invalid we can skip the address space check, in handle_page_fault()
370 if (!down_read_trylock(&mm->mmap_sem)) { in handle_page_fault()
371 if (is_kernel_mode && in handle_page_fault()
380 if (!vma) in handle_page_fault()
382 if (vma->vm_start <= address) in handle_page_fault()
384 if (!(vma->vm_flags & VM_GROWSDOWN)) in handle_page_fault()
386 if (regs->sp < PAGE_OFFSET) { in handle_page_fault()
390 if (address < regs->sp) in handle_page_fault()
393 if (expand_stack(vma, address)) in handle_page_fault()
402 if (fault_num == INT_ITLB_MISS) { in handle_page_fault()
403 if (!(vma->vm_flags & VM_EXEC)) in handle_page_fault()
405 } else if (write) { in handle_page_fault()
407 if (!is_page_fault && regs->cs == KERNEL_CS) in handle_page_fault()
410 if (!(vma->vm_flags & VM_WRITE)) in handle_page_fault()
413 if (!is_page_fault || !(vma->vm_flags & VM_READ)) in handle_page_fault()
419 * If for any reason at all we couldn't handle the fault, in handle_page_fault()
424 if (unlikely(fault & VM_FAULT_ERROR)) { in handle_page_fault()
425 if (fault & VM_FAULT_OOM) in handle_page_fault()
427 else if (fault & VM_FAULT_SIGBUS) in handle_page_fault()
431 if (fault & VM_FAULT_MAJOR) in handle_page_fault()
436 #if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() in handle_page_fault()
438 * If this was an asynchronous fault, in handle_page_fault()
442 #if CHIP_HAS_TILE_DMA() in handle_page_fault()
450 #if CHIP_HAS_SN_PROC() in handle_page_fault()
466 * Fix it, but check if it's kernel or user first.. in handle_page_fault()
473 if (!is_kernel_mode) { in handle_page_fault()
486 if (fixup_exception(regs)) in handle_page_fault()
498 if (fault_num == INT_ITLB_MISS) { in handle_page_fault()
501 if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) in handle_page_fault()
507 if (address < PAGE_SIZE) in handle_page_fault()
516 if (unlikely(tsk->pid < 2)) { in handle_page_fault()
538 if (is_global_init(tsk)) { in handle_page_fault()
544 if (!is_kernel_mode) in handle_page_fault()
552 if (is_kernel_mode) in handle_page_fault()
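Earlier in handle_page_fault(), the find_vma()/VM_GROWSDOWN/expand_stack() matches (file lines 380-393) apply the usual rule for faults just below an existing mapping: the gap is only filled in if the mapping is a grow-down stack and the access is not below the user stack pointer. The following is a simplified, self-contained sketch of that rule; the vma struct, the flag value, and the sp handling are reduced stand-ins, and the real code only applies the sp check when sp actually points at user space.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for vm_area_struct. */
#define VM_GROWSDOWN (1u << 0)

struct vma {
    unsigned long vm_start;     /* lowest address currently covered */
    unsigned long vm_end;
    unsigned vm_flags;
};

/* Mirrors the find_vma()/expand_stack() sequence: an address below
 * vm_start is acceptable only for a grow-down stack VMA, and only if
 * the access is not below the user stack pointer. */
static bool address_in_vma_or_stack(struct vma *vma, unsigned long address,
                                    unsigned long user_sp)
{
    if (!vma)
        return false;
    if (vma->vm_start <= address)
        return true;                    /* ordinary hit */
    if (!(vma->vm_flags & VM_GROWSDOWN))
        return false;                   /* gap below a non-stack VMA */
    if (address < user_sp)
        return false;                   /* far below the stack pointer */
    vma->vm_start = address;            /* expand_stack() stand-in */
    return true;
}

int main(void)
{
    struct vma stack = { .vm_start = 0x8000, .vm_end = 0x9000,
                         .vm_flags = VM_GROWSDOWN };

    printf("%d\n", address_in_vma_or_stack(&stack, 0x7ff0, 0x7fe0)); /* grows */
    printf("new vm_start: %#lx\n", stack.vm_start);
    return 0;
}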
598 if ((pc & 0x7) != 0 || pc < PAGE_OFFSET || in do_page_fault_ics()
609 if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0) in do_page_fault_ics()
613 * If we faulted with ICS set in sys_cmpxchg, we are providing in do_page_fault_ics()
623 * This way the backtrace information is correct if we need to in do_page_fault_ics()
628 if (pc >= (unsigned long) sys_cmpxchg && in do_page_fault_ics()
632 if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) { in do_page_fault_ics()
643 * We may re-fixup again in the real fault handler if it in do_page_fault_ics()
647 else if (pc >= (unsigned long) __start_atomic_asm_code && in do_page_fault_ics()
656 if (!fixup) in do_page_fault_ics()
664 * Now that we have released the atomic lock (if necessary), in do_page_fault_ics()
665 * it's safe to spin if the PTE that caused the fault was migrating. in do_page_fault_ics()
667 if (fault_num == INT_DTLB_ACCESS) in do_page_fault_ics()
669 if (handle_migrating_pte(pgd, fault_num, address, 1, write)) in do_page_fault_ics()
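The do_page_fault_ics() matches classify the faulting pc by address range: first the sys_cmpxchg fast path (with a further split at __sys_cmpxchg_grab_lock), then the atomic assembly helpers, and otherwise the ordinary exception-table fixup. Below is a sketch of that range classification with made-up boundary addresses; the real boundaries are code symbols, and the upper bound of the cmpxchg range is not visible in the matches above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical code-region boundaries; fault.c compares the faulting pc
 * against sys_cmpxchg, __sys_cmpxchg_grab_lock and the atomic asm code. */
enum pc_region { PC_OTHER, PC_CMPXCHG, PC_CMPXCHG_LOCKED, PC_ATOMIC_ASM };

struct code_layout {
    uintptr_t cmpxchg_start, cmpxchg_grab_lock, cmpxchg_end;
    uintptr_t atomic_start, atomic_end;
};

/* Mirrors the ordered range checks in do_page_fault_ics(): decide whether
 * the fault hit the cmpxchg fast path (and, if so, whether the lock had
 * already been grabbed), otherwise check the atomic asm code. */
static enum pc_region classify_pc(const struct code_layout *l, uintptr_t pc)
{
    if (pc >= l->cmpxchg_start && pc < l->cmpxchg_end)
        return pc >= l->cmpxchg_grab_lock ? PC_CMPXCHG_LOCKED : PC_CMPXCHG;
    if (pc >= l->atomic_start && pc < l->atomic_end)
        return PC_ATOMIC_ASM;
    return PC_OTHER;
}

int main(void)
{
    struct code_layout l = { 0x1000, 0x1040, 0x1100, 0x2000, 0x2200 };

    printf("%d %d %d\n",
           classify_pc(&l, 0x1010),     /* PC_CMPXCHG */
           classify_pc(&l, 0x1050),     /* PC_CMPXCHG_LOCKED */
           classify_pc(&l, 0x2100));    /* PC_ATOMIC_ASM */
    return 0;
}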
683 * space. For the latter, if we're in kernel mode, we just save the
695 #if CHIP_HAS_TILE_DMA() in do_page_fault()
697 * If it's a DMA fault, suspend the transfer while we're in do_page_fault()
698 * handling the miss; we'll restart after it's handled. If we in do_page_fault()
703 if (fault_num == INT_DMATLB_MISS || in do_page_fault()
714 /* Validate fault num and decide if this is a first-time page fault. */ in do_page_fault()
718 #if CHIP_HAS_TILE_DMA() in do_page_fault()
722 #if CHIP_HAS_SN_PROC() in do_page_fault()
730 #if CHIP_HAS_TILE_DMA() in do_page_fault()
741 #if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() in do_page_fault()
742 if (EX1_PL(regs->ex1) != USER_PL) { in do_page_fault()
745 #if CHIP_HAS_TILE_DMA() in do_page_fault()
753 #if CHIP_HAS_SN_PROC() in do_page_fault()
762 if (async) { in do_page_fault()
771 if (async->fault_num != 0) { in do_page_fault()
791 #if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
793 * Check an async_tlb structure to see if a deferred fault is waiting,
794 * and if so pass it to the page-fault code.
799 if (async->fault_num) { in handle_async_page_fault()
802 * handler so that if we re-interrupt before returning in handle_async_page_fault()
820 * Clear thread flag early. If we re-interrupt while processing in do_async_page_fault()
826 #if CHIP_HAS_TILE_DMA() in do_async_page_fault()
829 #if CHIP_HAS_SN_PROC() in do_async_page_fault()
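Taken together, the do_page_fault() and handle_async_page_fault()/do_async_page_fault() matches form a defer-and-replay scheme: a DMA or SN-processor fault taken in kernel mode is only recorded in an async_tlb structure, and is replayed through the normal page-fault path once the task is about to return to user space; the pending fault number is cleared before replaying so a re-interrupt is not lost. A rough user-space sketch of that hand-off, with a hypothetical async_tlb layout:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct async_tlb. */
struct async_tlb {
    int fault_num;              /* 0 means "nothing pending" */
    unsigned long address;
    bool is_write;
};

/* do_page_fault() side: in kernel mode the fault is only recorded. */
static void record_async_fault(struct async_tlb *async, int fault_num,
                               unsigned long address, bool is_write)
{
    if (async->fault_num != 0) {
        /* A second fault arrived before the first was drained;
         * the real code treats this as a bug. */
        fprintf(stderr, "async fault already pending\n");
        return;
    }
    async->fault_num = fault_num;
    async->address = address;
    async->is_write = is_write;
}

/* handle_async_page_fault() side: on return to user space, replay the
 * deferred fault through the normal page-fault code. */
static void handle_async_fault(struct async_tlb *async)
{
    if (async->fault_num) {
        int fault_num = async->fault_num;

        /* Clear before handling so a re-interrupt is not lost. */
        async->fault_num = 0;
        printf("replaying fault %d at %#lx (%s)\n", fault_num,
               async->address, async->is_write ? "write" : "read");
        /* ... the real code calls handle_page_fault() here ... */
    }
}

int main(void)
{
    struct async_tlb async = { 0 };

    record_async_fault(&async, 23, 0x10000, true);
    handle_async_fault(&async);
    return 0;
}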
846 * if undone). in vmalloc_sync_all()
854 if (!test_bit(pgd_index(address), insync)) { in vmalloc_sync_all()
860 if (!vmalloc_sync_one(list_to_pgd(pos), in vmalloc_sync_all()
867 if (pos != pgd_list.next) in vmalloc_sync_all()
870 if (address == start && test_bit(pgd_index(address), insync)) in vmalloc_sync_all()
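The vmalloc_sync_all() matches show the bookkeeping that keeps the sync from being repeated work: a per-pgd-slot "insync" bitmap is tested before walking pgd_list and set once every page table has been brought up to date for that slot. A reduced sketch of that skip logic, using a plain bool array instead of the kernel bitmap helpers and omitting the list-changed retry in the real code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's "insync" bitmap over pgd slots. */
#define PTRS_PER_PGD 64
static bool insync[PTRS_PER_PGD];

/* Mirrors vmalloc_sync_all(): each pgd slot is walked at most once; after
 * every page table on pgd_list has been synced for that slot, the slot is
 * marked and later passes skip it. */
static void sync_slot(unsigned idx)
{
    if (insync[idx])
        return;                 /* already synced on an earlier pass */
    /* ... walk pgd_list and call vmalloc_sync_one() for each entry ... */
    insync[idx] = true;
}

int main(void)
{
    sync_slot(3);
    sync_slot(3);               /* second call is a no-op */
    printf("slot 3 synced: %d\n", insync[3]);
    return 0;
}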