Lines matching full:ea (occurrences of the effective address, ea, in the hash MMU code)

1380 static int subpage_protection(struct mm_struct *mm, unsigned long ea)  in subpage_protection()  argument
1389 if (ea >= spt->maxaddr) in subpage_protection()
1391 if (ea < 0x100000000UL) { in subpage_protection()
1395 sbpm = spt->protptrs[ea >> SBP_L3_SHIFT]; in subpage_protection()
1399 sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)]; in subpage_protection()
1402 spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)]; in subpage_protection()
1405 spp >>= 30 - 2 * ((ea >> 12) & 0xf); in subpage_protection()
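
As a reading aid for the subpage_protection() matches above: the table walk on lines 1395-1402 selects a 32-bit word for the 64K page containing ea, and line 1405 then extracts a 2-bit field for the 4K subpage within that page (16 subpages per word, subpage 0 in the most significant bits). A minimal standalone sketch of that extraction, with a made-up word value purely for illustration:

	/* Illustrative sketch only, not kernel code: extract the 2-bit protection
	 * field for the 4K subpage that ea falls in, mirroring line 1405 above. */
	#include <stdio.h>
	#include <stdint.h>

	static unsigned int subpage_field(uint32_t spp_word, unsigned long ea)
	{
		unsigned int subpage = (ea >> 12) & 0xf;      /* which 4K subpage of the 64K page (0..15) */
		return (spp_word >> (30 - 2 * subpage)) & 3;  /* its 2-bit field; subpage 0 sits in bits 31:30 */
	}

	int main(void)
	{
		uint32_t word = 0xC0000003;                   /* made-up value: subpages 0 and 15 set to 3 */
		printf("%u %u %u\n",
		       subpage_field(word, 0x0000),           /* subpage 0  -> 3 */
		       subpage_field(word, 0x8000),           /* subpage 8  -> 0 */
		       subpage_field(word, 0xf000));          /* subpage 15 -> 3 */
		return 0;
	}
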
1418 static inline int subpage_protection(struct mm_struct *mm, unsigned long ea) in subpage_protection() argument
1424 void hash_failure_debug(unsigned long ea, unsigned long access, in hash_failure_debug() argument
1430 pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n", in hash_failure_debug()
1431 ea, access, current->comm); in hash_failure_debug()
1436 static void check_paca_psize(unsigned long ea, struct mm_struct *mm, in check_paca_psize() argument
1440 if (psize != get_paca_psize(ea)) { in check_paca_psize()
1459 int hash_page_mm(struct mm_struct *mm, unsigned long ea, in hash_page_mm() argument
1471 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", in hash_page_mm()
1472 ea, access, trap); in hash_page_mm()
1473 trace_hash_fault(ea, access, trap); in hash_page_mm()
1476 switch (get_region_id(ea)) { in hash_page_mm()
1484 psize = get_slice_psize(mm, ea); in hash_page_mm()
1485 ssize = user_segment_size(ea); in hash_page_mm()
1486 vsid = get_user_vsid(&mm->context, ea, ssize); in hash_page_mm()
1489 vsid = get_kernel_vsid(ea, mmu_kernel_ssize); in hash_page_mm()
1496 vsid = get_kernel_vsid(ea, mmu_kernel_ssize); in hash_page_mm()
1538 ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1); in hash_page_mm()
1542 ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift); in hash_page_mm()
1571 rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, in hash_page_mm()
1575 rc = __hash_page_huge(ea, access, vsid, ptep, trap, in hash_page_mm()
1588 check_paca_psize(ea, mm, psize, user_region); in hash_page_mm()
1603 demote_segment_4k(mm, ea); in hash_page_mm()
1613 demote_segment_4k(mm, ea); in hash_page_mm()
1615 } else if (ea < VMALLOC_END) { in hash_page_mm()
1632 check_paca_psize(ea, mm, psize, user_region); in hash_page_mm()
1636 rc = __hash_page_64K(ea, access, vsid, ptep, trap, in hash_page_mm()
1641 int spp = subpage_protection(mm, ea); in hash_page_mm()
1645 rc = __hash_page_4K(ea, access, vsid, ptep, trap, in hash_page_mm()
1654 hash_failure_debug(ea, access, vsid, trap, ssize, psize, in hash_page_mm()
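
One more aid for the hash_page_mm() matches: line 1538 rounds ea down to the start of the page it falls in for the selected page size by clearing the low mmu_psize_defs[psize].shift bits. A small standalone example of the same masking (the shift value 16, i.e. a 64K page, is only an assumption for illustration):

	/* Illustrative sketch only: round an effective address down to its page base,
	 * as line 1538 above does with mmu_psize_defs[psize].shift. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long ea = 0x7fff1234abcdUL;
		unsigned int shift = 16;                              /* assumed page-size shift (64K) */
		unsigned long page_base = ea & ~((1ul << shift) - 1); /* clear the low 'shift' bits */
		printf("ea=0x%lx page_base=0x%lx\n", ea, page_base);  /* -> page_base=0x7fff12340000 */
		return 0;
	}
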
1669 int hash_page(unsigned long ea, unsigned long access, unsigned long trap, in hash_page() argument
1675 if ((get_region_id(ea) == VMALLOC_REGION_ID) || in hash_page()
1676 (get_region_id(ea) == IO_REGION_ID)) in hash_page()
1682 return hash_page_mm(mm, ea, access, trap, flags); in hash_page()
1688 unsigned long ea = regs->dar; in DEFINE_INTERRUPT_HANDLER() local
1701 region_id = get_region_id(ea); in DEFINE_INTERRUPT_HANDLER()
1727 err = hash_page_mm(mm, ea, access, TRAP(regs), flags); in DEFINE_INTERRUPT_HANDLER()
1732 _exception(SIGSEGV, regs, SEGV_ACCERR, ea); in DEFINE_INTERRUPT_HANDLER()
1734 _exception(SIGBUS, regs, BUS_ADRERR, ea); in DEFINE_INTERRUPT_HANDLER()
1745 static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) in should_hash_preload() argument
1747 int psize = get_slice_psize(mm, ea); in should_hash_preload()
1754 * Don't prefault if subpage protection is enabled for the EA. in should_hash_preload()
1756 if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea))) in should_hash_preload()
1762 static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, in hash_preload() argument
1771 BUG_ON(get_region_id(ea) != USER_REGION_ID); in hash_preload()
1773 if (!should_hash_preload(mm, ea)) in hash_preload()
1776 DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx," in hash_preload()
1777 " trap=%lx\n", mm, mm->pgd, ea, access, trap); in hash_preload()
1785 ssize = user_segment_size(ea); in hash_preload()
1786 vsid = get_user_vsid(&mm->context, ea, ssize); in hash_preload()
1824 rc = __hash_page_64K(ea, access, vsid, ptep, trap, in hash_preload()
1828 rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags, in hash_preload()
1829 ssize, subpage_protection(mm, ea)); in hash_preload()
1835 hash_failure_debug(ea, access, vsid, trap, ssize, in hash_preload()
2077 void hpt_do_stress(unsigned long ea, unsigned long hpte_group) in hpt_do_stress() argument
2100 if (ea >= PAGE_OFFSET) { in hpt_do_stress()