Lines Matching +full:i +full:- +full:tlb +full:- +full:size
 * PPC Huge TLB Page Support for Kernel.
 * ...
 * Based on the IA-32 version:

#include <asm/tlb.h>
#include <asm/pte-walk.h>
#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_basic_t)) - \
                     __builtin_ffs(sizeof(void *)))
In huge_pte_offset():

    return __find_linux_pte(mm->pgd, addr, NULL, NULL);
In __hugepte_alloc():

    int i;
    ...
    num_hugepd = 1 << (pshift - pdshift);
    ...
    cachep = PGT_CACHE(pdshift - pshift);
    ...
        return -ENOMEM;
    ...
        return -ENOMEM;
    ...
    /*
     * We have multiple higher-level entries that point to the same
     * actual pte location.  Fill in each as we go and backtrack on error.
     * We need all of these so the DTLB pgtable walk code can find the
     * right higher-level entry without knowing if it's a hugepage or not.
     */
    for (i = 0; i < num_hugepd; i++, hpdp++) {
        ...
    }
    if (i < num_hugepd) {
        for (i = i - 1; i >= 0; i--, hpdp--)
            ...
    }
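The fill-and-backtrack loop above is the heart of __hugepte_alloc(): every higher-level entry covering the huge page must point at the same hugepte table, and a racing allocator is detected by finding a slot already populated. A minimal userspace sketch of that pattern (the `entry` array and `fill_entries()` are hypothetical stand-ins, not kernel API):

#include <stdio.h>

#define NUM_HUGEPD 4            /* plays the role of 1 << (pshift - pdshift) */

static void *entry[NUM_HUGEPD]; /* stand-ins for the higher-level PD slots */

/* Point every covering slot at one shared hugepte table; if another
 * thread won the race and a slot is already set, undo our work. */
static int fill_entries(void *hugepte_table)
{
    int i;

    for (i = 0; i < NUM_HUGEPD; i++) {
        if (entry[i])
            break;              /* lost the race */
        entry[i] = hugepte_table;
    }
    if (i < NUM_HUGEPD) {
        for (i = i - 1; i >= 0; i--)
            entry[i] = NULL;    /* backtrack the slots we claimed */
        return -1;
    }
    return 0;
}

int main(void)
{
    static char table[64];      /* fake hugepte table */

    printf("first fill:  %d\n", fill_entries(table));   /* 0 */
    printf("second fill: %d\n", fill_entries(table));   /* -1: slots taken */
    return 0;
}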
In huge_pte_alloc():

    addr &= ~(sz - 1);
    ...
    ptl = &mm->page_table_lock;
    ...
    ptl = &mm->page_table_lock;
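huge_pte_alloc() rounds `addr` down with `addr &= ~(sz - 1)`, which is valid only because huge page sizes are powers of two. A standalone illustration (the values are made up):

#include <stdio.h>

int main(void)
{
    unsigned long sz = 1UL << 24;       /* e.g. a 16M huge page */
    unsigned long addr = 0x12345678UL;

    addr &= ~(sz - 1);                  /* round down to the huge page base */
    printf("0x%lx\n", addr);            /* prints 0x12000000 */
    return 0;
}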
In pseries_add_gpage():

    number_of_pages--;
In pseries_alloc_bootmem_huge_page():

    m = phys_to_virt(gpage_freearray[--nr_gpages]);
    ...
    list_add(&m->list, &huge_boot_pages);
    m->hstate = hstate;
#define HUGEPD_FREELIST_SIZE \
    ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
In hugepd_free_rcu_callback():

    unsigned int i;

    for (i = 0; i < batch->index; i++)
        kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
    ...
    if (atomic_read(&tlb->mm->mm_users) < 2 ||
        mm_is_thread_local(tlb->mm)) {
        ...
    }
    ...
    (*batchp)->index = 0;
    ...
    (*batchp)->ptes[(*batchp)->index++] = hugepte;
    if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
        call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
        ...
    }
}
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
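hugepd_free() accumulates pointers in a per-CPU batch and hands a full batch to call_rcu(), so the actual kmem_cache_free() calls are deferred until concurrent lockless page-table walkers have finished. A simplified userspace sketch of just the batching part, with a hypothetical flush_batch() standing in for the RCU callback:

#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 8            /* HUGEPD_FREELIST_SIZE plays this role */

struct free_batch {
    unsigned int index;
    void *ptes[BATCH_SIZE];
};

static struct free_batch *batch;

/* Stand-in for the deferred RCU callback: free the batched pointers. */
static void flush_batch(struct free_batch *b)
{
    for (unsigned int i = 0; i < b->index; i++)
        free(b->ptes[i]);
    free(b);
}

static void batched_free(void *hugepte)
{
    if (!batch) {
        batch = calloc(1, sizeof(*batch));  /* starts with index = 0 */
        if (!batch) {
            free(hugepte);                  /* no batch: free immediately */
            return;
        }
    }
    batch->ptes[batch->index++] = hugepte;
    if (batch->index == BATCH_SIZE) {       /* batch full: hand it off */
        flush_batch(batch);
        batch = NULL;
    }
}

int main(void)
{
    for (int i = 0; i < 20; i++)
        batched_free(malloc(32));
    if (batch) {                /* drain the partial batch at exit */
        flush_batch(batch);
        batch = NULL;
    }
    return 0;
}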
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
    ...
    int i;

    unsigned long pdmask = ~((1UL << pdshift) - 1);
    ...
    num_hugepd = 1 << (shift - pdshift);
    ...
    if (end - 1 > ceiling - 1)
        return;

    for (i = 0; i < num_hugepd; i++, hpdp++)
        ...
    hugepd_free(tlb, hugepte);
    ...
    pgtable_free_tlb(tlb, hugepte,
                     get_hugepd_cache_index(pdshift - shift));
}
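The `end - 1 > ceiling - 1` comparison, which recurs in every free routine below, handles `ceiling == 0` (meaning "no upper limit") through unsigned wrap-around: `0UL - 1` becomes ULONG_MAX, so the test can never fire. A quick standalone check:

#include <stdio.h>

int main(void)
{
    unsigned long end = 0x100000UL;
    unsigned long ceiling = 0;              /* 0 means "no ceiling" */

    /* ceiling - 1 wraps to ULONG_MAX, so this prints 0 */
    printf("%d\n", end - 1 > ceiling - 1);

    ceiling = 0x80000UL;                    /* a real ceiling below end */
    printf("%d\n", end - 1 > ceiling - 1);  /* prints 1 */
    return 0;
}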
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
    ...
    if (end - 1 > ceiling - 1)
        return;

    pte_free_tlb(tlb, token, addr);
    mm_dec_nr_ptes(tlb->mm);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
    ...
        hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);
    ...
        /*
         * Increment next by the size of the huge mapping since
         * there may be more than one entry at this level for a
         * single hugepage, but all of them point to
         * the same kmem cache that holds the hugepte.
         */
        free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                          addr, next, floor, ceiling);
    ...
    if (end - 1 > ceiling - 1)
        return;

    pmd_free_tlb(tlb, pmd, start);
    mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
    ...
        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                               ceiling);
    ...
        /*
         * Increment next by the size of the huge mapping since
         * ...
         */
        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                          addr, next, floor, ceiling);
    ...
    if (end - 1 > ceiling - 1)
        return;

    pud_free_tlb(tlb, pud, start);
    mm_dec_nr_puds(tlb->mm);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
    ...
    pgd = pgd_offset(tlb->mm, addr);
    ...
        hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
    ...
        /*
         * Increment next by the size of the huge mapping since
         * ...
         */
        free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
                          addr, next, floor, ceiling);
    ...
}
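The repeated "increment next by the size of the huge mapping" comment describes skipping all the directory entries backed by a single hugepd in one step. A sketch of the skip arithmetic, with illustrative shift values (not the kernel's walker code):

#include <stdio.h>

int main(void)
{
    unsigned int pdshift = 30;  /* each directory entry maps 1G */
    unsigned int shift = 34;    /* the huge page itself is 16G */
    unsigned long addr = 0, end = 1UL << 36;

    while (addr < end) {
        unsigned long next = addr + (1UL << pdshift);

        /* One hugepd covers 1 << (shift - pdshift) entries, so
         * jump past the whole huge mapping in a single step. */
        if (shift > pdshift)
            next = (addr | ((1UL << shift) - 1)) + 1;

        printf("range 0x%lx-0x%lx\n", addr, next);
        addr = next;
    }
    return 0;
}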
In follow_huge_pd():

    struct mm_struct *mm = vma->vm_mm;
    ...
    /*
     * hugepage directory entries are protected by mm->page_table_lock
     * ...
     */
    ptl = &mm->page_table_lock;
    ...
    mask = (1UL << shift) - 1;
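In follow_huge_pd(), `mask = (1UL << shift) - 1` isolates the offset of the faulting address inside the huge page, so the right subpage of the compound page can be returned. For example (illustrative values):

#include <stdio.h>

int main(void)
{
    unsigned int shift = 24;                /* a 16M huge page */
    unsigned long address = 0x12345678UL;
    unsigned long mask = (1UL << shift) - 1;

    printf("offset: 0x%lx\n", address & mask);  /* prints 0x345678 */
    return 0;
}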
In vma_mmu_pagesize():

    unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
bool __init arch_hugetlb_valid_size(unsigned long size)
{
    int shift = __ffs(size);
    ...
    /* Check that it is a page size supported by the hardware and
     * that it fits within pagetable and slice limits. */
    if (size <= PAGE_SIZE || !is_power_of_2(size))
        return false;
    ...
}
static int __init add_huge_page_size(unsigned long long size)
{
    int shift = __ffs(size);

    if (!arch_hugetlb_valid_size((unsigned long)size))
        return -EINVAL;

    hugetlb_add_hstate(shift - PAGE_SHIFT);
    return 0;
}
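Both routines derive the page shift with `__ffs(size)`: for a power of two, the index of the lowest set bit equals log2 of the size, which is why the is_power_of_2() check matters. A standalone equivalent using the GCC builtin (assumed to be available; __ffs() is the kernel's in-house version):

#include <stdio.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned long n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
    unsigned long size = 1UL << 24;             /* 16M */

    if (is_power_of_2(size)) {
        int shift = __builtin_ctzl(size);       /* ~ __ffs(size) */
        printf("shift = %d\n", shift);          /* prints 24 */
    }
    return 0;
}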
In hugetlbpage_init():

        return -ENODEV;
    ...
        pgtable_cache_add(pdshift - shift);
In flush_dcache_icache_hugepage():

    int i;
    ...
    for (i = 0; i < compound_nr(page); i++) {
        if (!PageHighMem(page)) {
            __flush_dcache_icache(page_address(page + i));
        } else {
            start = kmap_atomic(page + i);
            ...
        }
    }
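flush_dcache_icache_hugepage() walks every base-page-sized subpage of the compound page, flushing directly when the page is addressable and through kmap_atomic() otherwise. A userspace analogue of the walk itself, with a hypothetical flush_one() in place of __flush_dcache_icache():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static void flush_one(void *p)  /* stand-in for __flush_dcache_icache() */
{
    (void)p;
}

int main(void)
{
    size_t huge_size = 1UL << 21;           /* 2M worth of memory */
    char *base = malloc(huge_size);
    size_t nr = huge_size / PAGE_SIZE;      /* compound_nr() analogue */

    if (!base)
        return 1;
    for (size_t i = 0; i < nr; i++)
        flush_one(base + i * PAGE_SIZE);    /* one subpage at a time */
    printf("flushed %zu subpages\n", nr);
    free(base);
    return 0;
}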
In gigantic_hugetlb_cma_reserve():

        order = PUD_SHIFT - PAGE_SHIFT;
    ...
        order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
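In both branches the CMA order is simply the huge page shift minus PAGE_SHIFT, i.e. how many base pages make up one gigantic page. Worked out for 16G pages on a 64K-page kernel (illustrative values):

#include <stdio.h>

#define PAGE_SHIFT 16   /* 64K base pages */

int main(void)
{
    int huge_shift = 34;    /* 16G = 1UL << 34 */
    int order = huge_shift - PAGE_SHIFT;

    /* prints: order = 18 (262144 base pages) */
    printf("order = %d (%lu base pages)\n", order, 1UL << order);
    return 0;
}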