Lines matching +refs:add +refs:addr +refs:attrs (identifier cross-references, apparently from the kernel's mm/hugetlb.c; each entry gives the source line number, the matched line, and the enclosing function, with "local"/"argument" marking how the identifier is used there).
610 long add = 0; in add_reservation_in_range() local
646 add += hugetlb_resv_map_add(resv, iter->link.prev, in add_reservation_in_range()
660 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset, in add_reservation_in_range()
663 return add; in add_reservation_in_range()
743 long add = 0, actual_regions_needed = 0; in region_add() local
778 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
783 return add; in region_add()
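The hits above are the reservation-map bookkeeping: add_reservation_in_range() walks the existing file_region list for [f, t) and accumulates in `add` the number of pages that gain a new reservation, and region_add() returns that total to its callers. A simplified sketch of the accumulation pattern follows, assuming the file_region/resv_map layout used in this file; the real function also inserts the new regions via hugetlb_resv_map_add() and handles cgroup charging.

/* Simplified sketch: count the pages in [f, t) not yet covered by an
 * existing region.  The real add_reservation_in_range() also inserts
 * file_region entries for these gaps. */
static long count_new_reservations(struct resv_map *resv, long f, long t)
{
        struct file_region *iter;
        long add = 0;                   /* pages newly reserved */
        long last_accounted_offset = f; /* end of coverage so far */

        list_for_each_entry(iter, &resv->regions, link) {
                if (iter->from >= t)
                        break;
                /* a gap before this region becomes a new reservation */
                if (iter->from > last_accounted_offset)
                        add += iter->from - last_accounted_offset;
                if (iter->to > last_accounted_offset)
                        last_accounted_offset = iter->to;
        }
        /* trailing gap after the last region, if any */
        if (t > last_accounted_offset)
                add += t - last_accounted_offset;

        return add;
}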
2327 struct vm_area_struct *vma, unsigned long addr) in alloc_buddy_hugetlb_folio_with_mpol() argument
2335 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
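alloc_buddy_hugetlb_folio_with_mpol() resolves the vma's NUMA policy for the faulting address before allocating. A rough sketch of that lookup is below; a bare page-allocator call stands in for the hstate-aware allocation helpers, and the retry/preferred-many handling of the real function is dropped.

/* Sketch: ask the vma's mempolicy for a preferred node and nodemask,
 * allocate there, then drop the policy reference huge_node() took. */
static struct folio *alloc_folio_with_mpol_sketch(struct hstate *h,
                                                  struct vm_area_struct *vma,
                                                  unsigned long addr)
{
        gfp_t gfp_mask = htlb_alloc_mask(h);
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct folio *folio;
        int nid;

        nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
        /* stand-in for the hugetlb-specific allocation path */
        folio = __folio_alloc(gfp_mask, huge_page_order(h), nid, nodemask);
        mpol_cond_put(mpol);
        return folio;
}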
2609 struct vm_area_struct *vma, unsigned long addr, in __vma_reservation_common() argument
2621 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2689 struct vm_area_struct *vma, unsigned long addr) in vma_needs_reservation() argument
2691 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2695 struct vm_area_struct *vma, unsigned long addr) in vma_commit_reservation() argument
2697 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2701 struct vm_area_struct *vma, unsigned long addr) in vma_end_reservation() argument
2703 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2707 struct vm_area_struct *vma, unsigned long addr) in vma_add_reservation() argument
2709 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2713 struct vm_area_struct *vma, unsigned long addr) in vma_del_reservation() argument
2715 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); in vma_del_reservation()
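__vma_reservation_common() is the single entry point behind the vma_*_reservation() wrappers above; each wrapper only differs in the mode flag it passes. A condensed sketch of that dispatch, with the per-mode return-value fixups and the VMA_ADD_RESV/VMA_DEL_RESV cases elided:

/* Condensed sketch of the mode dispatch behind the wrappers above. */
static long vma_reservation_dispatch_sketch(struct hstate *h,
                                            struct vm_area_struct *vma,
                                            unsigned long addr,
                                            enum vma_resv_mode mode)
{
        struct resv_map *resv = vma_resv_map(vma);
        pgoff_t idx;
        long ret;

        if (!resv)
                return 1;               /* no reservation map for this vma */

        idx = vma_hugecache_offset(h, vma, addr);
        switch (mode) {
        case VMA_NEEDS_RESV:            /* does this page need a new reservation? */
                ret = region_chg(resv, idx, idx + 1, NULL);
                break;
        case VMA_COMMIT_RESV:           /* make the pending region permanent */
                ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
                break;
        case VMA_END_RESV:              /* drop the pending region */
                region_abort(resv, idx, idx + 1, 1);
                ret = 0;
                break;
        default:                        /* VMA_ADD_RESV / VMA_DEL_RESV elided */
                ret = 0;
                break;
        }
        return ret;
}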
3016 unsigned long addr, bool cow_from_owner) in alloc_hugetlb_folio() argument
3043 retval = vma_needs_reservation(h, vma, addr); in alloc_hugetlb_folio()
3090 folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg); in alloc_hugetlb_folio()
3093 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); in alloc_hugetlb_folio()
3126 retval = vma_commit_reservation(h, vma, addr); in alloc_hugetlb_folio()
3186 vma_end_reservation(h, vma, addr); in alloc_hugetlb_folio()
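alloc_hugetlb_folio() brackets the allocation with those wrappers: it asks whether the address already has a reservation, dequeues or freshly allocates a folio, commits the reservation on success and ends it on the failure path. A rough outline of that flow; hugetlb_lock, subpool/cgroup accounting and the cow_from_owner case are trimmed.

/* Rough outline of the reservation flow around allocation above. */
static struct folio *alloc_hugetlb_folio_outline(struct hstate *h,
                                                 struct vm_area_struct *vma,
                                                 unsigned long addr)
{
        long chg = vma_needs_reservation(h, vma, addr); /* 0 if already reserved */
        long gbl_chg = chg;     /* global charge; subpool adjustment omitted */
        struct folio *folio;

        if (chg < 0)
                return ERR_PTR(-ENOMEM);

        folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
        if (!folio)
                folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
        if (!folio) {
                vma_end_reservation(h, vma, addr);      /* undo the pending region */
                return ERR_PTR(-ENOSPC);
        }

        vma_commit_reservation(h, vma, addr);           /* reservation is now consumed */
        return folio;
}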
4470 .attrs = hstate_attrs,
4480 .attrs = hstate_demote_attrs,
4543 .attrs = per_node_hstate_attrs,
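The three .attrs hits are struct attribute_group initializers used when the hstate sysfs files are registered (globally, for demotion, and per NUMA node). A minimal sketch of that sysfs pattern; the attribute and handler names here are illustrative, not the ones in the file.

/* Minimal sketch of the ".attrs = ..." pattern above: a NULL-terminated
 * array of attributes wrapped in an attribute_group. */
static ssize_t nr_hugepages_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf);
static ssize_t nr_hugepages_store(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  const char *buf, size_t count);

static struct kobj_attribute nr_hugepages_attr =
        __ATTR(nr_hugepages, 0644, nr_hugepages_show, nr_hugepages_store);

static struct attribute *hstate_attrs_sketch[] = {
        &nr_hugepages_attr.attr,
        NULL,                           /* sysfs expects a NULL terminator */
};

static const struct attribute_group hstate_attr_group_sketch = {
        .attrs = hstate_attrs_sketch,
};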
5425 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) in hugetlb_vm_op_split() argument
5427 if (addr & ~(huge_page_mask(hstate_vma(vma)))) in hugetlb_vm_op_split()
5435 if (addr & ~PUD_MASK) { in hugetlb_vm_op_split()
5441 unsigned long floor = addr & PUD_MASK; in hugetlb_vm_op_split()
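hugetlb_vm_op_split() rejects split addresses that are not huge-page aligned; when the address is not PUD-aligned it also unshares any shared page tables in the surrounding PUD window (the floor/ceiling computation above). A tiny sketch of the alignment rule only:

/* Tiny sketch of the split rule above: a hugetlb vma may only be split
 * on a huge-page boundary; the PMD-unshare step is omitted. */
static int hugetlb_split_check_sketch(struct vm_area_struct *vma, unsigned long addr)
{
        if (addr & ~huge_page_mask(hstate_vma(vma)))
                return -EINVAL;
        return 0;
}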
5546 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, in hugetlb_install_folio() argument
5552 hugetlb_add_new_anon_rmap(new_folio, vma, addr); in hugetlb_install_folio()
5555 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); in hugetlb_install_folio()
5566 unsigned long addr; in copy_hugetlb_page_range() local
5593 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
5595 src_pte = hugetlb_walk(src_vma, addr, sz); in copy_hugetlb_page_range()
5597 addr |= last_addr_mask; in copy_hugetlb_page_range()
5600 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); in copy_hugetlb_page_range()
5615 addr |= last_addr_mask; in copy_hugetlb_page_range()
5622 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); in copy_hugetlb_page_range()
5632 set_huge_pte_at(dst, addr, dst_pte, entry, sz); in copy_hugetlb_page_range()
5647 set_huge_pte_at(src, addr, src_pte, entry, sz); in copy_hugetlb_page_range()
5651 set_huge_pte_at(dst, addr, dst_pte, entry, sz); in copy_hugetlb_page_range()
5657 set_huge_pte_at(dst, addr, dst_pte, in copy_hugetlb_page_range()
5660 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); in copy_hugetlb_page_range()
5683 new_folio = alloc_hugetlb_folio(dst_vma, addr, false); in copy_hugetlb_page_range()
5690 addr, dst_vma); in copy_hugetlb_page_range()
5701 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); in copy_hugetlb_page_range()
5703 restore_reserve_on_error(h, dst_vma, addr, in copy_hugetlb_page_range()
5709 hugetlb_install_folio(dst_vma, dst_pte, addr, in copy_hugetlb_page_range()
5724 huge_ptep_set_wrprotect(src, addr, src_pte); in copy_hugetlb_page_range()
5731 set_huge_pte_at(dst, addr, dst_pte, entry, sz); in copy_hugetlb_page_range()
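copy_hugetlb_page_range() is fork's per-VMA copy loop: it walks the source mapping one huge page at a time, allocates a destination entry, and either reuses a shared PMD, copies the entry, or (for private mappings that cannot share) allocates a new folio. A stripped-down skeleton of the loop; locking, swap/migration entries, uffd-wp bits, rmap/refcount bookkeeping and the new-folio fallback are omitted.

/* Stripped-down skeleton of the copy loop above. */
static int copy_hugetlb_range_skeleton(struct mm_struct *dst, struct mm_struct *src,
                                       struct vm_area_struct *dst_vma,
                                       struct vm_area_struct *src_vma)
{
        unsigned long sz = huge_page_size(hstate_vma(src_vma));
        unsigned long addr;
        pte_t *src_pte, *dst_pte;
        pte_t entry;

        for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
                src_pte = hugetlb_walk(src_vma, addr, sz);
                if (!src_pte)
                        continue;               /* nothing mapped here */

                dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
                if (!dst_pte)
                        return -ENOMEM;
                if (dst_pte == src_pte)
                        continue;               /* already sharing this PMD page */

                entry = huge_ptep_get(src, addr, src_pte);
                if (huge_pte_none(entry))
                        continue;

                if (is_cow_mapping(src_vma->vm_flags)) {
                        /* parent and child both lose write access;
                         * later write faults do copy-on-write */
                        huge_ptep_set_wrprotect(src, addr, src_pte);
                        entry = huge_pte_wrprotect(entry);
                }
                set_huge_pte_at(dst, addr, dst_pte, entry, sz);
        }
        return 0;
}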
6399 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr, in hugetlb_pte_stable() argument
6406 same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte); in hugetlb_pte_stable()
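hugetlb_pte_stable() is a race check: after dropping and retaking the page-table lock, the fault path re-reads the PTE and proceeds only if it still matches the value it saw earlier. A sketch that follows the matched line closely:

/* Sketch of the stability check above: re-read the PTE under the
 * page-table lock and verify it did not change under us. */
static bool hugetlb_pte_stable_sketch(struct hstate *h, struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep, pte_t old_pte)
{
        spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
        bool same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);

        spin_unlock(ptl);
        return same;
}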
7258 long chg = -1, add = -1, spool_resv, gbl_resv; in hugetlb_reserve_pages() local
7354 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
7356 if (unlikely(add < 0)) { in hugetlb_reserve_pages()
7359 } else if (unlikely(chg > add)) { in hugetlb_reserve_pages()
7375 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
7378 chg - add); in hugetlb_reserve_pages()
7412 if (chg >= 0 && add < 0) in hugetlb_reserve_pages()
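In hugetlb_reserve_pages(), `chg` is the predicted size of the reservation (from region_chg()) and `add` is what region_add() actually covered. If a racing task reserved part of the range in between, add ends up smaller than chg and the surplus must be uncharged and given back, which is what the `chg > add` branch above does. A sketch of that accounting, with the subpool charging reduced to comments:

/* Sketch of the chg/add surplus handling above. */
static long reserve_range_sketch(struct hstate *h, struct resv_map *resv_map,
                                 long from, long to, struct hugetlb_cgroup *h_cg)
{
        long regions_needed = 0;
        long chg, add;

        chg = region_chg(resv_map, from, to, &regions_needed);
        if (chg < 0)
                return chg;

        /* ... charge the subpool and cgroup for 'chg' pages here ... */

        add = region_add(resv_map, from, to, regions_needed, h, h_cg);
        if (add < 0)
                return add;
        if (chg > add) {
                long excess = chg - add;        /* pages someone else already reserved */

                hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
                                excess * pages_per_huge_page(h), h_cg);
                /* ... and return 'excess' pages to the subpool/global pool ... */
        }
        return add;
}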
7465 unsigned long addr, pgoff_t idx) in page_table_shareable() argument
7482 if (pmd_index(addr) != pmd_index(saddr) || in page_table_shareable()
7491 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) in want_pmd_share() argument
7493 unsigned long start = addr & PUD_MASK; in want_pmd_share()
7549 unsigned long addr, pud_t *pud) in huge_pmd_share() argument
7552 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
7564 saddr = page_table_shareable(svma, vma, addr, idx); in huge_pmd_share()
7588 pte = (pte_t *)pmd_alloc(mm, pud, addr); in huge_pmd_share()
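These hits are the PMD-sharing path: want_pmd_share() checks that the vma can share page tables for the PUD-aligned window around addr, and huge_pmd_share() scans other mappings of the same file offset (page_table_shareable() compares pmd_index and vm_flags of each candidate) for a PMD page to reuse, falling back to pmd_alloc(). A compressed sketch of the eligibility check only; the vma-lock, userfaultfd and config conditions of the real function are left out.

/* Compressed sketch of the sharing eligibility check above: the vma
 * must be a shared file mapping covering a whole, aligned PUD-sized
 * piece of the file. */
static bool want_pmd_share_sketch(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long start = addr & PUD_MASK;
        unsigned long end = start + PUD_SIZE;

        if (!(vma->vm_flags & VM_MAYSHARE))
                return false;           /* only shared file mappings can share PMDs */
        return vma->vm_start <= start && end <= vma->vm_end;
}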
7602 unsigned long addr, pte_t *ptep) in huge_pmd_unshare() argument
7605 pgd_t *pgd = pgd_offset(mm, addr); in huge_pmd_unshare()
7606 p4d_t *p4d = p4d_offset(pgd, addr); in huge_pmd_unshare()
7607 pud_t *pud = pud_offset(p4d, addr); in huge_pmd_unshare()
7625 unsigned long addr, pud_t *pud) in huge_pmd_share() argument
7631 unsigned long addr, pte_t *ptep) in huge_pmd_unshare() argument
7641 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) in want_pmd_share() argument
7649 unsigned long addr, unsigned long sz) in huge_pte_alloc() argument
7656 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
7657 p4d = p4d_alloc(mm, pgd, addr); in huge_pte_alloc()
7660 pud = pud_alloc(mm, p4d, addr); in huge_pte_alloc()
7666 if (want_pmd_share(vma, addr) && pud_none(*pud)) in huge_pte_alloc()
7667 pte = huge_pmd_share(mm, vma, addr, pud); in huge_pte_alloc()
7669 pte = (pte_t *)pmd_alloc(mm, pud, addr); in huge_pte_alloc()
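huge_pte_alloc() is a top-down walk that creates any missing levels and, when sharing is possible and the PUD slot is still empty, tries to reuse a peer's PMD page before allocating one. A sketch of the allocation side, following the matched lines; the PUD-sized (gigantic page) case is omitted.

/* Allocation-side sketch of the walk above. */
static pte_t *huge_pte_alloc_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
                                    unsigned long addr, unsigned long sz)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;
        pte_t *pte = NULL;

        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;

        if (want_pmd_share(vma, addr) && pud_none(*pud))
                pte = huge_pmd_share(mm, vma, addr, pud);       /* reuse a peer's PMD */
        if (!pte)
                pte = (pte_t *)pmd_alloc(mm, pud, addr);

        return pte;
}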
7692 unsigned long addr, unsigned long sz) in huge_pte_offset() argument
7699 pgd = pgd_offset(mm, addr); in huge_pte_offset()
7702 p4d = p4d_offset(pgd, addr); in huge_pte_offset()
7706 pud = pud_offset(p4d, addr); in huge_pte_offset()
7714 pmd = pmd_offset(pud, addr); in huge_pte_offset()
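huge_pte_offset() is the lookup-only counterpart: the same pgd -> p4d -> pud -> pmd walk, returning NULL as soon as a level is missing and never allocating. A lookup-side sketch; the gigantic (PUD-level) case that returns the pud itself is omitted.

/* Lookup-side sketch of the walk above. */
static pte_t *huge_pte_offset_sketch(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return NULL;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);    /* this PMD entry maps the huge page */
        return (pte_t *)pmd;
}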