Lines in mm/hugetlb.c matching +refs:add +refs:addr +refs:attrs

576 long add = 0; in add_reservation_in_range() local
612 add += hugetlb_resv_map_add(resv, iter->link.prev, in add_reservation_in_range()
626 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset, in add_reservation_in_range()
629 return add; in add_reservation_in_range()
709 long add = 0, actual_regions_needed = 0; in region_add() local
744 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
749 return add; in region_add()
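
The matches at 576-749 are the reserve-map accounting: add_reservation_in_range() walks the map and returns the number of huge pages actually added, and region_add() hands that count back so callers can compare it with the earlier region_chg() estimate. Below is a minimal userspace sketch of the idea, assuming a sorted list of disjoint [from, to) regions; it skips the kernel's coalescing, cgroup accounting, and locking, and every name is hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct region {
        long from, to;                  /* [from, to), in huge-page units */
        struct region *next;
};

/* Insert [f, t) and return how many pages were newly covered. */
static long region_add_sketch(struct region **head, long f, long t)
{
        struct region **pp = head, *rg;
        long add = 0, cur = f;

        while (*pp && (*pp)->to <= f)   /* skip regions entirely before f */
                pp = &(*pp)->next;

        while (cur < t) {
                rg = *pp;
                if (rg && rg->from <= cur) {    /* already covered here */
                        cur = rg->to < t ? rg->to : t;
                        pp = &rg->next;
                        continue;
                }
                long end = (rg && rg->from < t) ? rg->from : t;
                struct region *n = malloc(sizeof(*n));
                n->from = cur;
                n->to = end;
                n->next = rg;
                *pp = n;
                add += end - cur;       /* only the uncovered gap counts */
                cur = end;
                pp = &n->next;
        }
        return add;
}

int main(void)
{
        struct region *map = NULL;

        printf("%ld\n", region_add_sketch(&map, 0, 10));        /* 10 */
        printf("%ld\n", region_add_sketch(&map, 5, 15));        /* 5 */
        return 0;
}
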
2595 struct vm_area_struct *vma, unsigned long addr) in alloc_buddy_hugetlb_folio_with_mpol() argument
2603 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2817 struct vm_area_struct *vma, unsigned long addr, in __vma_reservation_common() argument
2829 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2897 struct vm_area_struct *vma, unsigned long addr) in vma_needs_reservation() argument
2899 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2903 struct vm_area_struct *vma, unsigned long addr) in vma_commit_reservation() argument
2905 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2909 struct vm_area_struct *vma, unsigned long addr) in vma_end_reservation() argument
2911 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2915 struct vm_area_struct *vma, unsigned long addr) in vma_add_reservation() argument
2917 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2921 struct vm_area_struct *vma, unsigned long addr) in vma_del_reservation() argument
2923 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); in vma_del_reservation()
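
Lines 2817-2923 show a single worker, __vma_reservation_common(), which computes the map offset once via vma_hugecache_offset() and switches on a mode argument; each public helper is a thin wrapper naming one mode. A compilable sketch of that pattern, with a bare counter standing in for the reserve map:

#include <stdio.h>

enum resv_mode { NEEDS_RESV, COMMIT_RESV, END_RESV };

static long reserved;                   /* stand-in for the reserve map */

static long reservation_common(enum resv_mode mode)
{
        switch (mode) {
        case NEEDS_RESV:                /* 1: caller must charge a page */
                return reserved == 0;
        case COMMIT_RESV:
                return ++reserved;
        case END_RESV:                  /* abort a speculative charge */
                if (reserved)
                        reserved--;
                return 0;
        }
        return -1;
}

static long needs_resv(void)  { return reservation_common(NEEDS_RESV); }
static long commit_resv(void) { return reservation_common(COMMIT_RESV); }
static void end_resv(void)    { (void)reservation_common(END_RESV); }

int main(void)
{
        printf("needs=%ld\n", needs_resv());    /* 1 */
        commit_resv();
        printf("needs=%ld\n", needs_resv());    /* 0 */
        end_resv();
        return 0;
}
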
3147 unsigned long addr, int avoid_reserve) in alloc_hugetlb_folio() argument
3173 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_hugetlb_folio()
3225 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_hugetlb_folio()
3228 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); in alloc_hugetlb_folio()
3254 map_commit = vma_commit_reservation(h, vma, addr); in alloc_hugetlb_folio()
3290 vma_end_reservation(h, vma, addr); in alloc_hugetlb_folio()
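
The alloc_hugetlb_folio() matches at 3147-3290 trace the reservation protocol around allocation: query the map, try the free pool, fall back to the buddy allocator, commit on success, and call vma_end_reservation() on the failure path to unwind the speculative charge. A control-flow sketch; every helper below is a simplified stand-in, not the kernel function:

#include <stdio.h>
#include <stdlib.h>

static long query_resv(void)     { return 1; }          /* 1 = must charge */
static void commit_resv(void)    { }
static void end_resv(void)       { }
static void *dequeue_folio(void) { return NULL; }       /* pool empty */
static void *buddy_alloc(void)   { return malloc(1); }

static void *alloc_folio_sketch(void)
{
        long gbl_chg = query_resv();
        void *folio = dequeue_folio();          /* reserved pool first */

        if (!folio)
                folio = buddy_alloc();          /* then the allocator */
        if (!folio) {
                if (gbl_chg)
                        end_resv();             /* undo the charge */
                return NULL;
        }
        commit_resv();                          /* charge becomes final */
        return folio;
}

int main(void)
{
        void *folio = alloc_folio_sketch();

        printf("folio: %s\n", folio ? "ok" : "failed");
        free(folio);
        return 0;
}
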
4272 .attrs = hstate_attrs,
4282 .attrs = hstate_demote_attrs,
4345 .attrs = per_node_hstate_attrs,
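
The three .attrs matches at 4272-4345 initialize sysfs attribute groups for the per-hstate, demote, and per-node hugetlb directories. The pattern is a NULL-terminated array of attribute pointers bundled into a group; a userspace analogue follows (the types are stand-ins, not <linux/sysfs.h>):

#include <stdio.h>

struct attribute { const char *name; };

struct attribute_group {
        const char *name;
        struct attribute **attrs;       /* NULL-terminated */
};

static struct attribute nr_hugepages   = { "nr_hugepages" };
static struct attribute free_hugepages = { "free_hugepages" };

static struct attribute *hstate_attrs_sketch[] = {
        &nr_hugepages,
        &free_hugepages,
        NULL,                           /* terminator, as sysfs expects */
};

static const struct attribute_group hstate_group_sketch = {
        .attrs = hstate_attrs_sketch,
};

int main(void)
{
        for (struct attribute **a = hstate_group_sketch.attrs; *a; a++)
                printf("%s\n", (*a)->name);
        return 0;
}
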
5168 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) in hugetlb_vm_op_split() argument
5170 if (addr & ~(huge_page_mask(hstate_vma(vma)))) in hugetlb_vm_op_split()
5178 if (addr & ~PUD_MASK) { in hugetlb_vm_op_split()
5184 unsigned long floor = addr & PUD_MASK; in hugetlb_vm_op_split()
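
hugetlb_vm_op_split() (5168-5184) is pure mask arithmetic: a VMA may be split only at a huge-page-aligned address, and a PUD-unaligned split must first unshare any PMDs shared across the enclosing PUD range (hence the floor computation). A sketch of the alignment math, assuming x86-64 2 MiB huge pages and 1 GiB PUDs:

#include <stdio.h>

#define HPAGE_SIZE      (2UL << 20)
#define HPAGE_MASK      (~(HPAGE_SIZE - 1))
#define PUD_SIZE        (1UL << 30)
#define PUD_MASK        (~(PUD_SIZE - 1))

static int can_split_at(unsigned long addr)
{
        return !(addr & ~HPAGE_MASK);   /* huge-page aligned? */
}

int main(void)
{
        unsigned long addr = 0x40200000UL;

        printf("split ok: %d\n", can_split_at(addr));   /* 1: 2 MiB aligned */
        printf("PUD floor: %#lx\n", addr & PUD_MASK);   /* 0x40000000 */
        return 0;
}
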
5282 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, in hugetlb_install_folio() argument
5288 hugetlb_add_new_anon_rmap(new_folio, vma, addr); in hugetlb_install_folio()
5291 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); in hugetlb_install_folio()
5302 unsigned long addr; in copy_hugetlb_page_range() local
5329 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
5331 src_pte = hugetlb_walk(src_vma, addr, sz); in copy_hugetlb_page_range()
5333 addr |= last_addr_mask; in copy_hugetlb_page_range()
5336 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); in copy_hugetlb_page_range()
5351 addr |= last_addr_mask; in copy_hugetlb_page_range()
5368 set_huge_pte_at(dst, addr, dst_pte, entry, sz); in copy_hugetlb_page_range()
5383 set_huge_pte_at(src, addr, src_pte, entry, sz); in copy_hugetlb_page_range()
5387 set_huge_pte_at(dst, addr, dst_pte, entry, sz); in copy_hugetlb_page_range()
5393 set_huge_pte_at(dst, addr, dst_pte, in copy_hugetlb_page_range()
5419 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); in copy_hugetlb_page_range()
5427 addr, dst_vma); in copy_hugetlb_page_range()
5440 restore_reserve_on_error(h, dst_vma, addr, in copy_hugetlb_page_range()
5446 hugetlb_install_folio(dst_vma, dst_pte, addr, in copy_hugetlb_page_range()
5461 huge_ptep_set_wrprotect(src, addr, src_pte); in copy_hugetlb_page_range()
5468 set_huge_pte_at(dst, addr, dst_pte, entry, sz); in copy_hugetlb_page_range()
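
copy_hugetlb_page_range() (5302-5468) strides addr by the huge page size; when hugetlb_walk() finds no source page table, or src and dst already share one, `addr |= last_addr_mask` snaps addr to the last huge page covered by that table so the next `addr += sz` skips the whole span. The remaining matches are the copy bookkeeping: migration/swap entries are re-set, private mappings are write-protected for CoW, and a pinned source folio forces allocation and installation of a fresh copy via alloc_hugetlb_folio(). A numeric sketch of the skip trick, assuming 2 MiB pages under 1 GiB PUDs:

#include <stdio.h>

#define SZ              (2UL << 20)             /* huge page step */
#define LAST_ADDR_MASK  ((1UL << 30) - SZ)      /* PUD span minus one step */

int main(void)
{
        unsigned long addr = 0x40000000UL;

        addr |= LAST_ADDR_MASK;         /* jump to the table's last page */
        addr += SZ;                     /* the loop increment */
        printf("%#lx\n", addr);         /* 0x80000000: next PUD region */
        return 0;
}
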
6068 unsigned long addr, in hugetlb_handle_userfault() argument
6075 .real_address = addr, in hugetlb_handle_userfault()
7005 long chg = -1, add = -1; in hugetlb_reserve_pages() local
7101 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
7103 if (unlikely(add < 0)) { in hugetlb_reserve_pages()
7106 } else if (unlikely(chg > add)) { in hugetlb_reserve_pages()
7122 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
7125 chg - add); in hugetlb_reserve_pages()
7151 if (chg >= 0 && add < 0) in hugetlb_reserve_pages()
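
In hugetlb_reserve_pages() (7005-7151), chg is the up-front estimate from region_chg() and add is what region_add() actually inserted; if a racing reservation shrank the need (chg > add), the surplus is uncharged from the hugetlb cgroup and returned to the subpool, and `chg >= 0 && add < 0` on the error path tells the cleanup code how far setup got. The reconciliation arithmetic, sketched with an illustrative 2 MiB page size:

#include <stdio.h>

int main(void)
{
        long chg = 8, add = 5;                  /* estimated vs. actual */
        long pages_per_huge_page = 512;         /* 2 MiB / 4 KiB */

        if (add < 0)
                puts("region_add failed: roll back the whole charge");
        else if (chg > add)
                printf("uncharge %ld small pages\n",
                       (chg - add) * pages_per_huge_page);      /* 1536 */
        return 0;
}
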
7204 unsigned long addr, pgoff_t idx) in page_table_shareable() argument
7221 if (pmd_index(addr) != pmd_index(saddr) || in page_table_shareable()
7230 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) in want_pmd_share() argument
7232 unsigned long start = addr & PUD_MASK; in want_pmd_share()
7288 unsigned long addr, pud_t *pud) in huge_pmd_share() argument
7291 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
7303 saddr = page_table_shareable(svma, vma, addr, idx); in huge_pmd_share()
7327 pte = (pte_t *)pmd_alloc(mm, pud, addr); in huge_pmd_share()
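
The PMD-sharing matches (7204-7327) implement the shareability test: two mappings of the same file can share a PMD page only if the candidate address maps the same file page at the same pmd_index in both, which in practice requires suitably aligned placement. huge_pmd_share() reuses a table found through another VMA and otherwise falls back to pmd_alloc() (7327). A sketch of the index check, assuming 4 KiB pages and 2 MiB PMDs; the VMA start addresses are hypothetical:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21
#define pmd_index(a)    (((a) >> PMD_SHIFT) & 0x1ff)

int main(void)
{
        /* two mappings of the same file at offset 0, 1 GiB apart */
        unsigned long vm_start_a = 0x7f0000000000UL;
        unsigned long vm_start_b = 0x7f0040000000UL;

        unsigned long addr  = vm_start_a + 0x400000;    /* page in a */
        unsigned long idx   = (addr - vm_start_a) >> PAGE_SHIFT;
        unsigned long saddr = vm_start_b + (idx << PAGE_SHIFT);

        printf("shareable: %d\n", pmd_index(addr) == pmd_index(saddr)); /* 1 */
        return 0;
}
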
7345 unsigned long addr, pte_t *ptep) in huge_pmd_unshare() argument
7347 pgd_t *pgd = pgd_offset(mm, addr); in huge_pmd_unshare()
7348 p4d_t *p4d = p4d_offset(pgd, addr); in huge_pmd_unshare()
7349 pud_t *pud = pud_offset(p4d, addr); in huge_pmd_unshare()
7366 unsigned long addr, pud_t *pud) in huge_pmd_share() argument
7372 unsigned long addr, pte_t *ptep) in huge_pmd_unshare() argument
7382 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) in want_pmd_share() argument
7390 unsigned long addr, unsigned long sz) in huge_pte_alloc() argument
7397 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
7398 p4d = p4d_alloc(mm, pgd, addr); in huge_pte_alloc()
7401 pud = pud_alloc(mm, p4d, addr); in huge_pte_alloc()
7407 if (want_pmd_share(vma, addr) && pud_none(*pud)) in huge_pte_alloc()
7408 pte = huge_pmd_share(mm, vma, addr, pud); in huge_pte_alloc()
7410 pte = (pte_t *)pmd_alloc(mm, pud, addr); in huge_pte_alloc()
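
huge_pte_alloc() (7390-7410) descends pgd -> p4d -> pud, allocating missing levels, and stops there: for PMD-sized pages it either shares an existing PMD table via huge_pmd_share() or allocates one with pmd_alloc(), returning the slot that will hold the huge PTE, because a huge page is itself the leaf entry. A userspace sketch with two toy levels standing in for the chain:

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512

struct table { void *slot[ENTRIES]; };

static void *huge_pte_alloc_sketch(struct table *pgd, int top, int mid)
{
        if (!pgd->slot[top])            /* p4d/pud_alloc analogue */
                pgd->slot[top] = calloc(1, sizeof(struct table));
        if (!pgd->slot[top])
                return NULL;            /* allocation failed */

        struct table *pud = pgd->slot[top];
        return &pud->slot[mid];         /* the PMD slot is the huge PTE */
}

int main(void)
{
        static struct table pgd;        /* zeroed, like a fresh mm */
        void *pte = huge_pte_alloc_sketch(&pgd, 1, 2);

        printf("huge pte slot: %p\n", pte);
        return 0;
}
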
7433 unsigned long addr, unsigned long sz) in huge_pte_offset() argument
7440 pgd = pgd_offset(mm, addr); in huge_pte_offset()
7443 p4d = p4d_offset(pgd, addr); in huge_pte_offset()
7447 pud = pud_offset(p4d, addr); in huge_pte_offset()
7455 pmd = pmd_offset(pud, addr); in huge_pte_offset()
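
huge_pte_offset() (7433-7455) is the read-only twin of huge_pte_alloc(): the same descent, returning NULL as soon as any level is empty instead of allocating. Each level consumes 9 bits of the virtual address; a sketch of that arithmetic with x86-64 4-level constants (where the p4d level is folded):

#include <stdio.h>

#define PAGE_SHIFT      12
#define LEVEL_BITS      9
#define IDX(a, lvl)     (((a) >> (PAGE_SHIFT + LEVEL_BITS * (lvl))) & 0x1ff)

int main(void)
{
        unsigned long addr = 0x7f1234600000UL;

        /* lvl 1 = pmd, 2 = pud, 3 = pgd on 4-level x86-64 */
        printf("pgd=%lu pud=%lu pmd=%lu\n",
               IDX(addr, 3), IDX(addr, 2), IDX(addr, 1));
        return 0;
}
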