Lines matching full:range
30 struct hmm_range *range; member
41 struct hmm_range *range, unsigned long cpu_flags) in hmm_pfns_fill() argument
43 unsigned long i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_fill()
46 range->hmm_pfns[i] = cpu_flags; in hmm_pfns_fill()
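The (addr - range->start) >> PAGE_SHIFT arithmetic in hmm_pfns_fill() recurs in every walk callback listed below. A minimal sketch of that indexing, assuming these matches come from the kernel's mm/hmm.c and the struct hmm_range layout in <linux/hmm.h>; the helper name is invented for illustration, the real code open-codes the shift:

    #include <linux/hmm.h>

    /* Illustration only: map an address inside [range->start, range->end)
     * to its slot in range->hmm_pfns[].  One entry per PAGE_SIZE page. */
    static unsigned long hmm_pfn_index(const struct hmm_range *range,
                                       unsigned long addr)
    {
            return (addr - range->start) >> PAGE_SHIFT;
    }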
51 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
52 * @addr: range virtual start address (inclusive)
53 * @end: range virtual end address (exclusive)
59 * or whenever there is no page directory covering the virtual address range.
88 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault() local
92 * consider the default flags requested for the range. The API can in hmm_pte_need_fault()
96 * fault a range with specific flags. For the latter one it is a in hmm_pte_need_fault()
100 pfn_req_flags &= range->pfn_flags_mask; in hmm_pte_need_fault()
101 pfn_req_flags |= range->default_flags; in hmm_pte_need_fault()
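The two lines above apply the range-wide fault policy: each per-entry request is masked with range->pfn_flags_mask and then OR'd with range->default_flags. A sketch of the configuration described in Documentation/mm/hmm.rst, assuming the HMM_PFN_REQ_* input flags from <linux/hmm.h>; write_index is a placeholder the caller would compute as (write_addr - range->start) >> PAGE_SHIFT:

    #include <linux/hmm.h>

    /* Sketch: fault the whole range with at least read permission, and
     * additionally require write permission for the single page at
     * write_index.  Setting pfn_flags_mask to 0 instead would ignore the
     * per-entry input entirely (the "prefault everything" case). */
    static void driver_set_fault_policy(struct hmm_range *range,
                                        unsigned long write_index)
    {
            range->default_flags = HMM_PFN_REQ_FAULT;
            range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
            range->hmm_pfns[write_index] = HMM_PFN_REQ_WRITE;
    }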
123 struct hmm_range *range = hmm_vma_walk->range; in hmm_range_need_fault() local
132 if (!((range->default_flags | range->pfn_flags_mask) & in hmm_range_need_fault()
149 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole() local
154 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
156 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_hole()
162 return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR); in hmm_vma_walk_hole()
166 return hmm_pfns_fill(addr, end, range, 0); in hmm_vma_walk_hole()
174 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range, in pmd_to_hmm_pfn_flags() argument
190 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pmd() local
196 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); in hmm_vma_handle_pmd()
213 static inline bool hmm_is_device_private_entry(struct hmm_range *range, in hmm_is_device_private_entry() argument
218 range->dev_private_owner; in hmm_is_device_private_entry()
221 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range, in pte_to_hmm_pfn_flags() argument
234 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pte() local
256 if (hmm_is_device_private_entry(range, entry)) { in hmm_vma_handle_pte()
287 cpu_flags = pte_to_hmm_pfn_flags(range, pte); in hmm_vma_handle_pte()
321 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pmd() local
323 &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT]; in hmm_vma_walk_pmd()
340 return hmm_pfns_fill(start, end, range, 0); in hmm_vma_walk_pmd()
346 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_pmd()
376 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_pmd()
395 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range, in pud_to_hmm_pfn_flags() argument
409 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pud() local
438 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pud()
440 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_pud()
442 cpu_flags = pud_to_hmm_pfn_flags(range, pud); in hmm_vma_walk_pud()
474 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hugetlb_entry() local
485 i = (start - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hugetlb_entry()
486 pfn_req_flags = range->hmm_pfns[i]; in hmm_vma_walk_hugetlb_entry()
487 cpu_flags = pte_to_hmm_pfn_flags(range, entry) | in hmm_vma_walk_hugetlb_entry()
498 range->hmm_pfns[i] = pfn | cpu_flags; in hmm_vma_walk_hugetlb_entry()
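On success the walk handlers above store a raw PFN OR'd with HMM_PFN_* output bits into range->hmm_pfns[]. A sketch of decoding one entry after hmm_range_fault() returns, assuming the HMM_PFN_VALID/HMM_PFN_WRITE bits and hmm_pfn_to_page() from <linux/hmm.h>; the wrapper itself is illustrative:

    #include <linux/hmm.h>

    /* Sketch: turn one output entry into a struct page plus writability.
     * Entries left at 0 (not present) or HMM_PFN_ERROR have no valid bit. */
    static struct page *driver_decode_entry(unsigned long hmm_pfn,
                                            bool *writable)
    {
            if (!(hmm_pfn & HMM_PFN_VALID))
                    return NULL;
            *writable = !!(hmm_pfn & HMM_PFN_WRITE);
            return hmm_pfn_to_page(hmm_pfn);  /* strips the flag bits */
    }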
511 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_test() local
526 * If a fault is requested for an unsupported range then it is a hard failure. in hmm_vma_walk_test()
530 range->hmm_pfns + in hmm_vma_walk_test()
531 ((start - range->start) >> PAGE_SHIFT), in hmm_vma_walk_test()
535 hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_test()
550 * hmm_range_fault - try to fault some address in a virtual address range
551 * @range: argument structure
558 -EPERM: Invalid permission (e.g., asking for write and range is read only). in hmm_range_fault()
560 -EBUSY: The range has been invalidated and the caller needs to wait for the invalidation to finish. in hmm_range_fault()
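The argument structure mentioned above bundles the interval notifier, the address range, the output array, and the fault policy. A sketch of initializing it before the call, assuming the struct hmm_range fields from <linux/hmm.h> in recent kernels; the helper and its callers are placeholders:

    #include <linux/hmm.h>
    #include <linux/mmu_notifier.h>
    #include <linux/string.h>

    /* Sketch: describe [start, end) for hmm_range_fault().  'notifier' must
     * already be registered via mmu_interval_notifier_insert() and 'pfns'
     * must hold (end - start) >> PAGE_SHIFT entries.  notifier_seq is set
     * per attempt in the retry loop sketched further below. */
    static void driver_init_range(struct hmm_range *range,
                                  struct mmu_interval_notifier *notifier,
                                  unsigned long start, unsigned long end,
                                  unsigned long *pfns)
    {
            memset(range, 0, sizeof(*range));
            range->notifier = notifier;
            range->start = start;
            range->end = end;
            range->hmm_pfns = pfns;
            range->default_flags = HMM_PFN_REQ_FAULT;  /* fault everything readable */
            range->pfn_flags_mask = 0;
            range->dev_private_owner = NULL;  /* set to claim own device-private pages */
    }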
568 int hmm_range_fault(struct hmm_range *range) in hmm_range_fault() argument
571 .range = range, in hmm_range_fault()
572 .last = range->start, in hmm_range_fault()
574 struct mm_struct *mm = range->notifier->mm; in hmm_range_fault()
580 /* If range is no longer valid force retry. */ in hmm_range_fault()
581 if (mmu_interval_check_retry(range->notifier, in hmm_range_fault()
582 range->notifier_seq)) in hmm_range_fault()
584 ret = walk_page_range(mm, hmm_vma_walk.last, range->end, in hmm_range_fault()
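The mmu_interval_check_retry() test above is one half of the usage contract: the caller samples the notifier sequence before the call and re-checks it under its own lock before consuming the result, retrying on -EBUSY. A sketch of that loop following the pattern in Documentation/mm/hmm.rst; driver_init_range() is the placeholder from the previous sketch and pt_lock stands in for the driver's own page-table lock:

    #include <linux/hmm.h>
    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>
    #include <linux/mutex.h>
    #include <linux/sched/mm.h>

    /* Sketch: fault/snapshot [start, end) and hand a stable pfns[] array to
     * the device page-table update, which must run under pt_lock. */
    static int driver_populate_range(struct mmu_interval_notifier *notifier,
                                     struct mutex *pt_lock,
                                     unsigned long start, unsigned long end,
                                     unsigned long *pfns)
    {
            struct mm_struct *mm = notifier->mm;
            struct hmm_range range;
            int ret;

            driver_init_range(&range, notifier, start, end, pfns);

            if (!mmget_not_zero(mm))
                    return -EFAULT;
    again:
            range.notifier_seq = mmu_interval_read_begin(notifier);
            mmap_read_lock(mm);
            ret = hmm_range_fault(&range);
            mmap_read_unlock(mm);
            if (ret) {
                    if (ret == -EBUSY)
                            goto again;   /* range was invalidated; retry */
                    goto out;
            }

            mutex_lock(pt_lock);
            if (mmu_interval_read_retry(notifier, range.notifier_seq)) {
                    mutex_unlock(pt_lock);
                    goto again;
            }
            /* pfns[] is stable against invalidation here: program the device
             * page table under pt_lock. */
            mutex_unlock(pt_lock);
    out:
            mmput(mm);
            return ret;
    }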