Lines Matching full:pages
(identifier search results: each entry shows the source line number, the matching line fragment, and the symbol or function it appears in)
24 struct iopt_pages *pages; member
43 if (!iter->area->pages) { in iopt_area_contig_init()
66 !iter->area->pages) { in iopt_area_contig_next()
188 * The area takes a slice of the pages from start_bytes to start_byte + length
191 struct iopt_pages *pages, unsigned long iova, in iopt_insert_area() argument
197 if ((iommu_prot & IOMMU_WRITE) && !pages->writable) in iopt_insert_area()
213 if (WARN_ON(area->pages_node.last >= pages->npages)) in iopt_insert_area()
217 * The area is inserted with a NULL pages indicating it is not fully in iopt_insert_area()
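The iopt_insert_area() fragments above (the slice comment at line 188, the IOMMU_WRITE/writable check at line 197, and the pages_node.last >= pages->npages warning at line 213) suggest the basic invariant: an area is a byte slice of a pinned pages object, and it must not claim write access to read-only pages or extend past the last backing page. A minimal userspace sketch of that validation, using hypothetical stand-in types rather than the kernel's structures:

/*
 * Minimal sketch of the slice-validation idea suggested by the fragments
 * above. The struct and function names are simplified stand-ins, not the
 * kernel's real types.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL

struct sketch_pages {
	unsigned long npages;   /* number of pinned pages backing the object */
	bool writable;          /* false if the user memory is read-only */
};

/*
 * An "area" maps the byte range [start_byte, start_byte + length) of the
 * pages object. Reject writes into read-only pages and slices that run
 * past the last backing page.
 */
static int sketch_check_slice(const struct sketch_pages *pages,
			      unsigned long start_byte, unsigned long length,
			      bool want_write)
{
	unsigned long last_index;

	if (!length)
		return -3;                     /* empty slices are rejected */

	if (want_write && !pages->writable)
		return -1;                     /* EPERM-style failure */

	/* index of the page containing the final byte of the slice */
	last_index = (start_byte + length - 1) / SKETCH_PAGE_SIZE;
	if (last_index >= pages->npages)
		return -2;                     /* slice overruns the pages */

	return 0;
}

int main(void)
{
	struct sketch_pages p = { .npages = 4, .writable = false };

	printf("read slice ok:   %d\n",
	       sketch_check_slice(&p, 0, 3 * SKETCH_PAGE_SIZE, false));
	printf("write rejected:  %d\n",
	       sketch_check_slice(&p, 0, SKETCH_PAGE_SIZE, true));
	printf("overrun caught:  %d\n",
	       sketch_check_slice(&p, SKETCH_PAGE_SIZE, 4 * SKETCH_PAGE_SIZE, false));
	return 0;
}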
264 (uintptr_t)elm->pages->uptr + elm->start_byte, length); in iopt_alloc_area_pages()
279 * Areas are created with a NULL pages so that the IOVA space is in iopt_alloc_area_pages()
284 rc = iopt_insert_area(iopt, elm->area, elm->pages, iova, in iopt_alloc_area_pages()
299 WARN_ON(area->pages); in iopt_abort_area()
316 if (elm->pages) in iopt_free_pages_list()
317 iopt_put_pages(elm->pages); in iopt_free_pages_list()
330 rc = iopt_area_fill_domains(elm->area, elm->pages); in iopt_fill_domains_pages()
340 iopt_area_unfill_domains(undo_elm->area, undo_elm->pages); in iopt_fill_domains_pages()
365 * area->pages must be set inside the domains_rwsem to ensure in iopt_map_pages()
369 elm->area->pages = elm->pages; in iopt_map_pages()
370 elm->pages = NULL; in iopt_map_pages()
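Lines 217, 279-284 and 365-370 hint at a two-phase pattern: areas are first inserted with a NULL pages pointer, which reserves the IOVA range without making it usable (the contiguity iterators at lines 43 and 66 skip such areas), and only after everything has succeeded is area->pages assigned inside the domains_rwsem, transferring ownership from the temporary list element; the WARN_ON at line 299 implies aborted areas must still have NULL pages. A hedged sketch of that reserve-then-commit shape, with a pthread rwlock and simplified types standing in for the kernel's:

/*
 * Sketch of a reserve-then-commit flow: a placeholder is linked in first
 * (payload == NULL reserves the range), and the payload pointer is only
 * published under the write lock once every step has succeeded.
 * All names here are illustrative, not the kernel's.
 */
#include <pthread.h>
#include <stdio.h>

struct sketch_area {
	void *payload;             /* NULL while the area is only a reservation */
};

struct sketch_table {
	pthread_rwlock_t rwsem;    /* stands in for domains_rwsem */
	struct sketch_area area;   /* a single area keeps the sketch short */
};

/* Phase 1: reserve - the area exists but cannot be used yet. */
static void sketch_reserve(struct sketch_table *tbl)
{
	pthread_rwlock_wrlock(&tbl->rwsem);
	tbl->area.payload = NULL;
	pthread_rwlock_unlock(&tbl->rwsem);
}

/* Phase 2: commit - publish the payload only after setup succeeded. */
static void sketch_commit(struct sketch_table *tbl, void *payload)
{
	pthread_rwlock_wrlock(&tbl->rwsem);
	tbl->area.payload = payload;   /* readers holding the lock now see it */
	pthread_rwlock_unlock(&tbl->rwsem);
}

/* Readers skip areas that are still reservations (payload == NULL). */
static int sketch_is_usable(struct sketch_table *tbl)
{
	int usable;

	pthread_rwlock_rdlock(&tbl->rwsem);
	usable = tbl->area.payload != NULL;
	pthread_rwlock_unlock(&tbl->rwsem);
	return usable;
}

int main(void)
{
	struct sketch_table tbl;
	int payload = 42;

	pthread_rwlock_init(&tbl.rwsem, NULL);
	sketch_reserve(&tbl);
	printf("usable after reserve: %d\n", sketch_is_usable(&tbl));
	sketch_commit(&tbl, &payload);
	printf("usable after commit:  %d\n", sketch_is_usable(&tbl));
	pthread_rwlock_destroy(&tbl.rwsem);
	return 0;
}

(Compile with -pthread. The point is only the shape: reservation first, payload published last, both under the same lock the readers take.)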
391 * page tables this will pin the pages and load them into the domain at iova.
407 elm.pages = iopt_alloc_pages(uptr, length, iommu_prot & IOMMU_WRITE); in iopt_map_user_pages()
408 if (IS_ERR(elm.pages)) in iopt_map_user_pages()
409 return PTR_ERR(elm.pages); in iopt_map_user_pages()
411 elm.pages->account_mode == IOPT_PAGES_ACCOUNT_USER) in iopt_map_user_pages()
412 elm.pages->account_mode = IOPT_PAGES_ACCOUNT_MM; in iopt_map_user_pages()
413 elm.start_byte = uptr - elm.pages->uptr; in iopt_map_user_pages()
421 if (elm.pages) in iopt_map_user_pages()
422 iopt_put_pages(elm.pages); in iopt_map_user_pages()
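Line 413 computes the area's starting byte as the distance between the caller's pointer and the pages object's base pointer, and lines 421-422 drop the pages reference if a later step fails. Assuming the pages object records a page-aligned base covering the whole user range (which the subtraction implies but the listing does not show), the offset math looks roughly like the sketch below; the helper names are invented for illustration:

/*
 * Sketch of the start_byte computation implied by line 413: the pages
 * object covers whole pages starting at a page-aligned base, and the
 * area begins start_byte bytes into that object. Hypothetical names;
 * only the arithmetic is the point.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL

struct sketch_pages {
	uintptr_t uptr;          /* page-aligned base of the pinned user range */
	unsigned long npages;    /* pages needed to cover [uptr, uptr + span) */
};

static struct sketch_pages sketch_alloc_pages(uintptr_t user_ptr,
					      unsigned long length)
{
	struct sketch_pages p;

	/* round the base down and the end up to page boundaries */
	p.uptr = user_ptr & ~(SKETCH_PAGE_SIZE - 1);
	p.npages = (user_ptr + length - p.uptr + SKETCH_PAGE_SIZE - 1) /
		   SKETCH_PAGE_SIZE;
	return p;
}

int main(void)
{
	uintptr_t user_ptr = 0x10000 + 300;     /* 300 bytes into a page */
	unsigned long length = 10000;
	struct sketch_pages pages = sketch_alloc_pages(user_ptr, length);

	/* mirrors "elm.start_byte = uptr - elm.pages->uptr" from line 413 */
	unsigned long start_byte = user_ptr - pages.uptr;

	printf("base=0x%lx npages=%lu start_byte=%lu\n",
	       (unsigned long)pages.uptr, pages.npages, start_byte);
	return 0;
}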
560 if (!area->pages) in iopt_clear_dirty_data()
623 elm->pages = area->pages; in iopt_get_pages()
625 kref_get(&elm->pages->kref); in iopt_get_pages()
649 * The domains_rwsem must be held in read mode any time any area->pages in iopt_unmap_iova_range()
659 struct iopt_pages *pages; in iopt_unmap_iova_range() local
662 if (!area->pages) { in iopt_unmap_iova_range()
678 * without the pages->mutex. in iopt_unmap_iova_range()
696 pages = area->pages; in iopt_unmap_iova_range()
697 area->pages = NULL; in iopt_unmap_iova_range()
700 iopt_area_unfill_domains(area, pages); in iopt_unmap_iova_range()
702 iopt_put_pages(pages); in iopt_unmap_iova_range()
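Lines 662 and 696-702 show the teardown order in iopt_unmap_iova_range(): areas that were never filled are skipped, otherwise the area's pages pointer is cleared first (reverting the area to a bare reservation), the domains are unfilled using the saved pointer, and only then is the reference dropped. A simplified sketch of that detach / unfill / put ordering, with a C11 atomic counter standing in for the kernel's kref:

/*
 * Sketch of the detach -> unfill -> put ordering shown in the
 * iopt_unmap_iova_range() fragments. Simplified, hypothetical types.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_pages {
	atomic_int refcount;
};

struct sketch_area {
	struct sketch_pages *pages;   /* NULL once the area is detached */
};

static void sketch_put_pages(struct sketch_pages *pages)
{
	/* free on the last put, like iopt_put_pages() dropping the kref */
	if (atomic_fetch_sub(&pages->refcount, 1) == 1) {
		printf("pages freed\n");
		free(pages);
	}
}

static void sketch_unfill_domains(struct sketch_area *area,
				  struct sketch_pages *pages)
{
	/* placeholder for unmapping the PFNs from every attached domain */
	(void)area;
	(void)pages;
	printf("domains unfilled\n");
}

static void sketch_unmap_area(struct sketch_area *area)
{
	struct sketch_pages *pages;

	if (!area->pages)
		return;            /* never filled: nothing to tear down */

	/* 1. detach: the area keeps its IOVA slot but loses its backing */
	pages = area->pages;
	area->pages = NULL;

	/* 2. unfill the domains using the saved pointer */
	sketch_unfill_domains(area, pages);

	/* 3. finally drop the reference the area held */
	sketch_put_pages(pages);
}

int main(void)
{
	struct sketch_pages *pages = malloc(sizeof(*pages));
	struct sketch_area area;

	if (!pages)
		return 1;
	atomic_init(&pages->refcount, 1);
	area.pages = pages;
	sketch_unmap_area(&area);
	return 0;
}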
887 struct iopt_pages *pages = area->pages; in iopt_unfill_domain() local
889 if (!pages) in iopt_unfill_domain()
892 mutex_lock(&pages->mutex); in iopt_unfill_domain()
897 mutex_unlock(&pages->mutex); in iopt_unfill_domain()
906 struct iopt_pages *pages = area->pages; in iopt_unfill_domain() local
908 if (!pages) in iopt_unfill_domain()
911 mutex_lock(&pages->mutex); in iopt_unfill_domain()
912 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_unfill_domain()
915 iopt_area_unfill_domain(area, pages, domain); in iopt_unfill_domain()
916 mutex_unlock(&pages->mutex); in iopt_unfill_domain()
940 struct iopt_pages *pages = area->pages; in iopt_fill_domain() local
942 if (!pages) in iopt_fill_domain()
945 mutex_lock(&pages->mutex); in iopt_fill_domain()
948 mutex_unlock(&pages->mutex); in iopt_fill_domain()
955 &pages->domains_itree); in iopt_fill_domain()
957 mutex_unlock(&pages->mutex); in iopt_fill_domain()
965 struct iopt_pages *pages = area->pages; in iopt_fill_domain() local
969 if (!pages) in iopt_fill_domain()
971 mutex_lock(&pages->mutex); in iopt_fill_domain()
974 &pages->domains_itree); in iopt_fill_domain()
977 iopt_area_unfill_domain(area, pages, domain); in iopt_fill_domain()
978 mutex_unlock(&pages->mutex); in iopt_fill_domain()
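Both the iopt_fill_domains_pages() fragments (lines 330-340, with their undo_elm loop) and the iopt_fill_domain() error path (lines 965-978) follow the same shape: perform a series of fill steps under pages->mutex and, if one of them fails, walk back over the steps already completed and unfill them before returning the error. A generic sketch of that unwind pattern, with invented names and a dummy step that fails partway through:

/*
 * Sketch of the fill-then-unwind-on-failure pattern used by the
 * iopt_fill_* fragments above. The "domains" here are just indices and
 * the fill/unfill steps are stubs; only the control flow is the point.
 */
#include <stdio.h>

#define SKETCH_NR_DOMAINS 4
#define SKETCH_FAILING_DOMAIN 2    /* pretend domain 2 cannot be filled */

static int sketch_fill_one(int domain)
{
	if (domain == SKETCH_FAILING_DOMAIN)
		return -1;
	printf("filled domain %d\n", domain);
	return 0;
}

static void sketch_unfill_one(int domain)
{
	printf("unfilled domain %d\n", domain);
}

/* Fill every domain, or leave no domain filled at all. */
static int sketch_fill_all(void)
{
	int domain, undo, rc;

	for (domain = 0; domain < SKETCH_NR_DOMAINS; domain++) {
		rc = sketch_fill_one(domain);
		if (rc)
			goto out_unfill;
	}
	return 0;

out_unfill:
	/* undo only the domains that were successfully filled */
	for (undo = domain - 1; undo >= 0; undo--)
		sketch_unfill_one(undo);
	return rc;
}

int main(void)
{
	printf("fill_all -> %d\n", sketch_fill_all());
	return 0;
}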
1175 struct iopt_pages *pages = area->pages; in iopt_area_split() local
1185 if (!pages || area->prevent_access) in iopt_area_split()
1202 mutex_lock(&pages->mutex); in iopt_area_split()
1214 * huge pages. in iopt_area_split()
1222 rc = iopt_insert_area(iopt, lhs, area->pages, start_iova, in iopt_area_split()
1229 rc = iopt_insert_area(iopt, rhs, area->pages, new_start, in iopt_area_split()
1240 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_area_split()
1241 interval_tree_insert(&lhs->pages_node, &pages->domains_itree); in iopt_area_split()
1242 interval_tree_insert(&rhs->pages_node, &pages->domains_itree); in iopt_area_split()
1246 lhs->pages = area->pages; in iopt_area_split()
1248 rhs->pages = area->pages; in iopt_area_split()
1249 kref_get(&rhs->pages->kref); in iopt_area_split()
1251 mutex_unlock(&pages->mutex); in iopt_area_split()
1254 * No change to domains or accesses because the pages hasn't been in iopt_area_split()
1264 mutex_unlock(&pages->mutex); in iopt_area_split()
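The iopt_area_split() fragments at lines 1240-1249 show what a split means at the reference-count level: the single area's node in pages->domains_itree is replaced by two nodes, both new halves point at the same pages object, and the second half takes an extra kref so that each half independently owns one reference. A small sketch of that shared-ownership step, again with a C11 atomic standing in for the kernel's kref and with invented helper names:

/*
 * Sketch of the refcount side of a split: one descriptor referencing a
 * shared pages object becomes two descriptors, and the second one takes
 * its own reference so either half can be destroyed independently.
 * Types and names are simplified stand-ins.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_pages {
	atomic_int refcount;
};

struct sketch_area {
	struct sketch_pages *pages;
	unsigned long start_byte;     /* offset of this half inside pages */
	unsigned long length;
};

static void sketch_get_pages(struct sketch_pages *pages)
{
	atomic_fetch_add(&pages->refcount, 1);   /* like kref_get() */
}

static void sketch_put_pages(struct sketch_pages *pages)
{
	if (atomic_fetch_sub(&pages->refcount, 1) == 1)
		free(pages);
}

/* Split "area" at offset bytes into lhs/rhs covering the same pages. */
static void sketch_split(const struct sketch_area *area, unsigned long offset,
			 struct sketch_area *lhs, struct sketch_area *rhs)
{
	lhs->pages = area->pages;
	lhs->start_byte = area->start_byte;
	lhs->length = offset;

	rhs->pages = area->pages;
	rhs->start_byte = area->start_byte + offset;
	rhs->length = area->length - offset;

	/* the original reference is inherited by lhs; rhs needs its own */
	sketch_get_pages(rhs->pages);
}

int main(void)
{
	struct sketch_pages *pages = malloc(sizeof(*pages));
	struct sketch_area area, lhs, rhs;

	if (!pages)
		return 1;
	atomic_init(&pages->refcount, 1);
	area = (struct sketch_area){ .pages = pages, .start_byte = 0,
				     .length = 8192 };

	sketch_split(&area, 4096, &lhs, &rhs);
	printf("refcount after split: %d\n",
	       atomic_load(&pages->refcount));        /* prints 2 */

	/* either half can now be torn down on its own */
	sketch_put_pages(lhs.pages);
	sketch_put_pages(rhs.pages);
	return 0;
}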
1314 /* Won't do it if domains already have pages mapped in them */ in iopt_disable_large_pages()