Lines matching full:span
938 struct interval_tree_double_span_iter span; member
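For orientation, the member declared above is the two-tree span iterator that every function in this listing walks. A plausible shape for the struct, inferred from the fields the callers below touch (the start_hole/start_used and last_hole/last_used pairs suggest unions, and is_used is tested against 0, 1 and 2), would be:

	/* Sketch only: layout inferred from usage in this listing, not quoted */
	struct interval_tree_double_span_iter {
		struct rb_root_cached *itrees[2];	/* the two trees walked */
		struct interval_tree_span_iter spans[2];
		union {
			unsigned long start_hole;	/* valid when is_used == 0 */
			unsigned long start_used;	/* valid when is_used != 0 */
		};
		union {
			unsigned long last_hole;
			unsigned long last_used;
		};
		/* 0: in neither tree, 1: in itrees[0] (takes priority),
		 * 2: only in itrees[1] */
		int is_used;
	};

Under that reading, each step yields one maximal range that is either a hole in both trees or covered by one of them, which is what the branches below key on.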
960 struct interval_tree_double_span_iter span; in pfn_reader_unpin() local
965 interval_tree_for_each_double_span(&span, &pages->access_itree, in pfn_reader_unpin()
967 if (span.is_used) in pfn_reader_unpin()
970 batch_unpin(&pfns->batch, pages, span.start_hole - start, in pfn_reader_unpin()
971 span.last_hole - span.start_hole + 1); in pfn_reader_unpin()
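Reassembled, the pfn_reader_unpin() fragments form one loop: walk the batch's index range through both trees and unpin only the holes, since any range still covered by the access tree or the domains tree must stay pinned. A hedged reconstruction (the pfn_reader fields and the start/last locals are inferred from the fragments):

	static void pfn_reader_unpin(struct pfn_reader *pfns)
	{
		unsigned long last = pfns->batch_end_index - 1;	/* assumed */
		unsigned long start = pfns->batch_start_index;	/* assumed */
		struct interval_tree_double_span_iter span;
		struct iopt_pages *pages = pfns->pages;

		interval_tree_for_each_double_span(&span, &pages->access_itree,
						   &pages->domains_itree, start,
						   last) {
			/* Still referenced by an access or a domain: keep it */
			if (span.is_used)
				continue;
			/* In neither tree: drop the pin for this sub-range.
			 * The third argument is an offset within the batch. */
			batch_unpin(&pfns->batch, pages, span.start_hole - start,
				    span.last_hole - span.start_hole + 1);
		}
	}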
975 /* Process a single span to load it from the proper storage */
978 struct interval_tree_double_span_iter *span = &pfns->span; in pfn_reader_fill_span() local
984 WARN_ON(span->last_used < start_index)) in pfn_reader_fill_span()
987 if (span->is_used == 1) { in pfn_reader_fill_span()
989 start_index, span->last_used); in pfn_reader_fill_span()
993 if (span->is_used == 2) { in pfn_reader_fill_span()
996 * target span. If it is too small then we will be called again in pfn_reader_fill_span()
1006 min(iopt_area_last_index(area), span->last_used)); in pfn_reader_fill_span()
1012 span->last_hole); in pfn_reader_fill_span()
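Stitching the pfn_reader_fill_span() fragments together, the function evidently dispatches on span->is_used: 1 means the PFNs already sit pinned in the pinned_pfns xarray, 2 means they can be copied out of some domain's page table, and a hole means they must be pinned from userspace. The helpers batch_from_xarray(), batch_from_domain(), iopt_pages_find_domain_area() and pfn_reader_user_pin() are names assumed to fill the gaps between fragments, so read this as a sketch:

	static int pfn_reader_fill_span(struct pfn_reader *pfns)
	{
		struct interval_tree_double_span_iter *span = &pfns->span;
		unsigned long start_index = pfns->batch_end_index; /* assumed */
		struct iopt_area *area;

		if (WARN_ON(span->last_used < start_index))
			return -EINVAL;

		if (span->is_used == 1) {
			/* Already pinned: copy PFNs straight from the xarray */
			batch_from_xarray(&pfns->batch,
					  &pfns->pages->pinned_pfns,
					  start_index, span->last_used);
			return 0;
		}

		if (span->is_used == 2) {
			/*
			 * Pull as many pages as possible from the first area
			 * in the target span. If it is too small then we will
			 * be called again and we'll find another area.
			 */
			area = iopt_pages_find_domain_area(pfns->pages,
							   start_index);
			if (WARN_ON(!area))
				return -EINVAL;
			batch_from_domain(&pfns->batch, area->storage_domain,
					  area, start_index,
					  min(iopt_area_last_index(area),
					      span->last_used));
			return 0;
		}

		/* A hole in both trees: pin fresh pages from the mm */
		return pfn_reader_user_pin(&pfns->user, pfns->pages,
					   start_index, span->last_hole);
	}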
1040 WARN_ON(interval_tree_double_span_iter_done(&pfns->span))) in pfn_reader_next()
1052 if (pfns->batch_end_index == pfns->span.last_used + 1) in pfn_reader_next()
1053 interval_tree_double_span_iter_next(&pfns->span); in pfn_reader_next()
1077 interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree, in pfn_reader_init()
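The three calls above trace the iterator's whole lifecycle: pfn_reader_init() seeds it with ..._first(), pfn_reader_next() checks ..._done(), and only advances with ..._next() once batch_end_index has reached span.last_used + 1, i.e. once the batch has consumed the current span entirely. The open-coding is needed because a batch can fill up mid-span. The interval_tree_for_each_double_span() macro used everywhere else is presumably just these three primitives in a for loop:

	#define interval_tree_for_each_double_span(span, itree1, itree2,      \
						   first_index, last_index)   \
		for (interval_tree_double_span_iter_first(                    \
			     span, itree1, itree2, first_index, last_index);  \
		     !interval_tree_double_span_iter_done(span);              \
		     interval_tree_double_span_iter_next(span))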
1262 struct interval_tree_double_span_iter span; in __iopt_area_unfill_domain() local
1278 * end of any contiguous span, unmap that whole span, and then only in __iopt_area_unfill_domain()
1285 interval_tree_for_each_double_span(&span, &pages->domains_itree, in __iopt_area_unfill_domain()
1288 if (span.is_used) { in __iopt_area_unfill_domain()
1290 span.last_used - span.start_used + 1); in __iopt_area_unfill_domain()
1294 span.start_hole, span.last_hole, in __iopt_area_unfill_domain()
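Note the tree order flips in __iopt_area_unfill_domain(): domains_itree comes first, so is_used now flags ranges some other domain still maps. Those are skipped with their pin counts carried forward, while true holes, which only this domain mapped, are unmapped and unpinned in whole contiguous spans, as the comment says. The callee names in this sketch are assumptions; only their argument lists appear in the fragments:

	interval_tree_for_each_double_span(&span, &pages->domains_itree,
					   &pages->access_itree, start_index,
					   last_index) {
		if (span.is_used) {
			/* Another domain or access still needs these pages:
			 * carry the pins forward rather than unpinning.
			 * batch_skip_carry() is an assumed helper name. */
			batch_skip_carry(&batch,
					 span.last_used - span.start_used + 1);
			continue;
		}
		/* Only this domain mapped the range: unmap and unpin it.
		 * iopt_area_unpin_domain() is likewise an assumed name. */
		iopt_area_unpin_domain(&batch, area, pages, domain,
				       span.start_hole, span.last_hole);
	}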
1546 struct interval_tree_double_span_iter span; in iopt_pages_unfill_xarray() local
1553 interval_tree_for_each_double_span(&span, &pages->access_itree, in iopt_pages_unfill_xarray()
1556 if (!span.is_used) { in iopt_pages_unfill_xarray()
1563 iopt_pages_unpin_xarray(&batch, pages, span.start_hole, in iopt_pages_unfill_xarray()
1564 span.last_hole); in iopt_pages_unfill_xarray()
1565 } else if (span.is_used == 2) { in iopt_pages_unfill_xarray()
1567 clear_xarray(&pages->pinned_pfns, span.start_used, in iopt_pages_unfill_xarray()
1568 span.last_used); in iopt_pages_unfill_xarray()
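iopt_pages_unfill_xarray() walks access_itree first and domains_itree second, and its two branches confirm the is_used encoding: a hole in both trees is unpinned outright and its xarray entries dropped, while is_used == 2 (pinned only through a domain) keeps the pin and merely clears the now-redundant xarray copy. is_used == 1 is deliberately left alone, since another access still relies on those entries. Sketch, with the two callees quoted from the fragments and the locals assumed:

	struct interval_tree_double_span_iter span;
	struct pfn_batch batch;	/* assumed scratch batch */

	interval_tree_for_each_double_span(&span, &pages->access_itree,
					   &pages->domains_itree, start_index,
					   last_index) {
		if (!span.is_used) {
			/* No user left at all: unpin and drop the entries */
			iopt_pages_unpin_xarray(&batch, pages, span.start_hole,
						span.last_hole);
		} else if (span.is_used == 2) {
			/* A domain still pins the pages; just clear the
			 * duplicate PFNs cached in the xarray */
			clear_xarray(&pages->pinned_pfns, span.start_used,
				     span.last_used);
		}
		/* span.is_used == 1: another access still needs the xarray */
	}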
1674 struct interval_tree_double_span_iter span; in iopt_pages_fill_xarray() local
1683 interval_tree_for_each_double_span(&span, &pages->access_itree, in iopt_pages_fill_xarray()
1688 if (span.is_used == 1) { in iopt_pages_fill_xarray()
1689 cur_pages = out_pages + (span.start_used - start_index); in iopt_pages_fill_xarray()
1690 iopt_pages_fill_from_xarray(pages, span.start_used, in iopt_pages_fill_xarray()
1691 span.last_used, cur_pages); in iopt_pages_fill_xarray()
1695 if (span.is_used == 2) { in iopt_pages_fill_xarray()
1696 cur_pages = out_pages + (span.start_used - start_index); in iopt_pages_fill_xarray()
1697 iopt_pages_fill_from_domain(pages, span.start_used, in iopt_pages_fill_xarray()
1698 span.last_used, cur_pages); in iopt_pages_fill_xarray()
1700 span.start_used, span.last_used, in iopt_pages_fill_xarray()
1704 xa_end = span.last_used + 1; in iopt_pages_fill_xarray()
1709 cur_pages = out_pages + (span.start_hole - start_index); in iopt_pages_fill_xarray()
1710 rc = iopt_pages_fill_from_mm(pages, &user, span.start_hole, in iopt_pages_fill_xarray()
1711 span.last_hole, cur_pages); in iopt_pages_fill_xarray()
1714 rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole, in iopt_pages_fill_xarray()
1715 span.last_hole, cur_pages); in iopt_pages_fill_xarray()
1717 iopt_pages_err_unpin(pages, span.start_hole, in iopt_pages_fill_xarray()
1718 span.last_hole, cur_pages); in iopt_pages_fill_xarray()
1721 xa_end = span.last_hole + 1; in iopt_pages_fill_xarray()
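The longest run of fragments belongs to iopt_pages_fill_xarray(), and they spell out a three-way copy into the caller's out_pages array: is_used == 1 reads straight from the xarray, is_used == 2 reads from a domain and then mirrors the PFNs into the xarray via pages_to_xarray(), and a hole pins from the mm, publishes to the xarray, and unpins again if publishing fails. xa_end looks like a high-water mark the error path uses to unwind whatever was already published; the out_clean_xa label is assumed. A hedged reassembly of the loop:

	interval_tree_for_each_double_span(&span, &pages->access_itree,
					   &pages->domains_itree, start_index,
					   last_index) {
		if (span.is_used == 1) {
			/* Already pinned and cached: copy out of the xarray */
			cur_pages = out_pages + (span.start_used - start_index);
			iopt_pages_fill_from_xarray(pages, span.start_used,
						    span.last_used, cur_pages);
			continue;
		}

		if (span.is_used == 2) {
			/* Pinned by a domain: read its page table and cache
			 * the result in the xarray for later accesses */
			cur_pages = out_pages + (span.start_used - start_index);
			iopt_pages_fill_from_domain(pages, span.start_used,
						    span.last_used, cur_pages);
			rc = pages_to_xarray(&pages->pinned_pfns,
					     span.start_used, span.last_used,
					     cur_pages);
			if (rc)
				goto out_clean_xa;
			xa_end = span.last_used + 1;
			continue;
		}

		/* Hole: pin fresh pages from the mm, then publish them */
		cur_pages = out_pages + (span.start_hole - start_index);
		rc = iopt_pages_fill_from_mm(pages, &user, span.start_hole,
					     span.last_hole, cur_pages);
		if (rc)
			goto out_clean_xa;
		rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole,
				     span.last_hole, cur_pages);
		if (rc) {
			/* Could not publish: undo the pins just taken */
			iopt_pages_err_unpin(pages, span.start_hole,
					     span.last_hole, cur_pages);
			goto out_clean_xa;
		}
		xa_end = span.last_hole + 1;
	}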