Lines Matching defs:xas
(XArray cursor usage, struct xa_state, apparently from mm/filemap.c; the
leading number on each line is the line number in that source file.)

131 XA_STATE(xas, &mapping->i_pages, folio->index);
134 mapping_set_update(&xas, mapping);
136 xas_set_order(&xas, folio->index, folio_order(folio));
141 xas_store(&xas, shadow);
142 xas_init_marks(&xas);
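
The first hits (131-142) look like the single-folio removal path, likely page_cache_delete(): the cursor is pointed at the folio's index, widened to the folio's order so every slot the folio occupies is covered, the entry is replaced with a shadow value, and the marks for that range are reset. A minimal sketch of the pattern, using a hypothetical delete_from_cache() and assuming the caller already holds the i_pages lock (the mapping_set_update() bookkeeping at 134 is omitted):

    /* Hypothetical helper; caller holds xa_lock_irq(&mapping->i_pages). */
    static void delete_from_cache(struct address_space *mapping,
                                  struct folio *folio, void *shadow)
    {
            XA_STATE(xas, &mapping->i_pages, folio->index);

            /* Cover every index of a (possibly large) folio. */
            xas_set_order(&xas, folio->index, folio_order(folio));
            xas_store(&xas, shadow);        /* NULL or a shadow value entry */
            xas_init_marks(&xas);           /* reset dirty/writeback marks */
    }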
278 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
283 mapping_set_update(&xas, mapping);
284 xas_for_each(&xas, folio, ULONG_MAX) {
310 xas_store(&xas, NULL);
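
The batch variant at 278-310 (likely page_cache_delete_batch()) starts at the first folio of the batch and clears each matching slot in place with xas_store(&xas, NULL). The shape of that walk, again assuming the i_pages lock is held:

    /* Hypothetical helper; caller holds the i_pages lock. */
    static void delete_all_from(struct address_space *mapping, pgoff_t first)
    {
            XA_STATE(xas, &mapping->i_pages, first);
            struct folio *folio;

            xas_for_each(&xas, folio, ULONG_MAX) {
                    if (xas_retry(&xas, folio))
                            continue;
                    /* The real code checks the folio against the batch. */
                    xas_store(&xas, NULL);  /* erase the entry in place */
            }
    }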
490 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
498 folio = xas_find(&xas, max);
499 if (xas_retry(&xas, folio))
647 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
655 xas_for_each(&xas, folio, max) {
656 if (xas_retry(&xas, folio))
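
The two range checks at 490-499 and 647-656 (apparently filemap_range_has_page() and filemap_range_has_writeback()) share one shape: convert byte offsets to page indices, walk under RCU, retry on xas_retry() entries, and skip or accept value entries as appropriate. Condensed into a hypothetical range_has_folio():

    static bool range_has_folio(struct address_space *mapping,
                                loff_t start_byte, loff_t end_byte)
    {
            XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
            pgoff_t max = end_byte >> PAGE_SHIFT;
            struct folio *folio;
            bool found = false;

            rcu_read_lock();
            xas_for_each(&xas, folio, max) {
                    if (xas_retry(&xas, folio))
                            continue;       /* raced with a modification */
                    if (xa_is_value(folio))
                            continue;       /* shadow/DAX entries don't count */
                    found = true;
                    break;
            }
            rcu_read_unlock();
            return found;
    }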
825 XA_STATE(xas, &mapping->i_pages, offset);
837 xas_lock_irq(&xas);
838 xas_store(&xas, new);
850 xas_unlock_irq(&xas);
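
Lines 825-850 (likely replace_page_cache_folio()) show the store-under-lock idiom: xas_lock_irq() takes the xa_lock of i_pages with interrupts disabled, the slot is overwritten, and the lock is dropped. Replacing an entry that already exists needs no node allocation, which is why there is no retry loop here. As a sketch:

    /* Hypothetical helper: swap the entry at @offset for @new. */
    static void replace_in_cache(struct address_space *mapping,
                                 pgoff_t offset, struct folio *new)
    {
            XA_STATE(xas, &mapping->i_pages, offset);

            xas_lock_irq(&xas);
            xas_store(&xas, new);   /* returns the old entry, if wanted */
            xas_unlock_irq(&xas);
    }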
860 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
869 mapping_set_update(&xas, mapping);
878 folio->index = xas.xa_index;
884 xas_lock_irq(&xas);
885 xas_for_each_conflict(&xas, entry) {
888 xas_set_err(&xas, -EEXIST);
896 order = xas_get_order(&xas);
908 xas_set_order(&xas, index, split_order);
909 xas_try_split(&xas, old, order);
910 if (xas_error(&xas))
918 xas_reset(&xas);
924 xas_store(&xas, folio);
925 if (xas_error(&xas))
939 xas_unlock_irq(&xas);
941 if (!xas_nomem(&xas, gfp))
945 if (xas_error(&xas))
954 return xas_error(&xas);
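
Lines 860-954 are the insert path (likely __filemap_add_folio()) and contain the canonical XArray allocation loop: attempt the store under the lock; if it failed for want of memory, xas_nomem() allocates outside the lock and the loop retries. The xas_for_each_conflict()/xas_try_split() section at 885-910 additionally detects entries already occupying the range and splits a larger entry down to the required order. The skeleton of the loop, with the conflict and split handling left out:

    /* Sketch of the insert loop only; conflict/split handling omitted. */
    static int add_to_cache(struct address_space *mapping,
                            struct folio *folio, pgoff_t index, gfp_t gfp)
    {
            XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));

            do {
                    xas_lock_irq(&xas);
                    xas_store(&xas, folio);
                    xas_unlock_irq(&xas);
                    /* xas_nomem(): allocate what the store lacked, retry. */
            } while (xas_nomem(&xas, gfp));

            return xas_error(&xas); /* 0 on success, else e.g. -ENOMEM */
    }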
1780 XA_STATE(xas, &mapping->i_pages, index);
1784 void *entry = xas_next(&xas);
1786 return xas.xa_index;
1787 if (xas.xa_index == 0)
1817 XA_STATE(xas, &mapping->i_pages, index);
1820 void *entry = xas_prev(&xas);
1823 if (xas.xa_index == ULONG_MAX)
1827 return xas.xa_index;
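
The paired scans at 1780-1787 and 1817-1827 (apparently page_cache_next_miss() and page_cache_prev_miss()) step the cursor one index at a time with xas_next()/xas_prev() looking for a hole. xa_index wrapping to 0 going forward, or to ULONG_MAX going backward, means the walk ran off the end of the index space. A forward-only sketch:

    /* Hypothetical helper: first hole at or after @index, within @max_scan. */
    static pgoff_t next_hole(struct address_space *mapping, pgoff_t index,
                             unsigned long max_scan)
    {
            XA_STATE(xas, &mapping->i_pages, index);
            pgoff_t ret = ULONG_MAX;

            rcu_read_lock();
            while (max_scan--) {
                    void *entry = xas_next(&xas);

                    if (!entry || xa_is_value(entry)) {
                            ret = xas.xa_index;     /* hole found here */
                            break;
                    }
                    if (xas.xa_index == 0)
                            break;                  /* wrapped around */
            }
            rcu_read_unlock();
            return ret;
    }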
1865 XA_STATE(xas, &mapping->i_pages, index);
1870 xas_reset(&xas);
1871 folio = xas_load(&xas);
1872 if (xas_retry(&xas, folio))
1884 if (unlikely(folio != xas_reload(&xas))) {
2030 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
2037 folio = xas_find(xas, max);
2039 folio = xas_find_marked(xas, max, mark);
2041 if (xas_retry(xas, folio))
2054 if (unlikely(folio != xas_reload(xas))) {
2061 xas_reset(xas);
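
Lines 1865-1884 (likely filemap_get_entry()) and the find_get_entry() helper defined at 2030 implement the lockless lookup dance: load the entry under RCU, loop on retry entries, take a folio reference, then xas_reload() to confirm the slot still points at the same folio; if anything moved, drop the reference and start over from xas_reset(). A self-contained sketch with a hypothetical cache_get_folio():

    static struct folio *cache_get_folio(struct address_space *mapping,
                                         pgoff_t index)
    {
            XA_STATE(xas, &mapping->i_pages, index);
            struct folio *folio;

            rcu_read_lock();
    repeat:
            xas_reset(&xas);
            folio = xas_load(&xas);
            if (xas_retry(&xas, folio))
                    goto repeat;
            if (!folio || xa_is_value(folio)) {
                    folio = NULL;           /* treat shadow entries as absent */
                    goto out;
            }
            if (!folio_try_get(folio))
                    goto repeat;            /* refcount hit zero: being freed */
            if (unlikely(folio != xas_reload(&xas))) {
                    folio_put(folio);       /* slot changed underneath us */
                    goto repeat;
            }
    out:
            rcu_read_unlock();
            return folio;
    }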
2088 XA_STATE(xas, &mapping->i_pages, *start);
2092 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2093 indices[fbatch->nr] = xas.xa_index;
2137 XA_STATE(xas, &mapping->i_pages, *start);
2141 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2159 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2162 nr = 1 << xas_get_order(&xas);
2163 base = xas.xa_index & ~(nr - 1);
2174 indices[fbatch->nr] = xas.xa_index;
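
Within the locked-batch walk at 2137-2174 (apparently find_lock_entries()), lines 2162-2163 recover the full span of a multi-index entry from the cursor: xas_get_order() gives the entry's order, and masking xa_index rounds down to the entry's first index. For an order-2 entry found at xa_index 7, nr is 4 and base is 4. In snippet form (a fragment, not a standalone helper):

    unsigned int order = xas_get_order(&xas);       /* e.g. 2            */
    unsigned long nr = 1UL << order;                /* 4 indices covered */
    unsigned long base = xas.xa_index & ~(nr - 1);  /* 7 & ~3 == 4       */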
2227 XA_STATE(xas, &mapping->i_pages, *start);
2233 for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2234 folio = xas_next(&xas)) {
2235 if (xas_retry(&xas, folio))
2251 if (unlikely(folio != xas_reload(&xas)))
2259 xas_advance(&xas, folio_next_index(folio) - 1);
2265 xas_reset(&xas);
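
The contiguous walk at 2227-2265 (likely filemap_get_folios_contig()) advances with xas_next(), but after accepting a large folio it jumps the cursor to the folio's last index with xas_advance() so the tail slots are not revisited; the read-batch walk at 2370-2398 further down uses the same trick. A sketch, leaving out the reference/revalidation steps shown earlier:

    /* Hypothetical walker over a contiguous run of folios. */
    static void walk_contig(struct address_space *mapping,
                            pgoff_t start, pgoff_t end)
    {
            XA_STATE(xas, &mapping->i_pages, start);
            struct folio *folio;

            rcu_read_lock();
            for (folio = xas_load(&xas); folio && xas.xa_index <= end;
                 folio = xas_next(&xas)) {
                    if (xas_retry(&xas, folio))
                            continue;
                    if (xa_is_value(folio))
                            break;  /* run broken by a shadow entry */
                    /* ... take a reference and use the folio ... */
                    xas_advance(&xas, folio_next_index(folio) - 1);
            }
            rcu_read_unlock();
    }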
2303 XA_STATE(xas, &mapping->i_pages, *start);
2307 while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
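
Line 2307 passes a tag rather than XA_PRESENT into find_get_entry(), which switches it to xas_find_marked() (line 2039): only entries carrying the mark are visited. The same selective walk can be written with xas_for_each_marked(); a sketch:

    /* Hypothetical helper: count entries carrying @mark in [start, end]. */
    static unsigned int count_marked(struct address_space *mapping,
                                     pgoff_t start, pgoff_t end, xa_mark_t mark)
    {
            XA_STATE(xas, &mapping->i_pages, start);
            struct folio *folio;
            unsigned int nr = 0;

            rcu_read_lock();
            xas_for_each_marked(&xas, folio, end, mark) {
                    if (xas_retry(&xas, folio))
                            continue;
                    nr++;
            }
            rcu_read_unlock();
            return nr;
    }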
2370 XA_STATE(xas, &mapping->i_pages, index);
2374 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2375 if (xas_retry(&xas, folio))
2377 if (xas.xa_index > max || xa_is_value(folio))
2384 if (unlikely(folio != xas_reload(&xas)))
2393 xas_advance(&xas, folio_next_index(folio) - 1);
2398 xas_reset(&xas);
3053 static inline loff_t folio_seek_hole_data(struct xa_state *xas,
3065 xas_pause(xas);
3086 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
3089 return PAGE_SIZE << xas_get_order(xas);
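
seek_folio_size() at 3086-3089 covers the case where the entry is a shadow/DAX value rather than a folio: folio_size() cannot be applied to a value entry, but the order recorded in the tree still gives the number of bytes the entry spans. Roughly:

    /* Sketch of the size calculation at 3089. */
    static size_t entry_bytes(struct xa_state *xas, struct folio *folio)
    {
            if (xa_is_value(folio))
                    return PAGE_SIZE << xas_get_order(xas);
            return folio_size(folio);
    }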
3114 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
3123 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3124 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
3133 seek_size = seek_folio_size(&xas, folio);
3135 start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3142 xas_set(&xas, pos >> PAGE_SHIFT);
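
mapping_seek_hole_data() (apparently 3114-3142) converts byte offsets to page indices on entry, back to bytes at 3124, and repositions the cursor with xas_set() at 3142 once a folio's contents have been examined. A reduced sketch that only finds the first cached byte at or after @start, using a hypothetical seek_cached():

    static loff_t seek_cached(struct address_space *mapping,
                              loff_t start, loff_t end)
    {
            XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
            pgoff_t max = (end - 1) >> PAGE_SHIFT;
            struct folio *folio;
            loff_t ret = -ENXIO;

            rcu_read_lock();
            while ((folio = xas_find(&xas, max))) {
                    if (xas_retry(&xas, folio))
                            continue;
                    if (xa_is_value(folio))
                            continue;       /* shadow entry: no data */
                    /* The folio may begin before @start; don't go back. */
                    ret = max_t(loff_t, start,
                                (loff_t)xas.xa_index << PAGE_SHIFT);
                    break;
            }
            rcu_read_unlock();
            return ret;
    }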
3592 static struct folio *next_uptodate_folio(struct xa_state *xas,
3595 struct folio *folio = xas_next_entry(xas, end_pgoff);
3601 if (xas_retry(xas, folio))
3610 if (unlikely(folio != xas_reload(xas)))
3621 if (xas->xa_index >= max_idx)
3628 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3740 XA_STATE(xas, &mapping->i_pages, start_pgoff);
3748 folio = next_uptodate_folio(&xas, mapping, end_pgoff);
3773 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3774 vmf->pte += xas.xa_index - last_pgoff;
3775 last_pgoff = xas.xa_index;
3777 nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
3784 xas.xa_index - folio->index, addr,
3789 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
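
In the fault-around loop (apparently filemap_map_pages(), 3740-3789), the user virtual address and the PTE pointer advance in lockstep with the cursor: lines 3773-3775 add (xa_index - last_pgoff) pages to each. If last_pgoff was 10 and the next up-to-date folio sits at index 13, both move forward by three pages. The arithmetic as a tiny hypothetical helper:

    /* Advance a user address to match the cursor's movement. */
    static unsigned long fault_around_addr(unsigned long addr,
                                           pgoff_t last_pgoff,
                                           pgoff_t cur_pgoff)
    {
            return addr + ((cur_pgoff - last_pgoff) << PAGE_SHIFT);
    }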
4430 XA_STATE(xas, &mapping->i_pages, first_index);
4437 xas_for_each(&xas, folio, last_index) {
4453 if (xas_retry(&xas, folio))
4456 order = xas_get_order(&xas);
4458 folio_first_index = round_down(xas.xa_index, 1 << order);
4508 if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
4511 if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
4516 xas_pause(&xas);
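
The final block (4430-4516, apparently filemap_cachestat()) reads per-entry tags with xas_get_mark() while scanning under RCU, and calls xas_pause() before yielding so the cursor can be safely restarted after the RCU read lock is dropped and retaken. A sketch of that accounting walk, with a hypothetical count_dirty_writeback():

    static void count_dirty_writeback(struct address_space *mapping,
                                      pgoff_t first, pgoff_t last,
                                      unsigned long *dirty,
                                      unsigned long *writeback)
    {
            XA_STATE(xas, &mapping->i_pages, first);
            struct folio *folio;

            rcu_read_lock();
            xas_for_each(&xas, folio, last) {
                    if (xas_retry(&xas, folio))
                            continue;
                    if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
                            (*dirty)++;
                    if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
                            (*writeback)++;
                    if (need_resched()) {
                            xas_pause(&xas);        /* make state restartable */
                            cond_resched_rcu();
                    }
            }
            rcu_read_unlock();
    }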