| /linux/drivers/dma-buf/ |
| H A D | udmabuf.c |
    29   struct folio **folios;  member
    58   pfn = folio_pfn(ubuf->folios[pgoff]);  in udmabuf_vm_fault()
    76   pfn = folio_pfn(ubuf->folios[pgoff]);  in udmabuf_vm_fault()
    123  pages[pg] = folio_page(ubuf->folios[pg],  in vmap_udmabuf()
    162  sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,  in get_sg_table()
    210  ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);  in init_udmabuf()
    211  if (!ubuf->folios)  in init_udmabuf()
    231  kvfree(ubuf->folios);  in deinit_udmabuf()
    326  loff_t start, loff_t size, struct folio **folios)  in udmabuf_pin_folios() argument
    337  nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);  in udmabuf_pin_folios()
    [all …]
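
The hits above outline udmabuf's lifecycle: a kvmalloc'd folio-pointer array, filled by memfd_pin_folios() and resolved to pfns on fault. Below is a minimal sketch of that pin/lookup/unpin cycle; pin_memfd_range() is a hypothetical wrapper, not a udmabuf function, and assumes the caller sized @folios to @max entries:

    #include <linux/memfd.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    /* Hypothetical wrapper: pin the folios backing [start, start + size) of a
     * memfd, peek at the first page frame number, then drop the pins. */
    static long pin_memfd_range(struct file *memfd, loff_t start, loff_t size,
                                struct folio **folios, unsigned int max)
    {
        pgoff_t pgoff;
        long nr_folios;

        /* The end offset is inclusive, as in udmabuf_pin_folios(). */
        nr_folios = memfd_pin_folios(memfd, start, start + size - 1,
                                     folios, max, &pgoff);
        if (nr_folios <= 0)
            return nr_folios;

        pr_debug("first pfn: %#lx\n", folio_pfn(folios[0]));

        unpin_folios(folios, nr_folios);    /* pairs with the pin above */
        return nr_folios;
    }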
|
| /linux/lib/ |
| H A D | test_kho.c |
    34   struct folio **folios;  member
    84   struct folio *folio = state->folios[i];  in kho_test_save_data()
    166  state->folios[state->nr_folios++] = folio;  in kho_test_generate_data()
    178  folio_put(state->folios[i]);  in kho_test_generate_data()
    186  struct folio **folios;  in kho_test_save() local
    193  folios = kvmalloc_array(max_nr, sizeof(*state->folios), GFP_KERNEL);  in kho_test_save()
    194  if (!folios)  in kho_test_save()
    196  state->folios = folios;  in kho_test_save()
    215  kvfree(folios);  in kho_test_save()
    320  folio_put(kho_test_state.folios[i]);  in kho_test_cleanup()
    [all …]
|
| /linux/fs/btrfs/ |
| H A D | accessors.c |
    59   char *kaddr = folio_address(eb->folios[idx]) + oif; \
    73   kaddr = folio_address(eb->folios[idx + 1]); \
    77   folio_address(eb->folios[idx + 1]), \
    89   char *kaddr = folio_address(eb->folios[idx]) + oif; \
    105  kaddr = folio_address(eb->folios[idx + 1]); \
    109  kaddr = folio_address(eb->folios[idx + 1]); \
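
These btrfs accessor macros handle metadata fields that straddle two folios of an extent buffer: the first chunk comes from folios[idx] at the in-folio offset, the tail from the start of folios[idx + 1]. A self-contained sketch of the same split copy, assuming an array of order-0 folios (read_across_folios() is illustrative, not a btrfs helper):

    #include <linux/mm.h>
    #include <linux/string.h>

    /* Illustrative only: read @len bytes at byte offset @off from a buffer
     * backed by an array of order-0 folios, splitting the copy when the
     * range crosses into the next folio. */
    static void read_across_folios(struct folio **folios, size_t off,
                                   void *dst, size_t len)
    {
        size_t idx = off / PAGE_SIZE;
        size_t oif = offset_in_page(off);
        size_t part = min(len, PAGE_SIZE - oif);

        memcpy(dst, folio_address(folios[idx]) + oif, part);
        if (part < len)    /* the tail lives in the following folio */
            memcpy(dst + part, folio_address(folios[idx + 1]), len - part);
    }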
|
| H A D | defrag.c |
    1154  struct folio **folios, int nr_pages,  in defrag_one_locked_target() argument
    1176  for (int i = 0; i < nr_pages && folios[i]; i++) {  in defrag_one_locked_target()
    1177  struct folio *folio = folios[i];  in defrag_one_locked_target()
    1200  struct folio **folios;  in defrag_one_range() local
    1210  folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);  in defrag_one_range()
    1211  if (!folios)  in defrag_one_range()
    1216  folios[i] = defrag_prepare_one_folio(inode, cur >> PAGE_SHIFT);  in defrag_one_range()
    1217  if (IS_ERR(folios[i])) {  in defrag_one_range()
    1218  ret = PTR_ERR(folios[i]);  in defrag_one_range()
    1219  folios[i] = NULL;  in defrag_one_range()
    [all …]
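
defrag_one_range() fills a kcalloc'd array and, when one lookup fails mid-loop, NULLs that slot so the cleanup walk knows where to stop. A sketch of the pattern, with get_one_folio() as a hypothetical stand-in for defrag_prepare_one_folio():

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Hypothetical stand-in for defrag_prepare_one_folio(). */
    static struct folio *get_one_folio(int i);

    /* Sketch: populate @nr folio slots; on failure, put only what was taken. */
    static int fill_folio_array(int nr)
    {
        struct folio **folios;
        int i, ret = 0;

        folios = kcalloc(nr, sizeof(struct folio *), GFP_NOFS);
        if (!folios)
            return -ENOMEM;

        for (i = 0; i < nr; i++) {
            folios[i] = get_one_folio(i);
            if (IS_ERR(folios[i])) {
                ret = PTR_ERR(folios[i]);
                folios[i] = NULL;    /* terminate the cleanup walk here */
                break;
            }
        }

        /* ... use the folios on success ... */

        for (i = 0; i < nr && folios[i]; i++)
            folio_put(folios[i]);
        kfree(folios);
        return ret;
    }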
|
| H A D | extent_io.c |
    282   struct folio *folio = fbatch.folios[i];  in __process_folios_contig()
    323   struct folio *folio = fbatch.folios[i];  in lock_delalloc_folios()
    707   eb->folios[i] = page_folio(page_array[i]);  in alloc_eb_folio_array()
    2216  struct folio *folio = eb->folios[i];  in write_one_eb()
    2485  struct folio *folio = fbatch.folios[i];  in extent_write_cache_pages()
    2953  struct folio *folio = eb->folios[i];  in btrfs_release_extent_buffer_folios()
    3004  ASSERT(eb->folios[i]);  in cleanup_extent_buffer_folios()
    3005  detach_extent_buffer_folio(eb, eb->folios[i]);  in cleanup_extent_buffer_folios()
    3006  folio_put(eb->folios[i]);  in cleanup_extent_buffer_folios()
    3007  eb->folios[i] = NULL;  in cleanup_extent_buffer_folios()
    [all …]
|
| H A D | compression.h |
    100  u64 start, struct folio **folios, unsigned long *out_folios,
    156  u64 start, struct folio **folios, unsigned long *out_folios,
    167  u64 start, struct folio **folios, unsigned long *out_folios,
    177  u64 start, struct folio **folios, unsigned long *out_folios,
|
| H A D | extent_io.h |
    115  struct folio *folios[INLINE_EXTENT_BUFFER_PAGES];  member
    162  return offset_in_folio(eb->folios[0], offset + eb->start);  in get_eb_offset_in_folio()
    296  if (!eb->folios[0])  in num_extent_folios()
    298  if (folio_order(eb->folios[0]))  in num_extent_folios()
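
num_extent_folios() keys off the first folio: a high-order folio backs the whole buffer on its own, otherwise the buffer needs one order-0 folio per page. A sketch of that decision (nr_backing_folios() is illustrative, not the btrfs function):

    #include <linux/kernel.h>
    #include <linux/mm.h>

    /* Sketch of the num_extent_folios() logic: either one high-order folio
     * covers the buffer, or it takes one order-0 folio per PAGE_SIZE chunk. */
    static int nr_backing_folios(struct folio **folios, size_t len)
    {
        if (!folios[0])
            return 0;    /* backing pages not attached yet */
        if (folio_order(folios[0]))
            return 1;    /* a single large folio backs everything */
        return DIV_ROUND_UP(len, PAGE_SIZE);
    }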
|
| H A D | zlib.c |
    148  u64 start, struct folio **folios, unsigned long *out_folios,  in zlib_compress_folios() argument
    190  folios[0] = out_folio;  in zlib_compress_folios()
    274  folios[nr_folios] = out_folio;  in zlib_compress_folios()
    310  folios[nr_folios] = out_folio;  in zlib_compress_folios()
|
| /linux/mm/ |
| H A D | swap.c |
    165   struct folio *folio = fbatch->folios[i];  in folio_batch_move_lru()
    376   struct folio *batch_folio = fbatch->folios[i];  in __lru_cache_activate_folio()
    951   void folios_put_refs(struct folio_batch *folios, unsigned int *refs)  in folios_put_refs() argument
    957   for (i = 0, j = 0; i < folios->nr; i++) {  in folios_put_refs()
    958   struct folio *folio = folios->folios[i];  in folios_put_refs()
    990   folios->folios[j] = folio;  in folios_put_refs()
    996   folio_batch_reinit(folios);  in folios_put_refs()
    1000  folios->nr = j;  in folios_put_refs()
    1001  mem_cgroup_uncharge_folios(folios);  in folios_put_refs()
    1002  free_unref_folios(folios);  in folios_put_refs()
    [all …]
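
folios_put_refs() drops a caller-supplied number of references on each folio of a batch (a NULL @refs means one each) and frees whatever reaches zero in a single pass; the swap_state.c hits below are its in-tree caller. A simplified sketch of feeding it, mirroring that caller:

    #include <linux/mm.h>
    #include <linux/pagevec.h>

    /* Sketch: drop @nr_refs[i] references on each of @n folios, batching
     * the frees through folios_put_refs() as free_pages_and_swap_cache()
     * does. folios_put_refs() reinitialises the batch when it flushes. */
    static void drop_folio_refs(struct folio **src, unsigned int *nr_refs,
                                int n)
    {
        struct folio_batch fbatch;
        unsigned int refs[PAGEVEC_SIZE];
        int i;

        folio_batch_init(&fbatch);
        for (i = 0; i < n; i++) {
            refs[fbatch.nr] = nr_refs[i];   /* slot must match the folio's */
            if (folio_batch_add(&fbatch, src[i]) == 0)
                folios_put_refs(&fbatch, refs);    /* batch full: flush */
        }
        if (folio_batch_count(&fbatch))
            folios_put_refs(&fbatch, refs);
    }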
|
| H A D | swap_state.c |
    341  struct folio_batch folios;  in free_pages_and_swap_cache() local
    344  folio_batch_init(&folios);  in free_pages_and_swap_cache()
    349  refs[folios.nr] = 1;  in free_pages_and_swap_cache()
    352  refs[folios.nr] = encoded_nr_pages(pages[++i]);  in free_pages_and_swap_cache()
    354  if (folio_batch_add(&folios, folio) == 0)  in free_pages_and_swap_cache()
    355  folios_put_refs(&folios, refs);  in free_pages_and_swap_cache()
    357  if (folios.nr)  in free_pages_and_swap_cache()
    358  folios_put_refs(&folios, refs);  in free_pages_and_swap_cache()
|
| H A D | truncate.c |
    73   if (xa_is_value(fbatch->folios[j]))  in truncate_folio_batch_exceptionals()
    81   if (xa_is_value(fbatch->folios[i])) {  in truncate_folio_batch_exceptionals()
    404  truncate_cleanup_folio(fbatch.folios[i]);  in truncate_inode_pages_range()
    407  folio_unlock(fbatch.folios[i]);  in truncate_inode_pages_range()
    451  struct folio *folio = fbatch.folios[i];  in truncate_inode_pages_range()
    550  struct folio *folio = fbatch.folios[i];  in mapping_try_invalidate()
    693  struct folio *folio = fbatch.folios[i];  in invalidate_inode_pages2_range()
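
The xa_is_value() checks exist because batch lookups that return entries can hand back xarray value entries (shadow or DAX markers) rather than folio pointers, and those must never be dereferenced as folios. A sketch of the filtering step:

    #include <linux/pagevec.h>
    #include <linux/xarray.h>

    /* Sketch: skip exceptional (value) entries so only real folios are
     * touched. */
    static void process_real_folios(struct folio_batch *fbatch)
    {
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
            struct folio *folio = fbatch->folios[i];

            if (xa_is_value(folio))
                continue;    /* shadow/DAX entry, not a folio */
            /* ... safe to use @folio as a folio here ... */
        }
    }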
|
| H A D | gup.c |
    449   void unpin_folios(struct folio **folios, unsigned long nfolios)  in unpin_folios() argument
    464   if (folios[i] != folios[j])  in unpin_folios()
    467   if (folios[i])  in unpin_folios()
    468   gup_put_folio(folios[i], j - i, FOLL_PIN);  in unpin_folios()
    2210  struct folio **folios;  member
    2220  return pofs->folios[i];  in pofs_get_folio()
    2232  unpin_folios(pofs->folios, pofs->nr_entries);  in pofs_unpin()
    2421  struct folio **folios)  in check_and_migrate_movable_folios() argument
    2424  .folios = folios,  in check_and_migrate_movable_folios()
    2455  struct folio **folios)  in check_and_migrate_movable_folios() argument
    [all …]
|
| H A D | migrate.c |
    1994  LIST_HEAD(folios);  in migrate_pages_sync()
    2000  reason, &folios, split_folios, &astats,  in migrate_pages_sync()
    2009  list_splice_tail(&folios, ret_folios);  in migrate_pages_sync()
    2023  list_splice_tail_init(&folios, from);  in migrate_pages_sync()
    2025  list_move(from->next, &folios);  in migrate_pages_sync()
    2026  rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,  in migrate_pages_sync()
    2029  list_splice_tail_init(&folios, ret_folios);  in migrate_pages_sync()
    2071  LIST_HEAD(folios);  in migrate_pages()
    2099  list_cut_before(&folios, from, &folio2->lru);  in migrate_pages()
    2101  list_splice_init(from, &folios);  in migrate_pages()
    [all …]
|
| /linux/include/linux/ |
| H A D | pagevec.h |
    32  struct folio *folios[PAGEVEC_SIZE];  member
    77  fbatch->folios[fbatch->nr++] = folio;  in folio_batch_add()
    93  return fbatch->folios[fbatch->i++];  in folio_batch_next()
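
A folio_batch is just a fixed array of PAGEVEC_SIZE folio pointers plus counters: folio_batch_add() stores a folio and returns the space left (zero meaning full), and folio_batch_next() hands slots back in order. A sketch of the usual fill/process/release loop over a mapping; the filemap_get_folios() lookup is an assumption about the caller, not part of pagevec.h:

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/sched.h>

    /* Sketch: visit every cached folio of @mapping, one PAGEVEC_SIZE batch
     * at a time; filemap_get_folios() takes a reference on each folio. */
    static void visit_mapping_folios(struct address_space *mapping)
    {
        struct folio_batch fbatch;
        pgoff_t index = 0;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &index, (pgoff_t)-1, &fbatch)) {
            for (i = 0; i < folio_batch_count(&fbatch); i++)
                pr_debug("folio at index %lu\n",
                         fbatch.folios[i]->index);
            folio_batch_release(&fbatch);    /* drops the refs taken above */
            cond_resched();
        }
    }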
|
| H A D | folio_queue.h |
    196  folioq->vec.folios[slot] = folio;  in folioq_append()
    218  folioq->vec.folios[slot] = folio;  in folioq_append_mark()
    236  return folioq->vec.folios[slot];  in folioq_folio()
    277  folioq->vec.folios[slot] = NULL;  in folioq_clear()
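
folioq_append() places a folio in the next free slot of a segment, folioq_folio() reads a slot back, and folioq_clear() empties one. A minimal sketch of filling an already-initialised segment (folioq_init() is assumed to have run; its exact signature varies across kernel versions) and walking it; note that appending takes no extra folio reference, so the caller's references must outlive the queue:

    #include <linux/folio_queue.h>
    #include <linux/mm.h>

    /* Sketch: stash up to one segment's worth of folios and walk them back. */
    static void queue_and_walk(struct folio_queue *fq, struct folio **src,
                               int n)
    {
        unsigned int slot;
        int i;

        for (i = 0; i < n && !folioq_full(fq); i++)
            folioq_append(fq, src[i]);    /* no reference taken here */

        for (slot = 0; slot < folioq_count(fq); slot++)
            pr_debug("slot %u holds %zu bytes\n", slot,
                     folio_size(folioq_folio(fq, slot)));
    }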
|
| /linux/tools/mm/ |
| H A D | thpmaps |
    252  folios = indexes[index_next:index_end][heads[index_next:index_end]]
    256  nr = (int(folios[0]) if len(folios) else index_end) - index_next
    261  if len(folios):
    264  nr = index_end - int(folios[-1])
    265  folios = folios[:-1]
    270  if len(folios):
    271  folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[-1]))
    273  for index, order in zip(folios, folio_orders):
|
| /linux/fs/fuse/ |
| H A D | ioctl.c |
    254  ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs);  in fuse_do_ioctl()
    256  if (!ap.folios || !iov_page)  in fuse_do_ioctl()
    310  ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0);  in fuse_do_ioctl()
    311  if (!ap.folios[ap.num_folios])  in fuse_do_ioctl()
    330  c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii);  in fuse_do_ioctl()
    368  vaddr = kmap_local_folio(ap.folios[0], 0);  in fuse_do_ioctl()
    397  c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii);  in fuse_do_ioctl()
    405  folio_put(ap.folios[--ap.num_folios]);  in fuse_do_ioctl()
    406  kfree(ap.folios);  in fuse_do_ioctl()
|
| H A D | file.c |
    595  folio_mark_dirty_lock(ap->folios[i]);  in fuse_release_user_pages()
    597  unpin_folio(ap->folios[i]);  in fuse_release_user_pages()
    678  ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL,  in fuse_io_alloc()
    680  if (!ia->ap.folios) {  in fuse_io_alloc()
    690  kfree(ia->ap.folios);  in fuse_io_free()
    793  loff_t pos = folio_pos(ap->folios[0]) + num_read;  in fuse_short_read()
    812  .ap.folios = &folio,  in fuse_do_readfolio()
    878  mapping = ap->folios[0]->mapping;  in fuse_readpages_end()
    890  folio_end_read(ap->folios[i], !err);  in fuse_readpages_end()
    891  folio_put(ap->folios[i]);  in fuse_readpages_end()
    [all …]
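
fuse_release_user_pages() shows the tear-down order for pinned user memory after direct I/O: a folio the request wrote into is marked dirty with folio_mark_dirty_lock() before unpin_folio() drops the pin. A sketch of just that sequence:

    #include <linux/mm.h>

    /* Sketch: release pinned user folios after I/O completes, dirtying them
     * first when the I/O wrote into them (i.e. a read into the buffer). */
    static void release_user_folios(struct folio **folios, int n, bool dirty)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (dirty)
                folio_mark_dirty_lock(folios[i]);
            unpin_folio(folios[i]);
        }
    }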
|
| /linux/fs/ramfs/ |
| H A D | file-nommu.c |
    235  ret = (unsigned long) folio_address(fbatch.folios[0]);  in ramfs_nommu_get_unmapped_area()
    236  pfn = folio_pfn(fbatch.folios[0]);  in ramfs_nommu_get_unmapped_area()
    240  if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {  in ramfs_nommu_get_unmapped_area()
    244  nr_pages += folio_nr_pages(fbatch.folios[loop]);  in ramfs_nommu_get_unmapped_area()
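
On no-MMU, ramfs can satisfy mmap() only if the folios behind the range sit in consecutive page frames, so the loop compares each folio's pfn against a running page count. The same test as a standalone sketch:

    #include <linux/mm.h>
    #include <linux/pagevec.h>

    /* Sketch: true if the batch's folios occupy consecutive page frames. */
    static bool batch_is_contiguous(struct folio_batch *fbatch)
    {
        unsigned long first_pfn, nr_pages = 0;
        unsigned int i;

        if (!folio_batch_count(fbatch))
            return false;

        first_pfn = folio_pfn(fbatch->folios[0]);
        for (i = 0; i < folio_batch_count(fbatch); i++) {
            if (folio_pfn(fbatch->folios[i]) != first_pfn + nr_pages)
                return false;
            nr_pages += folio_nr_pages(fbatch->folios[i]);
        }
        return true;
    }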
|
| /linux/Documentation/ABI/testing/ |
| H A D | sysfs-fs-erofs |
    25  compressed folios:
    27  - 1 : invalidate cached compressed folios
    29  - 3 : drop in-memory pclusters and cached compressed folios
|
| /linux/fs/orangefs/ |
| H A D | inode.c |
    74   struct folio **folios;  member
    91   start = offset_in_folio(ow->folios[0], ow->off);  in orangefs_writepages_work()
    93   folio_start_writeback(ow->folios[i]);  in orangefs_writepages_work()
    94   bvec_set_folio(&ow->bv[i], ow->folios[i],  in orangefs_writepages_work()
    95   folio_size(ow->folios[i]) - start, start);  in orangefs_writepages_work()
    115  wrp = folio_detach_private(ow->folios[i]);  in orangefs_writepages_work()
    117  folio_end_writeback(ow->folios[i]);  in orangefs_writepages_work()
    118  folio_unlock(ow->folios[i]);  in orangefs_writepages_work()
    144  ow->folios[ow->nfolios++] = folio;  in orangefs_writepages_callback()
    156  ow->folios[ow->nfolios++] = folio;  in orangefs_writepages_callback()
    [all …]
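
orangefs_writepages_work() turns its gathered folios into a bio_vec array for one vectored write, trimming the first folio so the transfer starts at the in-folio offset of the write position. A sketch of that setup (folios assumed locked by the caller; folios_to_bvecs() is illustrative, and resetting the offset after the first entry is an assumption about the intent):

    #include <linux/bvec.h>
    #include <linux/mm.h>

    /* Sketch: wrap @n locked folios in bio_vecs for one vectored I/O,
     * starting the first folio at in-folio offset @start and taking the
     * rest whole. */
    static void folios_to_bvecs(struct bio_vec *bv, struct folio **folios,
                                int n, size_t start)
    {
        int i;

        for (i = 0; i < n; i++) {
            size_t offset = i == 0 ? start : 0;

            folio_start_writeback(folios[i]);
            bvec_set_folio(&bv[i], folios[i],
                           folio_size(folios[i]) - offset, offset);
        }
    }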
|
| /linux/Documentation/core-api/ |
| H A D | folio_queue.rst |
    13   * Adding and removing folios
    24   The folio_queue struct forms a single segment in a segmented list of folios
    68   the number of folios added.
    71   Adding and removing folios
    134  of folios added to a segment and the third is a shorthand to indicate if the
    137  Note that the count and fullness are not affected by clearing folios from the
    194  last segment is reached and the folios it refers to are entirely consumed by
|
| /linux/Documentation/mm/ |
| H A D | unevictable-lru.rst |
    13  folios.
    28  folios and to hide these folios from vmscan. This mechanism is based on a patch
    72  The Unevictable LRU infrastructure maintains unevictable folios as if they were
    75  (1) We get to "treat unevictable folios just like we treat other folios in the
    80  (2) We want to be able to migrate unevictable folios between nodes for memory
    82  can only migrate folios that it can successfully isolate from the LRU
    83  lists (or "Movable" folios: outside of consideration here). If we were to
    84  maintain folios elsewhere than on an LRU-like list, where they can be
    88  anonymous, swap-backed folios. This differentiation is only important
    89  while the folios are, in fact, evictable.
    [all …]
|
| H A D | multigen_lru.rst |
    92   truncated generation number is an index to ``lrugen->folios[]``. The
    96   ``lrugen->folios[]``; otherwise it stores zero.
    100  generations, tiers do not have dedicated ``lrugen->folios[]``. In
    131  increments ``min_seq`` when ``lrugen->folios[]`` indexed by
    226  since each node and memcg combination has an LRU of folios (see
    232  the active/inactive LRU (of folios):
    255  The multi-gen LRU (of folios) can be disassembled into the following
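
The "truncated generation number" above is simply the sequence counter reduced modulo the number of generations; the kernel's helper for this lives in include/linux/mm_inline.h and is a one-liner:

    /* From include/linux/mm_inline.h: map a generation sequence number
     * onto a slot of lrugen->folios[]. */
    static inline int lru_gen_from_seq(unsigned long seq)
    {
        return seq % MAX_NR_GENS;
    }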
|
| /linux/fs/nilfs2/ |
| H A D | page.c |
    258  struct folio *folio = fbatch.folios[i], *dfolio;  in nilfs_copy_dirty_pages()
    312  struct folio *folio = fbatch.folios[i], *dfolio;  in nilfs_copy_back_pages()
    372  struct folio *folio = fbatch.folios[i];  in nilfs_clear_dirty_pages()
    531  folio = fbatch.folios[i];  in nilfs_find_uncommitted_extent()
|