Lines Matching full:mapping

182  * @entry may no longer be the entry at the index in the mapping.
347 * A DAX folio is considered shared if it has no mapping set and ->share (which
354 return !folio->mapping && folio->share; in dax_folio_is_shared()
360 * previously been associated with any mappings the ->mapping and ->index
361 * fields will be set. If it has already been associated with a mapping
362 * the mapping will be cleared and the share count set. It's then up to
364 * recover ->mapping and ->index information. For example by implementing
371 * folio->mapping. in dax_folio_make_shared()
373 folio->mapping = NULL; in dax_folio_make_shared()
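
Pulling the matches at lines 347-373 together: a DAX folio stops being owned by a single file once dax_folio_make_shared() clears ->mapping and starts tracking users in ->share (which reuses the storage of ->index). The test at line 354 therefore amounts to the following; this restatement is assembled from the fragments above, not new behaviour:

        /* A DAX folio is "shared" once ->mapping has been cleared and the
         * share count (aliasing ->index) is non-zero. */
        static inline bool dax_folio_is_shared(struct folio *folio)
        {
                return !folio->mapping && folio->share;
        }
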
395 folio->mapping = NULL; in dax_folio_put()
409 new_folio->mapping = NULL; in dax_folio_put()
430 * final mapping. in dax_folio_init()
442 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
456 if (shared && (folio->mapping || dax_folio_is_shared(folio))) { in dax_associate_entry()
457 if (folio->mapping) in dax_associate_entry()
464 WARN_ON_ONCE(folio->mapping); in dax_associate_entry()
467 folio->mapping = mapping; in dax_associate_entry()
472 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
512 /* Ensure folio->mapping isn't freed while we look at it */ in dax_lock_folio()
515 struct address_space *mapping = READ_ONCE(folio->mapping); in dax_lock_folio() local
518 if (!mapping || !dax_mapping(mapping)) in dax_lock_folio()
529 if (S_ISCHR(mapping->host->i_mode)) in dax_lock_folio()
532 xas.xa = &mapping->i_pages; in dax_lock_folio()
534 if (mapping != folio->mapping) { in dax_lock_folio()
556 struct address_space *mapping = folio->mapping; in dax_unlock_folio() local
557 XA_STATE(xas, &mapping->i_pages, folio->index); in dax_unlock_folio()
559 if (S_ISCHR(mapping->host->i_mode)) in dax_unlock_folio()
566 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
567 * @mapping: the file's mapping whose entry we want to lock
574 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index, in dax_lock_mapping_entry() argument
583 if (!dax_mapping(mapping)) in dax_lock_mapping_entry()
586 xas.xa = &mapping->i_pages; in dax_lock_mapping_entry()
599 * Because we are looking for entry from file's mapping in dax_lock_mapping_entry()
617 void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index, in dax_unlock_mapping_entry() argument
620 XA_STATE(xas, &mapping->i_pages, index); in dax_unlock_mapping_entry()
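
dax_lock_mapping_entry()/dax_unlock_mapping_entry() at lines 574 and 617 give callers such as memory-failure handling a way to pin a DAX entry by file offset. A rough caller sketch follows; the listing truncates the signatures, so the third parameter (assumed here to be a struct page ** returning the backing page) and the error handling are assumptions, not copied from this file:

        /* hypothetical caller mirroring the cookie-based locking above;
         * a zero cookie means the entry could not be locked */
        static int act_on_dax_entry(struct address_space *mapping, pgoff_t index)
        {
                struct page *page = NULL;
                dax_entry_t cookie;

                cookie = dax_lock_mapping_entry(mapping, index, &page);
                if (!cookie)
                        return -EBUSY;

                if (page) {
                        /* ... operate on the page while the entry is locked ... */
                }

                dax_unlock_mapping_entry(mapping, index, cookie);
                return 0;
        }
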
658 struct address_space *mapping, unsigned int order) in grab_mapping_entry() argument
700 unmap_mapping_pages(mapping, in grab_mapping_entry()
707 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
710 mapping->nrpages -= PG_PMD_NR; in grab_mapping_entry()
726 mapping->nrpages += 1UL << order; in grab_mapping_entry()
731 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
744 * dax_layout_busy_page_range - find first pinned page in @mapping
745 * @mapping: address space to scan for a page with ref count > 1
753 * any page in the mapping is busy, i.e. for DMA, or other
761 struct page *dax_layout_busy_page_range(struct address_space *mapping, in dax_layout_busy_page_range() argument
769 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
777 if (!dax_mapping(mapping)) in dax_layout_busy_page_range()
797 unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0); in dax_layout_busy_page_range()
822 struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
824 return dax_layout_busy_page_range(mapping, 0, LLONG_MAX); in dax_layout_busy_page()
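
dax_layout_busy_page_range() (line 761) and its whole-file wrapper dax_layout_busy_page() (line 822) let a filesystem find out whether any DAX page in a byte range still has extra references (e.g. in-flight DMA) before it changes the block mapping underneath. A hedged sketch of the usual retry loop; how the caller waits for the reference to drop is left as a comment because it is a filesystem detail not shown in these matches:

        /* illustrative loop, not copied from any particular filesystem */
        static int wait_for_dax_users(struct inode *inode, loff_t start, loff_t end)
        {
                struct page *page;

                for (;;) {
                        page = dax_layout_busy_page_range(inode->i_mapping,
                                                          start, end);
                        if (!page)
                                return 0;       /* nothing in the range is pinned */

                        /* drop locks as required, wait for the page's extra
                         * references to go away, then rescan */
                }
        }
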
828 static int __dax_invalidate_entry(struct address_space *mapping, in __dax_invalidate_entry() argument
831 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
843 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
845 mapping->nrpages -= 1UL << dax_entry_order(entry); in __dax_invalidate_entry()
853 static int __dax_clear_dirty_range(struct address_space *mapping, in __dax_clear_dirty_range() argument
856 XA_STATE(xas, &mapping->i_pages, start); in __dax_clear_dirty_range()
883 * Delete DAX entry at @index from @mapping. Wait for it
886 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) in dax_delete_mapping_entry() argument
888 int ret = __dax_invalidate_entry(mapping, index, true); in dax_delete_mapping_entry()
901 void dax_delete_mapping_range(struct address_space *mapping, in dax_delete_mapping_range() argument
907 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_delete_mapping_range()
922 dax_disassociate_entry(entry, mapping, true); in dax_delete_mapping_range()
924 mapping->nrpages -= 1UL << dax_entry_order(entry); in dax_delete_mapping_range()
948 * DAX mapping entries for the range.
1005 int dax_invalidate_mapping_entry_sync(struct address_space *mapping, in dax_invalidate_mapping_entry_sync() argument
1008 return __dax_invalidate_entry(mapping, index, false); in dax_invalidate_mapping_entry_sync()
1038 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1059 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_entry() local
1066 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in dax_insert_entry()
1070 /* we are replacing a zero page with block mapping */ in dax_insert_entry()
1072 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in dax_insert_entry()
1075 unmap_mapping_pages(mapping, index, 1, false); in dax_insert_entry()
1083 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
1084 dax_associate_entry(new_entry, mapping, vmf->vma, in dax_insert_entry()
1114 struct address_space *mapping, void *entry) in dax_writeback_one() argument
1121 * A page got tagged dirty in DAX mapping? Something is seriously in dax_writeback_one()
1179 i_mmap_lock_read(mapping); in dax_writeback_one()
1180 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) { in dax_writeback_one()
1184 i_mmap_unlock_read(mapping); in dax_writeback_one()
1190 * the pfn mappings are writeprotected and fault waits for mapping in dax_writeback_one()
1199 trace_dax_writeback_one(mapping->host, index, count); in dax_writeback_one()
1208 * Flush the mapping to the persistent domain within the byte range of [start,
1212 int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
1215 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
1216 struct inode *inode = mapping->host; in dax_writeback_mapping_range()
1225 if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) in dax_writeback_mapping_range()
1230 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
1234 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1236 mapping_set_error(mapping, ret); in dax_writeback_mapping_range()
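
dax_writeback_mapping_range() (line 1212) is what a DAX-aware filesystem calls from its ->writepages method; it walks the dirty-tagged entries and flushes each one via dax_writeback_one() (line 1114). A minimal wiring sketch; foofs and the sbi->s_daxdev field are placeholders for however the filesystem stores its struct dax_device, not names taken from this file:

        /* hypothetical ->writepages for a DAX-capable filesystem */
        static int foofs_dax_writepages(struct address_space *mapping,
                                        struct writeback_control *wbc)
        {
                struct foofs_sb_info *sbi = mapping->host->i_sb->s_fs_info;

                return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
        }
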
1367 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1368 * If this page is ever written to we will re-fault and change the mapping to
1390 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole() local
1393 struct inode *inode = mapping->host; in dax_pmd_load_hole()
1463 * Invalidate the mapping because we're about to CoW. in dax_unshare_iter()
1838 * @xas: the dax mapping tree of a file
1903 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pte_fault() local
1904 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1906 .inode = mapping->host, in dax_iomap_pte_fault()
1929 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
2015 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pmd_fault() local
2016 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
2018 .inode = mapping->host, in dax_iomap_pmd_fault()
2052 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
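
The dax_iomap_pte_fault() and dax_iomap_pmd_fault() paths at lines 1903 and 2015 are normally reached through dax_iomap_fault(), which a filesystem calls from its ->fault/->huge_fault handlers together with its iomap_ops. The sketch below assumes the order-based form of dax_iomap_fault() (older kernels pass an enum page_entry_size instead), and foofs_iomap_ops is a placeholder:

        /* hypothetical huge_fault handler for a DAX file */
        static vm_fault_t foofs_dax_huge_fault(struct vm_fault *vmf,
                                               unsigned int order)
        {
                pfn_t pfn;

                return dax_iomap_fault(vmf, order, &pfn, NULL, &foofs_iomap_ops);
        }
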
2138 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite() local
2139 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
2151 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
2170 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()