Lines matching full:mapping in fs/dax.c

181  * @entry may no longer be the entry at the index in the mapping.
331 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
344 WARN_ON_ONCE(page->mapping); in dax_associate_entry()
345 page->mapping = mapping; in dax_associate_entry()
350 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
362 WARN_ON_ONCE(page->mapping && page->mapping != mapping); in dax_disassociate_entry()
363 page->mapping = NULL; in dax_disassociate_entry()
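
The hits at 331-363 pair dax_associate_entry() with dax_disassociate_entry(): installing a PFN-backed entry points every covered page's ->mapping at the file, and the WARN_ON_ONCE calls catch double ownership. A minimal sketch of that pairing, assuming the real fs/dax.c for_each_mapped_pfn() helper (setting of page->index is elided here):

	/* Sketch: claim every page covered by @entry for @mapping. */
	static void associate_sketch(void *entry, struct address_space *mapping)
	{
		unsigned long pfn;

		for_each_mapped_pfn(entry, pfn) {
			struct page *page = pfn_to_page(pfn);

			WARN_ON_ONCE(page->mapping);	/* must be unowned */
			page->mapping = mapping;
		}
	}

	/* Sketch: release ownership; tolerate an already-cleared field. */
	static void disassociate_sketch(void *entry, struct address_space *mapping)
	{
		unsigned long pfn;

		for_each_mapped_pfn(entry, pfn) {
			struct page *page = pfn_to_page(pfn);

			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
			page->mapping = NULL;
		}
	}
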
394 /* Ensure page->mapping isn't freed while we look at it */ in dax_lock_page()
397 struct address_space *mapping = READ_ONCE(page->mapping); in dax_lock_page() local
400 if (!mapping || !dax_mapping(mapping)) in dax_lock_page()
411 if (S_ISCHR(mapping->host->i_mode)) in dax_lock_page()
414 xas.xa = &mapping->i_pages; in dax_lock_page()
416 if (mapping != page->mapping) { in dax_lock_page()
438 struct address_space *mapping = page->mapping; in dax_unlock_page() local
439 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
441 if (S_ISCHR(mapping->host->i_mode)) in dax_unlock_page()
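
dax_lock_page() (394-416) stabilizes page->mapping for a caller that starts from a bare page, re-checking the field after taking the XArray lock in case truncate raced in; dax_unlock_page() drops that pin. A sketch of the calling convention, modeled on the mm/memory-failure.c user:

	/* Sketch: pin down page->mapping before inspecting file state. */
	static int handle_dax_page_sketch(struct page *page)
	{
		dax_entry_t cookie;

		cookie = dax_lock_page(page);
		if (!cookie)
			return -EBUSY;	/* raced with truncate or unmount */

		/* page->mapping and page->index are stable here */
		pr_info("dax page belongs to inode %lu, offset %lu\n",
			page->mapping->host->i_ino, page->index);

		dax_unlock_page(page, cookie);
		return 0;
	}
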
477 struct address_space *mapping, unsigned int order) in grab_mapping_entry() argument
518 unmap_mapping_pages(mapping, in grab_mapping_entry()
525 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
528 mapping->nrexceptional--; in grab_mapping_entry()
544 mapping->nrexceptional++; in grab_mapping_entry()
549 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
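
The xas_nomem() call at 549 is grab_mapping_entry() using the standard XArray allocate-outside-the-lock loop: if a store failed for lack of memory, xas_nomem() preallocates with the given gfp mask (highmem stripped, since XArray nodes need kernel mappings) and the whole lookup is retried. The canonical idiom, as a fragment sketch:

	/* Standard XArray store-with-allocation idiom (sketch). */
	XA_STATE(xas, &mapping->i_pages, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, entry);	/* may record -ENOMEM in xas */
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM));
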
562 * dax_layout_busy_page_range - find first pinned page in @mapping
563 * @mapping: address space to scan for a page with ref count > 1
571 * any page in the mapping is busy, i.e. for DMA, or other
579 struct page *dax_layout_busy_page_range(struct address_space *mapping, in dax_layout_busy_page_range() argument
587 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
595 if (!dax_mapping(mapping) || !mapping_mapped(mapping)) in dax_layout_busy_page_range()
615 unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0); in dax_layout_busy_page_range()
641 struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
643 return dax_layout_busy_page_range(mapping, 0, LLONG_MAX); in dax_layout_busy_page()
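
dax_layout_busy_page() is how filesystems wait out DMA before changing a file's layout (truncate, hole punch). A sketch modeled on ext4_break_layouts()/xfs_break_dax_layouts(); note the real callers drop and retake their mapping lock inside the wait step, so the bare schedule() here is a simplification:

	/* Sketch: block until no DAX page in the file is pinned
	 * (refcount > 1, e.g. by RDMA or O_DIRECT). */
	static int break_dax_layouts_sketch(struct inode *inode)
	{
		struct page *page;
		int error;

		do {
			page = dax_layout_busy_page(inode->i_mapping);
			if (!page)
				return 0;	/* nothing pinned */

			error = ___wait_var_event(&page->_refcount,
					atomic_read(&page->_refcount) == 1,
					TASK_INTERRUPTIBLE, 0, 0, schedule());
		} while (error == 0);

		return error;
	}
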
647 static int __dax_invalidate_entry(struct address_space *mapping, in __dax_invalidate_entry() argument
650 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
662 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
664 mapping->nrexceptional--; in __dax_invalidate_entry()
673 * Delete DAX entry at @index from @mapping. Wait for it
676 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) in dax_delete_mapping_entry() argument
678 int ret = __dax_invalidate_entry(mapping, index, true); in dax_delete_mapping_entry()
694 int dax_invalidate_mapping_entry_sync(struct address_space *mapping, in dax_invalidate_mapping_entry_sync() argument
697 return __dax_invalidate_entry(mapping, index, false); in dax_invalidate_mapping_entry_sync()
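
__dax_invalidate_entry() (647-664) backs two exported helpers with different strictness: dax_delete_mapping_entry() passes trunc=true and removes even dirty entries (truncate discards the data anyway), while dax_invalidate_mapping_entry_sync() passes trunc=false and refuses dirty or towrite entries so pending writeback is not lost. A sketch of the caller-side distinction, modeled on mm/truncate.c ("truncating" is an illustrative flag, not a real variable there):

	/* Sketch: truncation may drop a dirty DAX entry, invalidation
	 * must not (both helpers return 1 on success, 0 on failure). */
	if (dax_mapping(mapping)) {
		if (truncating)
			ret = dax_delete_mapping_entry(mapping, index);
		else	/* returns 0 if the entry is dirty or towrite */
			ret = dax_invalidate_mapping_entry_sync(mapping, index);
	}
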
733 struct address_space *mapping, struct vm_fault *vmf, in dax_insert_entry() argument
739 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in dax_insert_entry()
743 /* we are replacing a zero page with block mapping */ in dax_insert_entry()
745 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in dax_insert_entry()
748 unmap_mapping_pages(mapping, index, 1, false); in dax_insert_entry()
756 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
757 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_entry()
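
The unmap calls at 745-748 handle upgrading a hole: if the existing entry is a zero page and the new one is a real block mapping, every userspace mapping of the stale page must be shot down, over the whole 2MiB colour for PMD entries. The surrounding logic, reconstructed as a sketch (DAX_ZERO_PAGE, PG_PMD_COLOUR and PG_PMD_NR are the real fs/dax.c constants):

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;

		if (dax_is_pmd_entry(entry))	/* whole 2MiB range */
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else				/* single PTE page */
			unmap_mapping_pages(mapping, index, 1, false);
	}
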
792 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, in dax_entry_mkclean() argument
800 i_mmap_lock_read(mapping); in dax_entry_mkclean()
801 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { in dax_entry_mkclean()
862 i_mmap_unlock_read(mapping); in dax_entry_mkclean()
866 struct address_space *mapping, void *entry) in dax_writeback_one() argument
872 * A page got tagged dirty in DAX mapping? Something is seriously in dax_writeback_one()
928 dax_entry_mkclean(mapping, index, pfn); in dax_writeback_one()
933 * the pfn mappings are writeprotected and fault waits for mapping in dax_writeback_one()
942 trace_dax_writeback_one(mapping->host, index, count); in dax_writeback_one()
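
dax_writeback_one() orders its steps deliberately: write-protect all user mappings first (the dax_entry_mkclean() call at 928), then flush CPU caches, and only then clear the dirty tag, so a write racing with writeback takes a fresh fault and re-dirties the entry. The core sequence, as a sketch (the real code also wakes entry waiters before unlocking):

	/* Sketch of the dax_writeback_one() core: the order matters.
	 * 1) write-protect user PTEs so new writes fault and re-dirty,
	 * 2) flush CPU caches to the persistent domain,
	 * 3) clear the dirty tag under the XArray lock. */
	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);

	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	xas_unlock_irq(xas);
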
951 * Flush the mapping to the persistent domain within the byte range of [start,
955 int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
958 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
959 struct inode *inode = mapping->host; in dax_writeback_mapping_range()
968 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) in dax_writeback_mapping_range()
973 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
977 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
979 mapping_set_error(mapping, ret); in dax_writeback_mapping_range()
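
For a DAX file, dax_writeback_mapping_range() is essentially the whole ->writepages implementation. A sketch with hypothetical myfs names, assuming the filesystem stashes its dax_device in its private sb_info (as ext2 and xfs do):

	/* Hypothetical ->writepages for a DAX inode, modeled on
	 * ext2_dax_writepages()/xfs_dax_writepages(). */
	static int myfs_dax_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
	{
		struct myfs_sb_info *sbi = mapping->host->i_sb->s_fs_info;

		/* walks TOWRITE-tagged entries, flushing CPU caches */
		return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
	}
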
1036 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1037 * If this page is ever written to we will re-fault and change the mapping to
1041 struct address_space *mapping, void **entry, in dax_load_hole() argument
1044 struct inode *inode = mapping->host; in dax_load_hole()
1049 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
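
Per the comment at 1036-1037, read faults over holes allocate nothing: dax_load_hole() maps the shared zero page read-only and records a DAX_ZERO_PAGE entry, so a later write re-faults and replaces it (the unmap logic shown at 743-748). Its body, reconstructed as a sketch:

	/* Sketch of dax_load_hole(): map the zero page read-only and
	 * remember it with a DAX_ZERO_PAGE entry. */
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vmf->address));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);
	ret = vmf_insert_mixed(vmf->vma, vmf->address, pfn);	/* read-only */
	return ret;
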
1200 struct address_space *mapping = iocb->ki_filp->f_mapping; in dax_iomap_rw() local
1201 struct inode *inode = mapping->host; in dax_iomap_rw()
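
dax_iomap_rw() is called from the filesystem's read_iter/write_iter once IS_DAX(inode) is established. A sketch of the read side, following the ext4/xfs pattern (myfs_iomap_ops is a hypothetical stand-in for the fs's iomap_ops):

	static ssize_t myfs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		struct inode *inode = iocb->ki_filp->f_mapping->host;
		ssize_t ret;

		inode_lock_shared(inode);
		ret = dax_iomap_rw(iocb, to, &myfs_iomap_ops);
		inode_unlock_shared(inode);

		file_accessed(iocb->ki_filp);
		return ret;
	}
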
1237 * MAP_SYNC on a dax mapping guarantees dirty metadata is
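
That comment is the contract behind VM_FAULT_NEEDDSYNC: on a MAP_SYNC mapping, the fault path installs nothing writable until the caller has committed the metadata for any new block allocation. Caller-side sketch, modeled on ext4_dax_huge_fault()/__xfs_filemap_fault() (myfs names hypothetical):

	static vm_fault_t myfs_dax_fault(struct vm_fault *vmf,
			enum page_entry_size pe_size)
	{
		pfn_t pfn;
		vm_fault_t ret;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
				      &myfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			/* commit metadata, then map the pfn writable */
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
		return ret;
	}
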
1251 struct address_space *mapping = vma->vm_file->f_mapping; in dax_iomap_pte_fault() local
1252 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1253 struct inode *inode = mapping->host; in dax_iomap_pte_fault()
1280 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1355 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1383 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
1420 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole() local
1423 struct inode *inode = mapping->host; in dax_pmd_load_hole()
1436 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1473 struct address_space *mapping = vma->vm_file->f_mapping; in dax_iomap_pmd_fault() local
1474 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1479 struct inode *inode = mapping->host; in dax_iomap_pmd_fault()
1533 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1553 * setting up a mapping, so really we're using iomap_begin() as a way in dax_iomap_pmd_fault()
1573 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
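
The comment at 1553 explains why the PMD fault calls ->iomap_begin() directly rather than going through iomap_apply(): it needs only a block lookup, not I/O. The neighbouring logic, as a fragment sketch (5.x-era two-map signature; the goto targets are the real function's cleanup labels, and ->iomap_end() still runs on the finish_iomap path):

	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	int error;

	/* use ->iomap_begin() purely to look up the extent, then check
	 * it is big enough to back a PMD */
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags,
				 &iomap, &srcmap);
	if (error)
		goto fallback;				/* take the PTE path */
	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;			/* extent too small */
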
1677 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite() local
1678 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1689 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1705 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
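
dax_insert_pfn_mkwrite() (1677-1705) is the second half of the MAP_SYNC protocol: after the filesystem has fsynced the range, dax_finish_sync_fault() calls it to upgrade the already-present entry to writable. That wrapper, reconstructed as a sketch (pe_order() is the real fs/dax.c helper):

	/* Sketch of dax_finish_sync_fault(): persist metadata covering
	 * the faulted range, then make the mapping writable. */
	loff_t start = (loff_t)vmf->pgoff << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;
	int err;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
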