Lines Matching +full:data +full:- +full:mapping
1 /* SPDX-License-Identifier: GPL-2.0 */
22 * Bits in mapping->flags.
36 * mapping_set_error - record a writeback error in the address_space
37 * @mapping: the mapping in which an error should be set
38 * @error: the error to set in the mapping
46 * mapping_set_error to record the error in the mapping so that it can be
49 static inline void mapping_set_error(struct address_space *mapping, int error) in mapping_set_error() argument
55 __filemap_set_wb_err(mapping, error); in mapping_set_error()
58 if (mapping->host) in mapping_set_error()
59 errseq_set(&mapping->host->i_sb->s_wb_err, error); in mapping_set_error()
62 if (error == -ENOSPC) in mapping_set_error()
63 set_bit(AS_ENOSPC, &mapping->flags); in mapping_set_error()
65 set_bit(AS_EIO, &mapping->flags); in mapping_set_error()
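A minimal sketch (not part of this header) of how a filesystem's writeback completion path might use mapping_set_error(); my_fs_end_writeback() is a hypothetical helper:

static void my_fs_end_writeback(struct page *page, int err)
{
	if (err)
		/* latch the error in the errseq_t and the AS_EIO/AS_ENOSPC bits */
		mapping_set_error(page->mapping, err);
	end_page_writeback(page);
}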
68 static inline void mapping_set_unevictable(struct address_space *mapping) in mapping_set_unevictable() argument
70 set_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_set_unevictable()
73 static inline void mapping_clear_unevictable(struct address_space *mapping) in mapping_clear_unevictable() argument
75 clear_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_clear_unevictable()
78 static inline bool mapping_unevictable(struct address_space *mapping) in mapping_unevictable() argument
80 return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_unevictable()
83 static inline void mapping_set_exiting(struct address_space *mapping) in mapping_set_exiting() argument
85 set_bit(AS_EXITING, &mapping->flags); in mapping_set_exiting()
88 static inline int mapping_exiting(struct address_space *mapping) in mapping_exiting() argument
90 return test_bit(AS_EXITING, &mapping->flags); in mapping_exiting()
93 static inline void mapping_set_no_writeback_tags(struct address_space *mapping) in mapping_set_no_writeback_tags() argument
95 set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); in mapping_set_no_writeback_tags()
98 static inline int mapping_use_writeback_tags(struct address_space *mapping) in mapping_use_writeback_tags() argument
100 return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); in mapping_use_writeback_tags()
103 static inline gfp_t mapping_gfp_mask(struct address_space * mapping) in mapping_gfp_mask() argument
105 return mapping->gfp_mask; in mapping_gfp_mask()
108 /* Restricts the given gfp_mask to what the mapping allows. */
109 static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, in mapping_gfp_constraint() argument
112 return mapping_gfp_mask(mapping) & gfp_mask; in mapping_gfp_constraint()
116 * This is non-atomic. Only to be used before the mapping is activated.
121 m->gfp_mask = mask; in mapping_set_gfp_mask()
124 static inline bool mapping_thp_support(struct address_space *mapping) in mapping_thp_support() argument
126 return test_bit(AS_THP_SUPPORT, &mapping->flags); in mapping_thp_support()
129 static inline int filemap_nr_thps(struct address_space *mapping) in filemap_nr_thps() argument
132 return atomic_read(&mapping->nr_thps); in filemap_nr_thps()
138 static inline void filemap_nr_thps_inc(struct address_space *mapping) in filemap_nr_thps_inc() argument
141 if (!mapping_thp_support(mapping)) in filemap_nr_thps_inc()
142 atomic_inc(&mapping->nr_thps); in filemap_nr_thps_inc()
148 static inline void filemap_nr_thps_dec(struct address_space *mapping) in filemap_nr_thps_dec() argument
151 if (!mapping_thp_support(mapping)) in filemap_nr_thps_dec()
152 atomic_dec(&mapping->nr_thps); in filemap_nr_thps_dec()
166 * been used to look up the page in the pagecache radix-tree (or page table):
177 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
183 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
190 * - 2 runs before A: in this case, A sees elevated refcount and bails out
191 * - A runs before 2: in this case, 2 sees zero refcount and retries;
198 * such a re-insertion, depending on the order in which locks are granted.
211 * Preempt must be disabled here - we rely on rcu_read_lock doing in __page_cache_add_speculative()
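A simplified sketch of the lookup-side protocol described above, modelled loosely on find_get_entry(); retry-entry handling is omitted and my_rcu_lookup() is a hypothetical name:

static struct page *my_rcu_lookup(struct address_space *mapping, pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct page *page;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	page = xas_load(&xas);
	if (!page || xa_is_value(page)) {
		page = NULL;
		goto out;
	}
	/* only take a reference if the refcount is currently non-zero */
	if (!page_cache_get_speculative(page))
		goto repeat;
	/* the page may have been freed and reused; re-check the slot */
	if (unlikely(page != xas_reload(&xas))) {
		put_page(page);
		goto repeat;
	}
out:
	rcu_read_unlock();
	return page;
}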
248 * attach_page_private - Attach private data to a page.
249 * @page: Page to attach data to.
250 * @data: Data to attach to page.
252 * Attaching private data to a page increments the page's reference count.
253 * The data must be detached before the page will be freed.
255 static inline void attach_page_private(struct page *page, void *data) in attach_page_private() argument
258 set_page_private(page, (unsigned long)data); in attach_page_private()
263 * detach_page_private - Detach private data from a page.
264 * @page: Page to detach data from.
266 * Removes the data that was previously attached to the page and decrements
269 * Return: Data that was attached to the page.
273 void *data = (void *)page_private(page); in detach_page_private() local
281 return data; in detach_page_private()
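A brief sketch of pairing attach_page_private() with detach_page_private(); struct my_fs_state and both helpers are hypothetical:

static int my_fs_attach_state(struct page *page)
{
	struct my_fs_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;
	attach_page_private(page, state);	/* takes an extra page reference */
	return 0;
}

static void my_fs_release_state(struct page *page)
{
	kfree(detach_page_private(page));	/* drops the reference taken above */
}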
305 pgoff_t page_cache_next_miss(struct address_space *mapping,
307 pgoff_t page_cache_prev_miss(struct address_space *mapping,
319 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
323 * find_get_page - find and get a page reference
324 * @mapping: the address_space to search
327 * Looks up the page cache slot at @mapping & @offset. If there is a
332 static inline struct page *find_get_page(struct address_space *mapping, in find_get_page() argument
335 return pagecache_get_page(mapping, offset, 0, 0); in find_get_page()
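For illustration, a hedged sketch of a cache-presence check built on find_get_page(); my_fs_page_is_cached() is a hypothetical name:

static bool my_fs_page_is_cached(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return false;
	put_page(page);		/* drop the reference find_get_page() took */
	return true;
}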
338 static inline struct page *find_get_page_flags(struct address_space *mapping, in find_get_page_flags() argument
341 return pagecache_get_page(mapping, offset, fgp_flags, 0); in find_get_page_flags()
345 * find_lock_page - locate, pin and lock a pagecache page
346 * @mapping: the address_space to search
349 * Looks up the page cache entry at @mapping & @index. If there is a
357 static inline struct page *find_lock_page(struct address_space *mapping, in find_lock_page() argument
360 return pagecache_get_page(mapping, index, FGP_LOCK, 0); in find_lock_page()
364 * find_lock_head - Locate, pin and lock a pagecache page.
365 * @mapping: The address_space to search.
368 * Looks up the page cache entry at @mapping & @index. If there is a
376 static inline struct page *find_lock_head(struct address_space *mapping, in find_lock_head() argument
379 return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0); in find_lock_head()
383 * find_or_create_page - locate or add a pagecache page
384 * @mapping: the page's address_space
385 * @index: the page's index into the mapping
388 * Looks up the page cache slot at @mapping & @index. If there is a
401 static inline struct page *find_or_create_page(struct address_space *mapping, in find_or_create_page() argument
404 return pagecache_get_page(mapping, index, in find_or_create_page()
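A sketch (hypothetical helper name) of the usual calling convention: the returned page is locked and referenced, so the caller must unlock and release it:

static struct page *my_fs_get_block_page(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *page;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return ERR_PTR(-ENOMEM);
	/* caller must unlock_page(page) and put_page(page) when finished */
	return page;
}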
410 * grab_cache_page_nowait - returns locked page at given index in given cache
411 * @mapping: target address_space
415 * This is intended for speculative data generators, where the data can
422 static inline struct page *grab_cache_page_nowait(struct address_space *mapping, in grab_cache_page_nowait() argument
425 return pagecache_get_page(mapping, index, in grab_cache_page_nowait()
427 mapping_gfp_mask(mapping)); in grab_cache_page_nowait()
435 return head->index == index; in thp_contains()
436 return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL)); in thp_contains()
449 return head + (index & (thp_nr_pages(head) - 1)); in find_subpage()
452 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
455 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
458 static inline unsigned find_get_pages(struct address_space *mapping, in find_get_pages() argument
462 return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, in find_get_pages()
465 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
467 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
470 static inline unsigned find_get_pages_tag(struct address_space *mapping, in find_get_pages_tag() argument
474 return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, in find_get_pages_tag()
478 struct page *grab_cache_page_write_begin(struct address_space *mapping,
484 static inline struct page *grab_cache_page(struct address_space *mapping, in grab_cache_page() argument
487 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); in grab_cache_page()
490 extern struct page * read_cache_page(struct address_space *mapping,
491 pgoff_t index, filler_t *filler, void *data);
492 extern struct page * read_cache_page_gfp(struct address_space *mapping,
494 extern int read_cache_pages(struct address_space *mapping,
495 struct list_head *pages, filler_t *filler, void *data);
497 static inline struct page *read_mapping_page(struct address_space *mapping, in read_mapping_page() argument
498 pgoff_t index, void *data) in read_mapping_page() argument
500 return read_cache_page(mapping, index, NULL, data); in read_mapping_page()
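A short sketch of reading a page through read_mapping_page(); my_fs_read_block() is a hypothetical helper:

static int my_fs_read_block(struct inode *inode, pgoff_t index)
{
	struct page *page = read_mapping_page(inode->i_mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);
	/* the page is uptodate (but not locked) here; use it, then release */
	put_page(page);
	return 0;
}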
504 * Get index of the page within the radix-tree
505 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
512 return page->index; in page_to_index()
515 * We don't initialize ->index for tail pages: calculate based on in page_to_index()
518 pgoff = compound_head(page)->index; in page_to_index()
519 pgoff += page - compound_head(page); in page_to_index()
525 * (TODO: hugepages should have ->index in PAGE_SIZE)
530 return page->index << compound_order(page); in page_to_pgoff()
536 * Return byte-offset into filesystem object for page.
540 return ((loff_t)page->index) << PAGE_SHIFT; in page_offset()
557 pgoff = (address - vma->vm_start) >> PAGE_SHIFT; in linear_page_index()
558 pgoff += vma->vm_pgoff; in linear_page_index()
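A worked example of the arithmetic above, assuming 4KiB pages (PAGE_SHIFT == 12):

/*
 * vma->vm_start = 0x7f0000000000, vma->vm_pgoff = 16
 * address       = 0x7f0000003000
 * pgoff  = (address - vm_start) >> PAGE_SHIFT = 0x3000 >> 12 = 3
 * pgoff += vm_pgoff                           = 3 + 16       = 19
 * so the faulting address maps to index 19 of the backing file.
 */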
562 /* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
578 if (wait_page->page != key->page) in wake_page_match()
580 key->page_match = 1; in wake_page_match()
582 if (wait_page->bit_nr != key->bit_nr) in wake_page_match()
601 return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); in trylock_page()
616 * signals. It returns 0 if it locked the page and -EINTR if it was
628 * lock_page_async - Lock the page, unless this would block. If the page
632 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
644 * lock_page_or_retry - Lock the page, unless this would block and the
668 * i.e. with an increased "page->count" so that the page won't
702 char __user *end = uaddr + size - 1; in fault_in_pages_writeable()
708 return -EFAULT; in fault_in_pages_writeable()
715 return -EFAULT; in fault_in_pages_writeable()
730 const char __user *end = uaddr + size - 1; in fault_in_pages_readable()
736 return -EFAULT; in fault_in_pages_readable()
740 return -EFAULT; in fault_in_pages_readable()
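A hedged sketch of the usual pattern: pre-fault the user buffer before entering a section where a page fault must be avoided; my_fs_prefault_user_buf() is a hypothetical name:

static int my_fs_prefault_user_buf(const char __user *ubuf, int len)
{
	/*
	 * Fault the pages in up front so that a later copy_from_user()
	 * done under a lock that must not be held across a fault is
	 * unlikely to stall or fail.
	 */
	return fault_in_pages_readable(ubuf, len);	/* 0 on success, -EFAULT otherwise */
}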
754 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
756 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
761 void delete_from_page_cache_batch(struct address_space *mapping,
769 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache() argument
774 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); in add_to_page_cache()
781 * struct readahead_control - Describes a readahead request.
784 * implement the ->readahead method should call readahead_page() or
793 * @mapping: Readahead this filesystem object.
797 struct address_space *mapping; member
807 .mapping = m, \
821 * page_cache_sync_readahead - generic file readahead
822 * @mapping: address_space which holds the pagecache and I/O vectors
834 void page_cache_sync_readahead(struct address_space *mapping, in page_cache_sync_readahead() argument
838 DEFINE_READAHEAD(ractl, file, mapping, index); in page_cache_sync_readahead()
843 * page_cache_async_readahead - file readahead for marked pages
844 * @mapping: address_space which holds the pagecache and I/O vectors
857 void page_cache_async_readahead(struct address_space *mapping, in page_cache_async_readahead() argument
861 DEFINE_READAHEAD(ractl, file, mapping, index); in page_cache_async_readahead()
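A sketch of how a buffered read path typically drives these two hooks (shaped after generic_file_buffered_read(); my_fs_readahead_hints() is hypothetical):

static void my_fs_readahead_hints(struct file *file, struct page *page,
				  pgoff_t index, unsigned long req_count)
{
	struct address_space *mapping = file->f_mapping;

	if (!page)
		/* nothing cached at @index yet: start synchronous readahead */
		page_cache_sync_readahead(mapping, &file->f_ra, file,
					  index, req_count);
	else if (PageReadahead(page))
		/* hit a readahead marker: extend the window asynchronously */
		page_cache_async_readahead(mapping, &file->f_ra, file,
					   page, index, req_count);
}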
866 * readahead_page - Get the next page to read.
878 BUG_ON(rac->_batch_count > rac->_nr_pages); in readahead_page()
879 rac->_nr_pages -= rac->_batch_count; in readahead_page()
880 rac->_index += rac->_batch_count; in readahead_page()
882 if (!rac->_nr_pages) { in readahead_page()
883 rac->_batch_count = 0; in readahead_page()
887 page = xa_load(&rac->mapping->i_pages, rac->_index); in readahead_page()
889 rac->_batch_count = thp_nr_pages(page); in readahead_page()
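A minimal sketch of a ->readahead implementation consuming pages with readahead_page() (the shape follows mpage_readahead(); my_fs_submit_read() is a hypothetical I/O helper):

static void my_fs_readahead(struct readahead_control *rac)
{
	struct page *page;

	while ((page = readahead_page(rac))) {
		/*
		 * The page is locked and referenced; queue it for read I/O.
		 * The I/O completion path is expected to unlock it.
		 */
		my_fs_submit_read(page);
		put_page(page);
	}
}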
898 XA_STATE(xas, &rac->mapping->i_pages, 0); in __readahead_batch()
901 BUG_ON(rac->_batch_count > rac->_nr_pages); in __readahead_batch()
902 rac->_nr_pages -= rac->_batch_count; in __readahead_batch()
903 rac->_index += rac->_batch_count; in __readahead_batch()
904 rac->_batch_count = 0; in __readahead_batch()
906 xas_set(&xas, rac->_index); in __readahead_batch()
908 xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { in __readahead_batch()
914 rac->_batch_count += thp_nr_pages(page); in __readahead_batch()
917 * The page cache isn't using multi-index entries yet, in __readahead_batch()
923 xas_set(&xas, rac->_index + rac->_batch_count); in __readahead_batch()
934 * readahead_page_batch - Get a batch of pages to read.
948 * readahead_pos - The byte offset into the file of this readahead request.
953 return (loff_t)rac->_index * PAGE_SIZE; in readahead_pos()
957 * readahead_length - The number of bytes in this readahead request.
962 return (loff_t)rac->_nr_pages * PAGE_SIZE; in readahead_length()
966 * readahead_index - The index of the first page in this readahead request.
971 return rac->_index; in readahead_index()
975 * readahead_count - The number of pages in this readahead request.
980 return rac->_nr_pages; in readahead_count()
985 return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> in dir_pages()
990 * page_mkwrite_check_truncate - check if page was truncated
995 * or -EFAULT if the page was truncated.
1004 if (page->mapping != inode->i_mapping) in page_mkwrite_check_truncate()
1005 return -EFAULT; in page_mkwrite_check_truncate()
1008 if (page->index < index) in page_mkwrite_check_truncate()
1011 if (page->index > index || !offset) in page_mkwrite_check_truncate()
1012 return -EFAULT; in page_mkwrite_check_truncate()
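A brief, assumed sketch of using page_mkwrite_check_truncate() from a ->page_mkwrite handler (my_fs_page_mkwrite() is hypothetical):

static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int len;

	lock_page(page);
	len = page_mkwrite_check_truncate(page, inode);
	if (len < 0) {
		/* the page was truncated away under us */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	/* ... prepare the first 'len' bytes of the page for writing ... */
	return VM_FAULT_LOCKED;	/* return with the page still locked */
}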
1018 * i_blocks_per_page - How many blocks fit in this page.
1031 return thp_size(page) >> inode->i_blkbits; in i_blocks_per_page()
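For example, assuming a 4KiB page and 512-byte filesystem blocks (inode->i_blkbits == 9), i_blocks_per_page() returns 4096 >> 9 == 8.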