/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}
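
/*
 * Illustrative sketch (not part of the original file): a block-backed
 * filesystem typically wires ->invalidatepage to block_invalidatepage,
 * which is the same fallback do_invalidatepage() applies above when the
 * aop is absent.  The aops name here is hypothetical:
 *
 *	static const struct address_space_operations example_aops = {
 *		.invalidatepage	= block_invalidatepage,
 *		...
 *	};
 */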

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}
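
/*
 * Illustrative sketch (assumption): the fs/buffer.c situation described
 * above the cancel_dirty_page() definition looks roughly like this once a
 * page's buffers have all been cleaned behind the VM's back; the predicate
 * name is hypothetical:
 *
 *	if (example_all_buffers_clean(page))
 *		cancel_dirty_page(page, PAGE_CACHE_SIZE);
 */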

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
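
/*
 * Illustrative sketch (simplified assumption, not the exact
 * mm/memory-failure.c logic): a hardware-poison handler first tries the
 * gentle invalidate_inode_page(), which requires the page lock, and only
 * falls back to truncation via generic_error_remove_page() when that
 * fails:
 *
 *	lock_page(page);
 *	if (!invalidate_inode_page(page))
 *		err = generic_error_remove_page(page_mapping(page), page);
 *	unlock_page(page);
 */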

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
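
/*
 * Illustrative sketch (assumption): the classic caller of
 * truncate_inode_pages() is an inode-teardown path, which must empty the
 * pagecache before the filesystem frees the inode's on-disk blocks.  The
 * "example_" names are hypothetical:
 *
 *	static void example_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		example_free_blocks(inode);
 *		end_writeback(inode);
 *	}
 */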

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Returns the number of pages successfully invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
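
/*
 * Illustrative sketch (assumption): POSIX_FADV_DONTNEED-style cache
 * shedding converts a byte range to page indices and lets
 * invalidate_mapping_pages() discard whatever is clean and unused:
 *
 *	pgoff_t first = offset >> PAGE_CACHE_SHIFT;
 *	pgoff_t last = (offset + len - 1) >> PAGE_CACHE_SHIFT;
 *
 *	if (last >= first)
 *		invalidate_mapping_pages(mapping, first, last);
 */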

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
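
/*
 * Illustrative note (assumption): ->launder_page gives a filesystem one
 * last chance to write a dirty page back before it is forcibly dropped;
 * NFS, for instance, uses it to flush outstanding writes.  The caller-side
 * contract, as used by invalidate_inode_pages2_range() below:
 *
 *	lock_page(page);			// both helpers expect the lock held
 *	err = do_launder_page(mapping, page);	// 0 if already clean or laundered
 *	if (!err && !invalidate_complete_page2(mapping, page))
 *		err = -EBUSY;
 *	unlock_page(page);
 */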

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
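
/*
 * Illustrative sketch (hypothetical caller): a network filesystem that
 * detects stale cached data would force a full invalidation and surface
 * any failure; the fallback name is hypothetical:
 *
 *	err = invalidate_inode_pages2(inode->i_mapping);
 *	if (err)				// typically -EBUSY
 *		example_handle_stale_cache(inode);
 */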

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file offset
 * @new: new file offset
 *
 * The inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new);
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode->i_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize;

	oldsize = inode->i_size;
	i_size_write(inode, newsize);

	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
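
/*
 * Illustrative sketch (assumption): the intended pattern for a
 * filesystem's ->setattr once it stops using vmtruncate(); names with the
 * "example_" prefix are hypothetical:
 *
 *	static int example_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, attr);
 *		if (error)
 *			return error;
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size != i_size_read(inode)) {
 *			truncate_setsize(inode, attr->ia_size);
 *			example_truncate_blocks(inode, attr->ia_size);
 *		}
 *		setattr_copy(inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */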

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * This function is deprecated and truncate_setsize or truncate_pagecache
 * should be used instead, together with filesystem specific block truncation.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	int error;

	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;

	truncate_setsize(inode, offset);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
}
EXPORT_SYMBOL(vmtruncate);