/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE - partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	/* If we're cancelling the page, it had better not be mapped any more */
	if (page_mapped(page)) {
		static unsigned int warncount;

		/* Crude rate limit: only complain the first few times */
		WARN_ON(++warncount < 5);
	}

	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
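
/*
 * Illustrative only (not part of this file): the ->invalidatepage hook
 * consulted by do_invalidatepage() above is wired up in a filesystem's
 * address_space_operations.  A blockdev-backed filesystem typically points
 * it at block_invalidatepage(), or at a wrapper that first tidies up
 * fs-private state, e.g. ext3 sets
 *
 *	.invalidatepage	= ext3_invalidatepage,
 *
 * in its address_space_operations.
 */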

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to avoid as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second
 * pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.
 * Even if the mapping is large, it is probably the case that the final pages
 * are the most recently touched, and freeing happens in ascending file
 * offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}
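
	/*
	 * Second, blocking pass: pick up any pages the first pass skipped
	 * because they were locked or under writeback.  Restart from
	 * 'start' whenever a lookup comes up empty, and stop only once a
	 * lookup from 'start' itself finds nothing left in the range.
	 */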
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
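
/*
 * Illustrative call site (not in this file): the vmtruncate() path in
 * mm/memory.c shrinks a file with something like
 *
 *	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, offset);
 *
 * with inode->i_mutex held, as required above.
 */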

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
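
/*
 * Illustrative call site (not in this file): the POSIX_FADV_DONTNEED case
 * of sys_fadvise64_64() uses this to drop clean cached pages over a range,
 * roughly:
 *
 *	if (end_index >= start_index)
 *		invalidate_mapping_pages(mapping, start_index, end_index);
 */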

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
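
/*
 * ->launder_page is optional.  A filesystem that must write a dirty page
 * back itself before the page can be invalidated (NFS, for instance)
 * provides it in its address_space_operations; for everyone else
 * do_launder_page() above is a no-op.
 */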

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			ret = do_launder_page(mapping, page);
			if (ret == 0 && !invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	WARN_ON_ONCE(ret);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
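
/*
 * Illustrative call site (not in this file): the generic direct-I/O write
 * path invalidates any cached pages over the written range once the write
 * completes, so that subsequent buffered reads do not see stale data,
 * roughly:
 *
 *	if (rw == WRITE && mapping->nrpages)
 *		err = invalidate_inode_pages2(mapping);
 */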