/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}
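
/*
 * Illustrative sketch, not part of this file: a block-based filesystem
 * usually leaves ->invalidatepage unset and relies on the
 * block_invalidatepage() default applied above, but one that keeps
 * private per-page state would wire its own handler into its
 * address_space_operations (the examplefs_* names are made up):
 *
 *	static void examplefs_invalidatepage(struct page *page,
 *					     unsigned long offset)
 *	{
 *		examplefs_drop_private_state(page, offset);
 *		block_invalidatepage(page, offset);
 *	}
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.invalidatepage	= examplefs_invalidatepage,
 *	};
 */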

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	remove_from_page_cache(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
}
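
/*
 * Illustrative sketch, not part of this file: the fs/buffer.c case the
 * comment above complains about boils down to try_to_free_buffers()
 * doing roughly this once every buffer on the page is clean:
 *
 *	if (buffers_freed)
 *		cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *
 * i.e. the page-level dirty bit is cancelled without writing anything,
 * because the per-buffer dirty state says there is nothing left to write.
 */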

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
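
/*
 * Illustrative sketch, not part of this file: the classic caller is a
 * filesystem's ->delete_inode() method, which must empty the pagecache
 * before it frees the on-disk blocks (examplefs_free_blocks() is a
 * made-up helper standing in for the fs-specific part):
 *
 *	static void examplefs_delete_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		examplefs_free_blocks(inode);
 *		clear_inode(inode);
 *	}
 */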

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
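
/*
 * Illustrative sketch, not part of this file: this is the helper behind
 * "echo 1 > /proc/sys/vm/drop_caches"; fs/drop_caches.c walks the inodes
 * of each superblock and does roughly
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *
 * dropping whatever clean, unmapped pagecache it can and silently
 * skipping the rest.
 */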

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret = do_launder_page(mapping, page);
			if (ret == 0 && !invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
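
/*
 * Illustrative sketch, not part of this file: the main user of the range
 * variant is direct I/O, which must shoot down any pagecache covering the
 * I/O region so that later buffered reads cannot see stale data.
 * generic_file_direct_IO() in mm/filemap.c does roughly
 *
 *	if (mapping->nrpages) {
 *		retval = invalidate_inode_pages2_range(mapping,
 *					offset >> PAGE_CACHE_SHIFT, end);
 *		if (retval)
 *			goto out;
 *	}
 *
 * treating a failed invalidation as an error rather than risking
 * inconsistent data.
 */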

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
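
/*
 * Illustrative sketch, not part of this file: a network filesystem that
 * detects its cached data is stale uses this to force re-reads from the
 * server; NFS's mapping revalidation does roughly
 *
 *	ret = invalidate_inode_pages2(mapping);
 *	if (ret < 0)
 *		return ret;
 *
 * where an -EIO return tells it the cache could not be fully purged.
 */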