/*
 * Copyright (c) 2013
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"

static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page, int bytes);

/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
	int expected)

{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
	if (page == NULL)
		return res;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(page, pages, 0);
	if (actor == NULL)
		goto out;

	/* Try to grab all the pages covered by the Squashfs block */
	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL) {
			missing_pages++;
			continue;
		}

		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			put_page(page[i]);
			page[i] = NULL;
			missing_pages++;
		}
	}

	if (missing_pages) {
		/*
		 * Couldn't get one or more pages, this page has either
		 * been VM reclaimed, but others are still in the page cache
		 * and uptodate, or we're racing with another thread in
		 * squashfs_readpage also trying to grab them.  Fall back to
		 * using an intermediate buffer.
		 */
		res = squashfs_read_cache(target_page, block, bsize, pages,
						page, expected);
		if (res < 0)
			goto mark_errored;

		goto out;
	}

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
	if (res < 0)
		goto mark_errored;

	if (res != expected) {
		res = -EIO;
		goto mark_errored;
	}

	/* Last page may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (bytes) {
		pageaddr = kmap_atomic(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_atomic(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			put_page(page[i]);
	}

	kfree(actor);
	kfree(page);

	return 0;

mark_errored:
	/* Decompression failed, mark pages as errored.  Target_page is
	 * dealt with by the caller
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		put_page(page[i]);
	}

out:
	kfree(actor);
	kfree(page);
	return res;
}


static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page, int bytes)
{
	struct inode *i = target_page->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
		block, bsize);
	int res = buffer->error, n, offset = 0;

	if (res) {
		ERROR("Unable to read page, block %llx, size %x\n", block,
			bsize);
		goto out;
	}

	for (n = 0; n < pages && bytes > 0; n++,
			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
		int avail = min_t(int, bytes, PAGE_SIZE);

		if (page[n] == NULL)
			continue;

		squashfs_fill_page(page[n], buffer, offset, avail);
		unlock_page(page[n]);
		if (page[n] != target_page)
			put_page(page[n]);
	}

out:
	squashfs_cache_put(buffer);
	return res;
}
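
/*
 * The "expected" argument to squashfs_readpage_block() is the uncompressed
 * length the caller anticipates for this datablock; a decompressed size that
 * differs from it is treated as corruption above and converted to -EIO, and
 * the fallback path only copies "expected" bytes out of the cache buffer.
 * A minimal sketch of how a caller such as squashfs_readpage() might derive
 * it, assuming only a file's final block can be short:
 *
 *	int expected = (index == file_end) ?
 *			(i_size_read(inode) & (msblk->block_size - 1)) :
 *			msblk->block_size;
 */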