1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Squashfs - a compressed read only filesystem for Linux
4  *
5  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
6  * Phillip Lougher <phillip@squashfs.org.uk>
7  *
8  * block.c
9  */
10 
11 /*
12  * This file implements the low-level routines to read and decompress
13  * datablocks and metadata blocks.
14  */
15 
16 #include <linux/blkdev.h>
17 #include <linux/fs.h>
18 #include <linux/vfs.h>
19 #include <linux/slab.h>
20 #include <linux/pagemap.h>
21 #include <linux/string.h>
22 #include <linux/bio.h>
23 
24 #include "squashfs_fs.h"
25 #include "squashfs_fs_sb.h"
26 #include "squashfs.h"
27 #include "decompressor.h"
28 #include "page_actor.h"
29 
30 /*
31  * Returns the amount of bytes copied to the page actor.
32  */
33 static int copy_bio_to_actor(struct bio *bio,
34 			     struct squashfs_page_actor *actor,
35 			     int offset, int req_length)
36 {
37 	void *actor_addr;
38 	struct bvec_iter_all iter_all = {};
39 	struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
40 	int copied_bytes = 0;
41 	int actor_offset = 0;
42 
43 	squashfs_actor_nobuff(actor);
44 	actor_addr = squashfs_first_page(actor);
45 
46 	if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
47 		return 0;
48 
49 	while (copied_bytes < req_length) {
50 		int bytes_to_copy = min_t(int, bvec->bv_len - offset,
51 					  PAGE_SIZE - actor_offset);
52 
53 		bytes_to_copy = min_t(int, bytes_to_copy,
54 				      req_length - copied_bytes);
55 		if (!IS_ERR(actor_addr))
56 			memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
57 					offset, bytes_to_copy);
58 
59 		actor_offset += bytes_to_copy;
60 		copied_bytes += bytes_to_copy;
61 		offset += bytes_to_copy;
62 
63 		if (actor_offset >= PAGE_SIZE) {
64 			actor_addr = squashfs_next_page(actor);
65 			if (!actor_addr)
66 				break;
67 			actor_offset = 0;
68 		}
69 		if (offset >= bvec->bv_len) {
70 			if (!bio_next_segment(bio, &iter_all))
71 				break;
72 			offset = 0;
73 		}
74 	}
75 	squashfs_finish_page(actor);
76 	return copied_bytes;
77 }
78 
79 static int squashfs_bio_read_cached(struct bio *fullbio,
80 		struct address_space *cache_mapping, u64 index, int length,
81 		u64 read_start, u64 read_end, int page_count)
82 {
83 	struct page *head_to_cache = NULL, *tail_to_cache = NULL;
84 	struct block_device *bdev = fullbio->bi_bdev;
85 	int start_idx = 0, end_idx = 0;
86 	struct bvec_iter_all iter_all;
87 	struct bio *bio = NULL;
88 	struct bio_vec *bv;
89 	int idx = 0;
90 	int err = 0;
91 #ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
92 	struct page **cache_pages = kmalloc_array(page_count,
93 			sizeof(void *), GFP_KERNEL | __GFP_ZERO);
94 #endif
95 
96 	bio_for_each_segment_all(bv, fullbio, iter_all) {
97 		struct page *page = bv->bv_page;
98 
99 		if (page->mapping == cache_mapping) {
100 			idx++;
101 			continue;
102 		}
103 
104 		/*
105 		 * We only use this when the device block size is the same as
106 		 * the page size, so read_start and read_end cover full pages.
107 		 *
108 		 * Compare these to the original required index and length to
109 		 * only cache pages which were requested partially, since these
110 		 * are the ones which are likely to be needed when reading
111 		 * adjacent blocks.
112 		 */
113 		if (idx == 0 && index != read_start)
114 			head_to_cache = page;
115 		else if (idx == page_count - 1 && index + length != read_end)
116 			tail_to_cache = page;
117 #ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
118 		/* Cache all pages in the BIO for repeated reads */
119 		else if (cache_pages)
120 			cache_pages[idx] = page;
121 #endif
122 
123 		if (!bio || idx != end_idx) {
124 			struct bio *new = bio_alloc_clone(bdev, fullbio,
125 							  GFP_NOIO, &fs_bio_set);
126 
127 			if (bio) {
128 				bio_trim(bio, start_idx * PAGE_SECTORS,
129 					 (end_idx - start_idx) * PAGE_SECTORS);
130 				bio_chain(bio, new);
131 				submit_bio(bio);
132 			}
133 
134 			bio = new;
135 			start_idx = idx;
136 		}
137 
138 		idx++;
139 		end_idx = idx;
140 	}
141 
142 	if (bio) {
143 		bio_trim(bio, start_idx * PAGE_SECTORS,
144 			 (end_idx - start_idx) * PAGE_SECTORS);
145 		err = submit_bio_wait(bio);
146 		bio_put(bio);
147 	}
148 
149 	if (err)
150 		return err;
151 
152 	if (head_to_cache) {
153 		int ret = add_to_page_cache_lru(head_to_cache, cache_mapping,
154 						read_start >> PAGE_SHIFT,
155 						GFP_NOIO);
156 
157 		if (!ret) {
158 			SetPageUptodate(head_to_cache);
159 			unlock_page(head_to_cache);
160 		}
161 
162 	}
163 
164 	if (tail_to_cache) {
165 		int ret = add_to_page_cache_lru(tail_to_cache, cache_mapping,
166 						(read_end >> PAGE_SHIFT) - 1,
167 						GFP_NOIO);
168 
169 		if (!ret) {
170 			SetPageUptodate(tail_to_cache);
171 			unlock_page(tail_to_cache);
172 		}
173 	}
174 
175 #ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
176 	if (!cache_pages)
177 		goto out;
178 
179 	for (idx = 0; idx < page_count; idx++) {
180 		if (!cache_pages[idx])
181 			continue;
182 		int ret = add_to_page_cache_lru(cache_pages[idx], cache_mapping,
183 						(read_start >> PAGE_SHIFT) + idx,
184 						GFP_NOIO);
185 
186 		if (!ret) {
187 			SetPageUptodate(cache_pages[idx]);
188 			unlock_page(cache_pages[idx]);
189 		}
190 	}
191 	kfree(cache_pages);
192 out:
193 #endif
194 	return 0;
195 }
196 
197 static struct page *squashfs_get_cache_page(struct address_space *mapping,
198 					    pgoff_t index)
199 {
200 	struct page *page;
201 
202 	if (!mapping)
203 		return NULL;
204 
205 	page = find_get_page(mapping, index);
206 	if (!page)
207 		return NULL;
208 
209 	if (!PageUptodate(page)) {
210 		put_page(page);
211 		return NULL;
212 	}
213 
214 	return page;
215 }
216 
/*
 * Build and submit a read bio covering @length bytes at byte offset
 * @index, expanded to device-block alignment.  Pages already present
 * and uptodate in the optional block cache are reused; the rest are
 * freshly allocated and read from disk.
 *
 * On success, *@biop holds the completed bio (caller frees its pages
 * with bio_free_pages(), then bio_uninit()/kfree()) and *@block_offset
 * is the offset of the requested data within the first device block.
 * Returns 0 or a negative errno.
 */
static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
			     struct bio **biop, int *block_offset)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct address_space *cache_mapping = msblk->cache_mapping;
	/* Expand [index, index + length) to device-block boundaries */
	const u64 read_start = round_down(index, msblk->devblksize);
	const sector_t block = read_start >> msblk->devblksize_log2;
	const u64 read_end = round_up(index + length, msblk->devblksize);
	const sector_t block_end = read_end >> msblk->devblksize_log2;
	/* Offset of the aligned read start within its first page */
	int offset = read_start - round_down(index, PAGE_SIZE);
	int total_len = (block_end - block) << msblk->devblksize_log2;
	const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE);
	int error, i;
	struct bio *bio;

	bio = bio_kmalloc(page_count, GFP_NOIO);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, sb->s_bdev, bio->bi_inline_vecs, page_count, REQ_OP_READ);
	bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);

	for (i = 0; i < page_count; ++i) {
		unsigned int len =
			min_t(unsigned int, PAGE_SIZE - offset, total_len);
		/* Shadows the outer byte offset with a page-cache index */
		pgoff_t index = (read_start >> PAGE_SHIFT) + i;
		struct page *page;

		/* Prefer a cached copy; fall back to a fresh allocation */
		page = squashfs_get_cache_page(cache_mapping, index);
		if (!page)
			page = alloc_page(GFP_NOIO);

		if (!page) {
			error = -ENOMEM;
			goto out_free_bio;
		}

		/*
		 * Use the __ version to avoid merging since we need each page
		 * to be separate when we check for and avoid cached pages.
		 */
		__bio_add_page(bio, page, len, offset);
		offset = 0;
		total_len -= len;
	}

	if (cache_mapping)
		error = squashfs_bio_read_cached(bio, cache_mapping, index,
						 length, read_start, read_end,
						 page_count);
	else
		error = submit_bio_wait(bio);
	if (error)
		goto out_free_bio;

	*biop = bio;
	*block_offset = index & ((1 << msblk->devblksize_log2) - 1);
	return 0;

out_free_bio:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return error;
}
281 
282 /*
283  * Read and decompress a metadata block or datablock.  Length is non-zero
284  * if a datablock is being read (the size is stored elsewhere in the
285  * filesystem), otherwise the length is obtained from the first two bytes of
286  * the metadata block.  A bit in the length field indicates if the block
287  * is stored uncompressed in the filesystem (usually because compression
288  * generated a larger block - this does occasionally happen with compression
289  * algorithms).
290  */
291 int squashfs_read_data(struct super_block *sb, u64 index, int length,
292 		       u64 *next_index, struct squashfs_page_actor *output)
293 {
294 	struct squashfs_sb_info *msblk = sb->s_fs_info;
295 	struct bio *bio = NULL;
296 	int compressed;
297 	int res;
298 	int offset;
299 
300 	if (length) {
301 		/*
302 		 * Datablock.
303 		 */
304 		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
305 		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
306 		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
307 			index, compressed ? "" : "un", length, output->length);
308 	} else {
309 		/*
310 		 * Metadata block.
311 		 */
312 		const u8 *data;
313 		struct bvec_iter_all iter_all = {};
314 		struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
315 
316 		if (index + 2 > msblk->bytes_used) {
317 			res = -EIO;
318 			goto out;
319 		}
320 		res = squashfs_bio_read(sb, index, 2, &bio, &offset);
321 		if (res)
322 			goto out;
323 
324 		if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
325 			res = -EIO;
326 			goto out_free_bio;
327 		}
328 		/* Extract the length of the metadata block */
329 		data = bvec_virt(bvec);
330 		length = data[offset];
331 		if (offset < bvec->bv_len - 1) {
332 			length |= data[offset + 1] << 8;
333 		} else {
334 			if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
335 				res = -EIO;
336 				goto out_free_bio;
337 			}
338 			data = bvec_virt(bvec);
339 			length |= data[0] << 8;
340 		}
341 		bio_free_pages(bio);
342 		bio_uninit(bio);
343 		kfree(bio);
344 
345 		compressed = SQUASHFS_COMPRESSED(length);
346 		length = SQUASHFS_COMPRESSED_SIZE(length);
347 		index += 2;
348 
349 		TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
350 		      compressed ? "" : "un", length);
351 	}
352 	if (length <= 0 || length > output->length ||
353 			(index + length) > msblk->bytes_used) {
354 		res = -EIO;
355 		goto out;
356 	}
357 
358 	if (next_index)
359 		*next_index = index + length;
360 
361 	res = squashfs_bio_read(sb, index, length, &bio, &offset);
362 	if (res)
363 		goto out;
364 
365 	if (compressed) {
366 		if (!msblk->stream) {
367 			res = -EIO;
368 			goto out_free_bio;
369 		}
370 		res = msblk->thread_ops->decompress(msblk, bio, offset, length, output);
371 	} else {
372 		res = copy_bio_to_actor(bio, output, offset, length);
373 	}
374 
375 out_free_bio:
376 	bio_free_pages(bio);
377 	bio_uninit(bio);
378 	kfree(bio);
379 out:
380 	if (res < 0) {
381 		ERROR("Failed to read block 0x%llx: %d\n", index, res);
382 		if (msblk->panic_on_errors)
383 			panic("squashfs read failed");
384 	}
385 
386 	return res;
387 }
388