xref: /linux/fs/verity/verify.c (revision 4b65b859f55b036649a4525f09fa7c5bbbab384e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Data verification functions, i.e. hooks for ->readahead()
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <linux/bio.h>
#include <linux/export.h>

static struct workqueue_struct *fsverity_read_workqueue;

/*
 * Returns true if the hash block with index @hblock_idx in the tree, located in
 * @hpage, has already been verified.
 */
static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
				   unsigned long hblock_idx)
{
	unsigned int blocks_per_page;
	unsigned int i;

	/*
	 * When the Merkle tree block size and page size are the same, then the
	 * ->hash_block_verified bitmap isn't allocated, and we use PG_checked
	 * to directly indicate whether the page's block has been verified.
	 *
	 * Using PG_checked also guarantees that we re-verify hash pages that
	 * get evicted and re-instantiated from the backing storage, as new
	 * pages always start out with PG_checked cleared.
	 */
	if (!vi->hash_block_verified)
		return PageChecked(hpage);

	/*
	 * When the Merkle tree block size and page size differ, we use a bitmap
	 * to indicate whether each hash block has been verified.
	 *
	 * However, we still need to ensure that hash pages that get evicted and
	 * re-instantiated from the backing storage are re-verified.  To do
	 * this, we use PG_checked again, but now it doesn't really mean
	 * "checked".  Instead, now it just serves as an indicator for whether
	 * the hash page is newly instantiated or not.  If the page is new, as
	 * indicated by PG_checked=0, we clear the bitmap bits for the page's
	 * blocks since they are untrustworthy, then set PG_checked=1.
	 * Otherwise we return the bitmap bit for the requested block.
	 *
	 * Multiple threads may execute this code concurrently on the same page.
	 * This is safe because we use memory barriers to ensure that if a
	 * thread sees PG_checked=1, then it also sees the associated bitmap
	 * clearing to have occurred.  Also, all writes and their corresponding
	 * reads are atomic, and all writes are safe to repeat in the event that
	 * multiple threads get into the PG_checked=0 section.  (Clearing a
	 * bitmap bit again at worst causes a hash block to be verified
	 * redundantly.  That event should be very rare, so it's not worth using
	 * a lock to avoid.  Setting PG_checked again has no effect.)
	 */
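	/*
	 * Concretely, the pairing is: the writer path below does clear_bit(),
	 * then smp_wmb(), then SetPageChecked() (RELEASE ordering), while the
	 * reader path does PageChecked(), then smp_rmb(), then test_bit()
	 * (ACQUIRE ordering).  A reader that observes PG_checked=1 is thus
	 * guaranteed to also observe the earlier bitmap clearing.
	 */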
	if (PageChecked(hpage)) {
		/*
		 * A read memory barrier is needed here to give ACQUIRE
		 * semantics to the above PageChecked() test.
		 */
		smp_rmb();
		return test_bit(hblock_idx, vi->hash_block_verified);
	}
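	/*
	 * New page: invalidate the bitmap bits for all of this page's blocks.
	 * For example (hypothetical sizes), with 4096-byte pages and 1024-byte
	 * Merkle tree blocks, blocks_per_page == 4, so hblock_idx is rounded
	 * down to the page's first block and four bits are cleared.
	 */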
	blocks_per_page = vi->tree_params.blocks_per_page;
	hblock_idx = round_down(hblock_idx, blocks_per_page);
	for (i = 0; i < blocks_per_page; i++)
		clear_bit(hblock_idx + i, vi->hash_block_verified);
	/*
	 * A write memory barrier is needed here to give RELEASE semantics to
	 * the below SetPageChecked() operation.
	 */
	smp_wmb();
	SetPageChecked(hpage);
	return false;
}

/*
 * Verify a single data block against the file's Merkle tree.
 *
 * In principle, we need to verify the entire path to the root node.  However,
 * for efficiency the filesystem may cache the hash blocks.  Therefore we need
 * only ascend the tree until an already-verified hash block is seen, and then
 * verify the path to that block.
 *
 * Return: %true if the data block is valid, else %false.
 */
static bool
verify_data_block(struct inode *inode, struct fsverity_info *vi,
		  const void *data, u64 data_pos, unsigned long max_ra_pages)
{
	const struct merkle_tree_params *params = &vi->tree_params;
	const unsigned int hsize = params->digest_size;
	int level;
	u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
	const u8 *want_hash;
	u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
	/* The hash blocks that are traversed, indexed by level */
	struct {
		/* Page containing the hash block */
		struct page *page;
		/* Mapped address of the hash block (will be within @page) */
		const void *addr;
		/* Index of the hash block in the tree overall */
		unsigned long index;
		/* Byte offset of the wanted hash relative to @addr */
		unsigned int hoffset;
	} hblocks[FS_VERITY_MAX_LEVELS];
	/*
	 * The index of the previous level's block within that level; also the
	 * index of that block's hash within the current level.
	 */
	u64 hidx = data_pos >> params->log_blocksize;

	/* Up to 1 + FS_VERITY_MAX_LEVELS pages may be mapped at once */
	BUILD_BUG_ON(1 + FS_VERITY_MAX_LEVELS > KM_MAX_IDX);

	if (unlikely(data_pos >= inode->i_size)) {
		/*
		 * This can happen in the data page spanning EOF when the Merkle
		 * tree block size is less than the page size.  The Merkle tree
		 * doesn't cover data blocks fully past EOF.  But the entire
		 * page spanning EOF can be visible to userspace via a mmap, and
		 * any part past EOF should be all zeroes.  Therefore, we need
		 * to verify that any data blocks fully past EOF are all zeroes.
		 */
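		/*
		 * Example (hypothetical sizes): with a 4096-byte page size, a
		 * 1024-byte Merkle tree block size, and i_size == 5000, the
		 * blocks at file offsets 5120, 6144, and 7168 lie in the page
		 * spanning EOF but entirely past EOF, so they reach this path
		 * and must be all zeroes.
		 */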
		if (memchr_inv(data, 0, params->block_size)) {
			fsverity_err(inode,
				     "FILE CORRUPTED!  Data past EOF is not zeroed");
			return false;
		}
		return true;
	}

	/*
	 * Starting at the leaf level, ascend the tree saving hash blocks along
	 * the way until we find a hash block that has already been verified, or
	 * until we reach the root.
	 */
	for (level = 0; level < params->num_levels; level++) {
		unsigned long next_hidx;
		unsigned long hblock_idx;
		pgoff_t hpage_idx;
		unsigned int hblock_offset_in_page;
		unsigned int hoffset;
		struct page *hpage;
		const void *haddr;

		/*
		 * The index of the block in the current level; also the index
		 * of that block's hash within the next level.
		 */
		next_hidx = hidx >> params->log_arity;

		/* Index of the hash block in the tree overall */
		hblock_idx = params->level_start[level] + next_hidx;

		/* Index of the hash page in the tree overall */
		hpage_idx = hblock_idx >> params->log_blocks_per_page;

		/* Byte offset of the hash block within the page */
		hblock_offset_in_page =
			(hblock_idx << params->log_blocksize) & ~PAGE_MASK;

		/* Byte offset of the hash within the block */
		hoffset = (hidx << params->log_digestsize) &
			  (params->block_size - 1);
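		/*
		 * Example (hypothetical geometry): with 4096-byte blocks and
		 * SHA-256 (32-byte digests), log_arity == 7 and
		 * log_digestsize == 5, so each hash block holds 128 hashes
		 * and hoffset == (hidx % 128) * 32.
		 */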

		hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
				hpage_idx, level == 0 ? min(max_ra_pages,
					params->tree_pages - hpage_idx) : 0);
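		/*
		 * Note that readahead (the second argument above) is requested
		 * only at level 0, the leaf level of the tree, and is capped
		 * so that it doesn't run past the end of the tree.
		 */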
		if (IS_ERR(hpage)) {
			fsverity_err(inode,
				     "Error %ld reading Merkle tree page %lu",
				     PTR_ERR(hpage), hpage_idx);
			goto error;
		}
		haddr = kmap_local_page(hpage) + hblock_offset_in_page;
		if (is_hash_block_verified(vi, hpage, hblock_idx)) {
			memcpy(_want_hash, haddr + hoffset, hsize);
			want_hash = _want_hash;
			kunmap_local(haddr);
			put_page(hpage);
			goto descend;
		}
		hblocks[level].page = hpage;
		hblocks[level].addr = haddr;
		hblocks[level].index = hblock_idx;
		hblocks[level].hoffset = hoffset;
		hidx = next_hidx;
	}

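	/*
	 * The loop ran to completion without finding an already-verified hash
	 * block, so the topmost saved block must be checked against the
	 * trusted in-memory root hash.
	 */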
	want_hash = vi->root_hash;
descend:
	/* Descend the tree verifying hash blocks. */
	for (; level > 0; level--) {
		struct page *hpage = hblocks[level - 1].page;
		const void *haddr = hblocks[level - 1].addr;
		unsigned long hblock_idx = hblocks[level - 1].index;
		unsigned int hoffset = hblocks[level - 1].hoffset;

		fsverity_hash_block(params, inode, haddr, real_hash);
		if (memcmp(want_hash, real_hash, hsize) != 0)
			goto corrupted;
		/*
		 * Mark the hash block as verified.  This must be atomic and
		 * idempotent, as the same hash block might be verified by
		 * multiple threads concurrently.
		 */
		if (vi->hash_block_verified)
			set_bit(hblock_idx, vi->hash_block_verified);
		else
			SetPageChecked(hpage);
		memcpy(_want_hash, haddr + hoffset, hsize);
		want_hash = _want_hash;
		kunmap_local(haddr);
		put_page(hpage);
	}

	/* Finally, verify the data block. */
	fsverity_hash_block(params, inode, data, real_hash);
	if (memcmp(want_hash, real_hash, hsize) != 0)
		goto corrupted;
	return true;

corrupted:
	fsverity_err(inode,
		     "FILE CORRUPTED! pos=%llu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
		     data_pos, level - 1,
		     params->hash_alg->name, hsize, want_hash,
		     params->hash_alg->name, hsize, real_hash);
error:
	for (; level > 0; level--) {
		kunmap_local(hblocks[level - 1].addr);
		put_page(hblocks[level - 1].page);
	}
	return false;
}

static bool
verify_data_blocks(struct folio *data_folio, size_t len, size_t offset,
		   unsigned long max_ra_pages)
{
	struct inode *inode = data_folio->mapping->host;
	struct fsverity_info *vi = inode->i_verity_info;
	const unsigned int block_size = vi->tree_params.block_size;
	u64 pos = (u64)data_folio->index << PAGE_SHIFT;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size)))
		return false;
	if (WARN_ON_ONCE(!folio_test_locked(data_folio) ||
			 folio_test_uptodate(data_folio)))
		return false;
	do {
		void *data;
		bool valid;

		data = kmap_local_folio(data_folio, offset);
		valid = verify_data_block(inode, vi, data, pos + offset,
					  max_ra_pages);
		kunmap_local(data);
		if (!valid)
			return false;
		offset += block_size;
		len -= block_size;
	} while (len);
	return true;
}

/**
 * fsverity_verify_blocks() - verify data in a folio
 * @folio: the folio containing the data to verify
 * @len: the length of the data to verify in the folio
 * @offset: the offset of the data to verify in the folio
 *
 * Verify data that has just been read from a verity file.  The data must be
 * located in a pagecache folio that is still locked and not yet uptodate.  The
 * length and offset of the data must be Merkle tree block size aligned.
 *
 * Return: %true if the data is valid, else %false.
 */
bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
{
	return verify_data_blocks(folio, len, offset, 0);
}
EXPORT_SYMBOL_GPL(fsverity_verify_blocks);
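/*
 * Hypothetical usage sketch (not part of this file): a filesystem that
 * populates folios without issuing bios might verify each folio before
 * marking it uptodate, along these lines:
 *
 *	if (fsverity_active(inode) &&
 *	    !fsverity_verify_blocks(folio, folio_size(folio), 0))
 *		return -EIO;
 */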

#ifdef CONFIG_BLOCK
/**
 * fsverity_verify_bio() - verify a 'read' bio that has just completed
 * @bio: the bio to verify
 *
 * Verify the bio's data against the file's Merkle tree.  All bio data segments
 * must be aligned to the file's Merkle tree block size.  If any data fails
 * verification, then bio->bi_status is set to an error status.
 *
 * This is a helper function for use by the ->readahead() method of filesystems
 * that issue bios to read data directly into the page cache.  Filesystems that
 * populate the page cache without issuing bios (e.g. non block-based
 * filesystems) must instead call fsverity_verify_page() directly on each page.
 * All filesystems must also call fsverity_verify_page() on holes.
 */
void fsverity_verify_bio(struct bio *bio)
{
	struct folio_iter fi;
	unsigned long max_ra_pages = 0;

	if (bio->bi_opf & REQ_RAHEAD) {
		/*
		 * If this bio is for data readahead, then we also do readahead
		 * of the first (largest) level of the Merkle tree.  Namely,
		 * when a Merkle tree page is read, we also try to piggy-back on
		 * some additional pages -- up to 1/4 the number of data pages.
		 *
		 * This improves sequential read performance, as it greatly
		 * reduces the number of I/O requests made to the Merkle tree.
		 */
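		/*
		 * bi_size is in bytes, so shifting by PAGE_SHIFT + 2 yields
		 * (number of data pages in the bio) / 4.
		 */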
		max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
	}

	bio_for_each_folio_all(fi, bio) {
		if (!verify_data_blocks(fi.folio, fi.length, fi.offset,
					max_ra_pages)) {
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(fsverity_verify_bio);
#endif /* CONFIG_BLOCK */

/**
 * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
 * @work: the work to enqueue
 *
 * Enqueue verification work for asynchronous processing.
 */
void fsverity_enqueue_verify_work(struct work_struct *work)
{
	queue_work(fsverity_read_workqueue, work);
}
EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
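/*
 * Hypothetical usage sketch (not part of this file): a filesystem typically
 * defers verification out of its bio completion handler by packaging the work
 * in its own work item, where ctx and my_fs_verity_work() are the caller's:
 *
 *	INIT_WORK(&ctx->work, my_fs_verity_work);
 *	fsverity_enqueue_verify_work(&ctx->work);
 */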

void __init fsverity_init_workqueue(void)
{
	/*
	 * Use a high-priority workqueue to prioritize verification work, which
	 * blocks reads from completing, over regular application tasks.
	 *
	 * For performance reasons, don't use an unbound workqueue.  Using an
	 * unbound workqueue for crypto operations causes excessive scheduler
	 * latency on ARM64.
	 */
	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
						  WQ_HIGHPRI,
						  num_online_cpus());
	if (!fsverity_read_workqueue)
		panic("failed to allocate fsverity_read_queue");
}
363