// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfs/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include "btree.h"

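/* Check that an offset lies inside the node; log a detailed error if not. */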
static inline
bool is_bnode_offset_valid(struct hfs_bnode *node, int off)
{
	bool is_valid = off < node->tree->node_size;

	if (!is_valid) {
		pr_err("requested invalid offset: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off);
	}

	return is_valid;
}

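/*
 * Clamp a requested [off, off + len) range to the node size.  Returns the
 * usable length, or 0 if the offset itself is out of bounds.
 */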
static inline
int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len)
{
	unsigned int node_size;

	if (!is_bnode_offset_valid(node, off))
		return 0;

	node_size = node->tree->node_size;

	if ((off + len) > node_size) {
		int new_len = (int)node_size - off;

		pr_err("requested length has been corrected: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, "
		       "requested_len %d, corrected_len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len, new_len);

		return new_len;
	}

	return len;
}

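/*
 * Copy len bytes at offset off out of a node into buf.  A node may span
 * several pages, so the copy walks node->page[] one page at a time.
 */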
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;
	int pagenum;
	int bytes_read;
	int bytes_to_read;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagenum = off >> PAGE_SHIFT;
	off &= ~PAGE_MASK; /* compute page offset for the first page */

	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
		if (pagenum >= node->tree->pages_per_bnode)
			break;
		page = node->page[pagenum];
		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);

		memcpy_from_page(buf + bytes_read, page, off, bytes_to_read);

		pagenum++;
		off = 0; /* page offset only applies to the first page */
	}
}

u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

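/*
 * Read a record key at offset off.  Leaf nodes and variable-length index
 * keys carry their length in the first byte; fixed-size index keys use
 * the tree-wide maximum.  An implausible length zeroes the caller's
 * buffer instead of overrunning it.
 */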
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	if (key_len > sizeof(hfs_btree_key) || key_len < 1) {
		memset(key, 0, sizeof(hfs_btree_key));
		pr_err("invalid key length: %d\n", key_len);
		return;
	}

	hfs_bnode_read(node, key, off, key_len);
}

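/*
 * Copy len bytes from buf into the node at offset off and dirty the page.
 * Unlike hfs_bnode_read(), this writes through node->page[0] only, so the
 * (clamped) range must lie within the first page.
 */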
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	page = node->page[0];

	memcpy_to_page(page, off, buf, len);
	set_page_dirty(page);
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}

void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	// optimize later...
	hfs_bnode_write(node, &data, off, 1);
}

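/* Zero len bytes of the node at offset off and dirty the backing page. */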
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	page = node->page[0];

	memzero_page(page, off, len);
	set_page_dirty(page);
}

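/* Copy len bytes between two nodes, clamping against both node sizes. */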
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		struct hfs_bnode *src_node, int src, int len)
{
	struct page *src_page, *dst_page;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(src_node, src, len);
	len = check_and_correct_requested_length(dst_node, dst, len);

	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy_page(dst_page, dst, src_page, src, len);
	set_page_dirty(dst_page);
}

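/* Move len bytes within one node; memmove() handles overlapping ranges. */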
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(node, src, len);
	len = check_and_correct_requested_length(node, dst, len);

	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap_local_page(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap_local(ptr);
	set_page_dirty(page);
}

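/* Dump the node descriptor and record offset table for debugging. */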
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg_cont(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			hfs_dbg_cont(BNODE_MOD, " (%d,%d",
				     tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

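/*
 * Remove a node from the doubly linked sibling chain, updating the
 * on-disk prev/next fields of its neighbours and, for leaf nodes, the
 * tree's leaf_head/leaf_tail.  The node is only marked deleted here;
 * hfs_bnode_put() releases it once the last reference is dropped.
 */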
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

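/* Fold a node id down to an index into the node hash table. */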
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

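/*
 * Look up a cached node by id.  Returns NULL if the node is not hashed;
 * the caller must hold tree->hash_lock.
 */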
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %u in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}

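/*
 * Allocate a node, insert it into the hash table and read in its backing
 * pages.  If another task hashes the same id first, the new allocation is
 * dropped and the existing node is returned instead.  On a page read
 * failure the node is returned with HFS_BNODE_ERROR set.
 */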
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %u in B*Tree\n", cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		hfs_bnode_get(node2);
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_mapping_page(mapping, block++, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

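/* Unlink a node from the hash chain; caller holds tree->hash_lock. */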
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
					 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

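	/*
	 * Validate the record offset table at the end of the node: the
	 * first record must start right after the descriptor, and offsets
	 * must be increasing, even and in bounds, with each key fitting
	 * inside its record.
	 */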
	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

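/* Release a node's page references and free the in-memory structure. */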
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

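/*
 * Create a new, zeroed node.  Unlike hfs_bnode_find(), the caller
 * guarantees the node is not in use, so finding it already hashed is a bug.
 */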
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min((int)PAGE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

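/* Take an extra reference to a node */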
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}
585