xref: /linux/fs/hfsplus/bnode.c (revision 4d9981429aa61c31e67371ac09e7dbba6b59de14)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/hfsplus/bnode.c
4  *
5  * Copyright (C) 2001
6  * Brad Boyer (flar@allandria.com)
7  * (C) 2003 Ardis Technologies <roman@ardistech.com>
8  *
9  * Handle basic btree node operations
10  */
11 
12 #include <linux/string.h>
13 #include <linux/slab.h>
14 #include <linux/pagemap.h>
15 #include <linux/fs.h>
16 #include <linux/swap.h>
17 
18 #include "hfsplus_fs.h"
19 #include "hfsplus_raw.h"
20 
21 
22 /* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
	struct page **pagep;
	u32 l;

	/* Refuse reads whose starting offset lies outside the node. */
	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %u, len %u\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	/* Clamp @len so the copy cannot run past the end of the node. */
	len = check_and_correct_requested_length(node, off, len);

	/*
	 * Translate the node-relative offset into a (page, in-page offset)
	 * pair; a bnode may span several pages and may start mid-page
	 * (node->page_offset).
	 */
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	/* First chunk may start mid-page; later chunks are page-aligned. */
	l = min_t(u32, len, PAGE_SIZE - off);
	memcpy_from_page(buf, *pagep, off, l);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(u32, len, PAGE_SIZE);
		memcpy_from_page(buf, *++pagep, 0, l);
	}
}
55 
hfs_bnode_read_u16(struct hfs_bnode * node,u32 off)56 u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
57 {
58 	__be16 data;
59 	/* TODO: optimize later... */
60 	hfs_bnode_read(node, &data, off, 2);
61 	return be16_to_cpu(data);
62 }
63 
hfs_bnode_read_u8(struct hfs_bnode * node,u32 off)64 u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
65 {
66 	u8 data;
67 	/* TODO: optimize later... */
68 	hfs_bnode_read(node, &data, off, 1);
69 	return data;
70 }
71 
/*
 * Read the record key at @off into @key, validating its length against
 * the maximum on-disk key size before copying.
 */
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
{
	struct hfs_btree *tree = node->tree;
	u32 key_len;

	/*
	 * Leaf records, variable-size index keys, and the attributes tree
	 * store an explicit 16-bit key length in front of the key;
	 * otherwise keys are padded to the tree-wide maximum.
	 */
	if (node->type == HFS_NODE_LEAF ||
	    (tree->attributes & HFS_TREE_VARIDXKEYS) ||
	    tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
		memset(key, 0, sizeof(hfsplus_btree_key));
		pr_err("hfsplus: Invalid key length: %u\n", key_len);
		return;
	}

	hfs_bnode_read(node, key, off, key_len);
}
93 
void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
	struct page **pagep;
	u32 l;

	/* Refuse writes whose starting offset lies outside the node. */
	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %u, len %u\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	/* Clamp @len so the copy cannot run past the end of the node. */
	len = check_and_correct_requested_length(node, off, len);

	/* Map node-relative offset to (page, in-page offset); see
	 * hfs_bnode_read() for the layout. */
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	/* First chunk may start mid-page; later chunks are page-aligned.
	 * Each touched page is marked dirty for writeback. */
	l = min_t(u32, len, PAGE_SIZE - off);
	memcpy_to_page(*pagep, off, buf, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(u32, len, PAGE_SIZE);
		memcpy_to_page(*++pagep, 0, buf, l);
		set_page_dirty(*pagep);
	}
}
128 
/* Write @data at @off within @node as a big-endian 16-bit value. */
void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
{
	__be16 raw = cpu_to_be16(data);

	/* TODO: optimize later... */
	hfs_bnode_write(node, &raw, off, sizeof(raw));
}
135 
void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
{
	struct page **pagep;
	u32 l;

	/* Refuse clears whose starting offset lies outside the node. */
	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %u, len %u\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	/* Clamp @len so zeroing cannot run past the end of the node. */
	len = check_and_correct_requested_length(node, off, len);

	/* Map node-relative offset to (page, in-page offset). */
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	/* Zero page-sized chunks, dirtying each page for writeback. */
	l = min_t(u32, len, PAGE_SIZE - off);
	memzero_page(*pagep, off, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		l = min_t(u32, len, PAGE_SIZE);
		memzero_page(*++pagep, 0, l);
		set_page_dirty(*pagep);
	}
}
169 
void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
		    struct hfs_bnode *src_node, u32 src, u32 len)
{
	struct page **src_page, **dst_page;
	u32 l;

	hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
	if (!len)
		return;

	/* Clamp @len against both nodes so neither side overruns. */
	len = check_and_correct_requested_length(src_node, src, len);
	len = check_and_correct_requested_length(dst_node, dst, len);

	/* Turn both node-relative offsets into (page, in-page offset). */
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		/* Same in-page alignment: chunks line up page for page,
		 * so memcpy_page() can be used directly. */
		l = min_t(u32, len, PAGE_SIZE - src);
		memcpy_page(*dst_page, src, *src_page, src, l);
		set_page_dirty(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(u32, len, PAGE_SIZE);
			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
			set_page_dirty(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		/*
		 * Misaligned: each pass copies up to whichever side's page
		 * boundary comes first, then advances only the page cursor
		 * that hit its boundary (offset reset to 0 selects it).
		 */
		do {
			dst_ptr = kmap_local_page(*dst_page) + dst;
			src_ptr = kmap_local_page(*src_page) + src;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			/* never copy more than what remains */
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}
227 
void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
{
	struct page **src_page, **dst_page;
	void *src_ptr, *dst_ptr;
	u32 l;

	hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
	if (!len)
		return;

	/* Clamp @len against both the source and destination ranges. */
	len = check_and_correct_requested_length(node, src, len);
	len = check_and_correct_requested_length(node, dst, len);

	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		/*
		 * Moving forward within the node: ranges may overlap, so
		 * copy backwards starting from the last byte.  Here src/dst
		 * are kept as "bytes remaining in the current page", i.e.
		 * one past the in-page offset of the last byte.
		 */
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			/* Same in-page alignment: move whole-page chunks
			 * back-to-front, then the leading partial chunk. */
			while (src < len) {
				dst_ptr = kmap_local_page(*dst_page);
				src_ptr = kmap_local_page(*src_page);
				memmove(dst_ptr, src_ptr, src);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			dst_ptr = kmap_local_page(*dst_page);
			src_ptr = kmap_local_page(*src_page);
			memmove(dst_ptr + src, src_ptr + src, len);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
		} else {
			/* Misaligned backward copy: each pass moves up to
			 * the nearer page start, then steps the cursor that
			 * reached the start of its page. */
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		/* Moving backward (or in place): a front-to-back copy is
		 * overlap-safe. */
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			/* Same in-page alignment: page-for-page moves. */
			l = min_t(u32, len, PAGE_SIZE - src);

			dst_ptr = kmap_local_page(*dst_page) + src;
			src_ptr = kmap_local_page(*src_page) + src;
			memmove(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);

			while ((len -= l) != 0) {
				l = min_t(u32, len, PAGE_SIZE);
				dst_ptr = kmap_local_page(*++dst_page);
				src_ptr = kmap_local_page(*++src_page);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
			}
		} else {
			/* Misaligned forward copy: mirror of the loop in
			 * hfs_bnode_copy(). */
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src <
						PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}
347 
/* Debug helper: print a node's descriptor and its record-offset table. */
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg("node %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg("next %d, prev %d, type %d, height %d, num_recs %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	/* The record-offset table grows downward from the end of the
	 * node, two bytes per entry (num_recs + 1 entries). */
	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(" key_off %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			/* Index record: key (explicit length for variable
			 * keys and the attributes tree, otherwise padded to
			 * max_key_len) followed by the child node's cnid. */
			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
					node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg(" (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg(", cnid %d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			/* Leaf record: just report the stored key length. */
			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg(" (%d)", tmp);
		}
	}
	hfs_dbg("\n");
}
384 
/*
 * Detach @node from the doubly-linked sibling chain of its level,
 * patching the on-disk next/prev pointers of its neighbours and the
 * tree's leaf head/tail, then mark the node deleted.
 */
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		/* NOTE(review): on lookup failure we bail out without
		 * patching the successor below — chain is left as-is. */
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		/* no predecessor: node was the first leaf */
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		/* no successor: node was the last leaf */
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg("btree delete level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}

	/* Flag deletion under hash_lock; the node is actually freed on
	 * the final hfs_bnode_put(). */
	spin_lock(&tree->hash_lock);
	set_bit(HFS_BNODE_DELETED, &node->flags);
	spin_unlock(&tree->hash_lock);
}
428 
/* Fold a node number into an index for the tree's node hash table. */
static inline int hfs_bnode_hash(u32 num)
{
	/* mix the upper halfword and a byte-shifted copy into the
	 * low bits, then mask down to the table size */
	num += num >> 16;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}
435 
/*
 * Look up a cached node by cnid in the tree's node hash table.
 * Returns the node without taking a reference, or NULL if the cnid is
 * out of range or the node is not cached.
 * NOTE(review): callers appear to hold tree->hash_lock — confirm.
 */
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		/* %u, not %d: cnid is unsigned */
		pr_err("request for non-existent node %u in B*Tree\n",
		       cnid);
		return NULL;
	}

	/* walk the per-bucket singly-linked chain */
	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
			node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}
452 
/*
 * Allocate a new in-memory bnode for @cnid, insert it into the hash
 * table, and read its backing pages.  If another thread races us and
 * hashes the same cnid first, the duplicate is discarded and the
 * existing node is returned (after it leaves the NEW state).
 * On page-read failure the node is returned with HFS_BNODE_ERROR set;
 * on range/allocation failure NULL is returned.
 */
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		/* %u, not %d: cnid is unsigned */
		pr_err("request for non-existent node %u in B*Tree\n",
		       cnid);
		return NULL;
	}

	/* node header plus one page pointer per backing page */
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	/* NEW keeps other lookups waiting until the node is populated */
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg("cnid %d, node %d, refcnt 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		/* lost the race: use the node that is already hashed */
		hfs_bnode_get(node2);
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	/* read the node's pages from the tree's backing inode */
	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	/* caller checks HFS_BNODE_ERROR and drops the node */
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}
512 
/*
 * Remove @node from the tree's node hash table.
 * NOTE(review): looks like tree->hash_lock must be held — confirm
 * against callers.
 */
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg("cnid %d, node %d, refcnt %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	/* find the link that points at @node in its bucket's chain */
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}
526 
527 /* Load a particular node out of a tree */
/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	/* fast path: node already cached in the hash table */
	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		/* wait until the creator finished populating it */
		wait_event(node->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	/* if another thread created it, it is already validated */
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	/* pull the node descriptor out of the first backing page */
	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
							 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	/* sanity-check the node type against its height in the tree */
	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	/*
	 * Validate the record-offset table: offsets must be strictly
	 * increasing, even, in range, and each index/leaf record must be
	 * large enough to hold its stored key.
	 */
	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	/* node is valid: publish it and wake any waiters */
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}
612 
hfs_bnode_free(struct hfs_bnode * node)613 void hfs_bnode_free(struct hfs_bnode *node)
614 {
615 	int i;
616 
617 	for (i = 0; i < node->tree->pages_per_bnode; i++)
618 		if (node->page[i])
619 			put_page(node->page[i]);
620 	kfree(node);
621 }
622 
/*
 * Create (allocate and zero-fill) node @num, which must not already be
 * cached.  Returns the new node, or an ERR_PTR on failure.
 */
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	/* a node being created must not already be in the cache */
	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return ERR_PTR(-EEXIST);
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	/* zero the whole node: first page from page_offset, the rest
	 * in full, dirtying each page for writeback */
	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}
658 
hfs_bnode_get(struct hfs_bnode * node)659 void hfs_bnode_get(struct hfs_bnode *node)
660 {
661 	if (node) {
662 		atomic_inc(&node->refcnt);
663 		hfs_dbg("cnid %d, node %d, refcnt %d\n",
664 			node->tree->cnid, node->this,
665 			atomic_read(&node->refcnt));
666 	}
667 }
668 
669 /* Dispose of resources used by a node */
/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg("cnid %d, node %d, refcnt %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		/* drop the reference; on the final put this returns with
		 * tree->hash_lock held */
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		/* keep the backing pages warm in the page cache */
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			/* last reference to a deleted node: unhash it,
			 * optionally zero it on disk, free its bitmap bit
			 * and its memory */
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}
700 
701 /*
702  * Unused nodes have to be zeroed if this is the catalog tree and
703  * a corresponding flag in the volume header is set.
704  */
hfs_bnode_need_zeroout(struct hfs_btree * tree)705 bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
706 {
707 	struct super_block *sb = tree->inode->i_sb;
708 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
709 	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);
710 
711 	return volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
712 }
713