// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "extents.h"
#include "dirent.h"
#include "fs.h"
#include "keylist.h"
#include "str_hash.h"
#include "subvolume.h"

#include <linux/dcache.h>

#ifdef CONFIG_UNICODE
int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info,
		  const struct qstr *str, struct qstr *out_cf)
{
	*out_cf = (struct qstr) QSTR_INIT(NULL, 0);

	if (!bch2_fs_casefold_enabled(trans->c))
		return -EOPNOTSUPP;

	unsigned char *buf = bch2_trans_kmalloc(trans, BCH_NAME_MAX + 1);
	int ret = PTR_ERR_OR_ZERO(buf);
	if (ret)
		return ret;

	ret = utf8_casefold(info->cf_encoding, str, buf, BCH_NAME_MAX + 1);
	if (ret <= 0)
		return ret;

	*out_cf = (struct qstr) QSTR_INIT(buf, ret);
	return 0;
}
#endif

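/*
 * Size of the name (or, for casefolded dirents, name + casefolded name) stored
 * in the bkey value, computed by stripping the trailing NUL padding from the
 * last u64 of the value.
 */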
static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
{
	if (bkey_val_bytes(d.k) < offsetof(struct bch_dirent, d_name))
		return 0;

	unsigned bkey_u64s = bkey_val_u64s(d.k);
	unsigned bkey_bytes = bkey_u64s * sizeof(u64);
	u64 last_u64 = ((u64*)d.v)[bkey_u64s - 1];
#if CPU_BIG_ENDIAN
	unsigned trailing_nuls = last_u64 ? __builtin_ctzll(last_u64) / 8 : 64 / 8;
#else
	unsigned trailing_nuls = last_u64 ? __builtin_clzll(last_u64) / 8 : 64 / 8;
#endif

	return bkey_bytes -
		(d.v->d_casefold
		? offsetof(struct bch_dirent, d_cf_name_block.d_names)
		: offsetof(struct bch_dirent, d_name)) -
		trailing_nuls;
}

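/*
 * Casefolded dirents store the original name and the casefolded name back to
 * back in d_cf_name_block; hashing and lookups use the casefolded form, while
 * the original name is what gets reported to userspace.
 */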
struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d)
{
	if (d.v->d_casefold) {
		unsigned name_len = le16_to_cpu(d.v->d_cf_name_block.d_name_len);
		return (struct qstr) QSTR_INIT(&d.v->d_cf_name_block.d_names[0], name_len);
	} else {
		return (struct qstr) QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d));
	}
}

static struct qstr bch2_dirent_get_casefold_name(struct bkey_s_c_dirent d)
{
	if (d.v->d_casefold) {
		unsigned name_len = le16_to_cpu(d.v->d_cf_name_block.d_name_len);
		unsigned cf_name_len = le16_to_cpu(d.v->d_cf_name_block.d_cf_name_len);
		return (struct qstr) QSTR_INIT(&d.v->d_cf_name_block.d_names[name_len], cf_name_len);
	} else {
		return (struct qstr) QSTR_INIT(NULL, 0);
	}
}

static inline struct qstr bch2_dirent_get_lookup_name(struct bkey_s_c_dirent d)
{
	return d.v->d_casefold
		? bch2_dirent_get_casefold_name(d)
		: bch2_dirent_get_name(d);
}

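/*
 * Hash the lookup name; hash values 0 and 1 are reserved for "." and "..",
 * hence the clamp to a minimum of 2.
 */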
static u64 bch2_dirent_hash(const struct bch_hash_info *info,
			    const struct qstr *name)
{
	struct bch_str_hash_ctx ctx;

	bch2_str_hash_init(&ctx, info);
	bch2_str_hash_update(&ctx, info, name->name, name->len);

	/* [0,2) reserved for dots */
	return max_t(u64, bch2_str_hash_end(&ctx, info), 2);
}

static u64 dirent_hash_key(const struct bch_hash_info *info, const void *key)
{
	return bch2_dirent_hash(info, key);
}

static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	struct qstr name = bch2_dirent_get_lookup_name(d);

	return bch2_dirent_hash(info, &name);
}

static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r)
{
	struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
	const struct qstr l_name = bch2_dirent_get_lookup_name(l);
	const struct qstr *r_name = _r;

	return !qstr_eq(l_name, *r_name);
}

static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
{
	struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
	struct bkey_s_c_dirent r = bkey_s_c_to_dirent(_r);
	const struct qstr l_name = bch2_dirent_get_lookup_name(l);
	const struct qstr r_name = bch2_dirent_get_lookup_name(r);

	return !qstr_eq(l_name, r_name);
}

static bool dirent_is_visible(subvol_inum inum, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);

	if (d.v->d_type == DT_SUBVOL)
		return le32_to_cpu(d.v->d_parent_subvol) == inum.subvol;
	return true;
}

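/*
 * Interface to the generic str_hash code for the dirents btree: lookups hash
 * and compare the casefolded name for casefolded dirents, and subvolume
 * dirents are only visible from their parent subvolume.
 */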
const struct bch_hash_desc bch2_dirent_hash_desc = {
	.btree_id	= BTREE_ID_dirents,
	.key_type	= KEY_TYPE_dirent,
	.hash_key	= dirent_hash_key,
	.hash_bkey	= dirent_hash_bkey,
	.cmp_key	= dirent_cmp_key,
	.cmp_bkey	= dirent_cmp_bkey,
	.is_visible	= dirent_is_visible,
};

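/*
 * Basic consistency checks for dirent keys: nonempty name that fits in the
 * value, no embedded NULs, not "." or "..", no '/', and the dirent must not
 * point at its own directory.
 */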
int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k,
			 struct bkey_validate_context from)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	unsigned name_block_len = bch2_dirent_name_bytes(d);
	struct qstr d_name = bch2_dirent_get_name(d);
	struct qstr d_cf_name = bch2_dirent_get_casefold_name(d);
	int ret = 0;

	bkey_fsck_err_on(!d_name.len,
			 c, dirent_empty_name,
			 "empty name");

	bkey_fsck_err_on(d_name.len + d_cf_name.len > name_block_len,
			 c, dirent_val_too_big,
			 "dirent names exceed bkey size (%d + %d > %d)",
			 d_name.len, d_cf_name.len, name_block_len);

	/*
	 * Check new keys don't exceed the max length
	 * (older keys may be larger.)
	 */
	bkey_fsck_err_on((from.flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX,
			 c, dirent_name_too_long,
			 "dirent name too big (%u > %u)",
			 d_name.len, BCH_NAME_MAX);

	bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len),
			 c, dirent_name_embedded_nul,
			 "dirent has stray data after name's NUL");

	bkey_fsck_err_on((d_name.len == 1 && !memcmp(d_name.name, ".", 1)) ||
			 (d_name.len == 2 && !memcmp(d_name.name, "..", 2)),
			 c, dirent_name_dot_or_dotdot,
			 "invalid name");

	bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len),
			 c, dirent_name_has_slash,
			 "name with /");

	bkey_fsck_err_on(d.v->d_type != DT_SUBVOL &&
			 le64_to_cpu(d.v->d_inum) == d.k->p.inode,
			 c, dirent_to_itself,
			 "dirent points to own directory");

	if (d.v->d_casefold) {
		bkey_fsck_err_on(from.from == BKEY_VALIDATE_commit &&
				 d_cf_name.len > BCH_NAME_MAX,
				 c, dirent_cf_name_too_big,
				 "dirent w/ cf name too big (%u > %u)",
				 d_cf_name.len, BCH_NAME_MAX);

		bkey_fsck_err_on(d_cf_name.len != strnlen(d_cf_name.name, d_cf_name.len),
				 c, dirent_stray_data_after_cf_name,
				 "dirent has stray data after cf name's NUL");
	}
fsck_err:
	return ret;
}

void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	struct qstr d_name = bch2_dirent_get_name(d);

	prt_printf(out, "%.*s", d_name.len, d_name.name);

	if (d.v->d_casefold) {
		struct qstr d_name = bch2_dirent_get_lookup_name(d);
		prt_printf(out, " (casefold %.*s)", d_name.len, d_name.name);
	}

	prt_str(out, " ->");

	if (d.v->d_type != DT_SUBVOL)
		prt_printf(out, " %llu", le64_to_cpu(d.v->d_inum));
	else
		prt_printf(out, " %u -> %u",
			   le32_to_cpu(d.v->d_parent_subvol),
			   le32_to_cpu(d.v->d_child_subvol));

	prt_printf(out, " type %s", bch2_d_type_str(d.v->d_type));
}

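/*
 * Write the name into a freshly allocated dirent key: plain dirents store just
 * the name at d_name; casefolded dirents store the name followed by the
 * casefolded name in d_cf_name_block. The remainder of the value is zeroed and
 * the key is resized to the number of u64s actually needed.
 */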
int bch2_dirent_init_name(struct bch_fs *c,
			  struct bkey_i_dirent *dirent,
			  const struct bch_hash_info *hash_info,
			  const struct qstr *name,
			  const struct qstr *cf_name)
{
	EBUG_ON(hash_info->cf_encoding == NULL && cf_name);
	int cf_len = 0;

	if (name->len > BCH_NAME_MAX)
		return -ENAMETOOLONG;

	dirent->v.d_casefold = hash_info->cf_encoding != NULL;

	if (!dirent->v.d_casefold) {
		memcpy(&dirent->v.d_name[0], name->name, name->len);
		memset(&dirent->v.d_name[name->len], 0,
		       bkey_val_bytes(&dirent->k) -
		       offsetof(struct bch_dirent, d_name) -
		       name->len);
	} else {
		if (!bch2_fs_casefold_enabled(c))
			return -EOPNOTSUPP;

#ifdef CONFIG_UNICODE
		memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);

		char *cf_out = &dirent->v.d_cf_name_block.d_names[name->len];

		if (cf_name) {
			cf_len = cf_name->len;

			memcpy(cf_out, cf_name->name, cf_name->len);
		} else {
			cf_len = utf8_casefold(hash_info->cf_encoding, name,
					       cf_out,
					       bkey_val_end(bkey_i_to_s(&dirent->k_i)) - (void *) cf_out);
			if (cf_len <= 0)
				return cf_len;
		}

		memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_len], 0,
		       bkey_val_bytes(&dirent->k) -
		       offsetof(struct bch_dirent, d_cf_name_block.d_names) -
		       name->len - cf_len);

		dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len);
		dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_len);

		EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_len);
#endif
	}

	unsigned u64s = dirent_val_u64s(name->len, cf_len);
	BUG_ON(u64s > bkey_val_u64s(&dirent->k));
	set_bkey_val_u64s(&dirent->k, u64s);
	return 0;
}

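/*
 * Allocate a maximum-size dirent key in the btree transaction's memory and
 * fill in the target (inode number, or parent/child subvolume for DT_SUBVOL),
 * type and name. The caller sets the key's position and inserts it.
 */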
struct bkey_i_dirent *bch2_dirent_create_key(struct btree_trans *trans,
				const struct bch_hash_info *hash_info,
				subvol_inum dir,
				u8 type,
				const struct qstr *name,
				const struct qstr *cf_name,
				u64 dst)
{
	struct bkey_i_dirent *dirent = bch2_trans_kmalloc(trans, BKEY_U64s_MAX * sizeof(u64));
	if (IS_ERR(dirent))
		return dirent;

	bkey_dirent_init(&dirent->k_i);
	dirent->k.u64s = BKEY_U64s_MAX;

	if (type != DT_SUBVOL) {
		dirent->v.d_inum = cpu_to_le64(dst);
	} else {
		dirent->v.d_parent_subvol = cpu_to_le32(dir.subvol);
		dirent->v.d_child_subvol = cpu_to_le32(dst);
	}

	dirent->v.d_type = type;
	dirent->v.d_unused = 0;

	int ret = bch2_dirent_init_name(trans->c, dirent, hash_info, name, cf_name);
	if (ret)
		return ERR_PTR(ret);

	EBUG_ON(bch2_dirent_get_name(dirent_i_to_s_c(dirent)).len != name->len);
	return dirent;
}

int bch2_dirent_create_snapshot(struct btree_trans *trans,
			u32 dir_subvol, u64 dir, u32 snapshot,
			const struct bch_hash_info *hash_info,
			u8 type, const struct qstr *name, u64 dst_inum,
			u64 *dir_offset,
			enum btree_iter_update_trigger_flags flags)
{
	subvol_inum dir_inum = { .subvol = dir_subvol, .inum = dir };
	struct bkey_i_dirent *dirent;
	int ret;

	dirent = bch2_dirent_create_key(trans, hash_info, dir_inum, type, name, NULL, dst_inum);
	ret = PTR_ERR_OR_ZERO(dirent);
	if (ret)
		return ret;

	dirent->k.p.inode	= dir;
	dirent->k.p.snapshot	= snapshot;

	ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
					dir_inum, snapshot, &dirent->k_i, flags);
	*dir_offset = dirent->k.p.offset;

	return ret;
}

int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
		       const struct bch_hash_info *hash_info,
		       u8 type, const struct qstr *name, u64 dst_inum,
		       u64 *dir_offset,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bkey_i_dirent *dirent;
	int ret;

	dirent = bch2_dirent_create_key(trans, hash_info, dir, type, name, NULL, dst_inum);
	ret = PTR_ERR_OR_ZERO(dirent);
	if (ret)
		return ret;

	ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
			    dir, &dirent->k_i, flags);
	*dir_offset = dirent->k.p.offset;

	return ret;
}

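/*
 * Resolve a dirent to the subvol_inum it points to: for ordinary dirents the
 * target is in the same subvolume as the directory; for DT_SUBVOL dirents we
 * look up the child subvolume's root inode. Returns > 0 if the dirent isn't
 * visible from @dir's subvolume.
 */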
int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
			    struct bkey_s_c_dirent d, subvol_inum *target)
{
	struct bch_subvolume s;
	int ret = 0;

	if (d.v->d_type == DT_SUBVOL &&
	    le32_to_cpu(d.v->d_parent_subvol) != dir.subvol)
		return 1;

	if (likely(d.v->d_type != DT_SUBVOL)) {
		target->subvol	= dir.subvol;
		target->inum	= le64_to_cpu(d.v->d_inum);
	} else {
		target->subvol	= le32_to_cpu(d.v->d_child_subvol);

		ret = bch2_subvolume_get(trans, target->subvol, true, &s);

		target->inum	= le64_to_cpu(s.inode);
	}

	return ret;
}

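/*
 * Rename in the dirents btree: delete the dirent for @src_name and create one
 * for @dst_name pointing at the old target (swapping targets for
 * BCH_RENAME_EXCHANGE). Because dirents live in a hashed, linearly probed
 * table, deleting src may require a whiteout, and a collision between the old
 * and new positions needs special handling so lookup chains aren't broken.
 */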
int bch2_dirent_rename(struct btree_trans *trans,
		subvol_inum src_dir, struct bch_hash_info *src_hash,
		subvol_inum dst_dir, struct bch_hash_info *dst_hash,
		const struct qstr *src_name, subvol_inum *src_inum, u64 *src_offset,
		const struct qstr *dst_name, subvol_inum *dst_inum, u64 *dst_offset,
		enum bch_rename_mode mode)
{
	struct qstr src_name_lookup, dst_name_lookup;
	struct btree_iter src_iter = {};
	struct btree_iter dst_iter = {};
	struct bkey_s_c old_src, old_dst = bkey_s_c_null;
	struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
	struct bpos dst_pos =
		POS(dst_dir.inum, bch2_dirent_hash(dst_hash, dst_name));
	unsigned src_update_flags = 0;
	bool delete_src, delete_dst;
	int ret = 0;

	memset(src_inum, 0, sizeof(*src_inum));
	memset(dst_inum, 0, sizeof(*dst_inum));

	/* Lookup src: */
	ret = bch2_maybe_casefold(trans, src_hash, src_name, &src_name_lookup);
	if (ret)
		goto out;
	old_src = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
				   src_hash, src_dir, &src_name_lookup,
				   BTREE_ITER_intent);
	ret = bkey_err(old_src);
	if (ret)
		goto out;

	ret = bch2_dirent_read_target(trans, src_dir,
			bkey_s_c_to_dirent(old_src), src_inum);
	if (ret)
		goto out;

	/* Lookup dst: */
	ret = bch2_maybe_casefold(trans, dst_hash, dst_name, &dst_name_lookup);
	if (ret)
		goto out;
	if (mode == BCH_RENAME) {
		/*
		 * Note that we're _not_ checking if the target already exists -
		 * we're relying on the VFS to do that check for us for
		 * correctness:
		 */
		ret = bch2_hash_hole(trans, &dst_iter, bch2_dirent_hash_desc,
				     dst_hash, dst_dir, &dst_name_lookup);
		if (ret)
			goto out;
	} else {
		old_dst = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
					    dst_hash, dst_dir, &dst_name_lookup,
					    BTREE_ITER_intent);
		ret = bkey_err(old_dst);
		if (ret)
			goto out;

		ret = bch2_dirent_read_target(trans, dst_dir,
				bkey_s_c_to_dirent(old_dst), dst_inum);
		if (ret)
			goto out;
	}

	if (mode != BCH_RENAME_EXCHANGE)
		*src_offset = dst_iter.pos.offset;

	/* Create new dst key: */
	new_dst = bch2_dirent_create_key(trans, dst_hash, dst_dir, 0, dst_name,
					 dst_hash->cf_encoding ? &dst_name_lookup : NULL, 0);
	ret = PTR_ERR_OR_ZERO(new_dst);
	if (ret)
		goto out;

	dirent_copy_target(new_dst, bkey_s_c_to_dirent(old_src));
	new_dst->k.p = dst_iter.pos;

	/* Create new src key: */
	if (mode == BCH_RENAME_EXCHANGE) {
		new_src = bch2_dirent_create_key(trans, src_hash, src_dir, 0, src_name,
						 src_hash->cf_encoding ? &src_name_lookup : NULL, 0);
		ret = PTR_ERR_OR_ZERO(new_src);
		if (ret)
			goto out;

		dirent_copy_target(new_src, bkey_s_c_to_dirent(old_dst));
		new_src->k.p = src_iter.pos;
	} else {
		new_src = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
		ret = PTR_ERR_OR_ZERO(new_src);
		if (ret)
			goto out;

		bkey_init(&new_src->k);
		new_src->k.p = src_iter.pos;

		if (bkey_le(dst_pos, src_iter.pos) &&
		    bkey_lt(src_iter.pos, dst_iter.pos)) {
			/*
			 * We have a hash collision for the new dst key,
			 * and new_src - the key we're deleting - is between
			 * new_dst's hashed slot and the slot we're going to be
			 * inserting it into - oops.  This will break the hash
			 * table if we don't deal with it:
			 */
			if (mode == BCH_RENAME) {
				/*
				 * If we're not overwriting, we can just insert
				 * new_dst at the src position:
				 */
				new_src = new_dst;
				new_src->k.p = src_iter.pos;
				goto out_set_src;
			} else {
				/* If we're overwriting, we can't insert new_dst
				 * at a different slot because it has to
				 * overwrite old_dst - just make sure to use a
				 * whiteout when deleting src:
				 */
				new_src->k.type = KEY_TYPE_hash_whiteout;
			}
		} else {
			/* Check if we need a whiteout to delete src: */
			ret = bch2_hash_needs_whiteout(trans, bch2_dirent_hash_desc,
						       src_hash, &src_iter);
			if (ret < 0)
				goto out;

			if (ret)
				new_src->k.type = KEY_TYPE_hash_whiteout;
		}
	}

	if (new_dst->v.d_type == DT_SUBVOL)
		new_dst->v.d_parent_subvol = cpu_to_le32(dst_dir.subvol);

	if ((mode == BCH_RENAME_EXCHANGE) &&
	    new_src->v.d_type == DT_SUBVOL)
		new_src->v.d_parent_subvol = cpu_to_le32(src_dir.subvol);

	ret = bch2_trans_update(trans, &dst_iter, &new_dst->k_i, 0);
	if (ret)
		goto out;
out_set_src:
	/*
	 * If we're deleting a subvolume we need to really delete the dirent,
	 * not just emit a whiteout in the current snapshot - there can only be
	 * a single dirent that points to a given subvolume.
	 *
	 * IOW, we don't maintain multiple versions in different snapshots of
	 * dirents that point to subvolumes - dirents that point to subvolumes
	 * are only visible in one particular subvolume so it's not necessary,
	 * and it would be particularly confusing for fsck to have to deal with.
	 */
	delete_src = bkey_s_c_to_dirent(old_src).v->d_type == DT_SUBVOL &&
		new_src->k.p.snapshot != old_src.k->p.snapshot;

	delete_dst = old_dst.k &&
		bkey_s_c_to_dirent(old_dst).v->d_type == DT_SUBVOL &&
		new_dst->k.p.snapshot != old_dst.k->p.snapshot;

	if (!delete_src || !bkey_deleted(&new_src->k)) {
		ret = bch2_trans_update(trans, &src_iter, &new_src->k_i, src_update_flags);
		if (ret)
			goto out;
	}

	if (delete_src) {
		bch2_btree_iter_set_snapshot(trans, &src_iter, old_src.k->p.snapshot);
		ret =   bch2_btree_iter_traverse(trans, &src_iter) ?:
			bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
		if (ret)
			goto out;
	}

	if (delete_dst) {
		bch2_btree_iter_set_snapshot(trans, &dst_iter, old_dst.k->p.snapshot);
		ret =   bch2_btree_iter_traverse(trans, &dst_iter) ?:
			bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
		if (ret)
			goto out;
	}

	if (mode == BCH_RENAME_EXCHANGE)
		*src_offset = new_src->k.p.offset;
	*dst_offset = new_dst->k.p.offset;
out:
	bch2_trans_iter_exit(trans, &src_iter);
	bch2_trans_iter_exit(trans, &dst_iter);
	return ret;
}

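/*
 * Look up @name in @dir within the current transaction; on success @iter is
 * left pointing at the dirent and @inum is filled in with the target.
 */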
int bch2_dirent_lookup_trans(struct btree_trans *trans,
			     struct btree_iter *iter,
			     subvol_inum dir,
			     const struct bch_hash_info *hash_info,
			     const struct qstr *name, subvol_inum *inum,
			     unsigned flags)
{
	struct qstr lookup_name;
	int ret = bch2_maybe_casefold(trans, hash_info, name, &lookup_name);
	if (ret)
		return ret;

	struct bkey_s_c k = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
					     hash_info, dir, &lookup_name, flags);
	ret = bkey_err(k);
	if (ret)
		goto err;

	ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), inum);
	if (ret > 0)
		ret = -ENOENT;
err:
	if (ret)
		bch2_trans_iter_exit(trans, iter);
	return ret;
}

u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
		       const struct bch_hash_info *hash_info,
		       const struct qstr *name, subvol_inum *inum)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = {};

	int ret = lockrestart_do(trans,
		bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

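/*
 * Check whether a directory is empty in a given snapshot: returns an
 * ENOTEMPTY_dir_not_empty error if any dirent is found, ignoring subvolume
 * dirents that belong to a different parent subvolume.
 */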
int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key_max_norestart(trans, iter, BTREE_ID_dirents,
			   SPOS(dir, 0, snapshot),
			   POS(dir, U64_MAX), 0, k, ret)
		if (k.k->type == KEY_TYPE_dirent) {
			struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
			if (d.v->d_type == DT_SUBVOL && le32_to_cpu(d.v->d_parent_subvol) != subvol)
				continue;
			ret = bch_err_throw(trans->c, ENOTEMPTY_dir_not_empty);
			break;
		}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
{
	u32 snapshot;

	return bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot) ?:
		bch2_empty_dir_snapshot(trans, dir.inum, dir.subvol, snapshot);
}

static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subvol_inum target)
{
	struct qstr name = bch2_dirent_get_name(d);
	/*
	 * Although not required by the kernel code, updating ctx->pos is needed
	 * for the bcachefs FUSE driver. Without this update, the FUSE
	 * implementation will be stuck in an infinite loop when reading
	 * directories (via the bcachefs_fuse_readdir callback).
	 * In kernel space, ctx->pos is updated by the VFS code.
	 */
	ctx->pos = d.k->p.offset;
	bool ret = dir_emit(ctx, name.name,
		      name.len,
		      target.inum,
		      vfs_d_type(d.v->d_type));
	if (ret)
		ctx->pos = d.k->p.offset + 1;
	return !ret;
}

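/*
 * Walk the dirents btree for @inum and emit entries via dir_emit(). Keys are
 * copied to a bkey_buf and the transaction is unlocked before calling
 * dir_emit(), since it may fault and block.
 */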
int bch2_readdir(struct bch_fs *c, subvol_inum inum,
		 struct bch_hash_info *hash_info,
		 struct dir_context *ctx)
{
	struct bkey_buf sk;
	bch2_bkey_buf_init(&sk);

	int ret = bch2_trans_run(c,
		for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_dirents,
				   POS(inum.inum, ctx->pos),
				   POS(inum.inum, U64_MAX),
				   inum.subvol, 0, k, ({
			if (k.k->type != KEY_TYPE_dirent)
				continue;

			/* dir_emit() can fault and block: */
			bch2_bkey_buf_reassemble(&sk, c, k);
			struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);

			subvol_inum target;

			bool need_second_pass = false;
			int ret2 = bch2_str_hash_check_key(trans, NULL, &bch2_dirent_hash_desc,
							   hash_info, &iter, k, &need_second_pass) ?:
				bch2_dirent_read_target(trans, inum, dirent, &target);
			if (ret2 > 0)
				continue;

			ret2 ?: (bch2_trans_unlock(trans), bch2_dir_emit(ctx, dirent, target));
		})));

	bch2_bkey_buf_exit(&sk, c);

	return ret < 0 ? ret : 0;
}

/* fsck */

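/* Fetch the first inode found at @inode_nr, regardless of snapshot: */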
static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
			      struct bch_inode_unpacked *inode)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
				     BTREE_ITER_all_snapshots, k, ret) {
		if (k.k->p.offset != inode_nr)
			break;
		if (!bkey_is_inode(k.k))
			continue;
		ret = bch2_inode_unpack(k, inode);
		goto found;
	}
	ret = bch_err_throw(trans->c, ENOENT_inode);
found:
	bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

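/*
 * Used by fsck to delete the dirent at @pos: looks up the directory's inode to
 * build its hash info, then deletes through the str_hash code so a hash
 * whiteout is emitted if needed.
 */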
int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bch_inode_unpacked dir_inode;
	struct bch_hash_info dir_hash_info;
	int ret;

	ret = lookup_first_inode(trans, pos.inode, &dir_inode);
	if (ret)
		goto err;

	dir_hash_info = bch2_hash_info_init(c, &dir_inode);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);

	ret =   bch2_btree_iter_traverse(trans, &iter) ?:
		bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
				    &dir_hash_info, &iter,
				    BTREE_UPDATE_internal_snapshot_node);
	bch2_trans_iter_exit(trans, &iter);
err:
	bch_err_fn(c, ret);
	return ret;
}