/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H

/*
 * bcachefs on disk data structures
 *
 * OVERVIEW:
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 *  - superblock
 *  - journal
 *  - btree
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem, or that we need prior to reading the
 * journal/btree roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the first
 * entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length (and
 * keys effectively are variable length too, due to packing) we can't do random
 * access without building up additional in memory tables in the btree node read
 * path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case, the
 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 * be generous with field sizes in the common part of the key format (64 bit
 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 */
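
/*
 * For illustration: the size byte (u64s) caps a key/value pair at 255 u64s,
 * i.e. 255 * 8 = 2040 bytes - the "just under 2k" figure above.
 */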

#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#include <uapi/linux/magic.h>
#include "vstructs.h"

#ifdef __KERNEL__
typedef uuid_t __uuid_t;
#endif

#define BITMASK(name, type, field, offset, end)				\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
									\
static inline __u64 name(const type *k)					\
{									\
	return (k->field >> offset) & ~(~0ULL << (end - offset));	\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;		\
}

#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
static const __maybe_unused __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;\
									\
static inline __u64 name(const type *k)					\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
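
/*
 * Illustrative expansion (a sketch of what the macro generates): invoking
 *
 *	LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2)
 *
 * defines BCH_SB_CLEAN()/SET_BCH_SB_CLEAN() accessors for bit 1 of the
 * little-endian flags[0] word, byte swapping as needed:
 *
 *	SET_BCH_SB_CLEAN(sb, true);
 *	if (BCH_SB_CLEAN(sb))
 *		...
 */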

struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};

/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__aligned(4)
#endif
;

#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
#define KEY_SIZE_MAX			((__u32)~0U)

static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
	return (struct bpos) {
		.inode		= inode,
		.offset		= offset,
		.snapshot	= snapshot,
	};
}

#define POS_MIN				SPOS(0, 0, 0)
#define POS_MAX				SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)		SPOS(_inode, _offset, 0)
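
/*
 * Usage sketch: positions in snapshot aware btrees carry all three fields;
 * btrees that don't use snapshots leave the snapshot field 0, hence the POS()
 * shorthand (inum/sector/snapshot_id below are hypothetical variables):
 *
 *	struct bpos p = SPOS(inum, sector, snapshot_id);
 *	struct bpos q = POS(inum, sector);		// snapshot == 0
 */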

/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__aligned(4)
#endif
;

struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	bversion;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	bversion;

	__u8		pad[1];
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/*
 * The big-endian version of bkey can't be compiled by rustc with the "aligned"
 * attr since it doesn't allow types to have both "packed" and "aligned" attrs.
 * So for Rust compatibility, don't include this. It can be included in the LE
 * version because the "packed" attr is redundant in that case.
 *
 * History: (quoting Kent)
 *
 * Specifically, when I was designing bkey, I wanted the header to be no
 * bigger than necessary so that bkey_packed could use the rest. That means that
 * decently often extent keys will fit into only 8 bytes, instead of spilling over
 * to 16.
 *
 * But packed_bkey treats the part after the header - the packed section -
 * as a single multi word, variable length integer. And bkey, the unpacked
 * version, is just a special case version of a bkey_packed; all the packed
 * bkey code will work on keys in any packed format, the in-memory
 * representation of an unpacked key also is just one type of packed key...
 *
 * So that constrains the key part of a big endian bkey to start right
 * after the header.
 *
 * If we ever do a bkey_v2 and need to expand the header by another byte for
 * some reason - that will clean up this wart.
 */
__aligned(8)
#endif
;

struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys, we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __packed __aligned(8);

typedef struct {
	__le64			lo;
	__le64			hi;
} bch_le128;

#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX			U8_MAX
#define BKEY_VAL_U64s_MAX		(BKEY_U64s_MAX - BKEY_U64s)

#define KEY_PACKED_BITS_START		24

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1
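
/*
 * For illustration: KEY_PACKED_BITS_START is 24 because the three header bytes
 * of bkey_packed (u64s, format/needs_whiteout, type) occupy bits 0-23; the
 * packed key bits begin immediately after, at key_start.
 */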

enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};

#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	bversion.hi),		\
		bkey_format_field(VERSION_LO,	bversion.lo),		\
	},								\
})
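
/*
 * BKEY_FORMAT_CURRENT describes unpacked keys: every field at its full width,
 * all field_offsets zero. A btree node may use a narrower local format; e.g.
 * (hypothetical numbers) a node whose keys all have inode 4096 could set
 * field_offset[BKEY_FIELD_INODE] = 4096 and a small or zero
 * bits_per_field[BKEY_FIELD_INODE], so packed keys spend few or no bits on
 * the inode field - this is the packing the overview comment refers to.
 */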

/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	struct bkey	k;
	struct bch_val	v;
};

#define POS_KEY(_pos)							\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= _pos,						\
})

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct bkey_i key; __u64 key ## _pad[pad]
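
/*
 * Usage sketch: __BKEY_PADDED() declares a bkey_i plus trailing u64s of value
 * space, e.g. on the stack:
 *
 *	struct { __BKEY_PADDED(k, 8); } tmp;
 *
 *	bkey_init(&tmp.k.k);
 *	// tmp.k.v is now followed by room for 8 u64s of value
 */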

enum bch_bkey_type_flags {
	BKEY_TYPE_strict_btree_checks	= BIT(0),
};

/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order.  Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()						\
	x(deleted,		0,	0)				\
	x(whiteout,		1,	0)				\
	x(error,		2,	0)				\
	x(cookie,		3,	0)				\
	x(hash_whiteout,	4,	BKEY_TYPE_strict_btree_checks)	\
	x(btree_ptr,		5,	BKEY_TYPE_strict_btree_checks)	\
	x(extent,		6,	BKEY_TYPE_strict_btree_checks)	\
	x(reservation,		7,	BKEY_TYPE_strict_btree_checks)	\
	x(inode,		8,	BKEY_TYPE_strict_btree_checks)	\
	x(inode_generation,	9,	BKEY_TYPE_strict_btree_checks)	\
	x(dirent,		10,	BKEY_TYPE_strict_btree_checks)	\
	x(xattr,		11,	BKEY_TYPE_strict_btree_checks)	\
	x(alloc,		12,	BKEY_TYPE_strict_btree_checks)	\
	x(quota,		13,	BKEY_TYPE_strict_btree_checks)	\
	x(stripe,		14,	BKEY_TYPE_strict_btree_checks)	\
	x(reflink_p,		15,	BKEY_TYPE_strict_btree_checks)	\
	x(reflink_v,		16,	BKEY_TYPE_strict_btree_checks)	\
	x(inline_data,		17,	BKEY_TYPE_strict_btree_checks)	\
	x(btree_ptr_v2,		18,	BKEY_TYPE_strict_btree_checks)	\
	x(indirect_inline_data,	19,	BKEY_TYPE_strict_btree_checks)	\
	x(alloc_v2,		20,	BKEY_TYPE_strict_btree_checks)	\
	x(subvolume,		21,	BKEY_TYPE_strict_btree_checks)	\
	x(snapshot,		22,	BKEY_TYPE_strict_btree_checks)	\
	x(inode_v2,		23,	BKEY_TYPE_strict_btree_checks)	\
	x(alloc_v3,		24,	BKEY_TYPE_strict_btree_checks)	\
	x(set,			25,	0)				\
	x(lru,			26,	BKEY_TYPE_strict_btree_checks)	\
	x(alloc_v4,		27,	BKEY_TYPE_strict_btree_checks)	\
	x(backpointer,		28,	BKEY_TYPE_strict_btree_checks)	\
	x(inode_v3,		29,	BKEY_TYPE_strict_btree_checks)	\
	x(bucket_gens,		30,	BKEY_TYPE_strict_btree_checks)	\
	x(snapshot_tree,	31,	BKEY_TYPE_strict_btree_checks)	\
	x(logged_op_truncate,	32,	BKEY_TYPE_strict_btree_checks)	\
	x(logged_op_finsert,	33,	BKEY_TYPE_strict_btree_checks)	\
	x(accounting,		34,	BKEY_TYPE_strict_btree_checks)	\
	x(inode_alloc_cursor,	35,	BKEY_TYPE_strict_btree_checks)

enum bch_bkey_type {
#define x(name, nr, ...) KEY_TYPE_##name	= nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};

struct bch_deleted {
	struct bch_val		v;
};

struct bch_whiteout {
	struct bch_val		v;
};

struct bch_error {
	struct bch_val		v;
};

struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};

struct bch_hash_whiteout {
	struct bch_val		v;
};

struct bch_set {
	struct bch_val		v;
};

/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __packed __aligned(8);

struct bch_backpointer {
	struct bch_val		v;
	__u8			btree_id;
	__u8			level;
	__u8			data_type;
	__u8			bucket_gen;
	__u32			pad;
	__u32			bucket_len;
	struct bpos		pos;
} __packed __aligned(8);

/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

#define BCH_SB_FIELDS()				\
	x(journal,			0)	\
	x(members_v1,			1)	\
	x(crypt,			2)	\
	x(replicas_v0,			3)	\
	x(quota,			4)	\
	x(disk_groups,			5)	\
	x(clean,			6)	\
	x(replicas,			7)	\
	x(journal_seq_blacklist,	8)	\
	x(journal_v2,			9)	\
	x(counters,			10)	\
	x(members_v2,			11)	\
	x(errors,			12)	\
	x(ext,				13)	\
	x(downgrade,			14)

#include "alloc_background_format.h"
#include "dirent_format.h"
#include "disk_accounting_format.h"
#include "disk_groups_format.h"
#include "extents_format.h"
#include "ec_format.h"
#include "inode_format.h"
#include "journal_seq_blacklist_format.h"
#include "logged_ops_format.h"
#include "lru_format.h"
#include "quota_format.h"
#include "reflink_format.h"
#include "replicas_format.h"
#include "snapshot_format.h"
#include "subvolume_format.h"
#include "sb-counters_format.h"
#include "sb-downgrade_format.h"
#include "sb-errors_format.h"
#include "sb-members_format.h"
#include "xattr_format.h"

enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};
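
/*
 * Sketch of how a reader walks these variable length, type tagged fields with
 * the helpers from "vstructs.h" (illustrative; check vstructs.h for the exact
 * helper names):
 *
 *	struct bch_sb_field *f;
 *
 *	vstruct_for_each(sb, f)
 *		if (le32_to_cpu(f->type) == BCH_SB_FIELD_members_v2)
 *			...
 */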

/*
 * Most superblock fields are replicated in all devices' superblocks - a few
 * are not:
 */
#define BCH_SINGLE_DEVICE_SB_FIELDS		\
	((1U << BCH_SB_FIELD_journal)|		\
	 (1U << BCH_SB_FIELD_journal_v2))

/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[];
};

struct bch_sb_field_journal_v2 {
	struct bch_sb_field	field;

	struct bch_sb_field_journal_v2_entry {
		__le64		start;
		__le64		nr;
	}			d[];
};

/* BCH_SB_FIELD_crypt: */

struct nonce {
	__le32			d[4];
};

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|		\
	 ((__u64) 'h' << 16)|((__u64) '*' << 24)|		\
	 ((__u64) '*' << 32)|((__u64) 'k' << 40)|		\
	 ((__u64) 'e' << 48)|((__u64) 'y' << 56))

struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};

/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be encrypted
 * with the key userspace provides, but if encryption has been turned off we'll
 * just store the master key unencrypted in the superblock so we can access the
 * previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};

LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);

/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */
struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	struct bkey_i		start[0];
	__u64			_data[];
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;
	__le64			journal_seq;

	struct jset_entry	start[0];
	__u64			_data[];
};

struct bch_sb_field_ext {
	struct bch_sb_field	field;
	__le64			recovery_passes_required[2];
	__le64			errors_silent[8];
	__le64			btrees_lost_data;
};

/* Superblock: */

/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock, btree
 * nodes, journal entries
 */
#define BCH_VERSION_MAJOR(_v)		((__u16) ((_v) >> 10))
#define BCH_VERSION_MINOR(_v)		((__u16) ((_v) & ~(~0U << 10)))
#define BCH_VERSION(_major, _minor)	(((_major) << 10)|(_minor) << 0)
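
/*
 * e.g. BCH_VERSION(1, 3) == (1 << 10) | 3 == 1027;
 * BCH_VERSION_MAJOR(1027) == 1, BCH_VERSION_MINOR(1027) == 3.
 */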

/*
 * field 1:		version name
 * field 2:		BCH_VERSION(major, minor)
 * field 3:		recovery passes required on upgrade
 */
#define BCH_METADATA_VERSIONS()						\
	x(bkey_renumber,		BCH_VERSION(0, 10))		\
	x(inode_btree_change,		BCH_VERSION(0, 11))		\
	x(snapshot,			BCH_VERSION(0, 12))		\
	x(inode_backpointers,		BCH_VERSION(0, 13))		\
	x(btree_ptr_sectors_written,	BCH_VERSION(0, 14))		\
	x(snapshot_2,			BCH_VERSION(0, 15))		\
	x(reflink_p_fix,		BCH_VERSION(0, 16))		\
	x(subvol_dirent,		BCH_VERSION(0, 17))		\
	x(inode_v2,			BCH_VERSION(0, 18))		\
	x(freespace,			BCH_VERSION(0, 19))		\
	x(alloc_v4,			BCH_VERSION(0, 20))		\
	x(new_data_types,		BCH_VERSION(0, 21))		\
	x(backpointers,			BCH_VERSION(0, 22))		\
	x(inode_v3,			BCH_VERSION(0, 23))		\
	x(unwritten_extents,		BCH_VERSION(0, 24))		\
	x(bucket_gens,			BCH_VERSION(0, 25))		\
	x(lru_v2,			BCH_VERSION(0, 26))		\
	x(fragmentation_lru,		BCH_VERSION(0, 27))		\
	x(no_bps_in_alloc_keys,		BCH_VERSION(0, 28))		\
	x(snapshot_trees,		BCH_VERSION(0, 29))		\
	x(major_minor,			BCH_VERSION(1,  0))		\
	x(snapshot_skiplists,		BCH_VERSION(1,  1))		\
	x(deleted_inodes,		BCH_VERSION(1,  2))		\
	x(rebalance_work,		BCH_VERSION(1,  3))		\
	x(member_seq,			BCH_VERSION(1,  4))		\
	x(subvolume_fs_parent,		BCH_VERSION(1,  5))		\
	x(btree_subvolume_children,	BCH_VERSION(1,  6))		\
	x(mi_btree_bitmap,		BCH_VERSION(1,  7))		\
	x(bucket_stripe_sectors,	BCH_VERSION(1,  8))		\
	x(disk_accounting_v2,		BCH_VERSION(1,  9))		\
	x(disk_accounting_v3,		BCH_VERSION(1, 10))		\
	x(disk_accounting_inum,		BCH_VERSION(1, 11))		\
	x(rebalance_work_acct_fix,	BCH_VERSION(1, 12))		\
	x(inode_has_child_snapshots,	BCH_VERSION(1, 13))		\
	x(backpointer_bucket_gen,	BCH_VERSION(1, 14))		\
	x(disk_accounting_big_endian,	BCH_VERSION(1, 15))		\
	x(reflink_p_may_update_opts,	BCH_VERSION(1, 16))		\
	x(inode_depth,			BCH_VERSION(1, 17))		\
	x(persistent_inode_cursors,	BCH_VERSION(1, 18))		\
	x(autofix_errors,		BCH_VERSION(1, 19))		\
	x(directory_size,		BCH_VERSION(1, 20))		\
	x(cached_backpointers,		BCH_VERSION(1, 21))		\
	x(stripe_backpointers,		BCH_VERSION(1, 22))		\
	x(stripe_lru,			BCH_VERSION(1, 23))		\
	x(casefolding,			BCH_VERSION(1, 24))		\
	x(extent_flags,			BCH_VERSION(1, 25))

enum bcachefs_metadata_version {
	bcachefs_metadata_version_min = 9,
#define x(t, n)	bcachefs_metadata_version_##t = n,
	BCH_METADATA_VERSIONS()
#undef x
	bcachefs_metadata_version_max
};

static const __maybe_unused
unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;

#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)

#define BCH_SB_SECTOR			8

#define BCH_SB_LAYOUT_SIZE_BITS_MAX	16 /* 32 MB */

struct bch_sb_layout {
	__uuid_t		magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* log2 of max size, in 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __packed __aligned(8);

#define BCH_SB_LAYOUT_SECTOR	7

/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd need it for
 * @magic	- identifies as a bcachefs superblock (BCHFS_MAGIC)
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @uuid	- used for generating various magic numbers and identifying
 *                member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	__uuid_t		magic;
	__uuid_t		uuid;
	__uuid_t		user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[7];
	__le64			write_time;
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	struct bch_sb_field	start[0];
	__le64			_data[];
} __packed __aligned(8);

/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero, encryption is enabled; overrides
 *			   DATA/META_CSUM_TYPE. Also indicates encryption
 *			   algorithm in use, if/when we get more than one
 */

LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);
LE64_BITMASK(BCH_SB_PROMOTE_WHOLE_EXTENTS,
					struct bch_sb, flags[0], 63, 64);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
/* one free bit */
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW,		struct bch_sb, flags[4], 33, 34);
LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE,	struct bch_sb, flags[4], 34, 54);
LE64_BITMASK(BCH_SB_VERSION_UPGRADE,	struct bch_sb, flags[4], 54, 56);

LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
					struct bch_sb, flags[4], 60, 64);

LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
					struct bch_sb, flags[5],  0, 16);
LE64_BITMASK(BCH_SB_ALLOCATOR_STUCK_TIMEOUT,
					struct bch_sb, flags[5], 16, 32);
LE64_BITMASK(BCH_SB_VERSION_INCOMPAT,	struct bch_sb, flags[5], 32, 48);
LE64_BITMASK(BCH_SB_VERSION_INCOMPAT_ALLOWED,
					struct bch_sb, flags[5], 48, 64);
LE64_BITMASK(BCH_SB_SHARD_INUMS_NBITS,	struct bch_sb, flags[6],  0,  4);
LE64_BITMASK(BCH_SB_WRITE_ERROR_TIMEOUT,struct bch_sb, flags[6],  4, 14);
LE64_BITMASK(BCH_SB_CSUM_ERR_RETRY_NR,	struct bch_sb, flags[6], 14, 20);
LE64_BITMASK(BCH_SB_CASEFOLD,		struct bch_sb, flags[6], 22, 23);

static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
{
	return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
}

static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
	SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
	SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
}

static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
{
	return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
		(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
}

static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
}

/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_siphash
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)	\
	x(incompat_version_field,	19)	\
	x(casefolding,			20)

#define BCH_SB_FEATURES_ALWAYS				\
	(BIT_ULL(BCH_FEATURE_new_extent_overwrite)|	\
	 BIT_ULL(BCH_FEATURE_extents_above_btree_updates)|\
	 BIT_ULL(BCH_FEATURE_btree_updates_journalled)|\
	 BIT_ULL(BCH_FEATURE_alloc_v2)|\
	 BIT_ULL(BCH_FEATURE_extents_across_btree_nodes))

#define BCH_SB_FEATURES_ALL				\
	(BCH_SB_FEATURES_ALWAYS|			\
	 BIT_ULL(BCH_FEATURE_new_siphash)|		\
	 BIT_ULL(BCH_FEATURE_btree_ptr_v2)|		\
	 BIT_ULL(BCH_FEATURE_new_varint)|		\
	 BIT_ULL(BCH_FEATURE_journal_no_flush)|		\
	 BIT_ULL(BCH_FEATURE_incompat_version_field))

enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};

#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};

/* options: */

#define BCH_VERSION_UPGRADE_OPTS()	\
	x(compatible,		0)	\
	x(incompatible,		1)	\
	x(none,			2)

enum bch_version_upgrade_opts {
#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
	BCH_VERSION_UPGRADE_OPTS()
#undef x
};

#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U

#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(fix_safe,		1)	\
	x(panic,		2)	\
	x(ro,			3)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};

#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};

#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

static const __maybe_unused unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,
	[BCH_CSUM_chacha20_poly1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128:
		return true;
	default:
		return false;
	}
}

#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opt {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};

/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCHFS_MAGIC							\
	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)

#define BCACHEFS_STATFS_MAGIC		BCACHEFS_SUPER_MAGIC

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}

/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)		\
	x(log,			9)		\
	x(overwrite,		10)		\
	x(write_buffer_keys,	11)		\
	x(datetime,		12)		\
	x(log_bkey,		13)

enum bch_jset_entry_type {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};

static inline bool jset_entry_is_key(struct jset_entry *e)
{
	switch (e->type) {
	case BCH_JSET_ENTRY_btree_keys:
	case BCH_JSET_ENTRY_btree_root:
	case BCH_JSET_ENTRY_write_buffer_keys:
		return true;
	}

	return false;
}

/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than what
 * made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};
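
/*
 * Concrete example (illustrative): suppose we crash while journal seq 100 is
 * open, after btree node writes containing updates from seq 100 reached disk
 * but the journal entry itself didn't. Seq 100 is then blacklisted: bsets
 * whose journal_seq is 100 are ignored on future recoveries, and seq 100 is
 * never reused for a new journal entry.
 */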

#define BCH_FS_USAGE_TYPES()			\
	x(reserved,		0)		\
	x(inodes,		1)		\
	x(key_version,		2)

enum bch_fs_usage_type {
#define x(f, nr)	BCH_FS_USAGE_##f	= nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __packed;

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry_v1 r;
} __packed;

struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __packed;

struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __packed;

struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;
	__u32			pad;

	__le64			_buckets_ec;		/* No longer used */
	__le64			_buckets_unavailable;	/* No longer used */

	struct jset_entry_dev_usage_type d[];
};

static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}

struct jset_entry_log {
	struct jset_entry	entry;
	u8			d[];
} __packed __aligned(8);

static inline unsigned jset_entry_log_msg_bytes(struct jset_entry_log *l)
{
	unsigned b = vstruct_bytes(&l->entry) - offsetof(struct jset_entry_log, d);

	while (b && !l->d[b - 1])
		--b;
	return b;
}

struct jset_entry_datetime {
	struct jset_entry	entry;
	__le64			seconds;
} __packed __aligned(8);

/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	struct jset_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8

/* Btree: */

enum btree_id_flags {
	BTREE_IS_extents	= BIT(0),
	BTREE_IS_snapshots	= BIT(1),
	BTREE_IS_snapshot_field	= BIT(2),
	BTREE_IS_data		= BIT(3),
	BTREE_IS_write_buffer	= BIT(4),
};

#define BCH_BTREE_IDS()								\
	x(extents,		0,						\
	  BTREE_IS_extents|							\
	  BTREE_IS_snapshots|							\
	  BTREE_IS_data,							\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_error)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_extent)|						\
	  BIT_ULL(KEY_TYPE_reservation)|					\
	  BIT_ULL(KEY_TYPE_reflink_p)|						\
	  BIT_ULL(KEY_TYPE_inline_data))					\
	x(inodes,		1,						\
	  BTREE_IS_snapshots,							\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_inode)|						\
	  BIT_ULL(KEY_TYPE_inode_v2)|						\
	  BIT_ULL(KEY_TYPE_inode_v3)|						\
	  BIT_ULL(KEY_TYPE_inode_generation))					\
	x(dirents,		2,						\
	  BTREE_IS_snapshots,							\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_dirent))						\
	x(xattrs,		3,						\
	  BTREE_IS_snapshots,							\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_xattr))						\
	x(alloc,		4,	0,					\
	  BIT_ULL(KEY_TYPE_alloc)|						\
	  BIT_ULL(KEY_TYPE_alloc_v2)|						\
	  BIT_ULL(KEY_TYPE_alloc_v3)|						\
	  BIT_ULL(KEY_TYPE_alloc_v4))						\
	x(quotas,		5,	0,					\
	  BIT_ULL(KEY_TYPE_quota))						\
	x(stripes,		6,	0,					\
	  BIT_ULL(KEY_TYPE_stripe))						\
	x(reflink,		7,						\
	  BTREE_IS_extents|							\
	  BTREE_IS_data,							\
	  BIT_ULL(KEY_TYPE_reflink_v)|						\
	  BIT_ULL(KEY_TYPE_indirect_inline_data)|				\
	  BIT_ULL(KEY_TYPE_error))						\
	x(subvolumes,		8,	0,					\
	  BIT_ULL(KEY_TYPE_subvolume))						\
	x(snapshots,		9,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot))						\
	x(lru,			10,						\
	  BTREE_IS_write_buffer,						\
	  BIT_ULL(KEY_TYPE_set))						\
	x(freespace,		11,						\
	  BTREE_IS_extents,							\
	  BIT_ULL(KEY_TYPE_set))						\
	x(need_discard,		12,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(backpointers,		13,						\
	  BTREE_IS_write_buffer,						\
	  BIT_ULL(KEY_TYPE_backpointer))					\
	x(bucket_gens,		14,	0,					\
	  BIT_ULL(KEY_TYPE_bucket_gens))					\
	x(snapshot_trees,	15,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot_tree))					\
	x(deleted_inodes,	16,						\
	  BTREE_IS_snapshot_field|						\
	  BTREE_IS_write_buffer,						\
	  BIT_ULL(KEY_TYPE_set))						\
	x(logged_ops,		17,	0,					\
	  BIT_ULL(KEY_TYPE_logged_op_truncate)|					\
	  BIT_ULL(KEY_TYPE_logged_op_finsert)|					\
	  BIT_ULL(KEY_TYPE_inode_alloc_cursor))					\
	x(rebalance_work,	18,						\
	  BTREE_IS_snapshot_field|						\
	  BTREE_IS_write_buffer,						\
	  BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))			\
	x(subvolume_children,	19,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(accounting,		20,						\
	  BTREE_IS_snapshot_field|						\
	  BTREE_IS_write_buffer,						\
	  BIT_ULL(KEY_TYPE_accounting))						\

enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

/*
 * Maximum number of btrees that we will _ever_ have under the current scheme,
 * where we refer to them with 64 bit bitfields - and we also need a bit for
 * the interior btree node type:
 */
#define BTREE_ID_NR_MAX		63

static inline bool btree_id_is_alloc(enum btree_id id)
{
	switch (id) {
	case BTREE_ID_alloc:
	case BTREE_ID_backpointers:
	case BTREE_ID_need_discard:
	case BTREE_ID_freespace:
	case BTREE_ID_bucket_gens:
	case BTREE_ID_lru:
	case BTREE_ID_accounting:
		return true;
	default:
		return false;
	}
}

#define BTREE_MAX_DEPTH		4U

/* Btree nodes */

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	struct bkey_packed	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);

struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __packed __aligned(8);

LE64_BITMASK(BTREE_NODE_ID_LO,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_ID_HI,	struct btree_node, flags,  9, 25);
/* 25-32 unused */
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);

static inline __u64 BTREE_NODE_ID(struct btree_node *n)
{
	return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
}

static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
{
	SET_BTREE_NODE_ID_LO(n, v);
	SET_BTREE_NODE_ID_HI(n, v >> 4);
}

struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);
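
/*
 * Putting the two together, the on disk layout of a btree node is (sketch):
 *
 *	offset 0:	struct btree_node	(csum, header, first bset)
 *	later writes:	struct btree_node_entry	(csum + bset), one appended per
 *			write until the node is full and gets rewritten
 *
 * Each bset's keys are sorted; the node as a whole is the log of these sets,
 * per the BTREE section of the overview comment.
 */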

#endif /* _BCACHEFS_FORMAT_H */