/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_CHECKSUM_H
#define _BCACHEFS_CHECKSUM_H

#include "bcachefs.h"
#include "extents_types.h"
#include "super-io.h"

#include <linux/crc64.h>
#include <crypto/chacha.h>

static inline bool bch2_checksum_mergeable(unsigned type)
{
	switch (type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		return true;
	default:
		return false;
	}
}

struct bch_csum bch2_checksum_merge(unsigned, struct bch_csum,
				    struct bch_csum, size_t);

#define BCH_NONCE_EXTENT	cpu_to_le32(1 << 28)
#define BCH_NONCE_BTREE		cpu_to_le32(2 << 28)
#define BCH_NONCE_JOURNAL	cpu_to_le32(3 << 28)
#define BCH_NONCE_PRIO		cpu_to_le32(4 << 28)
#define BCH_NONCE_POLY		cpu_to_le32(1U << 31)

struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce,
			      const void *, size_t);

/*
 * This is used for various on-disk data structures - bch_sb, prio_set, bset,
 * jset: the checksum is _always_ the first field of these structs
 */
#define csum_vstruct(_c, _type, _nonce, _i)				\
({									\
	const void *_start = ((const void *) (_i)) + sizeof((_i)->csum);\
									\
	bch2_checksum(_c, _type, _nonce, _start, vstruct_end(_i) - _start);\
})
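
/*
 * Hedged usage sketch (not part of this header): the journal write path is
 * assumed to checksum a jset roughly like
 *
 *	j->csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
 *
 * where JSET_CSUM_TYPE() and journal_nonce() are helpers defined elsewhere in
 * bcachefs. The struct's own csum field is skipped, and everything up to
 * vstruct_end() is covered.
 */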

static inline void bch2_csum_to_text(struct printbuf *out,
				     enum bch_csum_type type,
				     struct bch_csum csum)
{
	const u8 *p = (u8 *) &csum;
	unsigned bytes = type < BCH_CSUM_NR ? bch_crc_bytes[type] : 16;

	for (unsigned i = 0; i < bytes; i++)
		prt_hex_byte(out, p[i]);
}

static inline void bch2_csum_err_msg(struct printbuf *out,
				     enum bch_csum_type type,
				     struct bch_csum expected,
				     struct bch_csum got)
{
	prt_str(out, "checksum error, type ");
	bch2_prt_csum_type(out, type);
	prt_str(out, ": got ");
	bch2_csum_to_text(out, type, got);
	prt_str(out, " should be ");
	bch2_csum_to_text(out, type, expected);
}

int bch2_request_key(struct bch_sb *, struct bch_key *);
#ifndef __KERNEL__
int bch2_revoke_key(struct bch_sb *);
#endif

int bch2_encrypt(struct bch_fs *, unsigned, struct nonce,
		 void *data, size_t);

struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned,
				  struct nonce, struct bio *);

int bch2_rechecksum_bio(struct bch_fs *, struct bio *, struct bversion,
			struct bch_extent_crc_unpacked,
			struct bch_extent_crc_unpacked *,
			struct bch_extent_crc_unpacked *,
			unsigned, unsigned, unsigned);

int __bch2_encrypt_bio(struct bch_fs *, unsigned,
		       struct nonce, struct bio *);

static inline int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
				   struct nonce nonce, struct bio *bio)
{
	return bch2_csum_type_is_encryption(type)
		? __bch2_encrypt_bio(c, type, nonce, bio)
		: 0;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_crypt;

int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *,
			struct bch_key *);

#if 0
int bch2_disable_encryption(struct bch_fs *);
int bch2_enable_encryption(struct bch_fs *, bool);
#endif

void bch2_fs_encryption_exit(struct bch_fs *);
int bch2_fs_encryption_init(struct bch_fs *);

static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opt type,
							bool data)
{
	switch (type) {
	case BCH_CSUM_OPT_none:
		return BCH_CSUM_none;
	case BCH_CSUM_OPT_crc32c:
		return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
	case BCH_CSUM_OPT_crc64:
		return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
	case BCH_CSUM_OPT_xxhash:
		return BCH_CSUM_xxhash;
	default:
		BUG();
	}
}

static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c,
							 struct bch_io_opts opts)
{
	if (opts.nocow)
		return BCH_CSUM_none;

	if (c->sb.encryption_type)
		return c->opts.wide_macs
			? BCH_CSUM_chacha20_poly1305_128
			: BCH_CSUM_chacha20_poly1305_80;

	return bch2_csum_opt_to_type(opts.data_checksum, true);
}

static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c)
{
	if (c->sb.encryption_type)
		return BCH_CSUM_chacha20_poly1305_128;

	return bch2_csum_opt_to_type(c->opts.metadata_checksum, false);
}

static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
					    unsigned type)
{
	if (type >= BCH_CSUM_NR)
		return false;

	if (bch2_csum_type_is_encryption(type) && !c->chacha20_key_set)
		return false;

	return true;
}

/* returns true if not equal */
static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
{
	/*
	 * XXX: need some way of preventing the compiler from optimizing this
	 * into a form that isn't constant time..
	 */
	return ((l.lo ^ r.lo) | (l.hi ^ r.hi)) != 0;
}
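
/*
 * Hedged verification sketch using only declarations from this header (the
 * variable names are illustrative, not taken from a real caller):
 *
 *	struct bch_csum got = bch2_checksum(c, type, nonce, data, len);
 *
 *	if (bch2_crc_cmp(got, expected))
 *		bch2_csum_err_msg(&buf, type, expected, got);
 *
 * where buf is a struct printbuf owned by the caller; the error handling
 * shown is an assumption, not something this file prescribes.
 */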

/* for skipping ahead and encrypting/decrypting at an offset: */
static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
{
	EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1));

	le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE);
	return nonce;
}
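
/*
 * Hedged sketch of resuming encryption mid-buffer (c, type, nonce, data, len
 * and offset are illustrative names): provided offset is a multiple of
 * CHACHA_BLOCK_SIZE, something like
 *
 *	bch2_encrypt(c, type, nonce_add(nonce, offset), data + offset, len);
 *
 * is assumed to produce the same bytes as encrypting the whole buffer in one
 * call, since nonce_add() advances the first nonce word by the number of
 * ChaCha blocks skipped.
 */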

static inline struct nonce null_nonce(void)
{
	struct nonce ret;

	memset(&ret, 0, sizeof(ret));
	return ret;
}

static inline struct nonce extent_nonce(struct bversion version,
					struct bch_extent_crc_unpacked crc)
{
	unsigned compression_type = crc_is_compressed(crc)
		? crc.compression_type
		: 0;
	unsigned size = compression_type ? crc.uncompressed_size : 0;
	struct nonce nonce = (struct nonce) {{
		[0] = cpu_to_le32(size << 22),
		[1] = cpu_to_le32(version.lo),
		[2] = cpu_to_le32(version.lo >> 32),
		[3] = cpu_to_le32(version.hi |
				  (compression_type << 24)) ^ BCH_NONCE_EXTENT,
	}};

	return nonce_add(nonce, crc.nonce << 9);
}
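
/*
 * Note on extent_nonce(): the four nonce words pack the uncompressed size
 * (compressed extents only), version.lo, version.hi, the compression type and
 * the BCH_NONCE_EXTENT tag, then nonce_add() advances by crc.nonce sectors
 * (crc.nonce << 9 bytes). A hedged sketch of how a data path might use it:
 *
 *	bch2_encrypt_bio(c, crc.csum_type, extent_nonce(version, crc), bio);
 *
 * The read and write sides must derive the identical nonce from the same
 * (version, crc) pair for decryption to line up.
 */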

static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key)
{
	return le64_to_cpu(key->magic) != BCH_KEY_MAGIC;
}

static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb)
{
	__le64 magic = __bch2_sb_magic(sb);

	return (struct nonce) {{
		[0] = 0,
		[1] = 0,
		[2] = ((__le32 *) &magic)[0],
		[3] = ((__le32 *) &magic)[1],
	}};
}

static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c)
{
	__le64 magic = bch2_sb_magic(c);

	return (struct nonce) {{
		[0] = 0,
		[1] = 0,
		[2] = ((__le32 *) &magic)[0],
		[3] = ((__le32 *) &magic)[1],
	}};
}

#endif /* _BCACHEFS_CHECKSUM_H */