/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)
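
/*
 * Worked example (assuming 4KB blocks, so NAT_ENTRY_PER_BLOCK is
 * 4096 / sizeof(struct f2fs_nat_entry) = 4096 / 9 = 455):
 * START_NID(1000) = (1000 / 455) * 455 = 910, i.e. nid 1000 lives in the
 * NAT block that starts at nid 910, and NAT_BLOCK_OFFSET(910) = 2, i.e.
 * that block is the third block of the NAT area.
 */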

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

/* size of a free nid batch when shrinking */
#define SHRINK_NID_BATCH_SIZE	8

#define DEF_RA_NID_PAGES	0	/* # of nid pages to be read ahead */

/* maximum readahead size for node pages while getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (1%, i.e. 10MB per 1GB of RAM) */
#define DEF_RAM_THRESHOLD	1

/* control the dirty nats ratio threshold (default: 10% of max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD	10
/* control the total # of cached nats */
#define DEF_NAT_CACHE_THRESHOLD		100000

/* control the total # of node writes used for roll-forward recovery */
#define DEF_RF_NODE_BLOCKS		0

/* vector size for gang lookup in the NAT cache radix tree */
#define NAT_VEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* result of checking a pinned file's physical block alignment */
#define FILE_NOT_ALIGNED	1
/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
	IS_PREALLOC,		/* nat entry is preallocated */
};

/* For node type in __get_node_folio() */
enum node_type {
	NODE_TYPE_REGULAR,
	NODE_TYPE_INODE,
	NODE_TYPE_XATTR,
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	if (set)
		ne->ni.flag |= BIT(type);
	else
		ne->ni.flag &= ~BIT(type);
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	return ne->ni.flag & BIT(type);
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
}
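
/*
 * Example with hypothetical numbers: if max_nid were 1,000,000 and
 * dirty_nats_ratio were the default 10, excess_dirty_nats() would
 * report true once 100,000 or more cached NAT entries are dirty;
 * excess_cached_nats() always trips at DEF_NAT_CACHE_THRESHOLD
 * (100,000) cached entries in total.
 */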

enum mem_type {
	FREE_NIDS,		/* indicates the free nid list */
	NAT_ENTRIES,		/* indicates the cached nat entry */
	DIRTY_DENTS,		/* indicates dirty dentry pages */
	INO_ENTRIES,		/* indicates inode entries */
	READ_EXTENT_CACHE,	/* indicates read extent cache */
	AGE_EXTENT_CACHE,	/* indicates age extent cache */
	DISCARD_CACHE,		/* indicates memory of cached discard cmds */
	COMPRESS_PAGE,		/* indicates memory of cached compressed pages */
	BASE_CHECK,		/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: FREE_NID or PREALLOC_NID */
};

/* peek the first cached free nid without removing it from the list */
static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->nid_list_lock);
	if (nm_i->nid_cnt[FREE_NID] <= 0) {
		spin_unlock(&nm_i->nid_list_lock);
		return;
	}
	fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->nid_list_lock);
}
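
/*
 * Usage sketch (hypothetical caller): next_free_nid() returns without
 * touching *nid when the free list is empty, so initialize the output
 * first to make that case detectable:
 *
 *	nid_t nid = 0;
 *
 *	next_free_nid(sbi, &nid);
 *	if (!nid)
 *		handle_no_free_nid();	// hypothetical fallback
 */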

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
						nm_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;

	/*
	 * block_off = segment_off * 512 + off_in_segment
	 * OLD = (segment_off * 512) * 2 + off_in_segment
	 * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
	 */
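	/*
	 * Worked example (assuming 512 blocks per segment): for
	 * block_off = 600 (segment_off = 1, off_in_segment = 88),
	 * (600 << 1) - (600 & 511) = 1200 - 88 = 1112, which matches
	 * NEW = 2 * 600 - 88 and OLD = 1 * 512 * 2 + 88 above.
	 */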
	block_off = NAT_BLOCK_OFFSET(start);

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(block_off << 1) -
		(block_off & (BLKS_PER_SEG(sbi) - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += BLKS_PER_SEG(sbi);

	return block_addr;
}

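/*
 * Each NAT block has two on-disk copies in a pair of segments; XOR-ing
 * the per-segment bit of the relative offset toggles between them.
 * E.g., assuming 512-block segments, nat_blkaddr + 5 maps to
 * nat_blkaddr + 517, and vice versa.
 */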
static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	block_addr ^= BIT(sbi->log_blocks_per_seg);
	return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);
#endif
}

static inline nid_t ino_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	unsigned flag = le32_to_cpu(rn->footer.flag);
	return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	return le32_to_cpu(rn->footer.next_blkaddr);
}

static inline void fill_node_footer(const struct folio *folio, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(folio);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* preserve the old flag bits, such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}
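
/*
 * As the masks above imply, footer.flag packs the node offset into the
 * bits at and above OFFSET_BIT_SHIFT, while the low bits hold per-node
 * state such as COLD_BIT_SHIFT, FSYNC_BIT_SHIFT and DENT_BIT_SHIFT
 * (see is_node() and set_mark() below).
 */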

static inline void copy_node_footer(const struct folio *dst,
		const struct folio *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);
	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct folio *folio, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
	struct f2fs_node *rn = F2FS_NODE(folio);
	__u64 cp_ver = cur_cp_version(ckpt);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	rn->footer.cp_ver = cpu_to_le64(cp_ver);
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

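/*
 * cp_ver layout, as written by fill_node_footer_blkaddr() above: the low
 * 32 bits carry the checkpoint version and, when CP_CRC_RECOVERY_FLAG is
 * set, the high 32 bits carry the checkpoint CRC.
 */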
static inline bool is_recoverable_dnode(const struct folio *folio)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
	__u64 cp_ver = cur_cp_version(ckpt);

	/* ignore the CRC part if fsck.f2fs has set CP_NOCRC_RECOVERY_FLAG */
	if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
		return (cp_ver << 32) == (cpver_of_node(folio) << 32);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	return cp_ver == cpver_of_node(folio);
}

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 * Inode block (0)
 *     |- direct node (1)
 *     |- direct node (2)
 *     |- indirect node (3)
 *     |            `- direct node (4 => 4 + N - 1)
 *     |- indirect node (4 + N)
 *     |            `- direct node (5 + N => 5 + 2N - 1)
 *     `- double indirect node (5 + 2N)
 *                  `- indirect node (6 + 2N)
 *                        `- direct node
 *                  ......
 *                  `- indirect node ((6 + 2N) + x(N + 1))
 *                        `- direct node
 *                  ......
 *                  `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                        `- direct node
 */
static inline bool IS_DNODE(const struct folio *node_folio)
{
	unsigned int ofs = ofs_of_node(node_folio);

	if (f2fs_has_xattr_block(ofs))
		return true;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}
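
/*
 * Worked example (assuming 4KB blocks, where NIDS_PER_BLOCK == 1018):
 * the non-dnode offsets are 3, 1022 (4 + N) and 2041 (5 + 2N), plus the
 * indirect nodes under the double indirect one, i.e. 2042 + x * 1019
 * for x = 0 .. N - 1. Every other offset, e.g. 2043, is a dnode.
 */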

static inline int set_nid(struct folio *folio, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(folio);

	f2fs_folio_wait_writeback(folio, NODE, true, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return folio_mark_dirty(folio);
}

static inline nid_t get_nid(const struct folio *folio, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(folio);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}
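
/*
 * In set_nid()/get_nid() above, @i selects the container: when true,
 * @off is an inode offset (NODE_DIR1_BLOCK .. NODE_DIND_BLOCK) into the
 * inode's i_nid[] array; otherwise @off indexes the nid[] array of an
 * indirect node block.
 */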

/*
 * Coldness identification:
 *  - Mark cold files in f2fs_inode_info
 *  - Mark cold node blocks in their node footer
 *  - Mark cold data pages in page cache
 */

static inline int is_node(const struct folio *folio, int type)
{
	struct f2fs_node *rn = F2FS_NODE(folio);
	return le32_to_cpu(rn->footer.flag) & BIT(type);
}

#define is_cold_node(folio)	is_node(folio, COLD_BIT_SHIFT)
#define is_fsync_dnode(folio)	is_node(folio, FSYNC_BIT_SHIFT)
#define is_dent_dnode(folio)	is_node(folio, DENT_BIT_SHIFT)

static inline void set_cold_node(const struct folio *folio, bool is_dir)
{
	struct f2fs_node *rn = F2FS_NODE(folio);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (is_dir)
		flag &= ~BIT(COLD_BIT_SHIFT);
	else
		flag |= BIT(COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct folio *folio, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(folio);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	if (mark)
		flag |= BIT(type);
	else
		flag &= ~BIT(type);
	rn->footer.flag = cpu_to_le32(flag);

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_F_SB(folio), folio);
#endif
}
#define set_dentry_mark(folio, mark)	set_mark(folio, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(folio, mark)	set_mark(folio, mark, FSYNC_BIT_SHIFT)