/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO ((unsigned int)(~0))
#define NULL_SECNO ((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */

#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
#define F2FS_MIN_META_SEGMENTS 8 /* SB + 2 (CP + SIT + NAT) + SSA */

#define INVALID_MTIME ULLONG_MAX /* no valid blocks in a segment/section */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno)
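/*
 * Worked example (segment numbers assumed for illustration only): if the
 * main area begins at logical segment #512, i.e. free_i->start_segno == 512,
 * then GET_L2R_SEGNO(free_i, 515) == 3 and GET_R2L_SEGNO(free_i, 3) == 515;
 * the two macros are exact inverses of each other.
 */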

#define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
#define SE_PAGETYPE(se) ((IS_NODESEG((se)->type) ? NODE : DATA))

static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
						unsigned short seg_type)
{
	f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
}

#define MAIN_BLKADDR(sbi) \
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi) \
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi) ((sbi)->total_sections)

#define TOTAL_SEGS(sbi) \
	(SM_I(sbi) ? SM_I(sbi)->segment_count : \
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi) (SEGS_TO_BLKS(sbi, TOTAL_SEGS(sbi)))

#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \
					(sbi)->log_blocks_per_seg))

#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
	(SEGS_TO_BLKS(sbi, GET_R2L_SEGNO(FREE_I(sbi), segno))))

#define NEXT_FREE_BLKADDR(sbi, curseg) \
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
	(BLKS_TO_SEGS(sbi, GET_SEGOFF_FROM_SEG0(sbi, blk_addr)))
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1))
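/*
 * Worked example (geometry assumed: 512 blocks per segment, so
 * BLKS_PER_SEG(sbi) == 512): for blk_addr == SEG0_BLKADDR(sbi) + 1030,
 * GET_SEGOFF_FROM_SEG0() yields 1030, GET_SEGNO_FROM_SEG0() yields
 * 1030 / 512 == 2, and GET_BLKOFF_FROM_SEG0() yields 1030 & 511 == 6,
 * i.e. the 7th block of the 3rd segment counted from segment #0.
 */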

#define GET_SEGNO(sbi, blk_addr) \
	((!__is_valid_data_blkaddr(blk_addr)) ? \
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#ifdef CONFIG_BLK_DEV_ZONED
#define CAP_BLKS_PER_SEC(sbi) \
	(BLKS_PER_SEC(sbi) - (sbi)->unusable_blocks_per_sec)
#define CAP_SEGS_PER_SEC(sbi) \
	(SEGS_PER_SEC(sbi) - \
	BLKS_TO_SEGS(sbi, (sbi)->unusable_blocks_per_sec))
#else
#define CAP_BLKS_PER_SEC(sbi) BLKS_PER_SEC(sbi)
#define CAP_SEGS_PER_SEC(sbi) SEGS_PER_SEC(sbi)
#endif
#define GET_START_SEG_FROM_SEC(sbi, segno) \
	(rounddown(segno, SEGS_PER_SEC(sbi)))
#define GET_SEC_FROM_SEG(sbi, segno) \
	(((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi))
#define GET_SEG_FROM_SEC(sbi, secno) \
	((secno) * SEGS_PER_SEC(sbi))
#define GET_ZONE_FROM_SEC(sbi, secno) \
	(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno) \
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
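/*
 * Worked example (layout assumed: 4 segments per section and 2 sections per
 * zone): segment #10 belongs to section 10 / 4 == 2 via GET_SEC_FROM_SEG(),
 * its section starts at segment 8 via GET_START_SEG_FROM_SEC(), and the
 * section maps to zone 2 / 2 == 1 via GET_ZONE_FROM_SEC().
 */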

#define GET_SUM_BLOCK(sbi, segno) \
	(SM_I(sbi)->ssa_blkaddr + (segno / (sbi)->sums_per_block))
#define GET_SUM_BLKOFF(sbi, segno) (segno % (sbi)->sums_per_block)
#define SUM_BLK_PAGE_ADDR(sbi, folio, segno) \
	(folio_address(folio) + GET_SUM_BLKOFF(sbi, segno) * (sbi)->sum_blocksize)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno) \
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno) \
	((segno) / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno) \
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi) \
	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr) \
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr) \
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors) \
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
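/*
 * Worked example (assuming 4KB blocks over 512B sectors, i.e.
 * F2FS_LOG_SECTORS_PER_BLOCK == 3): SECTOR_FROM_BLOCK(5) == 40 and
 * SECTOR_TO_BLOCK(40) == 5, so the two macros simply shift addresses
 * between the block and sector spaces.
 */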

/*
 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into a
 * fragmented segment which has a similar aging degree.
 */
enum {
	LFS = 0,
	SSR,
	AT_SSR,
};

/*
 * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes.
 * GC_CB is based on cost-benefit algorithm.
 * GC_GREEDY is based on greedy algorithm.
 * GC_AT is based on age-threshold algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	GC_AT,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {
	BG_GC = 0,
	FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode; /* LFS or SSR */
	int gc_mode; /* GC_CB or GC_GREEDY */
	unsigned long *dirty_bitmap; /* dirty segment/section bitmap */
	unsigned int max_search; /*
				  * maximum # of segments/sections
				  * to search
				  */
	unsigned int offset; /* last scanned bitmap offset */
	unsigned int ofs_unit; /* bitmap search unit */
	unsigned int min_cost; /* minimum cost */
	unsigned long long oldest_age; /* oldest age of segments having the same min cost */
	unsigned int min_segno; /* segment # having min. cost */
	unsigned long long age; /* mtime of GCed section */
	unsigned long long age_threshold; /* age threshold */
	bool one_time_gc; /* one time GC */
};

struct seg_entry {
	unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10; /* # of valid blocks */
	unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */
	unsigned int padding:6; /* padding */
	unsigned char *cur_valid_map; /* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir; /* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime; /* modification time of the segment */
};
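/*
 * Sizing note (assuming the common 2MB segment of 512 4KB blocks): the
 * 10-bit valid_blocks/ckpt_valid_blocks fields can count up to 1023 blocks,
 * which comfortably covers a full segment, and together with type and
 * padding the bitfields pack into a single 32-bit word.
 */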

struct sec_entry {
	unsigned int valid_blocks; /* # of valid blocks in a section */
	unsigned int ckpt_valid_blocks; /* # of valid blocks last cp in a section */
};

#define MAX_SKIP_GC_COUNT 16

struct revoke_entry {
	struct list_head list;
	block_t old_addr; /* for revoking when fail to commit */
	pgoff_t index;
};

struct sit_info {
	block_t sit_base_addr; /* start block address of SIT area */
	block_t sit_blocks; /* # of blocks used by SIT area */
	block_t written_valid_blocks; /* # of valid blocks in main area */
	char *bitmap; /* all bitmaps pointer */
	char *sit_bitmap; /* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir; /* SIT bitmap mirror */

	/* bitmap of segments to be ignored by GC in case of errors */
	unsigned long *invalid_segmap;
#endif
	unsigned int bitmap_size; /* SIT bitmap size */

	unsigned long *tmp_map; /* bitmap for temporal use */
	unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */
	unsigned int dirty_sentries; /* # of dirty sentries */
	unsigned int sents_per_block; /* # of SIT entries per block */
	struct rw_semaphore sentry_lock; /* to protect SIT cache */
	struct seg_entry *sentries; /* SIT segment-level cache */
	struct sec_entry *sec_entries; /* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time; /* elapsed time after mount */
	unsigned long long mounted_time; /* mount time */
	unsigned long long min_mtime; /* min. modification time */
	unsigned long long max_mtime; /* max. modification time */
	unsigned long long dirty_min_mtime; /* rerange candidates in GC_AT */
	unsigned long long dirty_max_mtime; /* rerange candidates in GC_AT */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno; /* start segment number logically */
	unsigned int free_segments; /* # of free segments */
	unsigned int free_sections; /* # of free sections */
	spinlock_t segmap_lock; /* free segmap lock */
	unsigned long *free_segmap; /* free segment bitmap */
	unsigned long *free_secmap; /* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */
	DIRTY, /* to count # of dirty segments */
	PRE, /* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	unsigned long *dirty_secmap;
	struct mutex seglist_lock; /* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
	unsigned long *victim_secmap; /* background GC victims */
	unsigned long *pinned_secmap; /* pinned victims from foreground GC */
	unsigned int pinned_secmap_cnt; /* count of victims which have pinned data */
	bool enable_pin_section; /* enable pinning section */
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex; /* lock for consistency */
	struct f2fs_summary_block *sum_blk; /* cached summary block */
	struct rw_semaphore journal_rwsem; /* protect journal area */
	struct f2fs_journal *journal; /* cached journal info */
	unsigned char alloc_type; /* current allocation type */
	unsigned short seg_type; /* segment type like CURSEG_XXX_TYPE */
	unsigned int segno; /* current segment number */
	unsigned short next_blkoff; /* next block offset to write */
	unsigned int zone; /* current zone number */
	unsigned int next_segno; /* preallocated segment */
	int fragment_remained_chunk; /* remained block size in a chunk for block fragmentation mode */
	bool inited; /* indicate inmem log is inited */
};

struct sit_entry_set {
	struct list_head set_list; /* link with all sit sets */
	unsigned int start_segno; /* start segno of sits in set */
	unsigned int entry_cnt; /* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline bool is_curseg(struct f2fs_sb_info *sbi, unsigned int segno)
{
	int i;

	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
		if (segno == CURSEG_I(sbi, i)->segno)
			return true;
	}
	return false;
}

static inline bool is_cursec(struct f2fs_sb_info *sbi, unsigned int secno)
{
	int i;

	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
		if (secno == GET_SEC_FROM_SEG(sbi, CURSEG_I(sbi, i)->segno))
			return true;
	}
	return false;
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}
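/*
 * Usage sketch (hypothetical caller, not part of this header): cleaning code
 * usually queries the section-level count when large sections are enabled,
 * e.g.
 *
 *	if (get_valid_blocks(sbi, segno, true) == 0)
 *		victim_is_trivially_free = true;
 *
 * whereas per-segment SSR decisions pass use_section == false.
 */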

static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->ckpt_valid_blocks;
	else
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
}

static inline void set_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno)
{
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int blocks = 0;
	int i;

	for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
		struct seg_entry *se = get_seg_entry(sbi, start_segno);

		blocks += se->ckpt_valid_blocks;
	}
	get_sec_entry(sbi, segno)->ckpt_valid_blocks = blocks;
}

#ifdef CONFIG_F2FS_CHECK_FS
static inline void sanity_check_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno)
{
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int blocks = 0;
	int i;

	for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
		struct seg_entry *se = get_seg_entry(sbi, start_segno);

		blocks += se->ckpt_valid_blocks;
	}

	if (blocks != get_sec_entry(sbi, segno)->ckpt_valid_blocks) {
		f2fs_err(sbi,
			"Inconsistent ckpt valid blocks: "
			"seg entry(%d) vs sec entry(%d) at secno %d",
			blocks, get_sec_entry(sbi, segno)->ckpt_valid_blocks, secno);
		f2fs_bug_on(sbi, 1);
	}
}
#else
static inline void sanity_check_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno)
{
}
#endif
static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	rs->mtime = cpu_to_le64(se->mtime);
}

static inline void seg_info_to_sit_folio(struct f2fs_sb_info *sbi,
				struct folio *folio, unsigned int start)
{
	struct f2fs_sit_block *raw_sit;
	struct seg_entry *se;
	struct f2fs_sit_entry *rs;
	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
					(unsigned long)MAIN_SEGS(sbi));
	int i;

	raw_sit = folio_address(folio);
	memset(raw_sit, 0, PAGE_SIZE);
	for (i = 0; i < end - start; i++) {
		rs = &raw_sit->entries[i];
		se = get_seg_entry(sbi, start + i);
		__seg_info_to_raw_sit(se, rs);
	}
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	__seg_info_to_raw_sit(se, rs);

	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + SEGS_PER_SEC(sbi), start_segno);
	if (next >= start_segno + f2fs_usable_segs_in_sec(sbi)) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno, bool inmem)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	bool ret;

	spin_lock(&free_i->segmap_lock);
	ret = test_and_clear_bit(segno, free_i->free_segmap);
	if (!ret)
		goto unlock_out;

	free_i->free_segments++;

	if (!inmem && is_cursec(sbi, secno))
		goto unlock_out;

	/* check large section */
	next = find_next_bit(free_i->free_segmap,
			start_segno + SEGS_PER_SEC(sbi), start_segno);
	if (next < start_segno + f2fs_usable_segs_in_sec(sbi))
		goto unlock_out;

	ret = test_and_clear_bit(secno, free_i->free_secmap);
	if (!ret)
		goto unlock_out;

	free_i->free_sections++;

	if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[BG_GC]) == secno)
		sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[FG_GC]) == secno)
		sbi->next_victim_seg[FG_GC] = NULL_SEGNO;

unlock_out:
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
			sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
}

static inline unsigned int get_left_section_blocks(struct f2fs_sb_info *sbi,
		enum log_type type, unsigned int segno)
{
	if (f2fs_lfs_mode(sbi)) {
		unsigned int used_blocks = __is_large_section(sbi) ? SEGS_TO_BLKS(sbi,
			(segno - GET_START_SEG_FROM_SEC(sbi, segno))) : 0;
		return CAP_BLKS_PER_SEC(sbi) - used_blocks -
			CURSEG_I(sbi, type)->next_blkoff;
	}
	return CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true);
}

static inline void get_additional_blocks_required(struct f2fs_sb_info *sbi,
		unsigned int *total_node_blocks, unsigned int *total_dent_blocks,
		unsigned int *total_data_blocks, bool separate_dent)
{
	unsigned int segno, left_blocks;
	int i;
	unsigned int min_free_node_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int min_free_dent_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int min_free_data_blocks = CAP_BLKS_PER_SEC(sbi);

	/* check current data/node sections in the worst case. */
	for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) {
		segno = CURSEG_I(sbi, i)->segno;

		if (unlikely(segno == NULL_SEGNO))
			return;

		left_blocks = get_left_section_blocks(sbi, i, segno);

		if (i > CURSEG_COLD_DATA)
			min_free_node_blocks = min(min_free_node_blocks, left_blocks);
		else if (i == CURSEG_HOT_DATA && separate_dent)
			min_free_dent_blocks = left_blocks;
		else
			min_free_data_blocks = min(min_free_data_blocks, left_blocks);
	}

	*total_node_blocks = (*total_node_blocks > min_free_node_blocks) ?
				*total_node_blocks - min_free_node_blocks : 0;
	*total_dent_blocks = (*total_dent_blocks > min_free_dent_blocks) ?
				*total_dent_blocks - min_free_dent_blocks : 0;
	*total_data_blocks = (*total_data_blocks > min_free_data_blocks) ?
				*total_data_blocks - min_free_data_blocks : 0;
}

/*
 * Call get_additional_blocks_required() to calculate the dirty blocks that
 * still need to be placed in free sections. Note that dirty data must be
 * accounted as well in LFS mode when checkpoint is disabled.
 */
static inline int __get_secs_required(struct f2fs_sb_info *sbi)
{
	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS) +
					get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int total_data_blocks = 0;
	bool separate_dent = true;

	if (f2fs_lfs_mode(sbi))
		total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA);

	/*
	 * When active_logs != 4, dentry blocks and data blocks can be
	 * mixed in the same logs, so check their space together.
	 */
	if (F2FS_OPTION(sbi).active_logs != 4) {
		total_data_blocks += total_dent_blocks;
		total_dent_blocks = 0;
		separate_dent = false;
	}

	get_additional_blocks_required(sbi, &total_node_blocks, &total_dent_blocks,
					&total_data_blocks, separate_dent);

	return DIV_ROUND_UP(total_node_blocks, CAP_BLKS_PER_SEC(sbi)) +
		DIV_ROUND_UP(total_dent_blocks, CAP_BLKS_PER_SEC(sbi)) +
		DIV_ROUND_UP(total_data_blocks, CAP_BLKS_PER_SEC(sbi));
}
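/*
 * Worked example (numbers assumed): with CAP_BLKS_PER_SEC(sbi) == 1024,
 * 1500 remaining node blocks, 0 dentry blocks and 100 data blocks, the
 * function returns DIV_ROUND_UP(1500, 1024) + 0 + DIV_ROUND_UP(100, 1024)
 * == 2 + 0 + 1 == 3 additional sections needed to hold the remaining
 * dirty blocks.
 */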

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	unsigned int free_secs, required_secs;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	free_secs = free_sections(sbi) + freed;
	required_secs = needed + reserved_sections(sbi) +
					__get_secs_required(sbi);

	return free_secs < required_secs;
}

static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	return !has_not_enough_free_secs(sbi, freed, needed);
}

static inline bool has_enough_free_blks(struct f2fs_sb_info *sbi)
{
	unsigned int total_free_blocks = 0;
	unsigned int avail_user_block_count;

	spin_lock(&sbi->stat_lock);

	avail_user_block_count = get_available_block_count(sbi, NULL, true);
	total_free_blocks = avail_user_block_count - (unsigned int)valid_user_blocks(sbi);

	spin_unlock(&sbi->stat_lock);

	return total_free_blocks > 0;
}

static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;
	if (likely(has_enough_free_secs(sbi, 0, 0)))
		return true;
	if (!f2fs_lfs_mode(sbi) &&
		likely(has_enough_free_blks(sbi)))
		return true;
	return false;
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}
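/*
 * Worked example (numbers assumed): with 1,000,000 valid user blocks out of
 * a user_block_count of 4,000,000, utilization() returns 25, i.e. the
 * filesystem is 25% full.
 */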

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy
 * and update data in place. Users can control the policy through sysfs
 * entries. The policies and their triggering conditions are as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages is over min_fsync_blocks. (=default option)
 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
 * F2FS_IPU_NOCACHE - disable IPU bio cache.
 * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
 *                            FI_OPU_WRITE flag.
 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
 */
#define DEF_MIN_IPU_UTIL 70
#define DEF_MIN_FSYNC_BLOCKS 8
#define DEF_MIN_HOT_BLOCKS 16

#define SMALL_VOLUME_SEGMENTS (16 * 512) /* 16GB */

#define F2FS_IPU_DISABLE 0

/* Modification on enum should be synchronized with ipu_mode_names array */
enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
	F2FS_IPU_NOCACHE,
	F2FS_IPU_HONOR_OPU_WRITE,
	F2FS_IPU_MAX,
};

static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE;
}

#define F2FS_IPU_POLICY(name) \
static inline bool IS_##name(struct f2fs_sb_info *sbi) \
{ \
	return SM_I(sbi)->ipu_policy & BIT(name); \
}

F2FS_IPU_POLICY(F2FS_IPU_FORCE);
F2FS_IPU_POLICY(F2FS_IPU_SSR);
F2FS_IPU_POLICY(F2FS_IPU_UTIL);
F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL);
F2FS_IPU_POLICY(F2FS_IPU_FSYNC);
F2FS_IPU_POLICY(F2FS_IPU_ASYNC);
F2FS_IPU_POLICY(F2FS_IPU_NOCACHE);
F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE);
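/*
 * Usage sketch (value assumed, normally configured via the ipu_policy sysfs
 * entry): since the policy word is a bitmask over the enum above, a policy
 * of BIT(F2FS_IPU_SSR) | BIT(F2FS_IPU_FSYNC) makes both IS_F2FS_IPU_SSR()
 * and IS_F2FS_IPU_FSYNC() return true while IS_F2FS_IPU_FORCE() stays false.
 */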

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline bool valid_main_segno(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	return segno <= (MAIN_SEGS(sbi) - 1);
}

static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;
	unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < usable_blks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
		return -EFSCORRUPTED;
	}

	if (usable_blks_per_seg < BLKS_PER_SEG(sbi))
		f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
				BLKS_PER_SEG(sbi),
				usable_blks_per_seg) != BLKS_PER_SEG(sbi));

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
			|| !valid_main_segno(sbi, segno))) {
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
		return -EFSCORRUPTED;
	}
	return 0;
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	f2fs_bug_on(sbi, !valid_main_segno(sbi, start));

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}
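/*
 * Worked example (addresses assumed): with sit_base_addr == 1000 and
 * sit_blocks == 64, the two SIT copies occupy blocks [1000, 1063] and
 * [1064, 1127]; next_sit_addr(sbi, 1005) returns 1069 and
 * next_sit_addr(sbi, 1069) returns 1005, i.e. it flips an address to the
 * mirror copy used for the next checkpoint.
 */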

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t diff, now = ktime_get_boottime_seconds();

	if (now >= sit_i->mounted_time)
		return sit_i->elapsed_time + now - sit_i->mounted_time;

	/* system time is set to the past */
	if (!base_time) {
		diff = sit_i->mounted_time - now;
		if (sit_i->elapsed_time >= diff)
			return sit_i->elapsed_time - diff;
		return 0;
	}
	return sit_i->elapsed_time;
}
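/*
 * Worked example (times assumed, in seconds): with elapsed_time == 100 and
 * mounted_time == 5000, a current boottime of 5030 yields 130; if the clock
 * was set back to 4950, the non-base_time case returns
 * 100 - (5000 - 4950) == 50, clamped at 0 when the drift exceeds the
 * recorded elapsed time.
 */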

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
			- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (is_cursec(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return BLKS_PER_SEG(sbi);
	else if (type == NODE)
		return SEGS_TO_BLKS(sbi, 8);
	else if (type == META)
		return 8 * BIO_MAX_VECS;
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write with the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_VECS;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}

static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
		return;
wake_up:
	dcc->discard_wake = true;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}