1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/f2fs/segment.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/sched/mm.h>
13 #include <linux/prefetch.h>
14 #include <linux/kthread.h>
15 #include <linux/swap.h>
16 #include <linux/timer.h>
17 #include <linux/freezer.h>
18 #include <linux/sched/signal.h>
19 #include <linux/random.h>
20
21 #include "f2fs.h"
22 #include "segment.h"
23 #include "node.h"
24 #include "gc.h"
25 #include "iostat.h"
26 #include <trace/events/f2fs.h>
27
28 #define __reverse_ffz(x) __reverse_ffs(~(x))
29
30 static struct kmem_cache *discard_entry_slab;
31 static struct kmem_cache *discard_cmd_slab;
32 static struct kmem_cache *sit_entry_set_slab;
33 static struct kmem_cache *revoke_entry_slab;
34
35 static unsigned long __reverse_ulong(unsigned char *str)
36 {
37 unsigned long tmp = 0;
38 int shift = 24, idx = 0;
39
40 #if BITS_PER_LONG == 64
41 shift = 56;
42 #endif
43 while (shift >= 0) {
44 tmp |= (unsigned long)str[idx++] << shift;
45 shift -= BITS_PER_BYTE;
46 }
47 return tmp;
48 }
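/*
 * Illustrative example (not in the original source): __reverse_ulong()
 * loads the bytes of @str in big-endian order, so byte 0 of the bitmap
 * ends up in the most significant byte of the word regardless of host
 * endianness. On a 64-bit build:
 *
 *   unsigned char buf[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0x01 };
 *   __reverse_ulong(buf) == 0x8000000000000001UL;
 */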
49
50 /*
51 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
52 * MSB and LSB are reversed in a byte by f2fs_set_bit.
53 */
54 static inline unsigned long __reverse_ffs(unsigned long word)
55 {
56 int num = 0;
57
58 #if BITS_PER_LONG == 64
59 if ((word & 0xffffffff00000000UL) == 0)
60 num += 32;
61 else
62 word >>= 32;
63 #endif
64 if ((word & 0xffff0000) == 0)
65 num += 16;
66 else
67 word >>= 16;
68
69 if ((word & 0xff00) == 0)
70 num += 8;
71 else
72 word >>= 8;
73
74 if ((word & 0xf0) == 0)
75 num += 4;
76 else
77 word >>= 4;
78
79 if ((word & 0xc) == 0)
80 num += 2;
81 else
82 word >>= 2;
83
84 if ((word & 0x2) == 0)
85 num += 1;
86 return num;
87 }
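/*
 * Illustrative example (not in the original source): because f2fs bitmaps
 * treat the MSB of each byte as bit 0, __reverse_ffs() scans from the top
 * of the word down. On a 64-bit build:
 *
 *   __reverse_ffs(0x8000000000000000UL) == 0;   only "bit 0" is set
 *   __reverse_ffs(0x0000000000000001UL) == 63;  only "bit 63" is set
 */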
88
89 /*
90 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
91 * f2fs_set_bit makes MSB and LSB reversed in a byte.
92 * @size must be an integral multiple of the unsigned long width.
93 * Example:
94 * MSB <--> LSB
95 * f2fs_set_bit(0, bitmap) => 1000 0000
96 * f2fs_set_bit(7, bitmap) => 0000 0001
97 */
98 static unsigned long __find_rev_next_bit(const unsigned long *addr,
99 unsigned long size, unsigned long offset)
100 {
101 const unsigned long *p = addr + BIT_WORD(offset);
102 unsigned long result = size;
103 unsigned long tmp;
104
105 if (offset >= size)
106 return size;
107
108 size -= (offset & ~(BITS_PER_LONG - 1));
109 offset %= BITS_PER_LONG;
110
111 while (1) {
112 if (*p == 0)
113 goto pass;
114
115 tmp = __reverse_ulong((unsigned char *)p);
116
117 tmp &= ~0UL >> offset;
118 if (size < BITS_PER_LONG)
119 tmp &= (~0UL << (BITS_PER_LONG - size));
120 if (tmp)
121 goto found;
122 pass:
123 if (size <= BITS_PER_LONG)
124 break;
125 size -= BITS_PER_LONG;
126 offset = 0;
127 p++;
128 }
129 return result;
130 found:
131 return result - size + __reverse_ffs(tmp);
132 }
133
134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
135 unsigned long size, unsigned long offset)
136 {
137 const unsigned long *p = addr + BIT_WORD(offset);
138 unsigned long result = size;
139 unsigned long tmp;
140
141 if (offset >= size)
142 return size;
143
144 size -= (offset & ~(BITS_PER_LONG - 1));
145 offset %= BITS_PER_LONG;
146
147 while (1) {
148 if (*p == ~0UL)
149 goto pass;
150
151 tmp = __reverse_ulong((unsigned char *)p);
152
153 if (offset)
154 tmp |= ~0UL << (BITS_PER_LONG - offset);
155 if (size < BITS_PER_LONG)
156 tmp |= ~0UL >> size;
157 if (tmp != ~0UL)
158 goto found;
159 pass:
160 if (size <= BITS_PER_LONG)
161 break;
162 size -= BITS_PER_LONG;
163 offset = 0;
164 p++;
165 }
166 return result;
167 found:
168 return result - size + __reverse_ffz(tmp);
169 }
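/*
 * Illustrative sketch (not in the original source) of the two lookup
 * helpers above, using the reversed bit order. With one byte where only
 * f2fs bit 2 is set (0x20 in memory):
 *
 *   unsigned long map[1] = { 0 };
 *   f2fs_set_bit(2, (char *)map);
 *   __find_rev_next_bit(map, 8, 0)      == 2;  first set bit
 *   __find_rev_next_zero_bit(map, 8, 2) == 3;  first clear bit at/after 2
 */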
170
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
172 {
173 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
176
177 if (f2fs_lfs_mode(sbi))
178 return false;
179 if (sbi->gc_mode == GC_URGENT_HIGH)
180 return true;
181 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
182 return true;
183
184 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
186 }
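/*
 * Illustrative reading (hypothetical numbers, not in the original source):
 * with node_secs = 2, dent_secs = 1, imeta_secs = 0, min_ssr_sections = 2
 * and reserved_sections() = 10, SSR kicks in once free_sections(sbi)
 * drops to 2 + 2 * 1 + 0 + 2 + 10 = 16 or below.
 */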
187
188 void f2fs_abort_atomic_write(struct inode *inode, bool clean)
189 {
190 struct f2fs_inode_info *fi = F2FS_I(inode);
191
192 if (!f2fs_is_atomic_file(inode))
193 return;
194
195 if (clean)
196 truncate_inode_pages_final(inode->i_mapping);
197
198 release_atomic_write_cnt(inode);
199 clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
200 clear_inode_flag(inode, FI_ATOMIC_REPLACE);
201 clear_inode_flag(inode, FI_ATOMIC_FILE);
202 if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
203 clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
204 /*
205 * The vfs inode stays clean during commit, but the f2fs inode
206 * doesn't. So clear the dirty state after commit and let
207 * f2fs_mark_inode_dirty_sync ensure a consistent dirty state.
208 */
209 f2fs_inode_synced(inode);
210 f2fs_mark_inode_dirty_sync(inode, true);
211 }
212 stat_dec_atomic_inode(inode);
213
214 F2FS_I(inode)->atomic_write_task = NULL;
215
216 if (clean) {
217 f2fs_i_size_write(inode, fi->original_i_size);
218 fi->original_i_size = 0;
219 }
220 /* avoid stale dirty inode during eviction */
221 sync_inode_metadata(inode, 0);
222 }
223
224 static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
225 block_t new_addr, block_t *old_addr, bool recover)
226 {
227 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
228 struct dnode_of_data dn;
229 struct node_info ni;
230 int err;
231
232 retry:
233 set_new_dnode(&dn, inode, NULL, NULL, 0);
234 err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
235 if (err) {
236 if (err == -ENOMEM) {
237 memalloc_retry_wait(GFP_NOFS);
238 goto retry;
239 }
240 return err;
241 }
242
243 err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
244 if (err) {
245 f2fs_put_dnode(&dn);
246 return err;
247 }
248
249 if (recover) {
250 /* dn.data_blkaddr is always valid */
251 if (!__is_valid_data_blkaddr(new_addr)) {
252 if (new_addr == NULL_ADDR)
253 dec_valid_block_count(sbi, inode, 1);
254 f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1);
255 f2fs_update_data_blkaddr(&dn, new_addr);
256 } else {
257 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
258 new_addr, ni.version, true, true);
259 }
260 } else {
261 blkcnt_t count = 1;
262
263 err = inc_valid_block_count(sbi, inode, &count, true);
264 if (err) {
265 f2fs_put_dnode(&dn);
266 return err;
267 }
268
269 *old_addr = dn.data_blkaddr;
270 f2fs_truncate_data_blocks_range(&dn, 1);
271 dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
272
273 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
274 ni.version, true, false);
275 }
276
277 f2fs_put_dnode(&dn);
278
279 trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
280 index, old_addr ? *old_addr : 0, new_addr, recover);
281 return 0;
282 }
283
284 static void __complete_revoke_list(struct inode *inode, struct list_head *head,
285 bool revoke)
286 {
287 struct revoke_entry *cur, *tmp;
288 pgoff_t start_index = 0;
289 bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);
290
291 list_for_each_entry_safe(cur, tmp, head, list) {
292 if (revoke) {
293 __replace_atomic_write_block(inode, cur->index,
294 cur->old_addr, NULL, true);
295 } else if (truncate) {
296 f2fs_truncate_hole(inode, start_index, cur->index);
297 start_index = cur->index + 1;
298 }
299
300 list_del(&cur->list);
301 kmem_cache_free(revoke_entry_slab, cur);
302 }
303
304 if (!revoke && truncate)
305 f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
306 }
307
308 static int __f2fs_commit_atomic_write(struct inode *inode)
309 {
310 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
311 struct f2fs_inode_info *fi = F2FS_I(inode);
312 struct inode *cow_inode = fi->cow_inode;
313 struct revoke_entry *new;
314 struct list_head revoke_list;
315 block_t blkaddr;
316 struct dnode_of_data dn;
317 pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
318 pgoff_t off = 0, blen, index;
319 int ret = 0, i;
320
321 INIT_LIST_HEAD(&revoke_list);
322
323 while (len) {
324 blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);
325
326 set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
327 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
328 if (ret && ret != -ENOENT) {
329 goto out;
330 } else if (ret == -ENOENT) {
331 ret = 0;
332 if (dn.max_level == 0)
333 goto out;
334 goto next;
335 }
336
337 blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, cow_inode),
338 len);
339 index = off;
340 for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
341 blkaddr = f2fs_data_blkaddr(&dn);
342
343 if (!__is_valid_data_blkaddr(blkaddr)) {
344 continue;
345 } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
346 DATA_GENERIC_ENHANCE)) {
347 f2fs_put_dnode(&dn);
348 ret = -EFSCORRUPTED;
349 goto out;
350 }
351
352 new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
353 true, NULL);
354
355 ret = __replace_atomic_write_block(inode, index, blkaddr,
356 &new->old_addr, false);
357 if (ret) {
358 f2fs_put_dnode(&dn);
359 kmem_cache_free(revoke_entry_slab, new);
360 goto out;
361 }
362
363 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
364 new->index = index;
365 list_add_tail(&new->list, &revoke_list);
366 }
367 f2fs_put_dnode(&dn);
368 next:
369 off += blen;
370 len -= blen;
371 }
372
373 out:
374 if (time_to_inject(sbi, FAULT_ATOMIC_TIMEOUT))
375 f2fs_schedule_timeout_killable(DEFAULT_FAULT_TIMEOUT, true);
376
377 if (ret) {
378 sbi->revoked_atomic_block += fi->atomic_write_cnt;
379 } else {
380 sbi->committed_atomic_block += fi->atomic_write_cnt;
381 set_inode_flag(inode, FI_ATOMIC_COMMITTED);
382
383 /*
384 * the inode may have no FI_ATOMIC_DIRTIED flag if there was no
385 * write before commit.
386 */
387 if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
388 /* clear atomic dirty status and set vfs dirty status */
389 clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
390 f2fs_mark_inode_dirty_sync(inode, true);
391 }
392 }
393
394 __complete_revoke_list(inode, &revoke_list, ret ? true : false);
395
396 return ret;
397 }
398
399 int f2fs_commit_atomic_write(struct inode *inode)
400 {
401 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
402 struct f2fs_inode_info *fi = F2FS_I(inode);
403 struct f2fs_lock_context lc;
404 int err;
405
406 err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
407 if (err)
408 return err;
409
410 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
411 f2fs_lock_op(sbi, &lc);
412
413 err = __f2fs_commit_atomic_write(inode);
414
415 f2fs_unlock_op(sbi, &lc);
416 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
417
418 return err;
419 }
420
421 /*
422 * This function balances dirty node and dentry pages.
423 * In addition, it controls garbage collection.
424 */
425 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
426 {
427 if (f2fs_cp_error(sbi))
428 return;
429
430 if (time_to_inject(sbi, FAULT_CHECKPOINT))
431 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);
432
433 /* background balancing work may still be pending */
434 if (need && excess_cached_nats(sbi))
435 f2fs_balance_fs_bg(sbi, false);
436
437 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
438 return;
439
440 /*
441 * If there are too many dirty dir/node pages without enough free
442 * segments, we should do GC, which may end with a checkpoint.
443 */
444 if (has_enough_free_secs(sbi, 0, 0))
445 return;
446
447 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
448 sbi->gc_thread->f2fs_gc_task) {
449 DEFINE_WAIT(wait);
450
451 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
452 TASK_UNINTERRUPTIBLE);
453 wake_up(&sbi->gc_thread->gc_wait_queue_head);
454 io_schedule();
455 finish_wait(&sbi->gc_thread->fggc_wq, &wait);
456 } else {
457 struct f2fs_gc_control gc_control = {
458 .victim_segno = NULL_SEGNO,
459 .init_gc_type = f2fs_sb_has_blkzoned(sbi) ?
460 FG_GC : BG_GC,
461 .no_bg_gc = true,
462 .should_migrate_blocks = false,
463 .err_gc_skipped = false,
464 .nr_free_secs = 1 };
465 f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
466 stat_inc_gc_call_count(sbi, FOREGROUND);
467 f2fs_gc(sbi, &gc_control);
468 }
469 }
470
471 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
472 {
473 int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
474 unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
475 unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
476 unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
477 unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
478 unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
479 unsigned int threshold =
480 SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD));
481 unsigned int global_threshold = threshold * 3 / 2;
482
483 if (dents >= threshold || qdata >= threshold ||
484 nodes >= threshold || meta >= threshold ||
485 imeta >= threshold)
486 return true;
487 return dents + qdata + nodes + meta + imeta > global_threshold;
488 }
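/*
 * Illustrative reading (hypothetical values, not in the original source):
 * assuming 512 blocks per segment, DEFAULT_DIRTY_THRESHOLD = 64 and
 * cp_rwsem unlocked (factor = 2), threshold = 2 * 64 * 512 = 65536
 * dirty blocks per page type, and global_threshold = 98304 blocks for
 * the sum of all five types.
 */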
489
490 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
491 {
492 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
493 return;
494
495 /* try to shrink the read extent cache when there is not enough memory */
496 if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
497 f2fs_shrink_read_extent_tree(sbi,
498 READ_EXTENT_CACHE_SHRINK_NUMBER);
499
500 /* try to shrink the age extent cache when there is not enough memory */
501 if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
502 f2fs_shrink_age_extent_tree(sbi,
503 AGE_EXTENT_CACHE_SHRINK_NUMBER);
504
505 /* check the # of cached NAT entries */
506 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
507 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
508
509 if (!f2fs_available_free_memory(sbi, FREE_NIDS))
510 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
511 else
512 f2fs_build_free_nids(sbi, false, false);
513
514 if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
515 excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
516 goto do_sync;
517
518 /* there is in-flight background IO, or a recent foreground operation */
519 if (is_inflight_io(sbi, REQ_TIME) ||
520 (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
521 return;
522
523 /* the periodic checkpoint timeout threshold has been exceeded */
524 if (f2fs_time_over(sbi, CP_TIME))
525 goto do_sync;
526
527 /* checkpoint is the only way to shrink partial cached entries */
528 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
529 f2fs_available_free_memory(sbi, INO_ENTRIES))
530 return;
531
532 do_sync:
533 if (test_opt(sbi, DATA_FLUSH) && from_bg) {
534 struct blk_plug plug;
535
536 mutex_lock(&sbi->flush_lock);
537
538 blk_start_plug(&plug);
539 f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
540 blk_finish_plug(&plug);
541
542 mutex_unlock(&sbi->flush_lock);
543 }
544 stat_inc_cp_call_count(sbi, BACKGROUND);
545 f2fs_sync_fs(sbi->sb, 1);
546 }
547
548 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
549 struct block_device *bdev)
550 {
551 int ret = blkdev_issue_flush(bdev);
552
553 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
554 test_opt(sbi, FLUSH_MERGE), ret);
555 if (!ret)
556 f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
557 return ret;
558 }
559
560 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
561 {
562 int ret = 0;
563 int i;
564
565 if (!f2fs_is_multi_device(sbi))
566 return __submit_flush_wait(sbi, sbi->sb->s_bdev);
567
568 for (i = 0; i < sbi->s_ndevs; i++) {
569 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
570 continue;
571 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
572 if (ret)
573 break;
574 }
575 return ret;
576 }
577
578 static int issue_flush_thread(void *data)
579 {
580 struct f2fs_sb_info *sbi = data;
581 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
582 wait_queue_head_t *q = &fcc->flush_wait_queue;
583 repeat:
584 if (kthread_should_stop())
585 return 0;
586
587 if (!llist_empty(&fcc->issue_list)) {
588 struct flush_cmd *cmd, *next;
589 int ret;
590
591 fcc->dispatch_list = llist_del_all(&fcc->issue_list);
592 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
593
594 cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
595
596 ret = submit_flush_wait(sbi, cmd->ino);
597 atomic_inc(&fcc->issued_flush);
598
599 llist_for_each_entry_safe(cmd, next,
600 fcc->dispatch_list, llnode) {
601 cmd->ret = ret;
602 complete(&cmd->wait);
603 }
604 fcc->dispatch_list = NULL;
605 }
606
607 wait_event_interruptible(*q,
608 kthread_should_stop() || !llist_empty(&fcc->issue_list));
609 goto repeat;
610 }
611
612 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
613 {
614 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
615 struct flush_cmd cmd;
616 int ret;
617
618 if (test_opt(sbi, NOBARRIER))
619 return 0;
620
621 if (!test_opt(sbi, FLUSH_MERGE)) {
622 atomic_inc(&fcc->queued_flush);
623 ret = submit_flush_wait(sbi, ino);
624 atomic_dec(&fcc->queued_flush);
625 atomic_inc(&fcc->issued_flush);
626 return ret;
627 }
628
629 if (atomic_inc_return(&fcc->queued_flush) == 1 ||
630 f2fs_is_multi_device(sbi)) {
631 ret = submit_flush_wait(sbi, ino);
632 atomic_dec(&fcc->queued_flush);
633
634 atomic_inc(&fcc->issued_flush);
635 return ret;
636 }
637
638 cmd.ino = ino;
639 init_completion(&cmd.wait);
640
641 llist_add(&cmd.llnode, &fcc->issue_list);
642
643 /*
644 * update issue_list before we wake up the issue_flush thread; this
645 * smp_mb() pairs with another barrier in ___wait_event(), see
646 * more details in the comments of waitqueue_active().
647 */
648 smp_mb();
649
650 if (waitqueue_active(&fcc->flush_wait_queue))
651 wake_up(&fcc->flush_wait_queue);
652
653 if (fcc->f2fs_issue_flush) {
654 wait_for_completion(&cmd.wait);
655 atomic_dec(&fcc->queued_flush);
656 } else {
657 struct llist_node *list;
658
659 list = llist_del_all(&fcc->issue_list);
660 if (!list) {
661 wait_for_completion(&cmd.wait);
662 atomic_dec(&fcc->queued_flush);
663 } else {
664 struct flush_cmd *tmp, *next;
665
666 ret = submit_flush_wait(sbi, ino);
667
668 llist_for_each_entry_safe(tmp, next, list, llnode) {
669 if (tmp == &cmd) {
670 cmd.ret = ret;
671 atomic_dec(&fcc->queued_flush);
672 continue;
673 }
674 tmp->ret = ret;
675 complete(&tmp->wait);
676 }
677 }
678 }
679
680 return cmd.ret;
681 }
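/*
 * Illustrative scenario (not in the original source) of FLUSH_MERGE:
 * task A enters first (queued_flush becomes 1) and issues the flush
 * itself; tasks B and C arrive meanwhile, add their cmd to issue_list
 * and sleep on cmd.wait. The issue_flush thread drains the list, calls
 * submit_flush_wait() once, and completes B and C with the shared
 * result, so three callers cost at most two device flushes.
 */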
682
683 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
684 {
685 dev_t dev = sbi->sb->s_bdev->bd_dev;
686 struct flush_cmd_control *fcc;
687
688 if (SM_I(sbi)->fcc_info) {
689 fcc = SM_I(sbi)->fcc_info;
690 if (fcc->f2fs_issue_flush)
691 return 0;
692 goto init_thread;
693 }
694
695 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
696 if (!fcc)
697 return -ENOMEM;
698 atomic_set(&fcc->issued_flush, 0);
699 atomic_set(&fcc->queued_flush, 0);
700 init_waitqueue_head(&fcc->flush_wait_queue);
701 init_llist_head(&fcc->issue_list);
702 SM_I(sbi)->fcc_info = fcc;
703 if (!test_opt(sbi, FLUSH_MERGE))
704 return 0;
705
706 init_thread:
707 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
708 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
709 if (IS_ERR(fcc->f2fs_issue_flush)) {
710 int err = PTR_ERR(fcc->f2fs_issue_flush);
711
712 fcc->f2fs_issue_flush = NULL;
713 return err;
714 }
715
716 return 0;
717 }
718
719 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
720 {
721 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
722
723 if (fcc && fcc->f2fs_issue_flush) {
724 struct task_struct *flush_thread = fcc->f2fs_issue_flush;
725
726 fcc->f2fs_issue_flush = NULL;
727 kthread_stop(flush_thread);
728 }
729 if (free) {
730 kfree(fcc);
731 SM_I(sbi)->fcc_info = NULL;
732 }
733 }
734
735 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
736 {
737 int ret = 0, i;
738
739 if (!f2fs_is_multi_device(sbi))
740 return 0;
741
742 if (test_opt(sbi, NOBARRIER))
743 return 0;
744
745 for (i = 1; i < sbi->s_ndevs; i++) {
746 int count = DEFAULT_RETRY_IO_COUNT;
747
748 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
749 continue;
750
751 do {
752 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
753 if (ret)
754 f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
755 } while (ret && --count);
756
757 if (ret) {
758 f2fs_stop_checkpoint(sbi, false,
759 STOP_CP_REASON_FLUSH_FAIL);
760 break;
761 }
762
763 spin_lock(&sbi->dev_lock);
764 f2fs_clear_bit(i, (char *)&sbi->dirty_device);
765 spin_unlock(&sbi->dev_lock);
766 }
767
768 return ret;
769 }
770
771 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
772 enum dirty_type dirty_type)
773 {
774 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
775
776 /* need not be added */
777 if (is_curseg(sbi, segno))
778 return;
779
780 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
781 dirty_i->nr_dirty[dirty_type]++;
782
783 if (dirty_type == DIRTY) {
784 struct seg_entry *sentry = get_seg_entry(sbi, segno);
785 enum dirty_type t = sentry->type;
786
787 if (unlikely(t >= DIRTY)) {
788 f2fs_bug_on(sbi, 1);
789 return;
790 }
791 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
792 dirty_i->nr_dirty[t]++;
793
794 if (__is_large_section(sbi)) {
795 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
796 block_t valid_blocks =
797 get_valid_blocks(sbi, segno, true);
798
799 f2fs_bug_on(sbi,
800 (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
801 !valid_blocks) ||
802 valid_blocks == CAP_BLKS_PER_SEC(sbi));
803
804 if (!is_cursec(sbi, secno))
805 set_bit(secno, dirty_i->dirty_secmap);
806 }
807 }
808 }
809
810 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
811 enum dirty_type dirty_type)
812 {
813 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
814 block_t valid_blocks;
815
816 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
817 dirty_i->nr_dirty[dirty_type]--;
818
819 if (dirty_type == DIRTY) {
820 struct seg_entry *sentry = get_seg_entry(sbi, segno);
821 enum dirty_type t = sentry->type;
822
823 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
824 dirty_i->nr_dirty[t]--;
825
826 valid_blocks = get_valid_blocks(sbi, segno, true);
827 if (valid_blocks == 0) {
828 clear_bit(GET_SEC_FROM_SEG(sbi, segno),
829 dirty_i->victim_secmap);
830 #ifdef CONFIG_F2FS_CHECK_FS
831 clear_bit(segno, SIT_I(sbi)->invalid_segmap);
832 #endif
833 }
834 if (__is_large_section(sbi)) {
835 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
836
837 if (!valid_blocks ||
838 valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
839 clear_bit(secno, dirty_i->dirty_secmap);
840 return;
841 }
842
843 if (!is_cursec(sbi, secno))
844 set_bit(secno, dirty_i->dirty_secmap);
845 }
846 }
847 }
848
849 /*
850 * This must not fail with an error such as -ENOMEM, since adding a
851 * dirty entry to the seglist is not a critical operation.
852 * If a given segment is one of current working segments, it won't be added.
853 */
854 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
855 {
856 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
857 unsigned short valid_blocks, ckpt_valid_blocks;
858 unsigned int usable_blocks;
859
860 if (segno == NULL_SEGNO || is_curseg(sbi, segno))
861 return;
862
863 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
864 mutex_lock(&dirty_i->seglist_lock);
865
866 valid_blocks = get_valid_blocks(sbi, segno, false);
867 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
868
869 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
870 ckpt_valid_blocks == usable_blocks)) {
871 __locate_dirty_segment(sbi, segno, PRE);
872 __remove_dirty_segment(sbi, segno, DIRTY);
873 } else if (valid_blocks < usable_blocks) {
874 __locate_dirty_segment(sbi, segno, DIRTY);
875 } else {
876 /* Recovery routine with SSR needs this */
877 __remove_dirty_segment(sbi, segno, DIRTY);
878 }
879
880 mutex_unlock(&dirty_i->seglist_lock);
881 }
882
883 /* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */
884 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
885 {
886 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
887 unsigned int segno;
888
889 mutex_lock(&dirty_i->seglist_lock);
890 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
891 if (get_valid_blocks(sbi, segno, false))
892 continue;
893 if (is_curseg(sbi, segno))
894 continue;
895 __locate_dirty_segment(sbi, segno, PRE);
896 __remove_dirty_segment(sbi, segno, DIRTY);
897 }
898 mutex_unlock(&dirty_i->seglist_lock);
899 }
900
901 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
902 {
903 int ovp_hole_segs =
904 (overprovision_segments(sbi) - reserved_segments(sbi));
905 block_t ovp_holes = SEGS_TO_BLKS(sbi, ovp_hole_segs);
906 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
907 block_t holes[2] = {0, 0}; /* DATA and NODE */
908 block_t unusable;
909 struct seg_entry *se;
910 unsigned int segno;
911
912 mutex_lock(&dirty_i->seglist_lock);
913 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
914 se = get_seg_entry(sbi, segno);
915 if (IS_NODESEG(se->type))
916 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
917 se->valid_blocks;
918 else
919 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
920 se->valid_blocks;
921 }
922 mutex_unlock(&dirty_i->seglist_lock);
923
924 unusable = max(holes[DATA], holes[NODE]);
925 if (unusable > ovp_holes)
926 return unusable - ovp_holes;
927 return 0;
928 }
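/*
 * Illustrative reading (hypothetical numbers, not in the original source):
 * if dirty data segments leave 5000 blocks of holes and dirty node
 * segments leave 3000, while the overprovision area minus reserved
 * segments covers 4096 blocks, the unusable space reported is
 * max(5000, 3000) - 4096 = 904 blocks.
 */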
929
930 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
931 {
932 int ovp_hole_segs =
933 (overprovision_segments(sbi) - reserved_segments(sbi));
934
935 if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
936 return 0;
937 if (unusable > F2FS_OPTION(sbi).unusable_cap)
938 return -EAGAIN;
939 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
940 dirty_segments(sbi) > ovp_hole_segs)
941 return -EAGAIN;
942 if (has_not_enough_free_secs(sbi, 0, 0))
943 return -EAGAIN;
944 return 0;
945 }
946
947 /* This is only used by SBI_CP_DISABLED */
948 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
949 {
950 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
951 unsigned int segno = 0;
952
953 mutex_lock(&dirty_i->seglist_lock);
954 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
955 if (get_valid_blocks(sbi, segno, false))
956 continue;
957 if (get_ckpt_valid_blocks(sbi, segno, false))
958 continue;
959 mutex_unlock(&dirty_i->seglist_lock);
960 return segno;
961 }
962 mutex_unlock(&dirty_i->seglist_lock);
963 return NULL_SEGNO;
964 }
965
966 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
967 struct block_device *bdev, block_t lstart,
968 block_t start, block_t len)
969 {
970 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
971 struct list_head *pend_list;
972 struct discard_cmd *dc;
973
974 f2fs_bug_on(sbi, !len);
975
976 pend_list = &dcc->pend_list[plist_idx(len)];
977
978 dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
979 INIT_LIST_HEAD(&dc->list);
980 dc->bdev = bdev;
981 dc->di.lstart = lstart;
982 dc->di.start = start;
983 dc->di.len = len;
984 dc->ref = 0;
985 dc->state = D_PREP;
986 dc->queued = 0;
987 dc->error = 0;
988 init_completion(&dc->wait);
989 list_add_tail(&dc->list, pend_list);
990 spin_lock_init(&dc->lock);
991 dc->bio_ref = 0;
992 atomic_inc(&dcc->discard_cmd_cnt);
993 dcc->undiscard_blks += len;
994
995 return dc;
996 }
997
998 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
999 {
1000 #ifdef CONFIG_F2FS_CHECK_FS
1001 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1002 struct rb_node *cur = rb_first_cached(&dcc->root), *next;
1003 struct discard_cmd *cur_dc, *next_dc;
1004
1005 while (cur) {
1006 next = rb_next(cur);
1007 if (!next)
1008 return true;
1009
1010 cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
1011 next_dc = rb_entry(next, struct discard_cmd, rb_node);
1012
1013 if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
1014 f2fs_info(sbi, "broken discard_rbtree, "
1015 "cur(%u, %u) next(%u, %u)",
1016 cur_dc->di.lstart, cur_dc->di.len,
1017 next_dc->di.lstart, next_dc->di.len);
1018 return false;
1019 }
1020 cur = next;
1021 }
1022 #endif
1023 return true;
1024 }
1025
1026 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
1027 block_t blkaddr)
1028 {
1029 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1030 struct rb_node *node = dcc->root.rb_root.rb_node;
1031 struct discard_cmd *dc;
1032
1033 while (node) {
1034 dc = rb_entry(node, struct discard_cmd, rb_node);
1035
1036 if (blkaddr < dc->di.lstart)
1037 node = node->rb_left;
1038 else if (blkaddr >= dc->di.lstart + dc->di.len)
1039 node = node->rb_right;
1040 else
1041 return dc;
1042 }
1043 return NULL;
1044 }
1045
1046 static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
1047 block_t blkaddr,
1048 struct discard_cmd **prev_entry,
1049 struct discard_cmd **next_entry,
1050 struct rb_node ***insert_p,
1051 struct rb_node **insert_parent)
1052 {
1053 struct rb_node **pnode = &root->rb_root.rb_node;
1054 struct rb_node *parent = NULL, *tmp_node;
1055 struct discard_cmd *dc;
1056
1057 *insert_p = NULL;
1058 *insert_parent = NULL;
1059 *prev_entry = NULL;
1060 *next_entry = NULL;
1061
1062 if (RB_EMPTY_ROOT(&root->rb_root))
1063 return NULL;
1064
1065 while (*pnode) {
1066 parent = *pnode;
1067 dc = rb_entry(*pnode, struct discard_cmd, rb_node);
1068
1069 if (blkaddr < dc->di.lstart)
1070 pnode = &(*pnode)->rb_left;
1071 else if (blkaddr >= dc->di.lstart + dc->di.len)
1072 pnode = &(*pnode)->rb_right;
1073 else
1074 goto lookup_neighbors;
1075 }
1076
1077 *insert_p = pnode;
1078 *insert_parent = parent;
1079
1080 dc = rb_entry(parent, struct discard_cmd, rb_node);
1081 tmp_node = parent;
1082 if (parent && blkaddr > dc->di.lstart)
1083 tmp_node = rb_next(parent);
1084 *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1085
1086 tmp_node = parent;
1087 if (parent && blkaddr < dc->di.lstart)
1088 tmp_node = rb_prev(parent);
1089 *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1090 return NULL;
1091
1092 lookup_neighbors:
1093 /* lookup prev node for merging backward later */
1094 tmp_node = rb_prev(&dc->rb_node);
1095 *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1096
1097 /* lookup next node for merging frontward later */
1098 tmp_node = rb_next(&dc->rb_node);
1099 *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
1100 return dc;
1101 }
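/*
 * Illustrative example (not in the original source): with discard ranges
 * [100, 110) and [200, 210) in the tree, looking up blkaddr 205 returns
 * the [200, 210) command with prev_entry = [100, 110); looking up 150
 * returns NULL and instead reports prev_entry = [100, 110),
 * next_entry = [200, 210), plus the rb-tree slot where a new command
 * covering 150 could be linked.
 */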
1102
1103 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
1104 struct discard_cmd *dc)
1105 {
1106 if (dc->state == D_DONE)
1107 atomic_sub(dc->queued, &dcc->queued_discard);
1108
1109 list_del(&dc->list);
1110 rb_erase_cached(&dc->rb_node, &dcc->root);
1111 dcc->undiscard_blks -= dc->di.len;
1112
1113 kmem_cache_free(discard_cmd_slab, dc);
1114
1115 atomic_dec(&dcc->discard_cmd_cnt);
1116 }
1117
1118 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1119 struct discard_cmd *dc)
1120 {
1121 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1122 unsigned long flags;
1123
1124 trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);
1125
1126 spin_lock_irqsave(&dc->lock, flags);
1127 if (dc->bio_ref) {
1128 spin_unlock_irqrestore(&dc->lock, flags);
1129 return;
1130 }
1131 spin_unlock_irqrestore(&dc->lock, flags);
1132
1133 f2fs_bug_on(sbi, dc->ref);
1134
1135 if (dc->error == -EOPNOTSUPP)
1136 dc->error = 0;
1137
1138 if (dc->error)
1139 f2fs_info_ratelimited(sbi,
1140 "Issue discard(%u, %u, %u) failed, ret: %d",
1141 dc->di.lstart, dc->di.start, dc->di.len, dc->error);
1142 __detach_discard_cmd(dcc, dc);
1143 }
1144
1145 static void f2fs_submit_discard_endio(struct bio *bio)
1146 {
1147 struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1148 unsigned long flags;
1149
1150 spin_lock_irqsave(&dc->lock, flags);
1151 if (!dc->error)
1152 dc->error = blk_status_to_errno(bio->bi_status);
1153 dc->bio_ref--;
1154 if (!dc->bio_ref && dc->state == D_SUBMIT) {
1155 dc->state = D_DONE;
1156 complete_all(&dc->wait);
1157 }
1158 spin_unlock_irqrestore(&dc->lock, flags);
1159 bio_put(bio);
1160 }
1161
1162 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1163 block_t start, block_t end)
1164 {
1165 #ifdef CONFIG_F2FS_CHECK_FS
1166 struct seg_entry *sentry;
1167 unsigned int segno;
1168 block_t blk = start;
1169 unsigned long offset, size, *map;
1170
1171 while (blk < end) {
1172 segno = GET_SEGNO(sbi, blk);
1173 sentry = get_seg_entry(sbi, segno);
1174 offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1175
1176 if (end < START_BLOCK(sbi, segno + 1))
1177 size = GET_BLKOFF_FROM_SEG0(sbi, end);
1178 else
1179 size = BLKS_PER_SEG(sbi);
1180 map = (unsigned long *)(sentry->cur_valid_map);
1181 offset = __find_rev_next_bit(map, size, offset);
1182 f2fs_bug_on(sbi, offset != size);
1183 blk = START_BLOCK(sbi, segno + 1);
1184 }
1185 #endif
1186 }
1187
1188 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1189 struct discard_policy *dpolicy,
1190 int discard_type, unsigned int granularity)
1191 {
1192 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1193
1194 /* common policy */
1195 dpolicy->type = discard_type;
1196 dpolicy->sync = true;
1197 dpolicy->ordered = false;
1198 dpolicy->granularity = granularity;
1199
1200 dpolicy->max_requests = dcc->max_discard_request;
1201 dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
1202 dpolicy->timeout = false;
1203
1204 if (discard_type == DPOLICY_BG) {
1205 dpolicy->min_interval = dcc->min_discard_issue_time;
1206 dpolicy->mid_interval = dcc->mid_discard_issue_time;
1207 dpolicy->max_interval = dcc->max_discard_issue_time;
1208 if (dcc->discard_io_aware == DPOLICY_IO_AWARE_ENABLE)
1209 dpolicy->io_aware = true;
1210 else if (dcc->discard_io_aware == DPOLICY_IO_AWARE_DISABLE)
1211 dpolicy->io_aware = false;
1212 dpolicy->sync = false;
1213 dpolicy->ordered = true;
1214 if (utilization(sbi) > dcc->discard_urgent_util) {
1215 dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1216 if (atomic_read(&dcc->discard_cmd_cnt))
1217 dpolicy->max_interval =
1218 dcc->min_discard_issue_time;
1219 }
1220 } else if (discard_type == DPOLICY_FORCE) {
1221 dpolicy->min_interval = dcc->min_discard_issue_time;
1222 dpolicy->mid_interval = dcc->mid_discard_issue_time;
1223 dpolicy->max_interval = dcc->max_discard_issue_time;
1224 dpolicy->io_aware = false;
1225 } else if (discard_type == DPOLICY_FSTRIM) {
1226 dpolicy->io_aware = false;
1227 } else if (discard_type == DPOLICY_UMOUNT) {
1228 dpolicy->io_aware = false;
1229 /* we need to issue all discards to keep CP_TRIMMED_FLAG */
1230 dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1231 dpolicy->timeout = true;
1232 }
1233 }
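/*
 * Illustrative summary (not in the original source) of the resulting
 * policies: DPOLICY_BG issues asynchronously in LBA order and may back
 * off when the device is busy; DPOLICY_FORCE keeps the BG intervals but
 * ignores device idleness; DPOLICY_FSTRIM issues synchronously for
 * FITRIM; DPOLICY_UMOUNT issues everything at the finest granularity,
 * under a timeout, so CP_TRIMMED_FLAG can be kept.
 */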
1234
1235 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1236 struct block_device *bdev, block_t lstart,
1237 block_t start, block_t len);
1238
1239 #ifdef CONFIG_BLK_DEV_ZONED
1240 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
1241 struct discard_cmd *dc, blk_opf_t flag,
1242 struct list_head *wait_list,
1243 unsigned int *issued)
1244 {
1245 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1246 struct block_device *bdev = dc->bdev;
1247 struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
1248 unsigned long flags;
1249
1250 trace_f2fs_issue_reset_zone(bdev, dc->di.start);
1251
1252 spin_lock_irqsave(&dc->lock, flags);
1253 dc->state = D_SUBMIT;
1254 dc->bio_ref++;
1255 spin_unlock_irqrestore(&dc->lock, flags);
1256
1257 if (issued)
1258 (*issued)++;
1259
1260 atomic_inc(&dcc->queued_discard);
1261 dc->queued++;
1262 list_move_tail(&dc->list, wait_list);
1263
1264 /* sanity check on discard range */
1265 __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);
1266
1267 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
1268 bio->bi_private = dc;
1269 bio->bi_end_io = f2fs_submit_discard_endio;
1270 submit_bio(bio);
1271
1272 atomic_inc(&dcc->issued_discard);
1273 f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
1274 }
1275 #endif
1276
1277 /* this function is copied from blkdev_issue_discard in block/blk-lib.c */
1278 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1279 struct discard_policy *dpolicy,
1280 struct discard_cmd *dc, int *issued)
1281 {
1282 struct block_device *bdev = dc->bdev;
1283 unsigned int max_discard_blocks =
1284 SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1285 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1286 struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1287 &(dcc->fstrim_list) : &(dcc->wait_list);
1288 blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
1289 block_t lstart, start, len, total_len;
1290
1291 if (dc->state != D_PREP)
1292 return 0;
1293
1294 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1295 return 0;
1296
1297 #ifdef CONFIG_BLK_DEV_ZONED
1298 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
1299 int devi = f2fs_bdev_index(sbi, bdev);
1300
1301 if (devi < 0)
1302 return -EINVAL;
1303
1304 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1305 __submit_zone_reset_cmd(sbi, dc, flag,
1306 wait_list, issued);
1307 return 0;
1308 }
1309 }
1310 #endif
1311
1312 /*
1313 * stop issuing discard in any of the cases below:
1314 * 1. the device is a conventional zone, but it doesn't support discard.
1315 * 2. the device is a regular device that no longer supports discard
1316 * after a snapshot.
1317 */
1318 if (!bdev_max_discard_sectors(bdev))
1319 return -EOPNOTSUPP;
1320
1321 trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);
1322
1323 lstart = dc->di.lstart;
1324 start = dc->di.start;
1325 len = dc->di.len;
1326 total_len = len;
1327
1328 dc->di.len = 0;
1329
1330 while (total_len && *issued < dpolicy->max_requests) {
1331 struct bio *bio = NULL;
1332 unsigned long flags;
1333 bool last = true;
1334
1335 if (len > max_discard_blocks) {
1336 len = max_discard_blocks;
1337 last = false;
1338 }
1339
1340 (*issued)++;
1341 if (*issued == dpolicy->max_requests)
1342 last = true;
1343
1344 dc->di.len += len;
1345
1346 __blkdev_issue_discard(bdev, SECTOR_FROM_BLOCK(start),
1347 SECTOR_FROM_BLOCK(len), GFP_NOFS, &bio);
1348 f2fs_bug_on(sbi, !bio);
1349
1350 /*
1351 * must be set before submission to avoid the command
1352 * going D_DONE right away
1353 */
1354 spin_lock_irqsave(&dc->lock, flags);
1355 if (last)
1356 dc->state = D_SUBMIT;
1357 else
1358 dc->state = D_PARTIAL;
1359 dc->bio_ref++;
1360 spin_unlock_irqrestore(&dc->lock, flags);
1361
1362 atomic_inc(&dcc->queued_discard);
1363 dc->queued++;
1364 list_move_tail(&dc->list, wait_list);
1365
1366 /* sanity check on discard range */
1367 __check_sit_bitmap(sbi, lstart, lstart + len);
1368
1369 bio->bi_private = dc;
1370 bio->bi_end_io = f2fs_submit_discard_endio;
1371 bio->bi_opf |= flag;
1372 submit_bio(bio);
1373
1374 atomic_inc(&dcc->issued_discard);
1375
1376 f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);
1377
1378 lstart += len;
1379 start += len;
1380 total_len -= len;
1381 len = total_len;
1382 }
1383
1384 if (len) {
1385 dcc->undiscard_blks -= len;
1386 __update_discard_tree_range(sbi, bdev, lstart, start, len);
1387 }
1388 return 0;
1389 }
1390
1391 static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
1392 struct block_device *bdev, block_t lstart,
1393 block_t start, block_t len)
1394 {
1395 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1396 struct rb_node **p = &dcc->root.rb_root.rb_node;
1397 struct rb_node *parent = NULL;
1398 struct discard_cmd *dc;
1399 bool leftmost = true;
1400
1401 /* look up rb tree to find parent node */
1402 while (*p) {
1403 parent = *p;
1404 dc = rb_entry(parent, struct discard_cmd, rb_node);
1405
1406 if (lstart < dc->di.lstart) {
1407 p = &(*p)->rb_left;
1408 } else if (lstart >= dc->di.lstart + dc->di.len) {
1409 p = &(*p)->rb_right;
1410 leftmost = false;
1411 } else {
1412 /* the range already exists, so skip adding it */
1413 return;
1414 }
1415 }
1416
1417 dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1418
1419 rb_link_node(&dc->rb_node, parent, p);
1420 rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1421 }
1422
1423 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1424 struct discard_cmd *dc)
1425 {
1426 list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
1427 }
1428
1429 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1430 struct discard_cmd *dc, block_t blkaddr)
1431 {
1432 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1433 struct discard_info di = dc->di;
1434 bool modified = false;
1435
1436 if (dc->state == D_DONE || dc->di.len == 1) {
1437 __remove_discard_cmd(sbi, dc);
1438 return;
1439 }
1440
1441 dcc->undiscard_blks -= di.len;
1442
1443 if (blkaddr > di.lstart) {
1444 dc->di.len = blkaddr - dc->di.lstart;
1445 dcc->undiscard_blks += dc->di.len;
1446 __relocate_discard_cmd(dcc, dc);
1447 modified = true;
1448 }
1449
1450 if (blkaddr < di.lstart + di.len - 1) {
1451 if (modified) {
1452 __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
1453 di.start + blkaddr + 1 - di.lstart,
1454 di.lstart + di.len - 1 - blkaddr);
1455 } else {
1456 dc->di.lstart++;
1457 dc->di.len--;
1458 dc->di.start++;
1459 dcc->undiscard_blks += dc->di.len;
1460 __relocate_discard_cmd(dcc, dc);
1461 }
1462 }
1463 }
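/*
 * Illustrative example (not in the original source): punching blkaddr 105
 * out of a D_PREP command covering lstart [100, 110) shrinks it to
 * [100, 105) and inserts a new command for [106, 110), leaving only
 * block 105 undiscarded; if the command is already D_DONE or one block
 * long, it is simply removed.
 */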
1464
1465 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1466 struct block_device *bdev, block_t lstart,
1467 block_t start, block_t len)
1468 {
1469 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1470 struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1471 struct discard_cmd *dc;
1472 struct discard_info di = {0};
1473 struct rb_node **insert_p = NULL, *insert_parent = NULL;
1474 unsigned int max_discard_blocks =
1475 SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1476 block_t end = lstart + len;
1477
1478 dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
1479 &prev_dc, &next_dc, &insert_p, &insert_parent);
1480 if (dc)
1481 prev_dc = dc;
1482
1483 if (!prev_dc) {
1484 di.lstart = lstart;
1485 di.len = next_dc ? next_dc->di.lstart - lstart : len;
1486 di.len = min(di.len, len);
1487 di.start = start;
1488 }
1489
1490 while (1) {
1491 struct rb_node *node;
1492 bool merged = false;
1493 struct discard_cmd *tdc = NULL;
1494
1495 if (prev_dc) {
1496 di.lstart = prev_dc->di.lstart + prev_dc->di.len;
1497 if (di.lstart < lstart)
1498 di.lstart = lstart;
1499 if (di.lstart >= end)
1500 break;
1501
1502 if (!next_dc || next_dc->di.lstart > end)
1503 di.len = end - di.lstart;
1504 else
1505 di.len = next_dc->di.lstart - di.lstart;
1506 di.start = start + di.lstart - lstart;
1507 }
1508
1509 if (!di.len)
1510 goto next;
1511
1512 if (prev_dc && prev_dc->state == D_PREP &&
1513 prev_dc->bdev == bdev &&
1514 __is_discard_back_mergeable(&di, &prev_dc->di,
1515 max_discard_blocks)) {
1516 prev_dc->di.len += di.len;
1517 dcc->undiscard_blks += di.len;
1518 __relocate_discard_cmd(dcc, prev_dc);
1519 di = prev_dc->di;
1520 tdc = prev_dc;
1521 merged = true;
1522 }
1523
1524 if (next_dc && next_dc->state == D_PREP &&
1525 next_dc->bdev == bdev &&
1526 __is_discard_front_mergeable(&di, &next_dc->di,
1527 max_discard_blocks)) {
1528 next_dc->di.lstart = di.lstart;
1529 next_dc->di.len += di.len;
1530 next_dc->di.start = di.start;
1531 dcc->undiscard_blks += di.len;
1532 __relocate_discard_cmd(dcc, next_dc);
1533 if (tdc)
1534 __remove_discard_cmd(sbi, tdc);
1535 merged = true;
1536 }
1537
1538 if (!merged)
1539 __insert_discard_cmd(sbi, bdev,
1540 di.lstart, di.start, di.len);
1541 next:
1542 prev_dc = next_dc;
1543 if (!prev_dc)
1544 break;
1545
1546 node = rb_next(&prev_dc->rb_node);
1547 next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1548 }
1549 }
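/*
 * Illustrative example (not in the original source): queueing [110, 120)
 * while D_PREP commands [100, 110) and [120, 130) sit on the same bdev
 * back-merges the new range into the former, then front-merges with the
 * latter and drops the now-redundant node, yielding a single [100, 130)
 * command, provided the result stays within max_discard_blocks.
 */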
1550
1551 #ifdef CONFIG_BLK_DEV_ZONED
1552 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
1553 struct block_device *bdev, block_t blkstart, block_t lblkstart,
1554 block_t blklen)
1555 {
1556 trace_f2fs_queue_reset_zone(bdev, blkstart);
1557
1558 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1559 __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
1560 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1561 }
1562 #endif
1563
1564 static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
1565 struct block_device *bdev, block_t blkstart, block_t blklen)
1566 {
1567 block_t lblkstart = blkstart;
1568
1569 if (!f2fs_bdev_support_discard(bdev))
1570 return;
1571
1572 trace_f2fs_queue_discard(bdev, blkstart, blklen);
1573
1574 if (f2fs_is_multi_device(sbi)) {
1575 int devi = f2fs_target_device_index(sbi, blkstart);
1576
1577 blkstart -= FDEV(devi).start_blk;
1578 }
1579 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1580 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1581 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1582 }
1583
1584 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1585 struct discard_policy *dpolicy, int *issued)
1586 {
1587 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1588 struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1589 struct rb_node **insert_p = NULL, *insert_parent = NULL;
1590 struct discard_cmd *dc;
1591 struct blk_plug plug;
1592 bool io_interrupted = false;
1593
1594 mutex_lock(&dcc->cmd_lock);
1595 dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
1596 &prev_dc, &next_dc, &insert_p, &insert_parent);
1597 if (!dc)
1598 dc = next_dc;
1599
1600 blk_start_plug(&plug);
1601
1602 while (dc) {
1603 struct rb_node *node;
1604 int err = 0;
1605
1606 if (dc->state != D_PREP)
1607 goto next;
1608
1609 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1610 io_interrupted = true;
1611 break;
1612 }
1613
1614 dcc->next_pos = dc->di.lstart + dc->di.len;
1615 err = __submit_discard_cmd(sbi, dpolicy, dc, issued);
1616
1617 if (*issued >= dpolicy->max_requests)
1618 break;
1619 next:
1620 node = rb_next(&dc->rb_node);
1621 if (err)
1622 __remove_discard_cmd(sbi, dc);
1623 dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1624 }
1625
1626 blk_finish_plug(&plug);
1627
1628 if (!dc)
1629 dcc->next_pos = 0;
1630
1631 mutex_unlock(&dcc->cmd_lock);
1632
1633 if (!(*issued) && io_interrupted)
1634 *issued = -1;
1635 }
1636 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1637 struct discard_policy *dpolicy);
1638
1639 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1640 struct discard_policy *dpolicy)
1641 {
1642 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1643 struct list_head *pend_list;
1644 struct discard_cmd *dc, *tmp;
1645 struct blk_plug plug;
1646 int i, issued;
1647 bool io_interrupted = false;
1648
1649 if (dpolicy->timeout)
1650 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1651
1652 retry:
1653 issued = 0;
1654 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1655 if (dpolicy->timeout &&
1656 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1657 break;
1658
1659 if (i + 1 < dpolicy->granularity)
1660 break;
1661
1662 if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
1663 __issue_discard_cmd_orderly(sbi, dpolicy, &issued);
1664 return issued;
1665 }
1666
1667 pend_list = &dcc->pend_list[i];
1668
1669 mutex_lock(&dcc->cmd_lock);
1670 if (list_empty(pend_list))
1671 goto next;
1672 if (unlikely(dcc->rbtree_check))
1673 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
1674 blk_start_plug(&plug);
1675 list_for_each_entry_safe(dc, tmp, pend_list, list) {
1676 f2fs_bug_on(sbi, dc->state != D_PREP);
1677
1678 if (dpolicy->timeout &&
1679 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1680 break;
1681
1682 if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1683 !is_idle(sbi, DISCARD_TIME)) {
1684 io_interrupted = true;
1685 break;
1686 }
1687
1688 __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1689
1690 if (issued >= dpolicy->max_requests)
1691 break;
1692 }
1693 blk_finish_plug(&plug);
1694 next:
1695 mutex_unlock(&dcc->cmd_lock);
1696
1697 if (issued >= dpolicy->max_requests || io_interrupted)
1698 break;
1699 }
1700
1701 if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1702 __wait_all_discard_cmd(sbi, dpolicy);
1703 goto retry;
1704 }
1705
1706 if (!issued && io_interrupted)
1707 issued = -1;
1708
1709 return issued;
1710 }
1711
1712 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1713 {
1714 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1715 struct list_head *pend_list;
1716 struct discard_cmd *dc, *tmp;
1717 int i;
1718 bool dropped = false;
1719
1720 mutex_lock(&dcc->cmd_lock);
1721 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1722 pend_list = &dcc->pend_list[i];
1723 list_for_each_entry_safe(dc, tmp, pend_list, list) {
1724 f2fs_bug_on(sbi, dc->state != D_PREP);
1725 __remove_discard_cmd(sbi, dc);
1726 dropped = true;
1727 }
1728 }
1729 mutex_unlock(&dcc->cmd_lock);
1730
1731 return dropped;
1732 }
1733
1734 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1735 {
1736 __drop_discard_cmd(sbi);
1737 }
1738
1739 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1740 struct discard_cmd *dc)
1741 {
1742 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1743 unsigned int len = 0;
1744
1745 wait_for_completion_io(&dc->wait);
1746 mutex_lock(&dcc->cmd_lock);
1747 f2fs_bug_on(sbi, dc->state != D_DONE);
1748 dc->ref--;
1749 if (!dc->ref) {
1750 if (!dc->error)
1751 len = dc->di.len;
1752 __remove_discard_cmd(sbi, dc);
1753 }
1754 mutex_unlock(&dcc->cmd_lock);
1755
1756 return len;
1757 }
1758
1759 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1760 struct discard_policy *dpolicy,
1761 block_t start, block_t end)
1762 {
1763 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1764 struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1765 &(dcc->fstrim_list) : &(dcc->wait_list);
1766 struct discard_cmd *dc = NULL, *iter, *tmp;
1767 unsigned int trimmed = 0;
1768
1769 next:
1770 dc = NULL;
1771
1772 mutex_lock(&dcc->cmd_lock);
1773 list_for_each_entry_safe(iter, tmp, wait_list, list) {
1774 if (iter->di.lstart + iter->di.len <= start ||
1775 end <= iter->di.lstart)
1776 continue;
1777 if (iter->di.len < dpolicy->granularity)
1778 continue;
1779 if (iter->state == D_DONE && !iter->ref) {
1780 wait_for_completion_io(&iter->wait);
1781 if (!iter->error)
1782 trimmed += iter->di.len;
1783 __remove_discard_cmd(sbi, iter);
1784 } else {
1785 iter->ref++;
1786 dc = iter;
1787 break;
1788 }
1789 }
1790 mutex_unlock(&dcc->cmd_lock);
1791
1792 if (dc) {
1793 trimmed += __wait_one_discard_bio(sbi, dc);
1794 goto next;
1795 }
1796
1797 return trimmed;
1798 }
1799
1800 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1801 struct discard_policy *dpolicy)
1802 {
1803 struct discard_policy dp;
1804 unsigned int discard_blks;
1805
1806 if (dpolicy)
1807 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1808
1809 /* wait all */
1810 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
1811 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1812 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
1813 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1814
1815 return discard_blks;
1816 }
1817
1818 /* This should be covered by the global mutex, &sit_i->sentry_lock */
1819 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1820 {
1821 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1822 struct discard_cmd *dc;
1823 bool need_wait = false;
1824
1825 mutex_lock(&dcc->cmd_lock);
1826 dc = __lookup_discard_cmd(sbi, blkaddr);
1827 #ifdef CONFIG_BLK_DEV_ZONED
1828 if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
1829 int devi = f2fs_bdev_index(sbi, dc->bdev);
1830
1831 if (devi < 0) {
1832 mutex_unlock(&dcc->cmd_lock);
1833 return;
1834 }
1835
1836 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1837 /* force submit zone reset */
1838 if (dc->state == D_PREP)
1839 __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
1840 &dcc->wait_list, NULL);
1841 dc->ref++;
1842 mutex_unlock(&dcc->cmd_lock);
1843 /* wait zone reset */
1844 __wait_one_discard_bio(sbi, dc);
1845 return;
1846 }
1847 }
1848 #endif
1849 if (dc) {
1850 if (dc->state == D_PREP) {
1851 __punch_discard_cmd(sbi, dc, blkaddr);
1852 } else {
1853 dc->ref++;
1854 need_wait = true;
1855 }
1856 }
1857 mutex_unlock(&dcc->cmd_lock);
1858
1859 if (need_wait)
1860 __wait_one_discard_bio(sbi, dc);
1861 }
1862
1863 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1864 {
1865 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1866
1867 if (dcc && dcc->f2fs_issue_discard) {
1868 struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1869
1870 dcc->f2fs_issue_discard = NULL;
1871 kthread_stop(discard_thread);
1872 }
1873 }
1874
1875 /**
1876 * f2fs_issue_discard_timeout() - Issue all discard commands within UMOUNT_DISCARD_TIMEOUT
1877 * @sbi: the f2fs_sb_info data for the discard commands to issue
1878 *
1879 * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands are dropped.
1880 *
1881 * Return: true if all discard commands were issued or none needed issuing; false otherwise.
1882 */
1883 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1884 {
1885 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1886 struct discard_policy dpolicy;
1887 bool dropped;
1888
1889 if (!atomic_read(&dcc->discard_cmd_cnt))
1890 return true;
1891
1892 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1893 dcc->discard_granularity);
1894 __issue_discard_cmd(sbi, &dpolicy);
1895 dropped = __drop_discard_cmd(sbi);
1896
1897 /* just to make sure there are no pending discard commands */
1898 __wait_all_discard_cmd(sbi, NULL);
1899
1900 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1901 return !dropped;
1902 }
1903
1904 static int issue_discard_thread(void *data)
1905 {
1906 struct f2fs_sb_info *sbi = data;
1907 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1908 wait_queue_head_t *q = &dcc->discard_wait_queue;
1909 struct discard_policy dpolicy;
1910 unsigned int wait_ms = dcc->min_discard_issue_time;
1911 int issued;
1912
1913 set_freezable();
1914
1915 do {
1916 wait_event_freezable_timeout(*q,
1917 kthread_should_stop() || dcc->discard_wake,
1918 msecs_to_jiffies(wait_ms));
1919
1920 if (sbi->gc_mode == GC_URGENT_HIGH ||
1921 !f2fs_available_free_memory(sbi, DISCARD_CACHE))
1922 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
1923 MIN_DISCARD_GRANULARITY);
1924 else
1925 __init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1926 dcc->discard_granularity);
1927
1928 if (dcc->discard_wake)
1929 dcc->discard_wake = false;
1930
1931 /* clean up pending candidates before going to sleep */
1932 if (atomic_read(&dcc->queued_discard))
1933 __wait_all_discard_cmd(sbi, NULL);
1934
1935 if (f2fs_readonly(sbi->sb))
1936 continue;
1937 if (kthread_should_stop())
1938 return 0;
1939 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1940 !atomic_read(&dcc->discard_cmd_cnt)) {
1941 wait_ms = dpolicy.max_interval;
1942 continue;
1943 }
1944
1945 sb_start_intwrite(sbi->sb);
1946
1947 issued = __issue_discard_cmd(sbi, &dpolicy);
1948 if (issued > 0) {
1949 __wait_all_discard_cmd(sbi, &dpolicy);
1950 wait_ms = dpolicy.min_interval;
1951 } else if (issued == -1) {
1952 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1953 if (!wait_ms)
1954 wait_ms = dpolicy.mid_interval;
1955 } else {
1956 wait_ms = dpolicy.max_interval;
1957 }
1958 if (!atomic_read(&dcc->discard_cmd_cnt))
1959 wait_ms = dpolicy.max_interval;
1960
1961 sb_end_intwrite(sbi->sb);
1962
1963 } while (!kthread_should_stop());
1964 return 0;
1965 }
1966
1967 #ifdef CONFIG_BLK_DEV_ZONED
1968 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1969 struct block_device *bdev, block_t blkstart, block_t blklen)
1970 {
1971 sector_t sector, nr_sects;
1972 block_t lblkstart = blkstart;
1973 int devi = 0;
1974 u64 remainder = 0;
1975
1976 if (f2fs_is_multi_device(sbi)) {
1977 devi = f2fs_target_device_index(sbi, blkstart);
1978 if (blkstart < FDEV(devi).start_blk ||
1979 blkstart > FDEV(devi).end_blk) {
1980 f2fs_err(sbi, "Invalid block %x", blkstart);
1981 return -EIO;
1982 }
1983 blkstart -= FDEV(devi).start_blk;
1984 }
1985
1986 /* For sequential zones, reset the zone write pointer */
1987 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1988 sector = SECTOR_FROM_BLOCK(blkstart);
1989 nr_sects = SECTOR_FROM_BLOCK(blklen);
1990 div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);
1991
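/*
 * A zone reset must cover exactly one sequential zone: the start
 * sector has to be zone-aligned and the length has to equal the
 * zone size, which the check below enforces.
 */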
1992 if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
1993 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1994 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1995 blkstart, blklen);
1996 return -EIO;
1997 }
1998
1999 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
2000 unsigned int nofs_flags;
2001 int ret;
2002
2003 trace_f2fs_issue_reset_zone(bdev, blkstart);
2004 nofs_flags = memalloc_nofs_save();
2005 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
2006 sector, nr_sects);
2007 memalloc_nofs_restore(nofs_flags);
2008 return ret;
2009 }
2010
2011 __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
2012 return 0;
2013 }
2014
2015 /* For conventional zones, use regular discard if supported */
2016 __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
2017 return 0;
2018 }
2019 #endif
2020
2021 static int __issue_discard_async(struct f2fs_sb_info *sbi,
2022 struct block_device *bdev, block_t blkstart, block_t blklen)
2023 {
2024 #ifdef CONFIG_BLK_DEV_ZONED
2025 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
2026 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
2027 #endif
2028 __queue_discard_cmd(sbi, bdev, blkstart, blklen);
2029 return 0;
2030 }
2031
2032 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
2033 block_t blkstart, block_t blklen)
2034 {
2035 sector_t start = blkstart, len = 0;
2036 struct block_device *bdev;
2037 struct seg_entry *se;
2038 unsigned int offset;
2039 block_t i;
2040 int err = 0;
2041
2042 bdev = f2fs_target_device(sbi, blkstart, NULL);
2043
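/*
 * Walk the range block by block; whenever the backing device changes
 * (multi-device layouts), flush the extent accumulated so far to the
 * previous device and start a new extent on the next one.
 */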
2044 for (i = blkstart; i < blkstart + blklen; i++, len++) {
2045 if (i != start) {
2046 struct block_device *bdev2 =
2047 f2fs_target_device(sbi, i, NULL);
2048
2049 if (bdev2 != bdev) {
2050 err = __issue_discard_async(sbi, bdev,
2051 start, len);
2052 if (err)
2053 return err;
2054 bdev = bdev2;
2055 start = i;
2056 len = 0;
2057 }
2058 }
2059
2060 se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2061 offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2062
2063 if (f2fs_block_unit_discard(sbi) &&
2064 !f2fs_test_and_set_bit(offset, se->discard_map))
2065 sbi->discard_blks--;
2066 }
2067
2068 if (len)
2069 err = __issue_discard_async(sbi, bdev, start, len);
2070 return err;
2071 }
2072
2073 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
2074 bool check_only)
2075 {
2076 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2077 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2078 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2079 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2080 unsigned long *discard_map = (unsigned long *)se->discard_map;
2081 unsigned long *dmap = SIT_I(sbi)->tmp_map;
2082 unsigned int start = 0, end = -1;
2083 bool force = (cpc->reason & CP_DISCARD);
2084 struct discard_entry *de = NULL;
2085 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2086 int i;
2087
2088 if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
2089 !f2fs_hw_support_discard(sbi) ||
2090 !f2fs_block_unit_discard(sbi))
2091 return false;
2092
2093 if (!force) {
2094 if (!f2fs_realtime_discard_enable(sbi) ||
2095 (!se->valid_blocks &&
2096 !is_curseg(sbi, cpc->trim_start)) ||
2097 SM_I(sbi)->dcc_info->nr_discards >=
2098 SM_I(sbi)->dcc_info->max_discards)
2099 return false;
2100 }
2101
2102 /* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
2103 for (i = 0; i < entries; i++)
2104 dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
2105 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
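/*
 * dmap now marks discard candidates: under CP_DISCARD (force), any
 * block neither valid in the last checkpoint nor already discarded;
 * otherwise, blocks that were valid at the last checkpoint but have
 * been invalidated since ((cur ^ ckpt) & ckpt).
 */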
2106
2107 while (force || SM_I(sbi)->dcc_info->nr_discards <=
2108 SM_I(sbi)->dcc_info->max_discards) {
2109 start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
2110 if (start >= BLKS_PER_SEG(sbi))
2111 break;
2112
2113 end = __find_rev_next_zero_bit(dmap,
2114 BLKS_PER_SEG(sbi), start + 1);
2115 if (force && start && end != BLKS_PER_SEG(sbi) &&
2116 (end - start) < cpc->trim_minlen)
2117 continue;
2118
2119 if (check_only)
2120 return true;
2121
2122 if (!de) {
2123 de = f2fs_kmem_cache_alloc(discard_entry_slab,
2124 GFP_F2FS_ZERO, true, NULL);
2125 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2126 list_add_tail(&de->list, head);
2127 }
2128
2129 for (i = start; i < end; i++)
2130 __set_bit_le(i, (void *)de->discard_map);
2131
2132 SM_I(sbi)->dcc_info->nr_discards += end - start;
2133 }
2134 return false;
2135 }
2136
2137 static void release_discard_addr(struct discard_entry *entry)
2138 {
2139 list_del(&entry->list);
2140 kmem_cache_free(discard_entry_slab, entry);
2141 }
2142
2143 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2144 {
2145 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2146 struct discard_entry *entry, *this;
2147
2148 /* drop caches */
2149 list_for_each_entry_safe(entry, this, head, list)
2150 release_discard_addr(entry);
2151 }
2152
2153 /*
2154 * f2fs_clear_prefree_segments() should be called after the checkpoint is done.
2155 */
2156 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2157 {
2158 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2159 unsigned int segno;
2160
2161 mutex_lock(&dirty_i->seglist_lock);
2162 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2163 __set_test_and_free(sbi, segno, false);
2164 mutex_unlock(&dirty_i->seglist_lock);
2165 }
2166
2167 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2168 struct cp_control *cpc)
2169 {
2170 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2171 struct list_head *head = &dcc->entry_list;
2172 struct discard_entry *entry, *this;
2173 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2174 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2175 unsigned int start = 0, end = -1;
2176 unsigned int secno, start_segno;
2177 bool force = (cpc->reason & CP_DISCARD);
2178 bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2179 DISCARD_UNIT_SECTION;
2180
2181 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2182 section_alignment = true;
2183
2184 mutex_lock(&dirty_i->seglist_lock);
2185
2186 while (1) {
2187 int i;
2188
2189 if (section_alignment && end != -1)
2190 end--;
2191 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2192 if (start >= MAIN_SEGS(sbi))
2193 break;
2194 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2195 start + 1);
2196
2197 if (section_alignment) {
2198 start = rounddown(start, SEGS_PER_SEC(sbi));
2199 end = roundup(end, SEGS_PER_SEC(sbi));
2200 }
2201
2202 for (i = start; i < end; i++) {
2203 if (test_and_clear_bit(i, prefree_map))
2204 dirty_i->nr_dirty[PRE]--;
2205 }
2206
2207 if (!f2fs_realtime_discard_enable(sbi))
2208 continue;
2209
2210 if (force && start >= cpc->trim_start &&
2211 (end - 1) <= cpc->trim_end)
2212 continue;
2213
2214 /* Should cover 2MB zoned device for zone-based reset */
2215 if (!f2fs_sb_has_blkzoned(sbi) &&
2216 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
2217 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2218 SEGS_TO_BLKS(sbi, end - start));
2219 continue;
2220 }
2221 next:
2222 secno = GET_SEC_FROM_SEG(sbi, start);
2223 start_segno = GET_SEG_FROM_SEC(sbi, secno);
2224 if (!is_cursec(sbi, secno) &&
2225 !get_valid_blocks(sbi, start, true))
2226 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2227 BLKS_PER_SEC(sbi));
2228
2229 start = start_segno + SEGS_PER_SEC(sbi);
2230 if (start < end)
2231 goto next;
2232 else
2233 end = start - 1;
2234 }
2235 mutex_unlock(&dirty_i->seglist_lock);
2236
2237 if (!f2fs_block_unit_discard(sbi))
2238 goto wakeup;
2239
2240 /* send small discards */
2241 list_for_each_entry_safe(entry, this, head, list) {
2242 unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2243 bool is_valid = test_bit_le(0, entry->discard_map);
2244
2245 find_next:
2246 if (is_valid) {
2247 next_pos = find_next_zero_bit_le(entry->discard_map,
2248 BLKS_PER_SEG(sbi), cur_pos);
2249 len = next_pos - cur_pos;
2250
2251 if (f2fs_sb_has_blkzoned(sbi) ||
2252 (force && len < cpc->trim_minlen))
2253 goto skip;
2254
2255 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2256 len);
2257 total_len += len;
2258 } else {
2259 next_pos = find_next_bit_le(entry->discard_map,
2260 BLKS_PER_SEG(sbi), cur_pos);
2261 }
2262 skip:
2263 cur_pos = next_pos;
2264 is_valid = !is_valid;
2265
2266 if (cur_pos < BLKS_PER_SEG(sbi))
2267 goto find_next;
2268
2269 release_discard_addr(entry);
2270 dcc->nr_discards -= total_len;
2271 }
2272
2273 wakeup:
2274 wake_up_discard_thread(sbi, false);
2275 }
2276
2277 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2278 {
2279 dev_t dev = sbi->sb->s_bdev->bd_dev;
2280 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2281 int err = 0;
2282
2283 if (f2fs_sb_has_readonly(sbi)) {
2284 f2fs_info(sbi,
2285 "Skip to start discard thread for readonly image");
2286 return 0;
2287 }
2288
2289 if (!f2fs_realtime_discard_enable(sbi))
2290 return 0;
2291
2292 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2293 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2294 if (IS_ERR(dcc->f2fs_issue_discard)) {
2295 err = PTR_ERR(dcc->f2fs_issue_discard);
2296 dcc->f2fs_issue_discard = NULL;
2297 }
2298
2299 return err;
2300 }
2301
2302 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2303 {
2304 struct discard_cmd_control *dcc;
2305 int err = 0, i;
2306
2307 if (SM_I(sbi)->dcc_info) {
2308 dcc = SM_I(sbi)->dcc_info;
2309 goto init_thread;
2310 }
2311
2312 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2313 if (!dcc)
2314 return -ENOMEM;
2315
2316 dcc->discard_io_aware_gran = MAX_PLIST_NUM;
2317 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2318 dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
2319 dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
2320 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT ||
2321 F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2322 dcc->discard_granularity = BLKS_PER_SEG(sbi);
2323
2324 INIT_LIST_HEAD(&dcc->entry_list);
2325 for (i = 0; i < MAX_PLIST_NUM; i++)
2326 INIT_LIST_HEAD(&dcc->pend_list[i]);
2327 INIT_LIST_HEAD(&dcc->wait_list);
2328 INIT_LIST_HEAD(&dcc->fstrim_list);
2329 mutex_init(&dcc->cmd_lock);
2330 atomic_set(&dcc->issued_discard, 0);
2331 atomic_set(&dcc->queued_discard, 0);
2332 atomic_set(&dcc->discard_cmd_cnt, 0);
2333 dcc->nr_discards = 0;
2334 dcc->max_discards = SEGS_TO_BLKS(sbi, MAIN_SEGS(sbi));
2335 dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2336 dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2337 dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2338 dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
2339 dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
2340 dcc->undiscard_blks = 0;
2341 dcc->next_pos = 0;
2342 dcc->root = RB_ROOT_CACHED;
2343 dcc->rbtree_check = false;
2344
2345 init_waitqueue_head(&dcc->discard_wait_queue);
2346 SM_I(sbi)->dcc_info = dcc;
2347 init_thread:
2348 err = f2fs_start_discard_thread(sbi);
2349 if (err) {
2350 kfree(dcc);
2351 SM_I(sbi)->dcc_info = NULL;
2352 }
2353
2354 return err;
2355 }
2356
2357 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2358 {
2359 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2360
2361 if (!dcc)
2362 return;
2363
2364 f2fs_stop_discard_thread(sbi);
2365
2366 /*
2367 * Recovery can cache discard commands, so the error path of
2368 * fill_super() needs to give them a chance to be issued.
2369 */
2370 f2fs_issue_discard_timeout(sbi);
2371
2372 kfree(dcc);
2373 SM_I(sbi)->dcc_info = NULL;
2374 }
2375
2376 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2377 {
2378 struct sit_info *sit_i = SIT_I(sbi);
2379
2380 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2381 sit_i->dirty_sentries++;
2382 return false;
2383 }
2384
2385 return true;
2386 }
2387
2388 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2389 unsigned int segno, int modified)
2390 {
2391 struct seg_entry *se = get_seg_entry(sbi, segno);
2392
2393 se->type = type;
2394 if (modified)
2395 __mark_sit_entry_dirty(sbi, segno);
2396 }
2397
2398 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2399 block_t blkaddr)
2400 {
2401 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2402
2403 if (segno == NULL_SEGNO)
2404 return 0;
2405 return get_seg_entry(sbi, segno)->mtime;
2406 }
2407
2408 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2409 unsigned long long old_mtime)
2410 {
2411 struct seg_entry *se;
2412 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2413 unsigned long long ctime = get_mtime(sbi, false);
2414 unsigned long long mtime = old_mtime ? old_mtime : ctime;
2415
2416 if (segno == NULL_SEGNO)
2417 return;
2418
2419 se = get_seg_entry(sbi, segno);
2420
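/*
 * Keep a running average of block modification times, weighted by
 * the number of valid blocks. Illustrative example: with mtime 100
 * over 3 valid blocks, recording a block with mtime 200 yields
 * (100 * 3 + 200) / 4 = 125.
 */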
2421 if (!se->mtime)
2422 se->mtime = mtime;
2423 else
2424 se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2425 se->valid_blocks + 1);
2426
2427 if (ctime > SIT_I(sbi)->max_mtime)
2428 SIT_I(sbi)->max_mtime = ctime;
2429 }
2430
2431 /*
2432 * NOTE: when updating multiple blocks at the same time, please ensure
2433 * that the consecutive input blocks belong to the same segment.
2434 */
2435 static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
2436 unsigned int segno, block_t blkaddr, unsigned int offset, int del)
2437 {
2438 bool exist;
2439 #ifdef CONFIG_F2FS_CHECK_FS
2440 bool mir_exist;
2441 #endif
2442 int i;
2443 int del_count = -del;
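/*
 * On release, @del is negative, so del_count is the number of
 * consecutive blocks to clear from @offset; @del is adjusted below
 * whenever a bit turns out to be already clear.
 */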
2444
2445 f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
2446
2447 for (i = 0; i < del_count; i++) {
2448 exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
2449 #ifdef CONFIG_F2FS_CHECK_FS
2450 mir_exist = f2fs_test_and_clear_bit(offset + i,
2451 se->cur_valid_map_mir);
2452 if (unlikely(exist != mir_exist)) {
2453 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2454 blkaddr + i, exist);
2455 f2fs_bug_on(sbi, 1);
2456 }
2457 #endif
2458 if (unlikely(!exist)) {
2459 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i);
2460 f2fs_bug_on(sbi, 1);
2461 se->valid_blocks++;
2462 del += 1;
2463 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2464 /*
2465 * If checkpoints are off, we must not reuse data that
2466 * was used in the previous checkpoint. If it was used
2467 * before, we must track that to know how much space we
2468 * really have.
2469 */
2470 if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
2471 spin_lock(&sbi->stat_lock);
2472 sbi->unusable_block_count++;
2473 spin_unlock(&sbi->stat_lock);
2474 }
2475 }
2476
2477 if (f2fs_block_unit_discard(sbi) &&
2478 f2fs_test_and_clear_bit(offset + i, se->discard_map))
2479 sbi->discard_blks++;
2480
2481 if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
2482 se->ckpt_valid_blocks -= 1;
2483 if (__is_large_section(sbi))
2484 get_sec_entry(sbi, segno)->ckpt_valid_blocks -= 1;
2485 }
2486 }
2487
2488 if (__is_large_section(sbi))
2489 sanity_check_valid_blocks(sbi, segno);
2490
2491 return del;
2492 }
2493
2494 static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se,
2495 unsigned int segno, block_t blkaddr, unsigned int offset, int del)
2496 {
2497 bool exist;
2498 #ifdef CONFIG_F2FS_CHECK_FS
2499 bool mir_exist;
2500 #endif
2501
2502 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2503 #ifdef CONFIG_F2FS_CHECK_FS
2504 mir_exist = f2fs_test_and_set_bit(offset,
2505 se->cur_valid_map_mir);
2506 if (unlikely(exist != mir_exist)) {
2507 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2508 blkaddr, exist);
2509 f2fs_bug_on(sbi, 1);
2510 }
2511 #endif
2512 if (unlikely(exist)) {
2513 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr);
2514 f2fs_bug_on(sbi, 1);
2515 se->valid_blocks--;
2516 del = 0;
2517 }
2518
2519 if (f2fs_block_unit_discard(sbi) &&
2520 !f2fs_test_and_set_bit(offset, se->discard_map))
2521 sbi->discard_blks--;
2522
2523 /*
2524 * SSR should never reuse a block which is checkpointed
2525 * or newly invalidated.
2526 */
2527 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2528 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) {
2529 se->ckpt_valid_blocks++;
2530 if (__is_large_section(sbi))
2531 get_sec_entry(sbi, segno)->ckpt_valid_blocks++;
2532 }
2533 }
2534
2535 if (!f2fs_test_bit(offset, se->ckpt_valid_map)) {
2536 se->ckpt_valid_blocks += del;
2537 if (__is_large_section(sbi))
2538 get_sec_entry(sbi, segno)->ckpt_valid_blocks += del;
2539 }
2540
2541 if (__is_large_section(sbi))
2542 sanity_check_valid_blocks(sbi, segno);
2543
2544 return del;
2545 }
2546
2547 /*
2548 * When releasing blocks, this function can update multiple consecutive
2549 * blocks in one call; note that all of those consecutive blocks must
2550 * belong to the same segment.
2551 */
2552 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2553 {
2554 struct seg_entry *se;
2555 unsigned int segno, offset;
2556 long int new_vblocks;
2557
2558 segno = GET_SEGNO(sbi, blkaddr);
2559 if (segno == NULL_SEGNO)
2560 return;
2561
2562 se = get_seg_entry(sbi, segno);
2563 new_vblocks = se->valid_blocks + del;
2564 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2565
2566 f2fs_bug_on(sbi, (new_vblocks < 0 ||
2567 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2568
2569 se->valid_blocks = new_vblocks;
2570
2571 /* Update valid block bitmap */
2572 if (del > 0) {
2573 del = update_sit_entry_for_alloc(sbi, se, segno, blkaddr, offset, del);
2574 } else {
2575 del = update_sit_entry_for_release(sbi, se, segno, blkaddr, offset, del);
2576 }
2577
2578 __mark_sit_entry_dirty(sbi, segno);
2579
2580 /* update total number of valid blocks to be written in ckpt area */
2581 SIT_I(sbi)->written_valid_blocks += del;
2582
2583 if (__is_large_section(sbi))
2584 get_sec_entry(sbi, segno)->valid_blocks += del;
2585 }
2586
2587 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
2588 unsigned int len)
2589 {
2590 unsigned int segno = GET_SEGNO(sbi, addr);
2591 struct sit_info *sit_i = SIT_I(sbi);
2592 block_t addr_start = addr, addr_end = addr + len - 1;
2593 unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1;
2594 unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt;
2595
2596 f2fs_bug_on(sbi, addr == NULL_ADDR);
2597 if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2598 return;
2599
2600 f2fs_invalidate_internal_cache(sbi, addr, len);
2601
2602 /* add it into sit main buffer */
2603 down_write(&sit_i->sentry_lock);
2604
2605 if (seg_num == 1)
2606 cnt = len;
2607 else
2608 cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr);
2609
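/*
 * Process the range one segment at a time: the first pass covers the
 * blocks from @addr to the end of its segment, middle passes cover
 * whole segments, and the last pass covers the leading blocks of the
 * final segment.
 */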
2610 do {
2611 update_segment_mtime(sbi, addr_start, 0);
2612 update_sit_entry(sbi, addr_start, -cnt);
2613
2614 /* add it into dirty seglist */
2615 locate_dirty_segment(sbi, segno);
2616
2617 /* update @addr_start and @cnt and @segno */
2618 addr_start = START_BLOCK(sbi, ++segno);
2619 if (++i == seg_num)
2620 cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1;
2621 else
2622 cnt = max_blocks;
2623 } while (i <= seg_num);
2624
2625 up_write(&sit_i->sentry_lock);
2626 }
2627
2628 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2629 {
2630 struct sit_info *sit_i = SIT_I(sbi);
2631 unsigned int segno, offset;
2632 struct seg_entry *se;
2633 bool is_cp = false;
2634
2635 if (!__is_valid_data_blkaddr(blkaddr))
2636 return true;
2637
2638 down_read(&sit_i->sentry_lock);
2639
2640 segno = GET_SEGNO(sbi, blkaddr);
2641 se = get_seg_entry(sbi, segno);
2642 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2643
2644 if (f2fs_test_bit(offset, se->ckpt_valid_map))
2645 is_cp = true;
2646
2647 up_read(&sit_i->sentry_lock);
2648
2649 return is_cp;
2650 }
2651
2652 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
2653 {
2654 struct curseg_info *curseg = CURSEG_I(sbi, type);
2655
2656 if (sbi->ckpt->alloc_type[type] == SSR)
2657 return BLKS_PER_SEG(sbi);
2658 return curseg->next_blkoff;
2659 }
2660
2661 /*
2662 * Calculate the number of current summary pages for writing
2663 */
2664 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2665 {
2666 int valid_sum_count = 0;
2667 int i, sum_in_page;
2668
2669 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2670 if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
2671 valid_sum_count +=
2672 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2673 else
2674 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
2675 }
2676
2677 sum_in_page = (sbi->blocksize - 2 * sbi->sum_journal_size -
2678 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
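/*
 * Rough example, assuming 4KB blocks and the on-disk constants from
 * f2fs_fs.h (SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5, journal size 507):
 * one page fits (4096 - 2 * 507 - 5) / 7 = 439 summaries next to two
 * journals, so at most three pages are needed for the data logs.
 */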
2679 if (valid_sum_count <= sum_in_page)
2680 return 1;
2681 else if ((valid_sum_count - sum_in_page) <=
2682 (sbi->blocksize - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2683 return 2;
2684 return 3;
2685 }
2686
2687 /*
2688 * The caller should put this summary folio.
2689 */
2690 struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno)
2691 {
2692 if (unlikely(f2fs_cp_error(sbi)))
2693 return ERR_PTR(-EIO);
2694 return f2fs_get_meta_folio_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2695 }
2696
2697 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2698 void *src, block_t blk_addr)
2699 {
2700 struct folio *folio;
2701
2702 if (!f2fs_sb_has_packed_ssa(sbi))
2703 folio = f2fs_grab_meta_folio(sbi, blk_addr);
2704 else
2705 folio = f2fs_get_meta_folio_retry(sbi, blk_addr);
2706
2707 if (IS_ERR(folio))
2708 return;
2709
2710 memcpy(folio_address(folio), src, PAGE_SIZE);
2711 folio_mark_dirty(folio);
2712 f2fs_folio_put(folio, true);
2713 }
2714
2715 static void write_sum_page(struct f2fs_sb_info *sbi,
2716 struct f2fs_summary_block *sum_blk, unsigned int segno)
2717 {
2718 struct folio *folio;
2719
2720 if (!f2fs_sb_has_packed_ssa(sbi))
2721 return f2fs_update_meta_page(sbi, (void *)sum_blk,
2722 GET_SUM_BLOCK(sbi, segno));
2723
2724 folio = f2fs_get_sum_folio(sbi, segno);
2725 if (IS_ERR(folio))
2726 return;
2727
2728 memcpy(SUM_BLK_PAGE_ADDR(sbi, folio, segno), sum_blk,
2729 sbi->sum_blocksize);
2730 folio_mark_dirty(folio);
2731 f2fs_folio_put(folio, true);
2732 }
2733
2734 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2735 int type, block_t blk_addr)
2736 {
2737 struct curseg_info *curseg = CURSEG_I(sbi, type);
2738 struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr);
2739 struct f2fs_summary_block *src = curseg->sum_blk;
2740 struct f2fs_summary_block *dst;
2741
2742 dst = folio_address(folio);
2743 memset(dst, 0, PAGE_SIZE);
2744
2745 mutex_lock(&curseg->curseg_mutex);
2746
2747 down_read(&curseg->journal_rwsem);
2748 memcpy(sum_journal(sbi, dst), curseg->journal, sbi->sum_journal_size);
2749 up_read(&curseg->journal_rwsem);
2750
2751 memcpy(sum_entries(dst), sum_entries(src), sbi->sum_entry_size);
2752 memcpy(sum_footer(sbi, dst), sum_footer(sbi, src), SUM_FOOTER_SIZE);
2753
2754 mutex_unlock(&curseg->curseg_mutex);
2755
2756 folio_mark_dirty(folio);
2757 f2fs_folio_put(folio, true);
2758 }
2759
2760 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2761 struct curseg_info *curseg)
2762 {
2763 unsigned int segno = curseg->segno + 1;
2764 struct free_segmap_info *free_i = FREE_I(sbi);
2765
2766 if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
2767 return !test_bit(segno, free_i->free_segmap);
2768 return 0;
2769 }
2770
2771 /*
2772 * Find a new segment from the free segment bitmap, in the right order.
2773 * This function is expected to succeed; failing to find a segment is a BUG.
2774 */
2775 static int get_new_segment(struct f2fs_sb_info *sbi,
2776 unsigned int *newseg, bool new_sec, bool pinning)
2777 {
2778 struct free_segmap_info *free_i = FREE_I(sbi);
2779 unsigned int segno, secno, zoneno;
2780 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2781 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2782 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2783 unsigned int alloc_policy = sbi->allocate_section_policy;
2784 unsigned int alloc_hint = sbi->allocate_section_hint;
2785 bool init = true;
2786 int i;
2787 int ret = 0;
2788
2789 spin_lock(&free_i->segmap_lock);
2790
2791 if (time_to_inject(sbi, FAULT_NO_SEGMENT)) {
2792 ret = -ENOSPC;
2793 goto out_unlock;
2794 }
2795
2796 if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
2797 segno = find_next_zero_bit(free_i->free_segmap,
2798 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2799 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2800 goto got_it;
2801 }
2802
2803 #ifdef CONFIG_BLK_DEV_ZONED
2804 /*
2805 * If we format f2fs on zoned storage, let's try to get pinned sections
2806 * from the beginning of the storage, which should be a conventional zone.
2807 */
2808 if (f2fs_sb_has_blkzoned(sbi)) {
2809 /* Prioritize writing to conventional zones */
2810 if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
2811 segno = 0;
2812 else
2813 segno = max(sbi->first_seq_zone_segno, *newseg);
2814 hint = GET_SEC_FROM_SEG(sbi, segno);
2815 }
2816 #endif
2817
2818 /*
2819 * Prevent allocate_section_hint from exceeding MAIN_SECS()
2820 * due to desynchronization.
2821 */
2822 if (alloc_policy != ALLOCATE_FORWARD_NOHINT &&
2823 alloc_hint > MAIN_SECS(sbi))
2824 alloc_hint = MAIN_SECS(sbi);
2825
2826 if (alloc_policy == ALLOCATE_FORWARD_FROM_HINT &&
2827 hint < alloc_hint)
2828 hint = alloc_hint;
2829 else if (alloc_policy == ALLOCATE_FORWARD_WITHIN_HINT &&
2830 hint >= alloc_hint)
2831 hint = 0;
2832
2833 find_other_zone:
2834 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2835
2836 #ifdef CONFIG_BLK_DEV_ZONED
2837 if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
2838 /* Write only to sequential zones */
2839 if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
2840 hint = GET_SEC_FROM_SEG(sbi, sbi->first_seq_zone_segno);
2841 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2842 } else
2843 secno = find_first_zero_bit(free_i->free_secmap,
2844 MAIN_SECS(sbi));
2845 if (secno >= MAIN_SECS(sbi)) {
2846 ret = -ENOSPC;
2847 f2fs_bug_on(sbi, 1);
2848 goto out_unlock;
2849 }
2850 }
2851 #endif
2852
2853 if (secno >= MAIN_SECS(sbi)) {
2854 secno = find_first_zero_bit(free_i->free_secmap,
2855 MAIN_SECS(sbi));
2856 if (secno >= MAIN_SECS(sbi)) {
2857 ret = -ENOSPC;
2858 f2fs_bug_on(sbi, !pinning);
2859 goto out_unlock;
2860 }
2861 }
2862 segno = GET_SEG_FROM_SEC(sbi, secno);
2863 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2864
2865 /* give up on finding another zone */
2866 if (!init)
2867 goto got_it;
2868 if (sbi->secs_per_zone == 1)
2869 goto got_it;
2870 if (zoneno == old_zoneno)
2871 goto got_it;
2872 for (i = 0; i < NR_CURSEG_TYPE; i++)
2873 if (CURSEG_I(sbi, i)->zone == zoneno)
2874 break;
2875
2876 if (i < NR_CURSEG_TYPE) {
2877 /* zone is in use, try another */
2878 if (zoneno + 1 >= total_zones)
2879 hint = 0;
2880 else
2881 hint = (zoneno + 1) * sbi->secs_per_zone;
2882 init = false;
2883 goto find_other_zone;
2884 }
2885 got_it:
2886 /* set it as dirty segment in free segmap */
2887 if (test_bit(segno, free_i->free_segmap)) {
2888 ret = -EFSCORRUPTED;
2889 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_CORRUPTED_FREE_BITMAP);
2890 goto out_unlock;
2891 }
2892
2893 /* no free section in conventional device or conventional zone */
2894 if (new_sec && pinning &&
2895 f2fs_is_sequential_zone_area(sbi, START_BLOCK(sbi, segno))) {
2896 ret = -EAGAIN;
2897 goto out_unlock;
2898 }
2899 __set_inuse(sbi, segno);
2900 *newseg = segno;
2901 out_unlock:
2902 spin_unlock(&free_i->segmap_lock);
2903
2904 if (ret == -ENOSPC && !pinning)
2905 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
2906 return ret;
2907 }
2908
2909 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2910 {
2911 struct curseg_info *curseg = CURSEG_I(sbi, type);
2912 struct summary_footer *sum_footer;
2913 unsigned short seg_type = curseg->seg_type;
2914
2915 /* only happens when get_new_segment() fails */
2916 if (curseg->next_segno == NULL_SEGNO)
2917 return;
2918
2919 curseg->inited = true;
2920 curseg->segno = curseg->next_segno;
2921 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2922 curseg->next_blkoff = 0;
2923 curseg->next_segno = NULL_SEGNO;
2924
2925 sum_footer = sum_footer(sbi, curseg->sum_blk);
2926 memset(sum_footer, 0, sizeof(struct summary_footer));
2927
2928 sanity_check_seg_type(sbi, seg_type);
2929
2930 if (IS_DATASEG(seg_type))
2931 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2932 if (IS_NODESEG(seg_type))
2933 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2934 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2935 }
2936
2937 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2938 {
2939 struct curseg_info *curseg = CURSEG_I(sbi, type);
2940 unsigned short seg_type = curseg->seg_type;
2941
2942 sanity_check_seg_type(sbi, seg_type);
2943 if (__is_large_section(sbi)) {
2944 if (f2fs_need_rand_seg(sbi)) {
2945 unsigned int hint = GET_SEC_FROM_SEG(sbi, curseg->segno);
2946
2947 if (GET_SEC_FROM_SEG(sbi, curseg->segno + 1) != hint)
2948 return curseg->segno;
2949 return get_random_u32_inclusive(curseg->segno + 1,
2950 GET_SEG_FROM_SEC(sbi, hint + 1) - 1);
2951 }
2952 return curseg->segno;
2953 } else if (f2fs_need_rand_seg(sbi)) {
2954 return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
2955 }
2956
2957 /* the in-memory log may not be located on any segment after mount */
2958 if (!curseg->inited)
2959 return 0;
2960
2961 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2962 return 0;
2963
2964 if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
2965 return 0;
2966
2967 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2968 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2969
2970 /* find segments from 0 to reuse freed segments */
2971 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2972 return 0;
2973
2974 return curseg->segno;
2975 }
2976
2977 static void reset_curseg_fields(struct curseg_info *curseg)
2978 {
2979 curseg->inited = false;
2980 curseg->segno = NULL_SEGNO;
2981 curseg->next_segno = 0;
2982 }
2983
2984 /*
2985 * Allocate a current working segment.
2986 * This function always allocates a free segment in LFS manner.
2987 */
2988 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2989 {
2990 struct curseg_info *curseg = CURSEG_I(sbi, type);
2991 unsigned int segno = curseg->segno;
2992 bool pinning = type == CURSEG_COLD_DATA_PINNED;
2993 int ret;
2994
2995 if (curseg->inited)
2996 write_sum_page(sbi, curseg->sum_blk, segno);
2997
2998 segno = __get_next_segno(sbi, type);
2999 ret = get_new_segment(sbi, &segno, new_sec, pinning);
3000 if (ret) {
3001 if (ret == -ENOSPC)
3002 reset_curseg_fields(curseg);
3003 return ret;
3004 }
3005
3006 curseg->next_segno = segno;
3007 reset_curseg(sbi, type, 1);
3008 curseg->alloc_type = LFS;
3009 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3010 curseg->fragment_remained_chunk =
3011 get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3012 return 0;
3013 }
3014
3015 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
3016 int segno, block_t start)
3017 {
3018 struct seg_entry *se = get_seg_entry(sbi, segno);
3019 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
3020 unsigned long *target_map = SIT_I(sbi)->tmp_map;
3021 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
3022 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
3023 int i;
3024
3025 for (i = 0; i < entries; i++)
3026 target_map[i] = ckpt_map[i] | cur_map[i];
3027
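/*
 * A block offset is usable for SSR only if it is free in both the
 * current and the checkpointed bitmap; searching the OR of the two
 * maps for the next zero bit guarantees SSR never overwrites a block
 * that is valid now or was valid at the last checkpoint.
 */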
3028 return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
3029 }
3030
3031 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
3032 struct curseg_info *seg)
3033 {
3034 return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
3035 }
3036
3037 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
3038 {
3039 return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
3040 }
3041
3042 /*
3043 * This function always allocates a used segment (from the dirty seglist) in an
3044 * SSR manner, so it should recover the existing segment information of valid blocks.
3045 */
3046 static int change_curseg(struct f2fs_sb_info *sbi, int type)
3047 {
3048 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3049 struct curseg_info *curseg = CURSEG_I(sbi, type);
3050 unsigned int new_segno = curseg->next_segno;
3051 struct f2fs_summary_block *sum_node;
3052 struct folio *sum_folio;
3053
3054 if (curseg->inited)
3055 write_sum_page(sbi, curseg->sum_blk, curseg->segno);
3056
3057 __set_test_and_inuse(sbi, new_segno);
3058
3059 mutex_lock(&dirty_i->seglist_lock);
3060 __remove_dirty_segment(sbi, new_segno, PRE);
3061 __remove_dirty_segment(sbi, new_segno, DIRTY);
3062 mutex_unlock(&dirty_i->seglist_lock);
3063
3064 reset_curseg(sbi, type, 1);
3065 curseg->alloc_type = SSR;
3066 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
3067
3068 sum_folio = f2fs_get_sum_folio(sbi, new_segno);
3069 if (IS_ERR(sum_folio)) {
3070 /* on cp_error, GC must not be able to use stale summary pages */
3071 memset(curseg->sum_blk, 0, sbi->sum_entry_size);
3072 return PTR_ERR(sum_folio);
3073 }
3074 sum_node = SUM_BLK_PAGE_ADDR(sbi, sum_folio, new_segno);
3075 memcpy(curseg->sum_blk, sum_node, sbi->sum_entry_size);
3076 f2fs_folio_put(sum_folio, true);
3077 return 0;
3078 }
3079
3080 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3081 int alloc_mode, unsigned long long age);
3082
3083 static int get_atssr_segment(struct f2fs_sb_info *sbi, int type,
3084 int target_type, int alloc_mode,
3085 unsigned long long age)
3086 {
3087 struct curseg_info *curseg = CURSEG_I(sbi, type);
3088 int ret = 0;
3089
3090 curseg->seg_type = target_type;
3091
3092 if (get_ssr_segment(sbi, type, alloc_mode, age)) {
3093 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
3094
3095 curseg->seg_type = se->type;
3096 ret = change_curseg(sbi, type);
3097 } else {
3098 /* allocate cold segment by default */
3099 curseg->seg_type = CURSEG_COLD_DATA;
3100 ret = new_curseg(sbi, type, true);
3101 }
3102 stat_inc_seg_type(sbi, curseg);
3103 return ret;
3104 }
3105
3106 static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi, bool force)
3107 {
3108 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
3109 int ret = 0;
3110
3111 if (!sbi->am.atgc_enabled && !force)
3112 return 0;
3113
3114 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3115
3116 mutex_lock(&curseg->curseg_mutex);
3117 down_write(&SIT_I(sbi)->sentry_lock);
3118
3119 ret = get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC,
3120 CURSEG_COLD_DATA, SSR, 0);
3121
3122 up_write(&SIT_I(sbi)->sentry_lock);
3123 mutex_unlock(&curseg->curseg_mutex);
3124
3125 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3126 return ret;
3127 }
3128
3129 int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
3130 {
3131 return __f2fs_init_atgc_curseg(sbi, false);
3132 }
3133
3134 int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi)
3135 {
3136 int ret;
3137
3138 if (!test_opt(sbi, ATGC))
3139 return 0;
3140 if (sbi->am.atgc_enabled)
3141 return 0;
3142 if (le64_to_cpu(F2FS_CKPT(sbi)->elapsed_time) <
3143 sbi->am.age_threshold)
3144 return 0;
3145
3146 ret = __f2fs_init_atgc_curseg(sbi, true);
3147 if (!ret) {
3148 sbi->am.atgc_enabled = true;
3149 f2fs_info(sbi, "reenabled age threshold GC");
3150 }
3151 return ret;
3152 }
3153
3154 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
3155 {
3156 struct curseg_info *curseg = CURSEG_I(sbi, type);
3157
3158 mutex_lock(&curseg->curseg_mutex);
3159 if (!curseg->inited)
3160 goto out;
3161
3162 if (get_valid_blocks(sbi, curseg->segno, false)) {
3163 write_sum_page(sbi, curseg->sum_blk, curseg->segno);
3164 } else {
3165 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
3166 __set_test_and_free(sbi, curseg->segno, true);
3167 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
3168 }
3169 out:
3170 mutex_unlock(&curseg->curseg_mutex);
3171 }
3172
3173 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
3174 {
3175 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
3176
3177 if (sbi->am.atgc_enabled)
3178 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
3179 }
3180
3181 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
3182 {
3183 struct curseg_info *curseg = CURSEG_I(sbi, type);
3184
3185 mutex_lock(&curseg->curseg_mutex);
3186 if (!curseg->inited)
3187 goto out;
3188 if (get_valid_blocks(sbi, curseg->segno, false))
3189 goto out;
3190
3191 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
3192 __set_test_and_inuse(sbi, curseg->segno);
3193 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
3194 out:
3195 mutex_unlock(&curseg->curseg_mutex);
3196 }
3197
3198 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
3199 {
3200 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
3201
3202 if (sbi->am.atgc_enabled)
3203 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
3204 }
3205
3206 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3207 int alloc_mode, unsigned long long age)
3208 {
3209 struct curseg_info *curseg = CURSEG_I(sbi, type);
3210 unsigned segno = NULL_SEGNO;
3211 unsigned short seg_type = curseg->seg_type;
3212 int i, cnt;
3213 bool reversed = false;
3214
3215 sanity_check_seg_type(sbi, seg_type);
3216
3217 /* f2fs_need_SSR() has already forced us to do this */
3218 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type,
3219 alloc_mode, age, false)) {
3220 curseg->next_segno = segno;
3221 return 1;
3222 }
3223
3224 /* For node segments, let's do SSR more intensively */
3225 if (IS_NODESEG(seg_type)) {
3226 if (seg_type >= CURSEG_WARM_NODE) {
3227 reversed = true;
3228 i = CURSEG_COLD_NODE;
3229 } else {
3230 i = CURSEG_HOT_NODE;
3231 }
3232 cnt = NR_CURSEG_NODE_TYPE;
3233 } else {
3234 if (seg_type >= CURSEG_WARM_DATA) {
3235 reversed = true;
3236 i = CURSEG_COLD_DATA;
3237 } else {
3238 i = CURSEG_HOT_DATA;
3239 }
3240 cnt = NR_CURSEG_DATA_TYPE;
3241 }
3242
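/*
 * Scan the other temperatures of the same kind for a victim: warm and
 * cold logs start at the cold end and move toward hot, hot logs move
 * from hot toward cold; e.g. CURSEG_WARM_NODE tries cold node first,
 * then hot node, skipping its own type.
 */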
3243 for (; cnt-- > 0; reversed ? i-- : i++) {
3244 if (i == seg_type)
3245 continue;
3246 if (!f2fs_get_victim(sbi, &segno, BG_GC, i,
3247 alloc_mode, age, false)) {
3248 curseg->next_segno = segno;
3249 return 1;
3250 }
3251 }
3252
3253 /* find valid_blocks=0 in dirty list */
3254 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
3255 segno = get_free_segment(sbi);
3256 if (segno != NULL_SEGNO) {
3257 curseg->next_segno = segno;
3258 return 1;
3259 }
3260 }
3261 return 0;
3262 }
3263
3264 static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
3265 {
3266 struct curseg_info *curseg = CURSEG_I(sbi, type);
3267
3268 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3269 curseg->seg_type == CURSEG_WARM_NODE)
3270 return true;
3271 if (curseg->alloc_type == LFS && is_next_segment_free(sbi, curseg) &&
3272 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3273 return true;
3274 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
3275 return true;
3276 return false;
3277 }
3278
3279 int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3280 unsigned int start, unsigned int end)
3281 {
3282 struct curseg_info *curseg = CURSEG_I(sbi, type);
3283 unsigned int segno;
3284 int ret = 0;
3285
3286 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3287 mutex_lock(&curseg->curseg_mutex);
3288 down_write(&SIT_I(sbi)->sentry_lock);
3289
3290 segno = CURSEG_I(sbi, type)->segno;
3291 if (segno < start || segno > end)
3292 goto unlock;
3293
3294 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
3295 ret = change_curseg(sbi, type);
3296 else
3297 ret = new_curseg(sbi, type, true);
3298
3299 stat_inc_seg_type(sbi, curseg);
3300
3301 locate_dirty_segment(sbi, segno);
3302 unlock:
3303 up_write(&SIT_I(sbi)->sentry_lock);
3304
3305 if (segno != curseg->segno)
3306 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3307 type, segno, curseg->segno);
3308
3309 mutex_unlock(&curseg->curseg_mutex);
3310 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3311 return ret;
3312 }
3313
3314 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3315 bool new_sec, bool force)
3316 {
3317 struct curseg_info *curseg = CURSEG_I(sbi, type);
3318 unsigned int old_segno;
3319 int err = 0;
3320
3321 if (type == CURSEG_COLD_DATA_PINNED && !curseg->inited)
3322 goto allocate;
3323
3324 if (!force && curseg->inited &&
3325 !curseg->next_blkoff &&
3326 !get_valid_blocks(sbi, curseg->segno, new_sec) &&
3327 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3328 return 0;
3329
3330 allocate:
3331 old_segno = curseg->segno;
3332 err = new_curseg(sbi, type, true);
3333 if (err)
3334 return err;
3335 stat_inc_seg_type(sbi, curseg);
3336 locate_dirty_segment(sbi, old_segno);
3337 return 0;
3338 }
3339
3340 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3341 {
3342 int ret;
3343
3344 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3345 down_write(&SIT_I(sbi)->sentry_lock);
3346 ret = __allocate_new_segment(sbi, type, true, force);
3347 up_write(&SIT_I(sbi)->sentry_lock);
3348 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3349
3350 return ret;
3351 }
3352
3353 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
3354 {
3355 struct f2fs_lock_context lc;
3356 int err;
3357 bool gc_required = true;
3358
3359 retry:
3360 f2fs_lock_op(sbi, &lc);
3361 err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3362 f2fs_unlock_op(sbi, &lc);
3363
3364 if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
3365 f2fs_down_write_trace(&sbi->gc_lock, &lc);
3366 err = f2fs_gc_range(sbi, 0, sbi->first_seq_zone_segno - 1,
3367 true, ZONED_PIN_SEC_REQUIRED_COUNT);
3368 f2fs_up_write_trace(&sbi->gc_lock, &lc);
3369
3370 gc_required = false;
3371 if (!err)
3372 goto retry;
3373 }
3374
3375 return err;
3376 }
3377
3378 int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3379 {
3380 int i;
3381 int err = 0;
3382
3383 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3384 down_write(&SIT_I(sbi)->sentry_lock);
3385 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3386 err += __allocate_new_segment(sbi, i, false, false);
3387 up_write(&SIT_I(sbi)->sentry_lock);
3388 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3389
3390 return err;
3391 }
3392
3393 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3394 struct cp_control *cpc)
3395 {
3396 __u64 trim_start = cpc->trim_start;
3397 bool has_candidate = false;
3398
3399 down_write(&SIT_I(sbi)->sentry_lock);
3400 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
3401 if (add_discard_addrs(sbi, cpc, true)) {
3402 has_candidate = true;
3403 break;
3404 }
3405 }
3406 up_write(&SIT_I(sbi)->sentry_lock);
3407
3408 cpc->trim_start = trim_start;
3409 return has_candidate;
3410 }
3411
3412 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3413 struct discard_policy *dpolicy,
3414 unsigned int start, unsigned int end)
3415 {
3416 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3417 struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3418 struct rb_node **insert_p = NULL, *insert_parent = NULL;
3419 struct discard_cmd *dc;
3420 struct blk_plug plug;
3421 int issued;
3422 unsigned int trimmed = 0;
3423
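/*
 * Issue the pending commands in batches of at most
 * dpolicy->max_requests: after each batch, drop cmd_lock and the
 * block plug, wait for the in-flight discards to complete, then
 * re-lookup from the updated @start and continue.
 */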
3424 next:
3425 issued = 0;
3426
3427 mutex_lock(&dcc->cmd_lock);
3428 if (unlikely(dcc->rbtree_check))
3429 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
3430
3431 dc = __lookup_discard_cmd_ret(&dcc->root, start,
3432 &prev_dc, &next_dc, &insert_p, &insert_parent);
3433 if (!dc)
3434 dc = next_dc;
3435
3436 blk_start_plug(&plug);
3437
3438 while (dc && dc->di.lstart <= end) {
3439 struct rb_node *node;
3440 int err = 0;
3441
3442 if (dc->di.len < dpolicy->granularity)
3443 goto skip;
3444
3445 if (dc->state != D_PREP) {
3446 list_move_tail(&dc->list, &dcc->fstrim_list);
3447 goto skip;
3448 }
3449
3450 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3451
3452 if (issued >= dpolicy->max_requests) {
3453 start = dc->di.lstart + dc->di.len;
3454
3455 if (err)
3456 __remove_discard_cmd(sbi, dc);
3457
3458 blk_finish_plug(&plug);
3459 mutex_unlock(&dcc->cmd_lock);
3460 trimmed += __wait_all_discard_cmd(sbi, NULL);
3461 f2fs_schedule_timeout(DEFAULT_DISCARD_INTERVAL);
3462 goto next;
3463 }
3464 skip:
3465 node = rb_next(&dc->rb_node);
3466 if (err)
3467 __remove_discard_cmd(sbi, dc);
3468 dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3469
3470 if (fatal_signal_pending(current))
3471 break;
3472 }
3473
3474 blk_finish_plug(&plug);
3475 mutex_unlock(&dcc->cmd_lock);
3476
3477 return trimmed;
3478 }
3479
3480 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3481 {
3482 __u64 start = F2FS_BYTES_TO_BLK(range->start);
3483 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3484 unsigned int start_segno, end_segno;
3485 block_t start_block, end_block;
3486 struct cp_control cpc;
3487 struct discard_policy dpolicy;
3488 struct f2fs_lock_context lc;
3489 unsigned long long trimmed = 0;
3490 int err = 0;
3491 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3492
3493 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3494 return -EINVAL;
3495
3496 if (end < MAIN_BLKADDR(sbi))
3497 goto out;
3498
3499 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3500 f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3501 return -EFSCORRUPTED;
3502 }
3503
3504 /* start/end segment number in main_area */
3505 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3506 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3507 GET_SEGNO(sbi, end);
3508 if (need_align) {
3509 start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
3510 end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
3511 }
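/*
 * Alignment example (hypothetical geometry): with SEGS_PER_SEC = 4, a
 * request covering segments 5..9 is widened to 4..11 so that whole
 * sections are trimmed in LFS large-section mode.
 */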
3512
3513 cpc.reason = CP_DISCARD;
3514 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3515 cpc.trim_start = start_segno;
3516 cpc.trim_end = end_segno;
3517
3518 if (sbi->discard_blks == 0)
3519 goto out;
3520
3521 f2fs_down_write_trace(&sbi->gc_lock, &lc);
3522 stat_inc_cp_call_count(sbi, TOTAL_CALL);
3523 err = f2fs_write_checkpoint(sbi, &cpc);
3524 f2fs_up_write_trace(&sbi->gc_lock, &lc);
3525 if (err)
3526 goto out;
3527
3528 /*
3529 * We filed discard candidates, but we don't actually need to wait for
3530 * all of them, since they will be issued during idle time by the runtime
3531 * discard option. The user is expected to rely on runtime discard or on
3532 * periodic fstrim instead of waiting here.
3533 */
3534 if (f2fs_realtime_discard_enable(sbi))
3535 goto out;
3536
3537 start_block = START_BLOCK(sbi, start_segno);
3538 end_block = START_BLOCK(sbi, end_segno + 1);
3539
3540 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3541 trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3542 start_block, end_block);
3543
3544 trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3545 start_block, end_block);
3546 out:
3547 if (!err)
3548 range->len = F2FS_BLK_TO_BYTES(trimmed);
3549 return err;
3550 }
3551
3552 int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint)
3553 {
3554 if (F2FS_OPTION(sbi).active_logs == 2)
3555 return CURSEG_HOT_DATA;
3556 else if (F2FS_OPTION(sbi).active_logs == 4)
3557 return CURSEG_COLD_DATA;
3558
3559 /* active_logs == 6 */
3560 switch (hint) {
3561 case WRITE_LIFE_SHORT:
3562 return CURSEG_HOT_DATA;
3563 case WRITE_LIFE_EXTREME:
3564 return CURSEG_COLD_DATA;
3565 default:
3566 return CURSEG_WARM_DATA;
3567 }
3568 }
3569
3570 /*
3571 * This returns write hints for each segment type. These hints will be
3572 * passed down to the block layer as below by default.
3573 *
3574 * User F2FS Block
3575 * ---- ---- -----
3576 * META WRITE_LIFE_NONE|REQ_META
3577 * HOT_NODE WRITE_LIFE_NONE
3578 * WARM_NODE WRITE_LIFE_MEDIUM
3579 * COLD_NODE WRITE_LIFE_LONG
3580 * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
3581 * extension list " "
3582 *
3583 * -- buffered io
3584 * COLD_DATA WRITE_LIFE_EXTREME
3585 * HOT_DATA WRITE_LIFE_SHORT
3586 * WARM_DATA WRITE_LIFE_NOT_SET
3587 *
3588 * -- direct io
3589 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
3590 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
3591 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
3592 * WRITE_LIFE_NONE " WRITE_LIFE_NONE
3593 * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
3594 * WRITE_LIFE_LONG " WRITE_LIFE_LONG
3595 */
3596 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3597 enum page_type type, enum temp_type temp)
3598 {
3599 switch (type) {
3600 case DATA:
3601 switch (temp) {
3602 case WARM:
3603 return WRITE_LIFE_NOT_SET;
3604 case HOT:
3605 return WRITE_LIFE_SHORT;
3606 case COLD:
3607 return WRITE_LIFE_EXTREME;
3608 default:
3609 return WRITE_LIFE_NONE;
3610 }
3611 case NODE:
3612 switch (temp) {
3613 case WARM:
3614 return WRITE_LIFE_MEDIUM;
3615 case HOT:
3616 return WRITE_LIFE_NONE;
3617 case COLD:
3618 return WRITE_LIFE_LONG;
3619 default:
3620 return WRITE_LIFE_NONE;
3621 }
3622 case META:
3623 return WRITE_LIFE_NONE;
3624 default:
3625 return WRITE_LIFE_NONE;
3626 }
3627 }
3628
3629 static int __get_segment_type_2(struct f2fs_io_info *fio)
3630 {
3631 if (fio->type == DATA)
3632 return CURSEG_HOT_DATA;
3633 else
3634 return CURSEG_HOT_NODE;
3635 }
3636
3637 static int __get_segment_type_4(struct f2fs_io_info *fio)
3638 {
3639 if (fio->type == DATA) {
3640 struct inode *inode = fio_inode(fio);
3641
3642 if (S_ISDIR(inode->i_mode))
3643 return CURSEG_HOT_DATA;
3644 else
3645 return CURSEG_COLD_DATA;
3646 } else {
3647 if (IS_DNODE(fio->folio) && is_cold_node(fio->folio))
3648 return CURSEG_WARM_NODE;
3649 else
3650 return CURSEG_COLD_NODE;
3651 }
3652 }
3653
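/*
 * Pick a data temperature from the block-age extent cache: extents at
 * or under the hot age threshold are hot, those under the warm
 * threshold warm, anything older cold. NO_CHECK_TYPE means no age
 * information is available for this block.
 */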
3654 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
3655 {
3656 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3657 struct extent_info ei = {};
3658
3659 if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
3660 if (!ei.age)
3661 return NO_CHECK_TYPE;
3662 if (ei.age <= sbi->hot_data_age_threshold)
3663 return CURSEG_HOT_DATA;
3664 if (ei.age <= sbi->warm_data_age_threshold)
3665 return CURSEG_WARM_DATA;
3666 return CURSEG_COLD_DATA;
3667 }
3668 return NO_CHECK_TYPE;
3669 }
3670
3671 static int __get_segment_type_6(struct f2fs_io_info *fio)
3672 {
3673 if (fio->type == DATA) {
3674 struct inode *inode = fio_inode(fio);
3675 int type;
3676
3677 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3678 return CURSEG_COLD_DATA_PINNED;
3679
3680 if (page_private_gcing(fio->page)) {
3681 if (fio->sbi->am.atgc_enabled &&
3682 (fio->io_type == FS_DATA_IO) &&
3683 (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
3684 __is_valid_data_blkaddr(fio->old_blkaddr) &&
3685 !is_inode_flag_set(inode, FI_OPU_WRITE))
3686 return CURSEG_ALL_DATA_ATGC;
3687 else
3688 return CURSEG_COLD_DATA;
3689 }
3690 if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3691 return CURSEG_COLD_DATA;
3692
3693 type = __get_age_segment_type(inode, fio->folio->index);
3694 if (type != NO_CHECK_TYPE)
3695 return type;
3696
3697 if (file_is_hot(inode) ||
3698 is_inode_flag_set(inode, FI_HOT_DATA) ||
3699 f2fs_is_cow_file(inode) ||
3700 is_inode_flag_set(inode, FI_NEED_IPU))
3701 return CURSEG_HOT_DATA;
3702 return f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
3703 inode->i_write_hint);
3704 } else {
3705 if (IS_DNODE(fio->folio))
3706 return is_cold_node(fio->folio) ? CURSEG_WARM_NODE :
3707 CURSEG_HOT_NODE;
3708 return CURSEG_COLD_NODE;
3709 }
3710 }
3711
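/* Derive the HOT/WARM/COLD temperature from the log's segment type. */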
3712 enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi,
3713 enum log_type type)
3714 {
3715 struct curseg_info *curseg = CURSEG_I(sbi, type);
3716 enum temp_type temp = COLD;
3717
3718 switch (curseg->seg_type) {
3719 case CURSEG_HOT_NODE:
3720 case CURSEG_HOT_DATA:
3721 temp = HOT;
3722 break;
3723 case CURSEG_WARM_NODE:
3724 case CURSEG_WARM_DATA:
3725 temp = WARM;
3726 break;
3727 case CURSEG_COLD_NODE:
3728 case CURSEG_COLD_DATA:
3729 temp = COLD;
3730 break;
3731 default:
3732 f2fs_bug_on(sbi, 1);
3733 }
3734
3735 return temp;
3736 }
3737
3738 static int __get_segment_type(struct f2fs_io_info *fio)
3739 {
3740 enum log_type type = CURSEG_HOT_DATA;
3741
3742 switch (F2FS_OPTION(fio->sbi).active_logs) {
3743 case 2:
3744 type = __get_segment_type_2(fio);
3745 break;
3746 case 4:
3747 type = __get_segment_type_4(fio);
3748 break;
3749 case 6:
3750 type = __get_segment_type_6(fio);
3751 break;
3752 default:
3753 f2fs_bug_on(fio->sbi, true);
3754 }
3755
3756 fio->temp = f2fs_get_segment_temp(fio->sbi, type);
3757
3758 return type;
3759 }
3760
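/*
 * In FS_MODE_FRAGMENT_BLK mode, split LFS allocations into randomly
 * sized chunks separated by randomly sized holes to emulate a
 * fragmented filesystem.
 */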
3761 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
3762 struct curseg_info *seg)
3763 {
3764 	/* To allocate block chunks of varying sizes, use random numbers */
3765 if (--seg->fragment_remained_chunk > 0)
3766 return;
3767
3768 seg->fragment_remained_chunk =
3769 get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3770 seg->next_blkoff +=
3771 get_random_u32_inclusive(1, sbi->max_fragment_hole);
3772 }
3773
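/*
 * Allocate one block from the current log of @type and record @sum in
 * its summary block, moving the log on to a new segment once it fills
 * up. Takes curseg_lock (read), the curseg mutex and sentry_lock
 * (write). Returns 0 on success or a negative errno, e.g. -ENOSPC
 * when no current segment is left.
 */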
3774 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio,
3775 block_t old_blkaddr, block_t *new_blkaddr,
3776 struct f2fs_summary *sum, int type,
3777 struct f2fs_io_info *fio)
3778 {
3779 struct sit_info *sit_i = SIT_I(sbi);
3780 struct curseg_info *curseg = CURSEG_I(sbi, type);
3781 unsigned long long old_mtime;
3782 bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3783 struct seg_entry *se = NULL;
3784 bool segment_full = false;
3785 int ret = 0;
3786
3787 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3788
3789 mutex_lock(&curseg->curseg_mutex);
3790 down_write(&sit_i->sentry_lock);
3791
3792 if (curseg->segno == NULL_SEGNO) {
3793 ret = -ENOSPC;
3794 goto out_err;
3795 }
3796
3797 if (from_gc) {
3798 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3799 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3800 sanity_check_seg_type(sbi, se->type);
3801 f2fs_bug_on(sbi, IS_NODESEG(se->type));
3802 }
3803 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3804
3805 f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
3806
3807 f2fs_wait_discard_bio(sbi, *new_blkaddr);
3808
3809 sum_entries(curseg->sum_blk)[curseg->next_blkoff] = *sum;
3810 if (curseg->alloc_type == SSR) {
3811 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
3812 } else {
3813 curseg->next_blkoff++;
3814 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3815 f2fs_randomize_chunk(sbi, curseg);
3816 }
3817 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
3818 segment_full = true;
3819 stat_inc_block_count(sbi, curseg);
3820
3821 if (from_gc) {
3822 old_mtime = get_segment_mtime(sbi, old_blkaddr);
3823 } else {
3824 update_segment_mtime(sbi, old_blkaddr, 0);
3825 old_mtime = 0;
3826 }
3827 update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3828
3829 /*
3830 * SIT information should be updated before segment allocation,
3831 * since SSR needs latest valid block information.
3832 */
3833 update_sit_entry(sbi, *new_blkaddr, 1);
3834 update_sit_entry(sbi, old_blkaddr, -1);
3835
3836 /*
3837 * If the current segment is full, flush it out and replace it with a
3838 * new segment.
3839 */
3840 if (segment_full) {
3841 if (type == CURSEG_COLD_DATA_PINNED &&
3842 !((curseg->segno + 1) % sbi->segs_per_sec)) {
3843 write_sum_page(sbi, curseg->sum_blk, curseg->segno);
3844 reset_curseg_fields(curseg);
3845 goto skip_new_segment;
3846 }
3847
3848 if (from_gc) {
3849 ret = get_atssr_segment(sbi, type, se->type,
3850 AT_SSR, se->mtime);
3851 } else {
3852 if (need_new_seg(sbi, type))
3853 ret = new_curseg(sbi, type, false);
3854 else
3855 ret = change_curseg(sbi, type);
3856 stat_inc_seg_type(sbi, curseg);
3857 }
3858
3859 if (ret)
3860 goto out_err;
3861 }
3862
3863 skip_new_segment:
3864 	/*
3865 	 * The segment's dirty status should be updated after segment
3866 	 * allocation, so we only need to update it once, after the previous
3867 	 * segment has been closed.
3868 	 */
3869 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3870 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3871
3872 if (IS_DATASEG(curseg->seg_type)) {
3873 unsigned long long new_val;
3874
3875 new_val = atomic64_inc_return(&sbi->allocated_data_blocks);
3876 if (unlikely(new_val == ULLONG_MAX))
3877 atomic64_set(&sbi->allocated_data_blocks, 0);
3878 }
3879
3880 up_write(&sit_i->sentry_lock);
3881
3882 if (folio && IS_NODESEG(curseg->seg_type)) {
3883 fill_node_footer_blkaddr(folio, NEXT_FREE_BLKADDR(sbi, curseg));
3884
3885 f2fs_inode_chksum_set(sbi, folio);
3886 }
3887
3888 if (fio) {
3889 struct f2fs_bio_info *io;
3890
3891 INIT_LIST_HEAD(&fio->list);
3892 fio->in_list = 1;
3893 io = sbi->write_io[fio->type] + fio->temp;
3894 spin_lock(&io->io_lock);
3895 list_add_tail(&fio->list, &io->io_list);
3896 spin_unlock(&io->io_lock);
3897 }
3898
3899 mutex_unlock(&curseg->curseg_mutex);
3900 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3901 return 0;
3902
3903 out_err:
3904 *new_blkaddr = NULL_ADDR;
3905 up_write(&sit_i->sentry_lock);
3906 mutex_unlock(&curseg->curseg_mutex);
3907 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3908 return ret;
3909 }
3910
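/*
 * On multi-device volumes, mark every device covered by the range
 * [blkaddr, blkaddr + blkcnt) as dirty for both fsync and checkpoint.
 */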
3911 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3912 block_t blkaddr, unsigned int blkcnt)
3913 {
3914 if (!f2fs_is_multi_device(sbi))
3915 return;
3916
3917 while (1) {
3918 unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3919 unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3920
3921 /* update device state for fsync */
3922 f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3923
3924 /* update device state for checkpoint */
3925 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3926 spin_lock(&sbi->dev_lock);
3927 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3928 spin_unlock(&sbi->dev_lock);
3929 }
3930
3931 if (blkcnt <= blks)
3932 break;
3933 blkcnt -= blks;
3934 blkaddr += blks;
3935 }
3936 }
3937
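/*
 * Collapse a log type to its on-disk segment type; the pinned and
 * ATGC logs both allocate from cold data segments.
 */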
3938 static int log_type_to_seg_type(enum log_type type)
3939 {
3940 int seg_type = CURSEG_COLD_DATA;
3941
3942 switch (type) {
3943 case CURSEG_HOT_DATA:
3944 case CURSEG_WARM_DATA:
3945 case CURSEG_COLD_DATA:
3946 case CURSEG_HOT_NODE:
3947 case CURSEG_WARM_NODE:
3948 case CURSEG_COLD_NODE:
3949 seg_type = (int)type;
3950 break;
3951 case CURSEG_COLD_DATA_PINNED:
3952 case CURSEG_ALL_DATA_ATGC:
3953 seg_type = CURSEG_COLD_DATA;
3954 break;
3955 default:
3956 break;
3957 }
3958 return seg_type;
3959 }
3960
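/*
 * Allocate a new block for @fio and submit the write. In LFS mode,
 * io_order_lock keeps cold data block allocation and bio submission
 * in the same order.
 */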
3961 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3962 {
3963 struct folio *folio = fio->folio;
3964 enum log_type type = __get_segment_type(fio);
3965 int seg_type = log_type_to_seg_type(type);
3966 bool keep_order = (f2fs_lfs_mode(fio->sbi) &&
3967 seg_type == CURSEG_COLD_DATA);
3968 int err;
3969
3970 if (keep_order)
3971 f2fs_down_read(&fio->sbi->io_order_lock);
3972
3973 err = f2fs_allocate_data_block(fio->sbi, folio, fio->old_blkaddr,
3974 &fio->new_blkaddr, sum, type, fio);
3975 if (unlikely(err)) {
3976 f2fs_err_ratelimited(fio->sbi,
3977 "%s Failed to allocate data block, ino:%u, index:%lu, type:%d, old_blkaddr:0x%x, new_blkaddr:0x%x, err:%d",
3978 __func__, fio->ino, folio->index, type,
3979 fio->old_blkaddr, fio->new_blkaddr, err);
3980 if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
3981 fscrypt_finalize_bounce_page(&fio->encrypted_page);
3982 folio_end_writeback(folio);
3983 if (f2fs_in_warm_node_list(fio->sbi, folio))
3984 f2fs_del_fsync_node_entry(fio->sbi, folio);
3985 f2fs_bug_on(fio->sbi, !is_set_ckpt_flags(fio->sbi,
3986 CP_ERROR_FLAG));
3987 goto out;
3988 }
3989
3990 f2fs_bug_on(fio->sbi, !f2fs_is_valid_blkaddr_raw(fio->sbi,
3991 fio->new_blkaddr, DATA_GENERIC_ENHANCE));
3992
3993 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3994 f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1);
3995
3996 /* writeout dirty page into bdev */
3997 f2fs_submit_page_write(fio);
3998
3999 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
4000 out:
4001 if (keep_order)
4002 f2fs_up_read(&fio->sbi->io_order_lock);
4003 }
4004
4005 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
4006 enum iostat_type io_type)
4007 {
4008 struct f2fs_io_info fio = {
4009 .sbi = sbi,
4010 .type = META,
4011 .temp = HOT,
4012 .op = REQ_OP_WRITE,
4013 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
4014 .old_blkaddr = folio->index,
4015 .new_blkaddr = folio->index,
4016 .folio = folio,
4017 .encrypted_page = NULL,
4018 .in_list = 0,
4019 };
4020
4021 if (unlikely(folio->index >= MAIN_BLKADDR(sbi)))
4022 fio.op_flags &= ~REQ_META;
4023
4024 folio_start_writeback(folio);
4025 f2fs_submit_page_write(&fio);
4026
4027 stat_inc_meta_count(sbi, folio->index);
4028 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
4029 }
4030
4031 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
4032 {
4033 struct f2fs_summary sum;
4034
4035 set_summary(&sum, nid, 0, 0);
4036 do_write_page(&sum, fio);
4037
4038 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
4039 }
4040
4041 void f2fs_outplace_write_data(struct dnode_of_data *dn,
4042 struct f2fs_io_info *fio)
4043 {
4044 struct f2fs_sb_info *sbi = fio->sbi;
4045 struct f2fs_summary sum;
4046
4047 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
4048 if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
4049 f2fs_update_age_extent_cache(dn);
4050 set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
4051 do_write_page(&sum, fio);
4052 f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
4053
4054 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
4055 }
4056
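/*
 * In-place update (IPU): rewrite the block at its old address instead
 * of allocating a new one. Bails out if the target segment is not a
 * data segment or a checkpoint error is pending.
 */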
4057 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
4058 {
4059 int err;
4060 struct f2fs_sb_info *sbi = fio->sbi;
4061 unsigned int segno;
4062
4063 fio->new_blkaddr = fio->old_blkaddr;
4064 /* i/o temperature is needed for passing down write hints */
4065 __get_segment_type(fio);
4066
4067 segno = GET_SEGNO(sbi, fio->new_blkaddr);
4068
4069 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
4070 set_sbi_flag(sbi, SBI_NEED_FSCK);
4071 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
4072 __func__, segno);
4073 err = -EFSCORRUPTED;
4074 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4075 goto drop_bio;
4076 }
4077
4078 if (f2fs_cp_error(sbi)) {
4079 err = -EIO;
4080 goto drop_bio;
4081 }
4082
4083 if (fio->meta_gc)
4084 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
4085
4086 stat_inc_inplace_blocks(fio->sbi);
4087
4088 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
4089 err = f2fs_merge_page_bio(fio);
4090 else
4091 err = f2fs_submit_page_bio(fio);
4092 if (!err) {
4093 f2fs_update_device_state(fio->sbi, fio->ino,
4094 fio->new_blkaddr, 1);
4095 f2fs_update_iostat(fio->sbi, fio_inode(fio),
4096 fio->io_type, F2FS_BLKSIZE);
4097 }
4098
4099 return err;
4100 drop_bio:
4101 if (fio->bio && *(fio->bio)) {
4102 struct bio *bio = *(fio->bio);
4103
4104 bio->bi_status = BLK_STS_IOERR;
4105 bio_endio(bio);
4106 *(fio->bio) = NULL;
4107 }
4108 return err;
4109 }
4110
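/*
 * Find which current log, if any, owns @segno; returns NO_CHECK_TYPE
 * when no curseg matches.
 */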
4111 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
4112 unsigned int segno)
4113 {
4114 int i;
4115
4116 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
4117 if (CURSEG_I(sbi, i)->segno == segno)
4118 break;
4119 }
4120 return i;
4121 }
4122
4123 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
4124 block_t old_blkaddr, block_t new_blkaddr,
4125 bool recover_curseg, bool recover_newaddr,
4126 bool from_gc)
4127 {
4128 struct sit_info *sit_i = SIT_I(sbi);
4129 struct curseg_info *curseg;
4130 unsigned int segno, old_cursegno;
4131 struct seg_entry *se;
4132 int type;
4133 unsigned short old_blkoff;
4134 unsigned char old_alloc_type;
4135
4136 segno = GET_SEGNO(sbi, new_blkaddr);
4137 se = get_seg_entry(sbi, segno);
4138 type = se->type;
4139
4140 f2fs_down_write(&SM_I(sbi)->curseg_lock);
4141
4142 if (!recover_curseg) {
4143 /* for recovery flow */
4144 if (se->valid_blocks == 0 && !is_curseg(sbi, segno)) {
4145 if (old_blkaddr == NULL_ADDR)
4146 type = CURSEG_COLD_DATA;
4147 else
4148 type = CURSEG_WARM_DATA;
4149 }
4150 } else {
4151 if (is_curseg(sbi, segno)) {
4152 			/* se->type is volatile due to SSR allocation */
4153 type = __f2fs_get_curseg(sbi, segno);
4154 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
4155 } else {
4156 type = CURSEG_WARM_DATA;
4157 }
4158 }
4159
4160 curseg = CURSEG_I(sbi, type);
4161 f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type));
4162
4163 mutex_lock(&curseg->curseg_mutex);
4164 down_write(&sit_i->sentry_lock);
4165
4166 old_cursegno = curseg->segno;
4167 old_blkoff = curseg->next_blkoff;
4168 old_alloc_type = curseg->alloc_type;
4169
4170 /* change the current segment */
4171 if (segno != curseg->segno) {
4172 curseg->next_segno = segno;
4173 if (change_curseg(sbi, type))
4174 goto out_unlock;
4175 }
4176
4177 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
4178 sum_entries(curseg->sum_blk)[curseg->next_blkoff] = *sum;
4179
4180 if (!recover_curseg || recover_newaddr) {
4181 if (!from_gc)
4182 update_segment_mtime(sbi, new_blkaddr, 0);
4183 update_sit_entry(sbi, new_blkaddr, 1);
4184 }
4185 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
4186 f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
4187 if (!from_gc)
4188 update_segment_mtime(sbi, old_blkaddr, 0);
4189 update_sit_entry(sbi, old_blkaddr, -1);
4190 }
4191
4192 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
4193 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
4194
4195 locate_dirty_segment(sbi, old_cursegno);
4196
4197 if (recover_curseg) {
4198 if (old_cursegno != curseg->segno) {
4199 curseg->next_segno = old_cursegno;
4200 if (change_curseg(sbi, type))
4201 goto out_unlock;
4202 }
4203 curseg->next_blkoff = old_blkoff;
4204 curseg->alloc_type = old_alloc_type;
4205 }
4206
4207 out_unlock:
4208 up_write(&sit_i->sentry_lock);
4209 mutex_unlock(&curseg->curseg_mutex);
4210 f2fs_up_write(&SM_I(sbi)->curseg_lock);
4211 }
4212
4213 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
4214 block_t old_addr, block_t new_addr,
4215 unsigned char version, bool recover_curseg,
4216 bool recover_newaddr)
4217 {
4218 struct f2fs_summary sum;
4219
4220 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
4221
4222 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
4223 recover_curseg, recover_newaddr, false);
4224
4225 f2fs_update_data_blkaddr(dn, new_addr);
4226 }
4227
4228 void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
4229 bool ordered, bool locked)
4230 {
4231 if (folio_test_writeback(folio)) {
4232 struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
4233
4234 /* submit cached LFS IO */
4235 f2fs_submit_merged_write_folio(sbi, folio, type);
4236 /* submit cached IPU IO */
4237 f2fs_submit_merged_ipu_write(sbi, NULL, folio);
4238 if (ordered) {
4239 folio_wait_writeback(folio);
4240 f2fs_bug_on(sbi, locked && folio_test_writeback(folio));
4241 } else {
4242 folio_wait_stable(folio);
4243 }
4244 }
4245 }
4246
4247 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
4248 {
4249 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4250 struct folio *cfolio;
4251
4252 if (!f2fs_meta_inode_gc_required(inode))
4253 return;
4254
4255 if (!__is_valid_data_blkaddr(blkaddr))
4256 return;
4257
4258 cfolio = filemap_lock_folio(META_MAPPING(sbi), blkaddr);
4259 if (!IS_ERR(cfolio)) {
4260 f2fs_folio_wait_writeback(cfolio, DATA, true, true);
4261 f2fs_folio_put(cfolio, true);
4262 }
4263 }
4264
4265 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
4266 block_t len)
4267 {
4268 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4269 block_t i;
4270
4271 if (!f2fs_meta_inode_gc_required(inode))
4272 return;
4273
4274 for (i = 0; i < len; i++)
4275 f2fs_wait_on_block_writeback(inode, blkaddr + i);
4276
4277 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
4278 }
4279
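/*
 * A compacted summary block packs the NAT journal, the SIT journal and
 * the data summary entries back to back; walk that layout to restore
 * the three data cursegs.
 */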
4280 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
4281 {
4282 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4283 struct curseg_info *seg_i;
4284 unsigned char *kaddr;
4285 struct folio *folio;
4286 block_t start;
4287 int i, j, offset;
4288
4289 start = start_sum_block(sbi);
4290
4291 folio = f2fs_get_meta_folio(sbi, start++);
4292 if (IS_ERR(folio))
4293 return PTR_ERR(folio);
4294 kaddr = folio_address(folio);
4295
4296 /* Step 1: restore nat cache */
4297 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4298 memcpy(seg_i->journal, kaddr, sbi->sum_journal_size);
4299
4300 /* Step 2: restore sit cache */
4301 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4302 memcpy(seg_i->journal, kaddr + sbi->sum_journal_size, sbi->sum_journal_size);
4303 offset = 2 * sbi->sum_journal_size;
4304
4305 /* Step 3: restore summary entries */
4306 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4307 unsigned short blk_off;
4308 unsigned int segno;
4309
4310 seg_i = CURSEG_I(sbi, i);
4311 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
4312 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
4313 seg_i->next_segno = segno;
4314 reset_curseg(sbi, i, 0);
4315 seg_i->alloc_type = ckpt->alloc_type[i];
4316 seg_i->next_blkoff = blk_off;
4317
4318 if (seg_i->alloc_type == SSR)
4319 blk_off = BLKS_PER_SEG(sbi);
4320
4321 for (j = 0; j < blk_off; j++) {
4322 struct f2fs_summary *s;
4323
4324 s = (struct f2fs_summary *)(kaddr + offset);
4325 sum_entries(seg_i->sum_blk)[j] = *s;
4326 offset += SUMMARY_SIZE;
4327 if (offset + SUMMARY_SIZE <= sbi->blocksize -
4328 SUM_FOOTER_SIZE)
4329 continue;
4330
4331 f2fs_folio_put(folio, true);
4332
4333 folio = f2fs_get_meta_folio(sbi, start++);
4334 if (IS_ERR(folio))
4335 return PTR_ERR(folio);
4336 kaddr = folio_address(folio);
4337 offset = 0;
4338 }
4339 }
4340 f2fs_folio_put(folio, true);
4341 return 0;
4342 }
4343
4344 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
4345 {
4346 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4347 struct f2fs_summary_block *sum;
4348 struct curseg_info *curseg;
4349 struct folio *new;
4350 unsigned short blk_off;
4351 unsigned int segno = 0;
4352 block_t blk_addr = 0;
4353 int err = 0;
4354
4355 /* get segment number and block addr */
4356 if (IS_DATASEG(type)) {
4357 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
4358 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
4359 CURSEG_HOT_DATA]);
4360 if (__exist_node_summaries(sbi))
4361 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
4362 else
4363 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
4364 } else {
4365 segno = le32_to_cpu(ckpt->cur_node_segno[type -
4366 CURSEG_HOT_NODE]);
4367 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
4368 CURSEG_HOT_NODE]);
4369 if (__exist_node_summaries(sbi))
4370 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
4371 type - CURSEG_HOT_NODE);
4372 else
4373 blk_addr = GET_SUM_BLOCK(sbi, segno);
4374 }
4375
4376 new = f2fs_get_meta_folio(sbi, blk_addr);
4377 if (IS_ERR(new))
4378 return PTR_ERR(new);
4379 sum = folio_address(new);
4380
4381 if (IS_NODESEG(type)) {
4382 if (__exist_node_summaries(sbi)) {
4383 struct f2fs_summary *ns = sum_entries(sum);
4384 int i;
4385
4386 for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
4387 ns->version = 0;
4388 ns->ofs_in_node = 0;
4389 }
4390 } else {
4391 err = f2fs_restore_node_summary(sbi, segno, sum);
4392 if (err)
4393 goto out;
4394 }
4395 }
4396
4397 	/* set the uncompleted segment as curseg */
4398 curseg = CURSEG_I(sbi, type);
4399 mutex_lock(&curseg->curseg_mutex);
4400
4401 /* update journal info */
4402 down_write(&curseg->journal_rwsem);
4403 memcpy(curseg->journal, sum_journal(sbi, sum), sbi->sum_journal_size);
4404 up_write(&curseg->journal_rwsem);
4405
4406 memcpy(sum_entries(curseg->sum_blk), sum_entries(sum),
4407 sbi->sum_entry_size);
4408 memcpy(sum_footer(sbi, curseg->sum_blk), sum_footer(sbi, sum),
4409 SUM_FOOTER_SIZE);
4410 curseg->next_segno = segno;
4411 reset_curseg(sbi, type, 0);
4412 curseg->alloc_type = ckpt->alloc_type[type];
4413 curseg->next_blkoff = blk_off;
4414 mutex_unlock(&curseg->curseg_mutex);
4415 out:
4416 f2fs_folio_put(new, true);
4417 return err;
4418 }
4419
4420 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
4421 {
4422 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
4423 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
4424 int type = CURSEG_HOT_DATA;
4425 int err;
4426
4427 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
4428 int npages = f2fs_npages_for_summary_flush(sbi, true);
4429
4430 if (npages >= 2)
4431 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
4432 META_CP, true);
4433
4434 /* restore for compacted data summary */
4435 err = read_compacted_summaries(sbi);
4436 if (err)
4437 return err;
4438 type = CURSEG_HOT_NODE;
4439 }
4440
4441 if (__exist_node_summaries(sbi))
4442 f2fs_ra_meta_pages(sbi,
4443 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4444 NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
4445
4446 for (; type <= CURSEG_COLD_NODE; type++) {
4447 err = read_normal_summaries(sbi, type);
4448 if (err)
4449 return err;
4450 }
4451
4452 /* sanity check for summary blocks */
4453 if (nats_in_cursum(nat_j) > sbi->nat_journal_entries ||
4454 sits_in_cursum(sit_j) > sbi->sit_journal_entries) {
4455 f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4456 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
4457 return -EINVAL;
4458 }
4459
4460 return 0;
4461 }
4462
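/*
 * Mirror of read_compacted_summaries(): pack the NAT journal, the SIT
 * journal and the valid data summary entries into as few meta folios
 * as possible, starting at @blkaddr.
 */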
4463 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4464 {
4465 struct folio *folio;
4466 unsigned char *kaddr;
4467 struct f2fs_summary *summary;
4468 struct curseg_info *seg_i;
4469 int written_size = 0;
4470 int i, j;
4471
4472 folio = f2fs_grab_meta_folio(sbi, blkaddr++);
4473 kaddr = folio_address(folio);
4474 memset(kaddr, 0, PAGE_SIZE);
4475
4476 /* Step 1: write nat cache */
4477 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4478 memcpy(kaddr, seg_i->journal, sbi->sum_journal_size);
4479 written_size += sbi->sum_journal_size;
4480
4481 /* Step 2: write sit cache */
4482 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4483 memcpy(kaddr + written_size, seg_i->journal, sbi->sum_journal_size);
4484 written_size += sbi->sum_journal_size;
4485
4486 /* Step 3: write summary entries */
4487 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4488 seg_i = CURSEG_I(sbi, i);
4489 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
4490 if (!folio) {
4491 folio = f2fs_grab_meta_folio(sbi, blkaddr++);
4492 kaddr = folio_address(folio);
4493 memset(kaddr, 0, PAGE_SIZE);
4494 written_size = 0;
4495 }
4496 summary = (struct f2fs_summary *)(kaddr + written_size);
4497 *summary = sum_entries(seg_i->sum_blk)[j];
4498 written_size += SUMMARY_SIZE;
4499
4500 if (written_size + SUMMARY_SIZE <= sbi->blocksize -
4501 SUM_FOOTER_SIZE)
4502 continue;
4503
4504 folio_mark_dirty(folio);
4505 f2fs_folio_put(folio, true);
4506 folio = NULL;
4507 }
4508 }
4509 if (folio) {
4510 folio_mark_dirty(folio);
4511 f2fs_folio_put(folio, true);
4512 }
4513 }
4514
4515 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4516 block_t blkaddr, int type)
4517 {
4518 int i, end;
4519
4520 if (IS_DATASEG(type))
4521 end = type + NR_CURSEG_DATA_TYPE;
4522 else
4523 end = type + NR_CURSEG_NODE_TYPE;
4524
4525 for (i = type; i < end; i++)
4526 write_current_sum_page(sbi, i, blkaddr + (i - type));
4527 }
4528
4529 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4530 {
4531 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4532 write_compacted_summaries(sbi, start_blk);
4533 else
4534 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4535 }
4536
4537 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4538 {
4539 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4540 }
4541
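/*
 * Look up @val (a nid or a segno) in the in-journal NAT/SIT entries.
 * Returns its index, a newly reserved slot if @alloc is set and space
 * remains, or -1.
 */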
4542 int f2fs_lookup_journal_in_cursum(struct f2fs_sb_info *sbi,
4543 struct f2fs_journal *journal, int type,
4544 unsigned int val, int alloc)
4545 {
4546 int i;
4547
4548 if (type == NAT_JOURNAL) {
4549 for (i = 0; i < nats_in_cursum(journal); i++) {
4550 if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4551 return i;
4552 }
4553 if (alloc && __has_cursum_space(sbi, journal, 1, NAT_JOURNAL))
4554 return update_nats_in_cursum(journal, 1);
4555 } else if (type == SIT_JOURNAL) {
4556 for (i = 0; i < sits_in_cursum(journal); i++)
4557 if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4558 return i;
4559 if (alloc && __has_cursum_space(sbi, journal, 1, SIT_JOURNAL))
4560 return update_sits_in_cursum(journal, 1);
4561 }
4562 return -1;
4563 }
4564
4565 static struct folio *get_current_sit_folio(struct f2fs_sb_info *sbi,
4566 unsigned int segno)
4567 {
4568 return f2fs_get_meta_folio(sbi, current_sit_addr(sbi, segno));
4569 }
4570
4571 static struct folio *get_next_sit_folio(struct f2fs_sb_info *sbi,
4572 unsigned int start)
4573 {
4574 struct sit_info *sit_i = SIT_I(sbi);
4575 struct folio *folio;
4576 pgoff_t src_off, dst_off;
4577
4578 src_off = current_sit_addr(sbi, start);
4579 dst_off = next_sit_addr(sbi, src_off);
4580
4581 folio = f2fs_grab_meta_folio(sbi, dst_off);
4582 seg_info_to_sit_folio(sbi, folio, start);
4583
4584 folio_mark_dirty(folio);
4585 set_to_next_sit(sit_i, start);
4586
4587 return folio;
4588 }
4589
4590 static struct sit_entry_set *grab_sit_entry_set(void)
4591 {
4592 struct sit_entry_set *ses =
4593 f2fs_kmem_cache_alloc(sit_entry_set_slab,
4594 GFP_NOFS, true, NULL);
4595
4596 ses->entry_cnt = 0;
4597 INIT_LIST_HEAD(&ses->set_list);
4598 return ses;
4599 }
4600
4601 static void release_sit_entry_set(struct sit_entry_set *ses)
4602 {
4603 list_del(&ses->set_list);
4604 kmem_cache_free(sit_entry_set_slab, ses);
4605 }
4606
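/*
 * Keep the set list sorted by entry_cnt in ascending order, so sets
 * with fewer dirty entries are flushed to the journal first.
 */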
4607 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4608 struct list_head *head)
4609 {
4610 struct sit_entry_set *next = ses;
4611
4612 if (list_is_last(&ses->set_list, head))
4613 return;
4614
4615 list_for_each_entry_continue(next, head, set_list)
4616 if (ses->entry_cnt <= next->entry_cnt) {
4617 list_move_tail(&ses->set_list, &next->set_list);
4618 return;
4619 }
4620
4621 list_move_tail(&ses->set_list, head);
4622 }
4623
4624 static void add_sit_entry(unsigned int segno, struct list_head *head)
4625 {
4626 struct sit_entry_set *ses;
4627 unsigned int start_segno = START_SEGNO(segno);
4628
4629 list_for_each_entry(ses, head, set_list) {
4630 if (ses->start_segno == start_segno) {
4631 ses->entry_cnt++;
4632 adjust_sit_entry_set(ses, head);
4633 return;
4634 }
4635 }
4636
4637 ses = grab_sit_entry_set();
4638
4639 ses->start_segno = start_segno;
4640 ses->entry_cnt++;
4641 list_add(&ses->set_list, head);
4642 }
4643
4644 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4645 {
4646 struct f2fs_sm_info *sm_info = SM_I(sbi);
4647 struct list_head *set_list = &sm_info->sit_entry_set;
4648 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4649 unsigned int segno;
4650
4651 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4652 add_sit_entry(segno, set_list);
4653 }
4654
4655 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4656 {
4657 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4658 struct f2fs_journal *journal = curseg->journal;
4659 int i;
4660
4661 down_write(&curseg->journal_rwsem);
4662 for (i = 0; i < sits_in_cursum(journal); i++) {
4663 unsigned int segno;
4664 bool dirtied;
4665
4666 segno = le32_to_cpu(segno_in_journal(journal, i));
4667 dirtied = __mark_sit_entry_dirty(sbi, segno);
4668
4669 if (!dirtied)
4670 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4671 }
4672 update_sits_in_cursum(journal, -i);
4673 up_write(&curseg->journal_rwsem);
4674 }
4675
4676 /*
4677 * CP calls this function, which flushes SIT entries including sit_journal,
4678 * and moves prefree segs to free segs.
4679 */
4680 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4681 {
4682 struct sit_info *sit_i = SIT_I(sbi);
4683 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4684 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4685 struct f2fs_journal *journal = curseg->journal;
4686 struct sit_entry_set *ses, *tmp;
4687 struct list_head *head = &SM_I(sbi)->sit_entry_set;
4688 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4689 struct seg_entry *se;
4690
4691 down_write(&sit_i->sentry_lock);
4692
4693 if (!sit_i->dirty_sentries)
4694 goto out;
4695
4696 	/*
4697 	 * temporarily add and account the sit entries from the dirty bitmap
4698 	 * in the sit entry sets
4699 	 */
4700 add_sits_in_set(sbi);
4701
4702 	/*
4703 	 * if there is not enough space in the journal to store the dirty sit
4704 	 * entries, remove all entries from the journal, then add and account
4705 	 * them in the sit entry sets instead.
4706 	 */
4707 if (!__has_cursum_space(sbi, journal,
4708 sit_i->dirty_sentries, SIT_JOURNAL) || !to_journal)
4709 remove_sits_in_journal(sbi);
4710
4711 /*
4712 * there are two steps to flush sit entries:
4713 * #1, flush sit entries to journal in current cold data summary block.
4714 * #2, flush sit entries to sit page.
4715 */
4716 list_for_each_entry_safe(ses, tmp, head, set_list) {
4717 struct folio *folio = NULL;
4718 struct f2fs_sit_block *raw_sit = NULL;
4719 unsigned int start_segno = ses->start_segno;
4720 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4721 (unsigned long)MAIN_SEGS(sbi));
4722 unsigned int segno = start_segno;
4723
4724 if (to_journal &&
4725 !__has_cursum_space(sbi, journal, ses->entry_cnt,
4726 SIT_JOURNAL))
4727 to_journal = false;
4728
4729 if (to_journal) {
4730 down_write(&curseg->journal_rwsem);
4731 } else {
4732 folio = get_next_sit_folio(sbi, start_segno);
4733 raw_sit = folio_address(folio);
4734 }
4735
4736 /* flush dirty sit entries in region of current sit set */
4737 for_each_set_bit_from(segno, bitmap, end) {
4738 int offset, sit_offset;
4739
4740 se = get_seg_entry(sbi, segno);
4741 #ifdef CONFIG_F2FS_CHECK_FS
4742 if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4743 SIT_VBLOCK_MAP_SIZE))
4744 f2fs_bug_on(sbi, 1);
4745 #endif
4746
4747 /* add discard candidates */
4748 if (!(cpc->reason & CP_DISCARD)) {
4749 cpc->trim_start = segno;
4750 add_discard_addrs(sbi, cpc, false);
4751 }
4752
4753 if (to_journal) {
4754 offset = f2fs_lookup_journal_in_cursum(sbi, journal,
4755 SIT_JOURNAL, segno, 1);
4756 f2fs_bug_on(sbi, offset < 0);
4757 segno_in_journal(journal, offset) =
4758 cpu_to_le32(segno);
4759 seg_info_to_raw_sit(se,
4760 &sit_in_journal(journal, offset));
4761 check_block_count(sbi, segno,
4762 &sit_in_journal(journal, offset));
4763 } else {
4764 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4765 seg_info_to_raw_sit(se,
4766 &raw_sit->entries[sit_offset]);
4767 check_block_count(sbi, segno,
4768 &raw_sit->entries[sit_offset]);
4769 }
4770
4771 /* update ckpt_valid_block */
4772 if (__is_large_section(sbi)) {
4773 set_ckpt_valid_blocks(sbi, segno);
4774 sanity_check_valid_blocks(sbi, segno);
4775 }
4776
4777 __clear_bit(segno, bitmap);
4778 sit_i->dirty_sentries--;
4779 ses->entry_cnt--;
4780 }
4781
4782 if (to_journal)
4783 up_write(&curseg->journal_rwsem);
4784 else
4785 f2fs_folio_put(folio, true);
4786
4787 f2fs_bug_on(sbi, ses->entry_cnt);
4788 release_sit_entry_set(ses);
4789 }
4790
4791 f2fs_bug_on(sbi, !list_empty(head));
4792 f2fs_bug_on(sbi, sit_i->dirty_sentries);
4793 out:
4794 if (cpc->reason & CP_DISCARD) {
4795 __u64 trim_start = cpc->trim_start;
4796
4797 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4798 add_discard_addrs(sbi, cpc, false);
4799
4800 cpc->trim_start = trim_start;
4801 }
4802 up_write(&sit_i->sentry_lock);
4803
4804 set_prefree_as_free_segments(sbi);
4805 }
4806
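/*
 * Build the in-memory SIT: one seg_entry per main segment, with the
 * per-segment validity bitmaps carved out of a single allocation, plus
 * a copy of the SIT bitmap taken from the checkpoint pack.
 */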
4807 static int build_sit_info(struct f2fs_sb_info *sbi)
4808 {
4809 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4810 struct sit_info *sit_i;
4811 unsigned int sit_segs, start;
4812 char *src_bitmap, *bitmap;
4813 unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4814 unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4815
4816 /* allocate memory for SIT information */
4817 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4818 if (!sit_i)
4819 return -ENOMEM;
4820
4821 SM_I(sbi)->sit_info = sit_i;
4822
4823 sit_i->sentries =
4824 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4825 MAIN_SEGS(sbi)),
4826 GFP_KERNEL);
4827 if (!sit_i->sentries)
4828 return -ENOMEM;
4829
4830 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4831 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4832 GFP_KERNEL);
4833 if (!sit_i->dirty_sentries_bitmap)
4834 return -ENOMEM;
4835
4836 #ifdef CONFIG_F2FS_CHECK_FS
4837 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4838 #else
4839 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4840 #endif
4841 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4842 if (!sit_i->bitmap)
4843 return -ENOMEM;
4844
4845 bitmap = sit_i->bitmap;
4846
4847 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4848 sit_i->sentries[start].cur_valid_map = bitmap;
4849 bitmap += SIT_VBLOCK_MAP_SIZE;
4850
4851 sit_i->sentries[start].ckpt_valid_map = bitmap;
4852 bitmap += SIT_VBLOCK_MAP_SIZE;
4853
4854 #ifdef CONFIG_F2FS_CHECK_FS
4855 sit_i->sentries[start].cur_valid_map_mir = bitmap;
4856 bitmap += SIT_VBLOCK_MAP_SIZE;
4857 #endif
4858
4859 if (discard_map) {
4860 sit_i->sentries[start].discard_map = bitmap;
4861 bitmap += SIT_VBLOCK_MAP_SIZE;
4862 }
4863 }
4864
4865 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4866 if (!sit_i->tmp_map)
4867 return -ENOMEM;
4868
4869 if (__is_large_section(sbi)) {
4870 sit_i->sec_entries =
4871 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4872 MAIN_SECS(sbi)),
4873 GFP_KERNEL);
4874 if (!sit_i->sec_entries)
4875 return -ENOMEM;
4876 }
4877
4878 	/* get information related to SIT */
4879 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4880
4881 	/* setup SIT bitmap from checkpoint pack */
4882 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4883 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4884
4885 sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4886 if (!sit_i->sit_bitmap)
4887 return -ENOMEM;
4888
4889 #ifdef CONFIG_F2FS_CHECK_FS
4890 sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4891 sit_bitmap_size, GFP_KERNEL);
4892 if (!sit_i->sit_bitmap_mir)
4893 return -ENOMEM;
4894
4895 sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4896 main_bitmap_size, GFP_KERNEL);
4897 if (!sit_i->invalid_segmap)
4898 return -ENOMEM;
4899 #endif
4900
4901 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4902 sit_i->sit_blocks = SEGS_TO_BLKS(sbi, sit_segs);
4903 sit_i->written_valid_blocks = 0;
4904 sit_i->bitmap_size = sit_bitmap_size;
4905 sit_i->dirty_sentries = 0;
4906 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4907 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4908 sit_i->mounted_time = ktime_get_boottime_seconds();
4909 init_rwsem(&sit_i->sentry_lock);
4910 return 0;
4911 }
4912
4913 static int build_free_segmap(struct f2fs_sb_info *sbi)
4914 {
4915 struct free_segmap_info *free_i;
4916 unsigned int bitmap_size, sec_bitmap_size;
4917
4918 /* allocate memory for free segmap information */
4919 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4920 if (!free_i)
4921 return -ENOMEM;
4922
4923 SM_I(sbi)->free_info = free_i;
4924
4925 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4926 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4927 if (!free_i->free_segmap)
4928 return -ENOMEM;
4929
4930 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4931 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4932 if (!free_i->free_secmap)
4933 return -ENOMEM;
4934
4935 /* set all segments as dirty temporarily */
4936 memset(free_i->free_segmap, 0xff, bitmap_size);
4937 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4938
4939 /* init free segmap information */
4940 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4941 free_i->free_segments = 0;
4942 free_i->free_sections = 0;
4943 spin_lock_init(&free_i->segmap_lock);
4944 return 0;
4945 }
4946
4947 static int build_curseg(struct f2fs_sb_info *sbi)
4948 {
4949 struct curseg_info *array;
4950 int i;
4951
4952 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4953 sizeof(*array)), GFP_KERNEL);
4954 if (!array)
4955 return -ENOMEM;
4956
4957 SM_I(sbi)->curseg_array = array;
4958
4959 for (i = 0; i < NO_CHECK_TYPE; i++) {
4960 mutex_init(&array[i].curseg_mutex);
4961 array[i].sum_blk = f2fs_kzalloc(sbi, sbi->sum_blocksize,
4962 GFP_KERNEL);
4963 if (!array[i].sum_blk)
4964 return -ENOMEM;
4965 init_rwsem(&array[i].journal_rwsem);
4966 array[i].journal = f2fs_kzalloc(sbi,
4967 sbi->sum_journal_size, GFP_KERNEL);
4968 if (!array[i].journal)
4969 return -ENOMEM;
4970 array[i].seg_type = log_type_to_seg_type(i);
4971 reset_curseg_fields(&array[i]);
4972 }
4973 return restore_curseg_summaries(sbi);
4974 }
4975
4976 static int build_sit_entries(struct f2fs_sb_info *sbi)
4977 {
4978 struct sit_info *sit_i = SIT_I(sbi);
4979 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4980 struct f2fs_journal *journal = curseg->journal;
4981 struct seg_entry *se;
4982 struct f2fs_sit_entry sit;
4983 int sit_blk_cnt = SIT_BLK_CNT(sbi);
4984 unsigned int i, start, end;
4985 unsigned int readed, start_blk = 0;
4986 int err = 0;
4987 block_t sit_valid_blocks[2] = {0, 0};
4988
4989 do {
4990 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4991 META_SIT, true);
4992
4993 start = start_blk * sit_i->sents_per_block;
4994 end = (start_blk + readed) * sit_i->sents_per_block;
4995
4996 for (; start < end && start < MAIN_SEGS(sbi); start++) {
4997 struct f2fs_sit_block *sit_blk;
4998 struct folio *folio;
4999
5000 se = &sit_i->sentries[start];
5001 folio = get_current_sit_folio(sbi, start);
5002 if (IS_ERR(folio))
5003 return PTR_ERR(folio);
5004 sit_blk = folio_address(folio);
5005 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
5006 f2fs_folio_put(folio, true);
5007
5008 err = check_block_count(sbi, start, &sit);
5009 if (err)
5010 return err;
5011 seg_info_from_raw_sit(se, &sit);
5012
5013 if (se->type >= NR_PERSISTENT_LOG) {
5014 f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
5015 se->type, start);
5016 f2fs_handle_error(sbi,
5017 ERROR_INCONSISTENT_SUM_TYPE);
5018 return -EFSCORRUPTED;
5019 }
5020
5021 sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
5022
5023 if (!f2fs_block_unit_discard(sbi))
5024 goto init_discard_map_done;
5025
5026 /* build discard map only one time */
5027 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
5028 memset(se->discard_map, 0xff,
5029 SIT_VBLOCK_MAP_SIZE);
5030 goto init_discard_map_done;
5031 }
5032 memcpy(se->discard_map, se->cur_valid_map,
5033 SIT_VBLOCK_MAP_SIZE);
5034 sbi->discard_blks += BLKS_PER_SEG(sbi) -
5035 se->valid_blocks;
5036 init_discard_map_done:
5037 if (__is_large_section(sbi))
5038 get_sec_entry(sbi, start)->valid_blocks +=
5039 se->valid_blocks;
5040 }
5041 start_blk += readed;
5042 } while (start_blk < sit_blk_cnt);
5043
5044 down_read(&curseg->journal_rwsem);
5045 for (i = 0; i < sits_in_cursum(journal); i++) {
5046 unsigned int old_valid_blocks;
5047
5048 start = le32_to_cpu(segno_in_journal(journal, i));
5049 if (start >= MAIN_SEGS(sbi)) {
5050 f2fs_err(sbi, "Wrong journal entry on segno %u",
5051 start);
5052 err = -EFSCORRUPTED;
5053 f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
5054 break;
5055 }
5056
5057 se = &sit_i->sentries[start];
5058 sit = sit_in_journal(journal, i);
5059
5060 old_valid_blocks = se->valid_blocks;
5061
5062 sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
5063
5064 err = check_block_count(sbi, start, &sit);
5065 if (err)
5066 break;
5067 seg_info_from_raw_sit(se, &sit);
5068
5069 if (se->type >= NR_PERSISTENT_LOG) {
5070 f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
5071 se->type, start);
5072 err = -EFSCORRUPTED;
5073 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
5074 break;
5075 }
5076
5077 sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
5078
5079 if (f2fs_block_unit_discard(sbi)) {
5080 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
5081 memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
5082 } else {
5083 memcpy(se->discard_map, se->cur_valid_map,
5084 SIT_VBLOCK_MAP_SIZE);
5085 sbi->discard_blks += old_valid_blocks;
5086 sbi->discard_blks -= se->valid_blocks;
5087 }
5088 }
5089
5090 if (__is_large_section(sbi)) {
5091 get_sec_entry(sbi, start)->valid_blocks +=
5092 se->valid_blocks;
5093 get_sec_entry(sbi, start)->valid_blocks -=
5094 old_valid_blocks;
5095 }
5096 }
5097 up_read(&curseg->journal_rwsem);
5098
5099 /* update ckpt_valid_block */
5100 if (__is_large_section(sbi)) {
5101 unsigned int segno;
5102
5103 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5104 set_ckpt_valid_blocks(sbi, segno);
5105 sanity_check_valid_blocks(sbi, segno);
5106 }
5107 }
5108
5109 if (err)
5110 return err;
5111
5112 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
5113 f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
5114 sit_valid_blocks[NODE], valid_node_count(sbi));
5115 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
5116 return -EFSCORRUPTED;
5117 }
5118
5119 if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
5120 valid_user_blocks(sbi)) {
5121 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
5122 sit_valid_blocks[DATA], sit_valid_blocks[NODE],
5123 valid_user_blocks(sbi));
5124 f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
5125 return -EFSCORRUPTED;
5126 }
5127
5128 return 0;
5129 }
5130
5131 static void init_free_segmap(struct f2fs_sb_info *sbi)
5132 {
5133 unsigned int start;
5134 int type;
5135 struct seg_entry *sentry;
5136
5137 for (start = 0; start < MAIN_SEGS(sbi); start++) {
5138 if (f2fs_usable_blks_in_seg(sbi, start) == 0)
5139 continue;
5140 sentry = get_seg_entry(sbi, start);
5141 if (!sentry->valid_blocks)
5142 __set_free(sbi, start);
5143 else
5144 SIT_I(sbi)->written_valid_blocks +=
5145 sentry->valid_blocks;
5146 }
5147
5148 	/* mark the current segments as in use */
5149 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
5150 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
5151
5152 __set_test_and_inuse(sbi, curseg_t->segno);
5153 }
5154 }
5155
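/*
 * Scan the in-use segments and mark as dirty those that are neither
 * free nor completely full; with large sections, repeat the same at
 * section granularity.
 */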
5156 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
5157 {
5158 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5159 struct free_segmap_info *free_i = FREE_I(sbi);
5160 unsigned int segno = 0, offset = 0, secno;
5161 block_t valid_blocks, usable_blks_in_seg;
5162
5163 while (1) {
5164 /* find dirty segment based on free segmap */
5165 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
5166 if (segno >= MAIN_SEGS(sbi))
5167 break;
5168 offset = segno + 1;
5169 valid_blocks = get_valid_blocks(sbi, segno, false);
5170 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
5171 if (valid_blocks == usable_blks_in_seg || !valid_blocks)
5172 continue;
5173 if (valid_blocks > usable_blks_in_seg) {
5174 f2fs_bug_on(sbi, 1);
5175 continue;
5176 }
5177 mutex_lock(&dirty_i->seglist_lock);
5178 __locate_dirty_segment(sbi, segno, DIRTY);
5179 mutex_unlock(&dirty_i->seglist_lock);
5180 }
5181
5182 if (!__is_large_section(sbi))
5183 return;
5184
5185 mutex_lock(&dirty_i->seglist_lock);
5186 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5187 valid_blocks = get_valid_blocks(sbi, segno, true);
5188 secno = GET_SEC_FROM_SEG(sbi, segno);
5189
5190 if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
5191 continue;
5192 if (is_cursec(sbi, secno))
5193 continue;
5194 set_bit(secno, dirty_i->dirty_secmap);
5195 }
5196 mutex_unlock(&dirty_i->seglist_lock);
5197 }
5198
5199 static int init_victim_secmap(struct f2fs_sb_info *sbi)
5200 {
5201 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5202 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
5203
5204 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
5205 if (!dirty_i->victim_secmap)
5206 return -ENOMEM;
5207
5208 dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
5209 if (!dirty_i->pinned_secmap)
5210 return -ENOMEM;
5211
5212 dirty_i->pinned_secmap_cnt = 0;
5213 dirty_i->enable_pin_section = true;
5214 return 0;
5215 }
5216
5217 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
5218 {
5219 struct dirty_seglist_info *dirty_i;
5220 unsigned int bitmap_size, i;
5221
5222 /* allocate memory for dirty segments list information */
5223 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
5224 GFP_KERNEL);
5225 if (!dirty_i)
5226 return -ENOMEM;
5227
5228 SM_I(sbi)->dirty_info = dirty_i;
5229 mutex_init(&dirty_i->seglist_lock);
5230
5231 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
5232
5233 for (i = 0; i < NR_DIRTY_TYPE; i++) {
5234 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
5235 GFP_KERNEL);
5236 if (!dirty_i->dirty_segmap[i])
5237 return -ENOMEM;
5238 }
5239
5240 if (__is_large_section(sbi)) {
5241 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
5242 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
5243 bitmap_size, GFP_KERNEL);
5244 if (!dirty_i->dirty_secmap)
5245 return -ENOMEM;
5246 }
5247
5248 init_dirty_segmap(sbi);
5249 return init_victim_secmap(sbi);
5250 }
5251
5252 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
5253 {
5254 int i;
5255
5256 /*
5257 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
5258 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
5259 */
5260 for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5261 struct curseg_info *curseg = CURSEG_I(sbi, i);
5262 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
5263 unsigned int blkofs = curseg->next_blkoff;
5264
5265 if (f2fs_sb_has_readonly(sbi) &&
5266 i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
5267 continue;
5268
5269 sanity_check_seg_type(sbi, curseg->seg_type);
5270
5271 if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
5272 f2fs_err(sbi,
5273 "Current segment has invalid alloc_type:%d",
5274 curseg->alloc_type);
5275 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
5276 return -EFSCORRUPTED;
5277 }
5278
5279 if (f2fs_test_bit(blkofs, se->cur_valid_map))
5280 goto out;
5281
5282 if (curseg->alloc_type == SSR)
5283 continue;
5284
5285 for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
5286 if (!f2fs_test_bit(blkofs, se->cur_valid_map))
5287 continue;
5288 out:
5289 f2fs_err(sbi,
5290 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
5291 i, curseg->segno, curseg->alloc_type,
5292 curseg->next_blkoff, blkofs);
5293 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
5294 return -EFSCORRUPTED;
5295 }
5296 }
5297 return 0;
5298 }
5299
5300 #ifdef CONFIG_BLK_DEV_ZONED
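/*
 * Check one sequential zone: its condition must be consistent with the
 * valid block count f2fs tracks for it. Zones that should be empty are
 * reset; zones whose write pointer disagrees with their valid blocks
 * are finished (or zero-filled) so they are not written again until
 * discarded.
 */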
5301 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
5302 struct f2fs_dev_info *fdev,
5303 struct blk_zone *zone)
5304 {
5305 unsigned int zone_segno;
5306 block_t zone_block, valid_block_cnt;
5307 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5308 int ret;
5309 unsigned int nofs_flags;
5310
5311 if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5312 return 0;
5313
5314 zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
5315 zone_segno = GET_SEGNO(sbi, zone_block);
5316
5317 /*
5318 * Skip check of zones cursegs point to, since
5319 * fix_curseg_write_pointer() checks them.
5320 */
5321 if (zone_segno >= MAIN_SEGS(sbi))
5322 return 0;
5323
5324 	/*
5325 	 * Get the number of valid blocks in the zone.
5326 	 */
5327 valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
5328 if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
5329 f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
5330 zone_segno, valid_block_cnt,
5331 blk_zone_cond_str(zone->cond));
5332 return 0;
5333 }
5334
5335 if ((!valid_block_cnt && zone->cond == BLK_ZONE_COND_EMPTY) ||
5336 (valid_block_cnt && zone->cond == BLK_ZONE_COND_FULL))
5337 return 0;
5338
5339 if (!valid_block_cnt) {
5340 f2fs_notice(sbi, "Zone without valid block has non-zero write "
5341 "pointer. Reset the write pointer: cond[%s]",
5342 blk_zone_cond_str(zone->cond));
5343 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
5344 zone->len >> log_sectors_per_block);
5345 if (ret)
5346 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5347 fdev->path, ret);
5348 return ret;
5349 }
5350
5351 	/*
5352 	 * If there are valid blocks and the write pointer doesn't match
5353 	 * them, we need to report the inconsistency and fill the zone
5354 	 * to the end to close it. This inconsistency does not cause
5355 	 * write errors because the zone will not be selected for writes
5356 	 * until it gets discarded.
5357 	 */
5358 f2fs_notice(sbi, "Valid blocks are not aligned with write "
5359 "pointer: valid block[0x%x,0x%x] cond[%s]",
5360 zone_segno, valid_block_cnt, blk_zone_cond_str(zone->cond));
5361
5362 nofs_flags = memalloc_nofs_save();
5363 ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
5364 zone->start, zone->len);
5365 memalloc_nofs_restore(nofs_flags);
5366 if (ret == -EOPNOTSUPP) {
5367 ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
5368 zone->len - (zone->wp - zone->start),
5369 GFP_NOFS, 0);
5370 if (ret)
5371 f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
5372 fdev->path, ret);
5373 } else if (ret) {
5374 f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
5375 fdev->path, ret);
5376 }
5377
5378 return ret;
5379 }
5380
5381 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
5382 block_t zone_blkaddr)
5383 {
5384 int i;
5385
5386 for (i = 0; i < sbi->s_ndevs; i++) {
5387 if (!bdev_is_zoned(FDEV(i).bdev))
5388 continue;
5389 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
5390 zone_blkaddr <= FDEV(i).end_blk))
5391 return &FDEV(i);
5392 }
5393
5394 return NULL;
5395 }
5396
5397 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
5398 void *data)
5399 {
5400 memcpy(data, zone, sizeof(struct blk_zone));
5401 return 0;
5402 }
5403
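/*
 * Bring one curseg back in line with its zone's write pointer. After a
 * safe unmount the segment can be reused as-is if it matches the write
 * pointer; otherwise a fresh section is allocated and its zone is
 * reset if it is not empty.
 */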
5404 static int do_fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
5405 {
5406 struct curseg_info *cs = CURSEG_I(sbi, type);
5407 struct f2fs_dev_info *zbd;
5408 struct blk_zone zone;
5409 unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
5410 block_t cs_zone_block, wp_block;
5411 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5412 sector_t zone_sector;
5413 int err;
5414
5415 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5416 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5417
5418 zbd = get_target_zoned_dev(sbi, cs_zone_block);
5419 if (!zbd)
5420 return 0;
5421
5422 /* report zone for the sector the curseg points to */
5423 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5424 << log_sectors_per_block;
5425 err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5426 report_one_zone_cb, &zone);
5427 if (err != 1) {
5428 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5429 zbd->path, err);
5430 return err;
5431 }
5432
5433 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5434 return 0;
5435
	/*
	 * If the previous mount was safely unmounted, the current segments
	 * can be reused. Otherwise, allocate new sections.
	 */
	if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
		wp_segno = GET_SEGNO(sbi, wp_block);
		wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
		wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);

		if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
				wp_sector_off == 0)
			return 0;

		f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
			    "curseg[0x%x,0x%x] wp[0x%x,0x%x]", type, cs->segno,
			    cs->next_blkoff, wp_segno, wp_blkoff);
	}

	/*
	 * Allocate a new section if the current one has been written to or
	 * does not start at the first segment of its zone.
	 */
	if (cs->next_blkoff ||
	    cs->segno != GET_SEG_FROM_SEC(sbi, GET_ZONE_FROM_SEC(sbi, cs_section))) {
		unsigned int old_segno = cs->segno, old_blkoff = cs->next_blkoff;

		f2fs_allocate_new_section(sbi, type, true);
		f2fs_notice(sbi, "Assign new section to curseg[%d]: "
			    "[0x%x,0x%x] -> [0x%x,0x%x]",
			    type, old_segno, old_blkoff,
			    cs->segno, cs->next_blkoff);
	}

	/* check consistency of the zone the curseg pointed to */
	if (check_zone_write_pointer(sbi, zbd, &zone))
		return -EIO;

	/* check newly assigned zone */
	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));

	zbd = get_target_zoned_dev(sbi, cs_zone_block);
	if (!zbd)
		return 0;

	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
			<< log_sectors_per_block;
	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
				  report_one_zone_cb, &zone);
	if (err != 1) {
		f2fs_err(sbi, "Report zone failed: %s (errno=%d)",
			 zbd->path, err);
		return err;
	}

	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

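	/*
	 * The newly assigned zone must start empty; if its write pointer is
	 * not at the zone start, reset the zone with a zone discard.
	 */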
	if (zone.wp != zone.start) {
		f2fs_notice(sbi,
			    "New zone for curseg[%d] is not yet discarded. "
			    "Reset the zone: curseg[0x%x,0x%x]",
			    type, cs->segno, cs->next_blkoff);
		err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
				zone.len >> log_sectors_per_block);
		if (err) {
			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
				 zbd->path, err);
			return err;
		}
	}

	return 0;
}

static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;

	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
		ret = do_fix_curseg_write_pointer(sbi, i);
		if (ret)
			return ret;
	}

	return 0;
}

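/* Context handed to check_zone_write_pointer_cb() via blkdev_report_zones(). */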
struct check_zone_write_pointer_args {
	struct f2fs_sb_info *sbi;
	struct f2fs_dev_info *fdev;
};

static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
				       void *data)
{
	struct check_zone_write_pointer_args *args;

	args = (struct check_zone_write_pointer_args *)data;

	return check_zone_write_pointer(args->sbi, args->fdev, zone);
}

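/* Validate the write pointer of every zone on every zoned device. */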
static int check_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;
	struct check_zone_write_pointer_args args;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!bdev_is_zoned(FDEV(i).bdev))
			continue;

		args.sbi = sbi;
		args.fdev = &FDEV(i);
		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
					  check_zone_write_pointer_cb, &args);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) ||
			f2fs_hw_is_readonly(sbi))
		return 0;

	f2fs_notice(sbi, "Checking entire write pointers");
	ret = fix_curseg_write_pointer(sbi);
	if (!ret)
		ret = check_write_pointer(sbi);
	return ret;
}

/*
 * Return the number of usable blocks in a segment. The number of blocks
 * returned is always equal to the number of blocks in a segment for
 * segments fully contained within a sequential zone capacity or a
 * conventional zone. For segments partially contained in a sequential
 * zone capacity, the number of usable blocks up to the zone capacity
 * is returned. 0 is returned in all other cases.
 */
static inline unsigned int f2fs_usable_zone_blks_in_seg(
			struct f2fs_sb_info *sbi, unsigned int segno)
{
	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
	unsigned int secno;

	if (!sbi->unusable_blocks_per_sec)
		return BLKS_PER_SEG(sbi);

	secno = GET_SEC_FROM_SEG(sbi, segno);
	seg_start = START_BLOCK(sbi, segno);
	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);

	/*
	 * If the segment starts before the zone capacity and spans beyond
	 * it, the usable blocks run from the segment start to the zone
	 * capacity. If the segment starts at or after the zone capacity,
	 * there are no usable blocks.
	 */
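	/*
	 * Hypothetical example: with 512-block segments and a section
	 * capacity of 1792 blocks (3.5 segments), the first three segments
	 * return 512, the fourth returns 256, and a segment starting at or
	 * beyond the capacity would return 0.
	 */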
	if (seg_start >= sec_cap_blkaddr)
		return 0;
	if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
		return sec_cap_blkaddr - seg_start;

	return BLKS_PER_SEG(sbi);
}
#else
int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
	return 0;
}

static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
							unsigned int segno)
{
	return 0;
}

#endif
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
				     unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return f2fs_usable_zone_blks_in_seg(sbi, segno);

	return BLKS_PER_SEG(sbi);
}

unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return CAP_SEGS_PER_SEC(sbi);

	return SEGS_PER_SEC(sbi);
}

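/*
 * Return the modified time of a section: the mtime of its single segment
 * for normal sections, or the valid-block-weighted average mtime across
 * the section's usable segments for large sections. Returns INVALID_MTIME
 * when a large section has no valid blocks.
 */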
unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
	unsigned int secno, start;
	unsigned int total_valid_blocks = 0;
	unsigned long long mtime = 0;
	unsigned int i;

	secno = GET_SEC_FROM_SEG(sbi, segno);
	start = GET_SEG_FROM_SEC(sbi, secno);

	if (!__is_large_section(sbi)) {
		mtime = get_seg_entry(sbi, start)->mtime;
		goto out;
	}

	/*
	 * For a large section, weight each segment's mtime by its valid
	 * block count; segments without valid blocks contribute nothing.
	 */
	for (i = 0; i < usable_segs_per_sec; i++) {
		struct seg_entry *se = get_seg_entry(sbi, start + i);

		mtime += se->mtime * se->valid_blocks;
		total_valid_blocks += se->valid_blocks;
	}

	if (total_valid_blocks == 0)
		return INVALID_MTIME;

	mtime = div_u64(mtime, total_valid_blocks);
out:
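	/* Avoid colliding with the INVALID_MTIME sentinel for a real mtime. */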
	if (unlikely(mtime == INVALID_MTIME))
		mtime -= 1;
	return mtime;
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	down_write(&sit_i->sentry_lock);

	sit_i->min_mtime = ULLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
		unsigned long long mtime = f2fs_get_section_mtime(sbi, segno);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi, false);
	sit_i->dirty_max_mtime = 0;
	up_write(&sit_i->sentry_lock);
}

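/*
 * Build all segment manager structures from the superblock and checkpoint.
 * On failure, partially built state is left in place for
 * f2fs_destroy_segment_manager(), which the mount error path is expected
 * to call.
 */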
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!f2fs_lfs_mode(sbi))
		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
	sm_info->min_ssr_sections = reserved_sections(sbi);

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	init_f2fs_rwsem(&sm_info->curseg_lock);

	err = f2fs_create_flush_cmd_control(sbi);
	if (err)
		return err;

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	err = build_sit_entries(sbi);
	if (err)
		return err;

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	err = sanity_check_curseg(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

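/* Free the dirty segment bitmap of the given type under the seglist lock. */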
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
				 enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->pinned_secmap);
	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	if (__is_large_section(sbi)) {
		mutex_lock(&dirty_i->seglist_lock);
		kvfree(dirty_i->dirty_secmap);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!sit_i)
		return;

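	/*
	 * The per-sentry validity bitmaps point into the single
	 * sit_i->bitmap chunk (see build_sit_info()), so freeing it once
	 * releases them all.
	 */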
	if (sit_i->sentries)
		kvfree(sit_i->bitmap);
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kfree(sit_i->sit_bitmap_mir);
	kvfree(sit_i->invalid_segmap);
#endif
	kfree(sit_i);
}

void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	f2fs_destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

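/*
 * Create the slab caches used by the segment manager. On failure, the
 * caches created so far are destroyed in reverse order.
 */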
int __init f2fs_create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
						    sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
						  sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
						    sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
						   sizeof(struct revoke_entry));
	if (!revoke_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(revoke_entry_slab);
}