Lines Matching +full:cache +full:- +full:block
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2009-2011 Red Hat, Inc.
10 #include <linux/dm-bufio.h>
12 #include <linux/device-mapper.h>
13 #include <linux/dm-io.h>
65 * dm_buffer->list_mode
73 /*--------------------------------------------------------------*/
101 /*--------------*/
105 lru->cursor = NULL; in lru_init()
106 lru->count = 0; in lru_init()
107 INIT_LIST_HEAD(&lru->iterators); in lru_init()
112 WARN_ON_ONCE(lru->cursor); in lru_destroy()
113 WARN_ON_ONCE(!list_empty(&lru->iterators)); in lru_destroy()
125 atomic_set(&le->referenced, 0); in lru_insert()
127 if (lru->cursor) { in lru_insert()
128 list_add_tail(&le->list, lru->cursor); in lru_insert()
130 INIT_LIST_HEAD(&le->list); in lru_insert()
131 lru->cursor = &le->list; in lru_insert()
133 lru->count++; in lru_insert()
136 /*--------------*/
151 it->lru = lru; in lru_iter_begin()
152 it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL; in lru_iter_begin()
153 it->e = lru->cursor ? to_le(lru->cursor) : NULL; in lru_iter_begin()
154 list_add(&it->list, &lru->iterators); in lru_iter_begin()
162 list_del(&it->list); in lru_iter_end()
178 while (it->e) { in lru_iter_next()
179 e = it->e; in lru_iter_next()
182 if (it->e == it->stop) in lru_iter_next()
183 it->e = NULL; in lru_iter_next()
185 it->e = to_le(it->e->list.next); in lru_iter_next()
202 list_for_each_entry(it, &lru->iterators, list) { in lru_iter_invalidate()
203 /* Move it->e forwards if necessary. */ in lru_iter_invalidate()
204 if (it->e == e) { in lru_iter_invalidate()
205 it->e = to_le(it->e->list.next); in lru_iter_invalidate()
206 if (it->e == e) in lru_iter_invalidate()
207 it->e = NULL; in lru_iter_invalidate()
210 /* Move it->stop backwards if necessary. */ in lru_iter_invalidate()
211 if (it->stop == e) { in lru_iter_invalidate()
212 it->stop = to_le(it->stop->list.prev); in lru_iter_invalidate()
213 if (it->stop == e) in lru_iter_invalidate()
214 it->stop = NULL; in lru_iter_invalidate()
219 /*--------------*/
227 if (lru->count == 1) { in lru_remove()
228 lru->cursor = NULL; in lru_remove()
230 if (lru->cursor == &le->list) in lru_remove()
231 lru->cursor = lru->cursor->next; in lru_remove()
232 list_del(&le->list); in lru_remove()
234 lru->count--; in lru_remove()
242 atomic_set(&le->referenced, 1); in lru_reference()
245 /*--------------*/
262 struct list_head *h = lru->cursor; in lru_evict()
272 while (tested < lru->count) { in lru_evict()
275 if (atomic_read(&le->referenced)) { in lru_evict()
276 atomic_set(&le->referenced, 0); in lru_evict()
285 lru->cursor = le->list.next; in lru_evict()
293 lru->cursor = le->list.next; in lru_evict()
298 h = h->next; in lru_evict()
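
lru_evict() above implements a second-chance ("clock") policy: starting at the cursor, a referenced entry gets one more pass around the list (its bit is cleared), and the first unreferenced entry the predicate accepts becomes the victim. A minimal userspace sketch of that policy follows; the names (struct entry, clock_evict) are illustrative, not dm-bufio's.

	#include <stdbool.h>
	#include <stddef.h>

	struct entry {
		struct entry *next;	/* circular list, like the lru's cursor list */
		bool referenced;
	};

	/* *hand is the clock hand; returns an evictable entry or NULL */
	static struct entry *clock_evict(struct entry **hand, size_t count,
					 bool (*pred)(struct entry *))
	{
		struct entry *e = *hand;
		size_t tested;

		for (tested = 0; tested < count; tested++, e = e->next) {
			if (e->referenced) {
				e->referenced = false;	/* second chance */
				continue;
			}
			if (pred(e)) {
				*hand = e->next;	/* advance past the victim */
				return e;		/* caller unlinks/frees it */
			}
		}
		return NULL;	/* everything referenced or rejected */
	}
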
307 /*--------------------------------------------------------------*/
317 * Describes how the block was allocated:
334 sector_t block; member
368 /*--------------------------------------------------------------*/
371 * The buffer cache manages buffers, particularly:
372 * - inc/dec of holder count
373 * - setting the last_accessed field
374 * - maintains clean/dirty state along with lru
375 * - selecting buffers that match predicates
377 * It does *not* handle:
378 * - allocation/freeing of buffers.
379 * - IO
380 * - Eviction or cache sizing.
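
The structure behind these responsibilities can be reconstructed from the accessors that follow; this is a sketch based on those usages (kernel context, <linux/rbtree.h>, <linux/rwsem.h>, <linux/spinlock.h>), not necessarily the authoritative upstream definition:

	struct buffer_tree {
		union {
			struct rw_semaphore lock;	/* sleepable clients */
			rwlock_t spinlock;		/* DM_BUFIO_CLIENT_NO_SLEEP clients */
		} u;
		struct rb_root root;
	};

	struct dm_buffer_cache {
		struct lru lru[LIST_SIZE];	/* LIST_CLEAN and LIST_DIRTY */
		unsigned int num_locks;
		bool no_sleep;
		struct buffer_tree trees[];	/* one rbtree shard per lock */
	};

The flexible trees[] array would also explain the "must be last member" note on the embedded struct dm_buffer_cache further down.
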
409 static inline unsigned int cache_index(sector_t block, unsigned int num_locks) in cache_index() argument
411 return dm_hash_locks_index(block, num_locks); in cache_index()
414 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block) in cache_read_lock() argument
416 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) in cache_read_lock()
417 read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); in cache_read_lock()
419 down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock); in cache_read_lock()
422 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block) in cache_read_unlock() argument
424 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) in cache_read_unlock()
425 read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); in cache_read_unlock()
427 up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock); in cache_read_unlock()
430 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block) in cache_write_lock() argument
432 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) in cache_write_lock()
433 write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); in cache_write_lock()
435 down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock); in cache_write_lock()
438 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block) in cache_write_unlock() argument
440 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) in cache_write_unlock()
441 write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); in cache_write_unlock()
443 up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock); in cache_write_unlock()
451 struct dm_buffer_cache *cache; member
457 static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write) in lh_init() argument
459 lh->cache = cache; in lh_init()
460 lh->write = write; in lh_init()
461 lh->no_previous = cache->num_locks; in lh_init()
462 lh->previous = lh->no_previous; in lh_init()
467 if (lh->write) { in __lh_lock()
468 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) in __lh_lock()
469 write_lock_bh(&lh->cache->trees[index].u.spinlock); in __lh_lock()
471 down_write(&lh->cache->trees[index].u.lock); in __lh_lock()
473 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) in __lh_lock()
474 read_lock_bh(&lh->cache->trees[index].u.spinlock); in __lh_lock()
476 down_read(&lh->cache->trees[index].u.lock); in __lh_lock()
482 if (lh->write) { in __lh_unlock()
483 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) in __lh_unlock()
484 write_unlock_bh(&lh->cache->trees[index].u.spinlock); in __lh_unlock()
486 up_write(&lh->cache->trees[index].u.lock); in __lh_unlock()
488 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) in __lh_unlock()
489 read_unlock_bh(&lh->cache->trees[index].u.spinlock); in __lh_unlock()
491 up_read(&lh->cache->trees[index].u.lock); in __lh_unlock()
500 if (lh->previous != lh->no_previous) { in lh_exit()
501 __lh_unlock(lh, lh->previous); in lh_exit()
502 lh->previous = lh->no_previous; in lh_exit()
512 unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */ in lh_next()
514 if (lh->previous != lh->no_previous) { in lh_next()
515 if (lh->previous != index) { in lh_next()
516 __lh_unlock(lh, lh->previous); in lh_next()
518 lh->previous = index; in lh_next()
522 lh->previous = index; in lh_next()
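
lh_next() amortises locking for callers that walk blocks in ascending order: the shard lock taken for the previous block stays held until some block hashes to a different shard. A userspace sketch of the same pattern, with pthreads standing in for the kernel lock primitives and a trivial modulo standing in for dm_hash_locks_index() (all names here are hypothetical):

	#include <pthread.h>

	#define NUM_SHARDS 64

	static pthread_rwlock_t shard_lock[NUM_SHARDS];

	static void shards_init(void)
	{
		for (int i = 0; i < NUM_SHARDS; i++)
			pthread_rwlock_init(&shard_lock[i], NULL);
	}

	static unsigned int shard_of(unsigned long block)
	{
		return (unsigned int)(block % NUM_SHARDS);	/* stand-in hash */
	}

	struct lock_hist {
		unsigned int prev;	/* NUM_SHARDS means "none held" */
	};

	static void hist_next(struct lock_hist *lh, unsigned long block)
	{
		unsigned int index = shard_of(block);

		if (lh->prev != NUM_SHARDS) {
			if (lh->prev == index)
				return;		/* same shard: keep the lock held */
			pthread_rwlock_unlock(&shard_lock[lh->prev]);
		}
		pthread_rwlock_wrlock(&shard_lock[index]);
		lh->prev = index;
	}
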
542 bc->num_locks = num_locks; in cache_init()
543 bc->no_sleep = no_sleep; in cache_init()
545 for (i = 0; i < bc->num_locks; i++) { in cache_init()
547 rwlock_init(&bc->trees[i].u.spinlock); in cache_init()
549 init_rwsem(&bc->trees[i].u.lock); in cache_init()
550 bc->trees[i].root = RB_ROOT; in cache_init()
553 lru_init(&bc->lru[LIST_CLEAN]); in cache_init()
554 lru_init(&bc->lru[LIST_DIRTY]); in cache_init()
561 for (i = 0; i < bc->num_locks; i++) in cache_destroy()
562 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root)); in cache_destroy()
564 lru_destroy(&bc->lru[LIST_CLEAN]); in cache_destroy()
565 lru_destroy(&bc->lru[LIST_DIRTY]); in cache_destroy()
568 /*--------------*/
575 return bc->lru[list_mode].count; in cache_count()
583 /*--------------*/
586 * Gets a specific buffer, indexed by block.
592 static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block) in __cache_get() argument
594 struct rb_node *n = root->rb_node; in __cache_get()
600 if (b->block == block) in __cache_get()
603 n = block < b->block ? n->rb_left : n->rb_right; in __cache_get()
611 atomic_inc(&b->hold_count); in __cache_inc_buffer()
612 WRITE_ONCE(b->last_accessed, jiffies); in __cache_inc_buffer()
615 static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block) in cache_get() argument
619 cache_read_lock(bc, block); in cache_get()
620 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block); in cache_get()
622 lru_reference(&b->lru); in cache_get()
625 cache_read_unlock(bc, block); in cache_get()
630 /*--------------*/
640 cache_read_lock(bc, b->block); in cache_put()
641 BUG_ON(!atomic_read(&b->hold_count)); in cache_put()
642 r = atomic_dec_and_test(&b->hold_count); in cache_put()
643 cache_read_unlock(bc, b->block); in cache_put()
648 /*--------------*/
672 lh_next(w->lh, b->block); in __evict_pred()
674 if (atomic_read(&b->hold_count)) in __evict_pred()
677 return w->pred(b, w->context); in __evict_pred()
688 le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep); in __cache_evict()
694 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in __cache_evict()
712 /*--------------*/
719 cache_write_lock(bc, b->block); in cache_mark()
720 if (list_mode != b->list_mode) { in cache_mark()
721 lru_remove(&bc->lru[b->list_mode], &b->lru); in cache_mark()
722 b->list_mode = list_mode; in cache_mark()
723 lru_insert(&bc->lru[b->list_mode], &b->lru); in cache_mark()
725 cache_write_unlock(bc, b->block); in cache_mark()
728 /*--------------*/
742 le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep); in __cache_mark_many()
747 b->list_mode = new_mode; in __cache_mark_many()
748 lru_insert(&bc->lru[b->list_mode], &b->lru); in __cache_mark_many()
762 /*--------------*/
783 struct lru *lru = &bc->lru[list_mode]; in __cache_iterate()
786 if (!lru->cursor) in __cache_iterate()
789 first = le = to_le(lru->cursor); in __cache_iterate()
793 lh_next(lh, b->block); in __cache_iterate()
804 le = to_le(le->list.next); in __cache_iterate()
818 /*--------------*/
821 * Passes ownership of the buffer to the cache. Returns false if the buffer was already present (in which case ownership does not pass).
831 struct rb_node **new = &root->rb_node, *parent = NULL; in __cache_insert()
837 if (found->block == b->block) in __cache_insert()
841 new = b->block < found->block ? in __cache_insert()
842 &found->node.rb_left : &found->node.rb_right; in __cache_insert()
845 rb_link_node(&b->node, parent, new); in __cache_insert()
846 rb_insert_color(&b->node, root); in __cache_insert()
855 if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE)) in cache_insert()
858 cache_write_lock(bc, b->block); in cache_insert()
859 BUG_ON(atomic_read(&b->hold_count) != 1); in cache_insert()
860 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b); in cache_insert()
862 lru_insert(&bc->lru[b->list_mode], &b->lru); in cache_insert()
863 cache_write_unlock(bc, b->block); in cache_insert()
868 /*--------------*/
871 * Removes a buffer from the cache; ownership of the buffer passes back to the caller.
880 cache_write_lock(bc, b->block); in cache_remove()
882 if (atomic_read(&b->hold_count) != 1) { in cache_remove()
886 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in cache_remove()
887 lru_remove(&bc->lru[b->list_mode], &b->lru); in cache_remove()
890 cache_write_unlock(bc, b->block); in cache_remove()
895 /*--------------*/
899 static struct dm_buffer *__find_next(struct rb_root *root, sector_t block) in __find_next() argument
901 struct rb_node *n = root->rb_node; in __find_next()
908 if (b->block == block) in __find_next()
911 if (block <= b->block) { in __find_next()
912 n = n->rb_left; in __find_next()
915 n = n->rb_right; in __find_next()
933 if (!b || (b->block >= end)) in __remove_range()
936 begin = b->block + 1; in __remove_range()
938 if (atomic_read(&b->hold_count)) in __remove_range()
942 rb_erase(&b->node, root); in __remove_range()
943 lru_remove(&bc->lru[b->list_mode], &b->lru); in __remove_range()
955 BUG_ON(bc->no_sleep); in cache_remove_range()
956 for (i = 0; i < bc->num_locks; i++) { in cache_remove_range()
957 down_write(&bc->trees[i].u.lock); in cache_remove_range()
958 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release); in cache_remove_range()
959 up_write(&bc->trees[i].u.lock); in cache_remove_range()
963 /*----------------------------------------------------------------*/
976 * context), so some clean-not-writing buffers can be held on the dirty list too; they are relinked to the clean list later, in the process of cleaning.
1017 struct dm_buffer_cache cache; /* must be last member */ member
1020 /*----------------------------------------------------------------*/
1022 #define dm_bufio_in_request() (!!current->bio_list)
1026 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in dm_bufio_lock()
1027 spin_lock_bh(&c->spinlock); in dm_bufio_lock()
1029 mutex_lock_nested(&c->lock, dm_bufio_in_request()); in dm_bufio_lock()
1034 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in dm_bufio_unlock()
1035 spin_unlock_bh(&c->spinlock); in dm_bufio_unlock()
1037 mutex_unlock(&c->lock); in dm_bufio_unlock()
1040 /*----------------------------------------------------------------*/
1043 * Default cache size: available memory divided by the ratio.
1048 * Total cache size set by the user.
1054 * at any time. If it disagrees, the user has changed cache size.
1073 /*----------------------------------------------------------------*/
1098 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); in buffer_record_stack()
1102 /*----------------------------------------------------------------*/
1116 data_mode = b->data_mode; in adjust_total_allocated()
1117 diff = (long)b->c->block_size; in adjust_total_allocated()
1119 diff = -diff; in adjust_total_allocated()
1139 * Change the number of clients and recalculate per-client limit.
1151 * Use default if set to 0 and report the actual cache size used. in __cache_size_refresh()
1184 if (unlikely(c->slab_cache != NULL)) { in alloc_buffer_data()
1186 return kmem_cache_alloc(c->slab_cache, gfp_mask); in alloc_buffer_data()
1189 if (unlikely(c->block_size < PAGE_SIZE)) { in alloc_buffer_data()
1191 return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE); in alloc_buffer_data()
1194 if (c->block_size <= KMALLOC_MAX_SIZE && in alloc_buffer_data()
1198 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); in alloc_buffer_data()
1203 return __vmalloc(c->block_size, gfp_mask); in alloc_buffer_data()
1214 kmem_cache_free(c->slab_cache, data); in free_buffer_data()
1223 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); in free_buffer_data()
1242 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); in alloc_buffer()
1247 b->c = c; in alloc_buffer()
1249 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
1250 if (!b->data) { in alloc_buffer()
1251 kmem_cache_free(c->slab_buffer, b); in alloc_buffer()
1257 b->stack_len = 0; in alloc_buffer()
1267 struct dm_bufio_client *c = b->c; in free_buffer()
1270 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
1271 kmem_cache_free(c->slab_buffer, b); in free_buffer()
1275 *--------------------------------------------------------------------------
1280 * memory-consumption per buffer, so it is not viable);
1282 * the memory must be direct-mapped, not vmalloced;
1288 * rejects the bio because it is too large, use the dm-io layer to do the I/O.
1289 * The dm-io layer splits the I/O into multiple requests, avoiding the above shortcomings.
1291 *--------------------------------------------------------------------------
1295 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
1302 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0); in dmio_complete()
1314 .client = b->c->dm_io, in use_dmio()
1317 .bdev = b->c->bdev, in use_dmio()
1322 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
1324 io_req.mem.ptr.addr = (char *)b->data + offset; in use_dmio()
1327 io_req.mem.ptr.vma = (char *)b->data + offset; in use_dmio()
1332 b->end_io(b, errno_to_blk_status(r)); in use_dmio()
1337 struct dm_buffer *b = bio->bi_private; in bio_complete()
1338 blk_status_t status = bio->bi_status; in bio_complete()
1342 b->end_io(b, status); in bio_complete()
1358 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op); in use_bio()
1359 bio->bi_iter.bi_sector = sector; in use_bio()
1360 bio->bi_end_io = bio_complete; in use_bio()
1361 bio->bi_private = b; in use_bio()
1362 bio->bi_ioprio = ioprio; in use_bio()
1364 ptr = (char *)b->data + offset; in use_bio()
1372 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block) in block_to_sector() argument
1376 if (likely(c->sectors_per_block_bits >= 0)) in block_to_sector()
1377 sector = block << c->sectors_per_block_bits; in block_to_sector()
1379 sector = block * (c->block_size >> SECTOR_SHIFT); in block_to_sector()
1380 sector += c->start; in block_to_sector()
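
A worked example of the arithmetic above: assume a client with a 4096-byte block size and start == 2048. Then sectors_per_block_bits is __ffs(4096) - SECTOR_SHIFT = 12 - 9 = 3, and block 10 maps to sector (10 << 3) + 2048 = 2128; the multiply path covers block sizes that are not a power of two:

	#include <assert.h>
	#include <stdint.h>

	#define SECTOR_SHIFT 9

	int main(void)
	{
		uint64_t start = 2048, block = 10;

		/* power-of-two block size: 4096 bytes -> shift by 3 */
		assert(((block << 3) + start) == 2128);

		/* non-power-of-two: 3072 bytes == 6 sectors per block */
		assert((block * (3072 >> SECTOR_SHIFT) + start) == 2108);
		return 0;
	}
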
1392 b->end_io = end_io; in submit_io()
1394 sector = block_to_sector(b->c, b->block); in submit_io()
1397 n_sectors = b->c->block_size >> SECTOR_SHIFT; in submit_io()
1400 if (b->c->write_callback) in submit_io()
1401 b->c->write_callback(b); in submit_io()
1402 offset = b->write_start; in submit_io()
1403 end = b->write_end; in submit_io()
1404 offset &= -DM_BUFIO_WRITE_ALIGN; in submit_io()
1405 end += DM_BUFIO_WRITE_ALIGN - 1; in submit_io()
1406 end &= -DM_BUFIO_WRITE_ALIGN; in submit_io()
1407 if (unlikely(end > b->c->block_size)) in submit_io()
1408 end = b->c->block_size; in submit_io()
1411 n_sectors = (end - offset) >> SECTOR_SHIFT; in submit_io()
1414 if (b->data_mode != DATA_MODE_VMALLOC) in submit_io()
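
The masking above rounds the dirty byte range outward to the write alignment before converting it to sectors. An illustration assuming a 512-byte alignment (the real DM_BUFIO_WRITE_ALIGN value is defined elsewhere in this file): a dirty range of [100, 700) widens to [0, 1024), i.e. two sectors.

	#include <assert.h>

	int main(void)
	{
		unsigned int align = 512;	/* assumed, for illustration only */
		unsigned int offset = 100, end = 700;

		offset &= -align;		/* round down -> 0 */
		end += align - 1;
		end &= -align;			/* round up -> 1024 */

		assert(offset == 0 && end == 1024);
		assert(((end - offset) >> 9) == 2);	/* sectors written */
		return 0;
	}
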
1421 *--------------------------------------------------------------
1423 *--------------------------------------------------------------
1434 b->write_error = status; in write_endio()
1436 struct dm_bufio_client *c = b->c; in write_endio()
1438 (void)cmpxchg(&c->async_write_error, 0, in write_endio()
1442 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
1445 clear_bit(B_WRITING, &b->state); in write_endio()
1448 wake_up_bit(&b->state, B_WRITING); in write_endio()
1454 * - If the buffer is not dirty, exit.
1455 * - If there is some previous write going on, wait for it to finish (we can't have two writes on the same buffer simultaneously).
1457 * - Submit our write and don't wait on it. We set B_WRITING, indicating that there is a write in progress.
1463 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
1466 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
1467 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
1469 b->write_start = b->dirty_start; in __write_dirty_buffer()
1470 b->write_end = b->dirty_end; in __write_dirty_buffer()
1475 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
1485 list_entry(write_list->next, struct dm_buffer, write_list); in __flush_write_list()
1486 list_del(&b->write_list); in __flush_write_list()
1500 BUG_ON(atomic_read(&b->hold_count)); in __make_buffer_clean()
1503 if (!smp_load_acquire(&b->state)) /* fast case */ in __make_buffer_clean()
1506 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
1508 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
1516 if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state))) in is_clean()
1518 if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state))) in is_clean()
1520 if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN)) in is_clean()
1523 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && in is_clean()
1524 unlikely(test_bit(B_READING, &b->state))) in is_clean()
1533 if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) in is_dirty()
1535 if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY)) in is_dirty()
1549 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c); in __get_unclaimed_buffer()
1556 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in __get_unclaimed_buffer()
1559 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL); in __get_unclaimed_buffer()
1572 * This function is entered with c->lock held, drops it and regains it before exiting.
1579 add_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
1585 * hold c->lock when wake_up is called. So we have a timeout here, just in case. in __wait_for_free_buffer()
1590 remove_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
1614 * dm-bufio is resistant to allocation failures (it just keeps in __alloc_buffer_wait_no_callback()
1623 * For debugging, if we set the cache size to 1, no new buffers will be allocated. in __alloc_buffer_wait_no_callback()
1645 if (!list_empty(&c->reserved_buffers)) { in __alloc_buffer_wait_no_callback()
1646 b = list_to_buffer(c->reserved_buffers.next); in __alloc_buffer_wait_no_callback()
1647 list_del(&b->lru.list); in __alloc_buffer_wait_no_callback()
1648 c->need_reserved_buffers++; in __alloc_buffer_wait_no_callback()
1668 if (c->alloc_callback) in __alloc_buffer_wait()
1669 c->alloc_callback(b); in __alloc_buffer_wait()
1679 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
1681 b->block = -1; in __free_buffer_wake()
1682 if (!c->need_reserved_buffers) in __free_buffer_wake()
1685 list_add(&b->lru.list, &c->reserved_buffers); in __free_buffer_wake()
1686 c->need_reserved_buffers--; in __free_buffer_wake()
1693 if (unlikely(waitqueue_active(&c->free_buffer_wait))) in __free_buffer_wake()
1694 wake_up(&c->free_buffer_wait); in __free_buffer_wake()
1699 if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) in cleaned()
1702 if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state)) in cleaned()
1710 cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL); in __move_clean_buffers()
1722 if (wc->no_wait && test_bit(B_WRITING, &b->state)) in write_one()
1725 __write_dirty_buffer(b, wc->write_list); in write_one()
1735 cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc); in __write_dirty_buffers_async()
1741 * If we're over "limit_buffers", block until we get under the limit.
1746 if (cache_count(&c->cache, LIST_DIRTY) > in __check_watermark()
1747 cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO) in __check_watermark()
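
The watermark test above starts asynchronous writeback once dirty buffers outnumber clean ones by the writeback ratio. Worked numbers, assuming a ratio of 3 purely for illustration (check DM_BUFIO_WRITEBACK_RATIO for the real value):

	#include <assert.h>

	int main(void)
	{
		unsigned long dirty = 400, clean = 100, ratio = 3;

		/* 400 > 3 * 100 -> __write_dirty_buffers_async() would run */
		assert(dirty > clean * ratio);
		return 0;
	}
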
1752 *--------------------------------------------------------------
1754 *--------------------------------------------------------------
1763 if (cache_put(&c->cache, b) && in cache_put_and_wake()
1764 unlikely(waitqueue_active(&c->free_buffer_wait))) in cache_put_and_wake()
1765 wake_up(&c->free_buffer_wait); in cache_put_and_wake()
1769 * This assumes you have already checked the cache to see if the buffer is already present.
1772 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, in __bufio_new() argument
1792 b = cache_get(&c->cache, block); in __bufio_new()
1801 atomic_set(&b->hold_count, 1); in __bufio_new()
1802 WRITE_ONCE(b->last_accessed, jiffies); in __bufio_new()
1803 b->block = block; in __bufio_new()
1804 b->read_error = 0; in __bufio_new()
1805 b->write_error = 0; in __bufio_new()
1806 b->list_mode = LIST_CLEAN; in __bufio_new()
1809 b->state = 0; in __bufio_new()
1811 b->state = 1 << B_READING; in __bufio_new()
1816 * We mustn't insert into the cache until the B_READING state is set; otherwise another thread could get it and use it before it has been read. in __bufio_new()
1820 cache_insert(&c->cache, b); in __bufio_new()
1837 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { in __bufio_new()
1851 b->read_error = status; in read_endio()
1853 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1856 clear_bit(B_READING, &b->state); in read_endio()
1859 wake_up_bit(&b->state, B_READING); in read_endio()
1868 static void *new_read(struct dm_bufio_client *c, sector_t block, in new_read() argument
1880 * Fast path, hopefully the block is already in the cache. No need to get the client lock for this. in new_read()
1883 b = cache_get(&c->cache, block); in new_read()
1897 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { in new_read()
1908 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1913 if (b && (atomic_read(&b->hold_count) == 1)) in new_read()
1926 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1928 if (b->read_error) { in new_read()
1929 int error = blk_status_to_errno(b->read_error); in new_read()
1938 return b->data; in new_read()
1941 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, in dm_bufio_get() argument
1944 return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT); in dm_bufio_get()
1948 static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block, in __dm_bufio_read() argument
1952 return ERR_PTR(-EINVAL); in __dm_bufio_read()
1954 return new_read(c, block, NF_READ, bp, ioprio); in __dm_bufio_read()
1957 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, in dm_bufio_read() argument
1960 return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT); in dm_bufio_read()
1964 void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block, in dm_bufio_read_with_ioprio() argument
1967 return __dm_bufio_read(c, block, bp, ioprio); in dm_bufio_read_with_ioprio()
1971 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, in dm_bufio_new() argument
1975 return ERR_PTR(-EINVAL); in dm_bufio_new()
1977 return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT); in dm_bufio_new()
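
Taken together, the functions above form the client-facing read path. A hedged sketch of the usual read-modify-write cycle (kernel context; example_rmw is a hypothetical helper, error handling abbreviated; see include/linux/dm-bufio.h for the authoritative signatures):

	#include <linux/dm-bufio.h>
	#include <linux/err.h>
	#include <linux/string.h>

	static int example_rmw(struct dm_bufio_client *c)
	{
		struct dm_buffer *bp;
		void *data = dm_bufio_read(c, 0, &bp);	/* read block 0 */

		if (IS_ERR(data))
			return PTR_ERR(data);

		memset(data, 0, 16);			/* modify the cached block */
		dm_bufio_mark_buffer_dirty(bp);		/* moves it to LIST_DIRTY */
		dm_bufio_release(bp);			/* drop our hold count */

		return dm_bufio_write_dirty_buffers(c);	/* write back and wait */
	}
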
1982 sector_t block, unsigned int n_blocks, in __dm_bufio_prefetch() argument
1994 for (; n_blocks--; block++) { in __dm_bufio_prefetch()
1998 b = cache_get(&c->cache, block); in __dm_bufio_prefetch()
2000 /* already in cache */ in __dm_bufio_prefetch()
2006 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in __dm_bufio_prefetch()
2035 void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks) in dm_bufio_prefetch() argument
2037 return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT); in dm_bufio_prefetch()
2041 void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block, in dm_bufio_prefetch_with_ioprio() argument
2044 return __dm_bufio_prefetch(c, block, n_blocks, ioprio); in dm_bufio_prefetch_with_ioprio()
2050 struct dm_bufio_client *c = b->c; in dm_bufio_release()
2057 if ((b->read_error || b->write_error) && in dm_bufio_release()
2058 !test_bit_acquire(B_READING, &b->state) && in dm_bufio_release()
2059 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
2060 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
2063 /* cache remove can fail if there are other holders */ in dm_bufio_release()
2064 if (cache_remove(&c->cache, b)) { in dm_bufio_release()
2080 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty()
2083 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
2087 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_partial_buffer_dirty()
2089 if (!test_and_set_bit(B_DIRTY, &b->state)) { in dm_bufio_mark_partial_buffer_dirty()
2090 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
2091 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
2092 cache_mark(&c->cache, b, LIST_DIRTY); in dm_bufio_mark_partial_buffer_dirty()
2094 if (start < b->dirty_start) in dm_bufio_mark_partial_buffer_dirty()
2095 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
2096 if (end > b->dirty_end) in dm_bufio_mark_partial_buffer_dirty()
2097 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
2106 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
2126 * and simultaneously (so that the block layer can merge the writes) and then waited upon.
2129 * Finally, we flush the hardware disk cache.
2135 return test_bit(B_WRITING, &b->state); in is_writing()
2153 nr_buffers = cache_count(&c->cache, LIST_DIRTY); in dm_bufio_write_dirty_buffers()
2154 lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it); in dm_bufio_write_dirty_buffers()
2159 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
2162 nr_buffers--; in dm_bufio_write_dirty_buffers()
2164 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in dm_bufio_write_dirty_buffers()
2167 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in dm_bufio_write_dirty_buffers()
2170 if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
2171 cache_mark(&c->cache, b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
2179 wake_up(&c->free_buffer_wait); in dm_bufio_write_dirty_buffers()
2182 a = xchg(&c->async_write_error, 0); in dm_bufio_write_dirty_buffers()
2192 * Use dm-io to send an empty barrier to flush the device.
2200 .client = c->dm_io, in dm_bufio_issue_flush()
2203 .bdev = c->bdev, in dm_bufio_issue_flush()
2209 return -EINVAL; in dm_bufio_issue_flush()
2216 * Use dm-io to send a discard request to flush the device.
2218 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) in dm_bufio_issue_discard() argument
2224 .client = c->dm_io, in dm_bufio_issue_discard()
2227 .bdev = c->bdev, in dm_bufio_issue_discard()
2228 .sector = block_to_sector(c, block), in dm_bufio_issue_discard()
2233 return -EINVAL; /* discards are optional */ in dm_bufio_issue_discard()
2239 static void forget_buffer(struct dm_bufio_client *c, sector_t block) in forget_buffer() argument
2243 b = cache_get(&c->cache, block); in forget_buffer()
2245 if (likely(!smp_load_acquire(&b->state))) { in forget_buffer()
2246 if (cache_remove(&c->cache, b)) in forget_buffer()
2262 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) in dm_bufio_forget() argument
2265 forget_buffer(c, block); in dm_bufio_forget()
2272 return b->state ? ER_DONT_EVICT : ER_EVICT; in idle()
2275 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) in dm_bufio_forget_buffers() argument
2278 cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake); in dm_bufio_forget_buffers()
2285 c->minimum_buffers = n; in dm_bufio_set_minimum_buffers()
2291 return c->block_size; in dm_bufio_get_block_size()
2297 sector_t s = bdev_nr_sectors(c->bdev); in dm_bufio_get_device_size()
2299 if (s >= c->start) in dm_bufio_get_device_size()
2300 s -= c->start; in dm_bufio_get_device_size()
2303 if (likely(c->sectors_per_block_bits >= 0)) in dm_bufio_get_device_size()
2304 s >>= c->sectors_per_block_bits; in dm_bufio_get_device_size()
2306 sector_div(s, c->block_size >> SECTOR_SHIFT); in dm_bufio_get_device_size()
2313 return c->dm_io; in dm_bufio_get_dm_io_client()
2319 return b->block; in dm_bufio_get_block_number()
2325 return b->data; in dm_bufio_get_block_data()
2337 return b->c; in dm_bufio_get_client()
2348 (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode); in warn_leak()
2350 stack_trace_print(b->stack_entries, b->stack_len, 1); in warn_leak()
2352 atomic_set(&b->hold_count, 0); in warn_leak()
2366 * An optimization so that the buffers are not written one-by-one. in drop_buffers()
2378 cache_iterate(&c->cache, i, warn_leak, &warned); in drop_buffers()
2387 WARN_ON(cache_count(&c->cache, i)); in drop_buffers()
2396 if (likely(c->sectors_per_block_bits >= 0)) in get_retain_buffers()
2397 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT; in get_retain_buffers()
2399 retain_bytes /= c->block_size; in get_retain_buffers()
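
When the block size is a power of two, the shift above is just a division by the block size: sectors_per_block_bits + SECTOR_SHIFT equals log2(block_size). For example, with 4 KiB blocks (3 + 9 = 12 bits), retaining 256 KiB keeps 64 buffers:

	#include <assert.h>

	int main(void)
	{
		unsigned long retain_bytes = 262144;	/* 256 KiB */

		assert((retain_bytes >> (3 + 9)) == 64);	/* 262144 / 4096 */
		return 0;
	}
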
2410 unsigned long count = cache_total(&c->cache); in __scan()
2414 if (count - freed <= retain_target) in __scan()
2415 atomic_long_set(&c->need_shrink, 0); in __scan()
2416 if (!atomic_long_read(&c->need_shrink)) in __scan()
2419 b = cache_evict(&c->cache, l, in __scan()
2427 atomic_long_dec(&c->need_shrink); in __scan()
2452 c = shrink->private_data; in dm_bufio_shrink_scan()
2453 atomic_long_add(sc->nr_to_scan, &c->need_shrink); in dm_bufio_shrink_scan()
2454 queue_work(dm_bufio_wq, &c->shrink_work); in dm_bufio_shrink_scan()
2456 return sc->nr_to_scan; in dm_bufio_shrink_scan()
2461 struct dm_bufio_client *c = shrink->private_data; in dm_bufio_shrink_count()
2462 unsigned long count = cache_total(&c->cache); in dm_bufio_shrink_count()
2464 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink); in dm_bufio_shrink_count()
2469 count -= retain_target; in dm_bufio_shrink_count()
2474 count -= queued_for_cleanup; in dm_bufio_shrink_count()
2494 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) { in dm_bufio_client_create()
2495 DMERR("%s: block size not specified or is not multiple of 512b", __func__); in dm_bufio_client_create()
2496 r = -EINVAL; in dm_bufio_client_create()
2503 r = -ENOMEM; in dm_bufio_client_create()
2506 cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0); in dm_bufio_client_create()
2508 c->bdev = bdev; in dm_bufio_client_create()
2509 c->block_size = block_size; in dm_bufio_client_create()
2511 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; in dm_bufio_client_create()
2513 c->sectors_per_block_bits = -1; in dm_bufio_client_create()
2515 c->alloc_callback = alloc_callback; in dm_bufio_client_create()
2516 c->write_callback = write_callback; in dm_bufio_client_create()
2519 c->no_sleep = true; in dm_bufio_client_create()
2523 mutex_init(&c->lock); in dm_bufio_client_create()
2524 spin_lock_init(&c->spinlock); in dm_bufio_client_create()
2525 INIT_LIST_HEAD(&c->reserved_buffers); in dm_bufio_client_create()
2526 c->need_reserved_buffers = reserved_buffers; in dm_bufio_client_create()
2530 init_waitqueue_head(&c->free_buffer_wait); in dm_bufio_client_create()
2531 c->async_write_error = 0; in dm_bufio_client_create()
2533 c->dm_io = dm_io_client_create(); in dm_bufio_client_create()
2534 if (IS_ERR(c->dm_io)) { in dm_bufio_client_create()
2535 r = PTR_ERR(c->dm_io); in dm_bufio_client_create()
2542 snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u", in dm_bufio_client_create()
2544 c->slab_cache = kmem_cache_create(slab_name, block_size, align, in dm_bufio_client_create()
2546 if (!c->slab_cache) { in dm_bufio_client_create()
2547 r = -ENOMEM; in dm_bufio_client_create()
2552 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u", in dm_bufio_client_create()
2555 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", in dm_bufio_client_create()
2557 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size, in dm_bufio_client_create()
2559 if (!c->slab_buffer) { in dm_bufio_client_create()
2560 r = -ENOMEM; in dm_bufio_client_create()
2564 while (c->need_reserved_buffers) { in dm_bufio_client_create()
2568 r = -ENOMEM; in dm_bufio_client_create()
2574 INIT_WORK(&c->shrink_work, shrink_work); in dm_bufio_client_create()
2575 atomic_long_set(&c->need_shrink, 0); in dm_bufio_client_create()
2577 c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)", in dm_bufio_client_create()
2578 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); in dm_bufio_client_create()
2579 if (!c->shrinker) { in dm_bufio_client_create()
2580 r = -ENOMEM; in dm_bufio_client_create()
2584 c->shrinker->count_objects = dm_bufio_shrink_count; in dm_bufio_client_create()
2585 c->shrinker->scan_objects = dm_bufio_shrink_scan; in dm_bufio_client_create()
2586 c->shrinker->seeks = 1; in dm_bufio_client_create()
2587 c->shrinker->batch = 0; in dm_bufio_client_create()
2588 c->shrinker->private_data = c; in dm_bufio_client_create()
2590 shrinker_register(c->shrinker); in dm_bufio_client_create()
2594 list_add(&c->client_list, &dm_bufio_all_clients); in dm_bufio_client_create()
2601 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_create()
2602 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_create()
2604 list_del(&b->lru.list); in dm_bufio_client_create()
2607 kmem_cache_destroy(c->slab_cache); in dm_bufio_client_create()
2608 kmem_cache_destroy(c->slab_buffer); in dm_bufio_client_create()
2609 dm_io_client_destroy(c->dm_io); in dm_bufio_client_create()
2611 mutex_destroy(&c->lock); in dm_bufio_client_create()
2612 if (c->no_sleep) in dm_bufio_client_create()
2630 shrinker_free(c->shrinker); in dm_bufio_client_destroy()
2631 flush_work(&c->shrink_work); in dm_bufio_client_destroy()
2635 list_del(&c->client_list); in dm_bufio_client_destroy()
2636 dm_bufio_client_count--; in dm_bufio_client_destroy()
2641 WARN_ON(c->need_reserved_buffers); in dm_bufio_client_destroy()
2643 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_destroy()
2644 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_destroy()
2646 list_del(&b->lru.list); in dm_bufio_client_destroy()
2651 if (cache_count(&c->cache, i)) in dm_bufio_client_destroy()
2652 DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i)); in dm_bufio_client_destroy()
2655 WARN_ON(cache_count(&c->cache, i)); in dm_bufio_client_destroy()
2657 cache_destroy(&c->cache); in dm_bufio_client_destroy()
2658 kmem_cache_destroy(c->slab_cache); in dm_bufio_client_destroy()
2659 kmem_cache_destroy(c->slab_buffer); in dm_bufio_client_destroy()
2660 dm_io_client_destroy(c->dm_io); in dm_bufio_client_destroy()
2661 mutex_destroy(&c->lock); in dm_bufio_client_destroy()
2662 if (c->no_sleep) in dm_bufio_client_destroy()
2671 flush_work(&c->shrink_work); in dm_bufio_client_reset()
2677 c->start = start; in dm_bufio_set_sector_offset()
2681 /*--------------------------------------------------------------*/
2695 return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz); in older_than()
2722 if (!(params->gfp & __GFP_FS) || in select_for_evict()
2723 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { in select_for_evict()
2724 if (test_bit_acquire(B_READING, &b->state) || in select_for_evict()
2725 test_bit(B_WRITING, &b->state) || in select_for_evict()
2726 test_bit(B_DIRTY, &b->state)) in select_for_evict()
2730 return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP; in select_for_evict()
2742 b = cache_evict(&c->cache, list_mode, select_for_evict, params); in __evict_many()
2746 last_accessed = READ_ONCE(b->last_accessed); in __evict_many()
2747 if (time_after_eq(params->last_accessed, last_accessed)) in __evict_many()
2748 params->last_accessed = last_accessed; in __evict_many()
2775 count = cache_total(&c->cache); in evict_old_buffers()
2777 __evict_many(c, ¶ms, LIST_CLEAN, count - retain); in evict_old_buffers()
2805 /*--------------------------------------------------------------*/
2836 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer)) in __insert_client()
2838 h = h->next; in __insert_client()
2841 list_add_tail(&new_client->client_list, h); in __insert_client()
2864 c->oldest_buffer = params.last_accessed; in __evict_a_few()
2888 unsigned long threshold = dm_bufio_cache_size - in evict_old()
2907 *--------------------------------------------------------------
2909 *--------------------------------------------------------------
2926 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(), in dm_bufio_init()
2945 return -ENOMEM; in dm_bufio_init()
2996 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
3020 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
3022 MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");