1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2009-2011 Red Hat, Inc.
4  *
5  * Author: Mikulas Patocka <mpatocka@redhat.com>
6  *
7  * This file is released under the GPL.
8  */
9 
10 #include <linux/dm-bufio.h>
11 
12 #include <linux/device-mapper.h>
13 #include <linux/dm-io.h>
14 #include <linux/slab.h>
15 #include <linux/sched/mm.h>
16 #include <linux/jiffies.h>
17 #include <linux/vmalloc.h>
18 #include <linux/shrinker.h>
19 #include <linux/module.h>
20 #include <linux/rbtree.h>
21 #include <linux/stacktrace.h>
22 #include <linux/jump_label.h>
23 
24 #include "dm.h"
25 
26 #define DM_MSG_PREFIX "bufio"
27 
28 /*
29  * Memory management policy:
30  *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
31  *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
32  *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
33  *	Start background writeback when the number of dirty buffers exceeds
34  *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
35  */
36 #define DM_BUFIO_MIN_BUFFERS		8
37 
38 #define DM_BUFIO_MEMORY_PERCENT		2
39 #define DM_BUFIO_VMALLOC_PERCENT	25
40 #define DM_BUFIO_WRITEBACK_RATIO	3
41 #define DM_BUFIO_LOW_WATERMARK_RATIO	16
42 
43 /*
44  * Check buffer ages in this interval (seconds)
45  */
46 #define DM_BUFIO_WORK_TIMER_SECS	30
47 
48 /*
49  * Free buffers when they are older than this (seconds)
50  */
51 #define DM_BUFIO_DEFAULT_AGE_SECS	300
52 
53 /*
54  * The number of bytes of cached data to keep around.
55  */
56 #define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
57 
58 /*
59  * Align buffer writes to this boundary.
60  * Tests show that SSDs have the highest IOPS when using 4k writes.
61  */
62 #define DM_BUFIO_WRITE_ALIGN		4096
63 
64 /*
65  * dm_buffer->list_mode
66  */
67 #define LIST_CLEAN	0
68 #define LIST_DIRTY	1
69 #define LIST_SIZE	2
70 
71 #define SCAN_RESCHED_CYCLE	16
72 
73 /*--------------------------------------------------------------*/
74 
75 /*
76  * Rather than use an LRU list, we use a clock algorithm where entries
77  * are held in a circular list.  When an entry is 'hit' a reference bit
78  * is set.  The least recently used entry is approximated by running a
79  * cursor around the list selecting unreferenced entries. Referenced
80  * entries have their reference bit cleared as the cursor passes them.
81  */
82 struct lru_entry {
83 	struct list_head list;
84 	atomic_t referenced;
85 };
86 
87 struct lru_iter {
88 	struct lru *lru;
89 	struct list_head list;
90 	struct lru_entry *stop;
91 	struct lru_entry *e;
92 };
93 
94 struct lru {
95 	struct list_head *cursor;
96 	unsigned long count;
97 
98 	struct list_head iterators;
99 };
100 
101 /*--------------*/
102 
103 static void lru_init(struct lru *lru)
104 {
105 	lru->cursor = NULL;
106 	lru->count = 0;
107 	INIT_LIST_HEAD(&lru->iterators);
108 }
109 
110 static void lru_destroy(struct lru *lru)
111 {
112 	WARN_ON_ONCE(lru->cursor);
113 	WARN_ON_ONCE(!list_empty(&lru->iterators));
114 }
115 
116 /*
117  * Insert a new entry into the lru.
118  */
119 static void lru_insert(struct lru *lru, struct lru_entry *le)
120 {
121 	/*
122 	 * Don't be tempted to set this to 1; doing so makes the lru
123 	 * aspect perform poorly.
124 	 */
125 	atomic_set(&le->referenced, 0);
126 
127 	if (lru->cursor) {
128 		list_add_tail(&le->list, lru->cursor);
129 	} else {
130 		INIT_LIST_HEAD(&le->list);
131 		lru->cursor = &le->list;
132 	}
133 	lru->count++;
134 }
135 
136 /*--------------*/
137 
138 /*
139  * Convert a list_head pointer to an lru_entry pointer.
140  */
141 static inline struct lru_entry *to_le(struct list_head *l)
142 {
143 	return container_of(l, struct lru_entry, list);
144 }
145 
146 /*
147  * Initialize an lru_iter and add it to the list of cursors in the lru.
148  */
149 static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
150 {
151 	it->lru = lru;
152 	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
153 	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
154 	list_add(&it->list, &lru->iterators);
155 }
156 
157 /*
158  * Remove an lru_iter from the list of cursors in the lru.
159  */
160 static inline void lru_iter_end(struct lru_iter *it)
161 {
162 	list_del(&it->list);
163 }
164 
165 /* Predicate function type to be used with lru_iter_next */
166 typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
167 
168 /*
169  * Advance the cursor to the next entry that passes the
170  * predicate, and return that entry.  Returns NULL if the
171  * iteration is complete.
172  */
173 static struct lru_entry *lru_iter_next(struct lru_iter *it,
174 				       iter_predicate pred, void *context)
175 {
176 	struct lru_entry *e;
177 
178 	while (it->e) {
179 		e = it->e;
180 
181 		/* advance the cursor */
182 		if (it->e == it->stop)
183 			it->e = NULL;
184 		else
185 			it->e = to_le(it->e->list.next);
186 
187 		if (pred(e, context))
188 			return e;
189 	}
190 
191 	return NULL;
192 }
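
/*
 * Typical iteration:
 *
 *	lru_iter_begin(lru, &it);
 *	while ((le = lru_iter_next(&it, pred, context)))
 *		... use le ...
 *	lru_iter_end(&it);
 */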
193 
194 /*
195  * Invalidate a specific lru_entry and update all cursors in
196  * the lru accordingly.
197  */
198 static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
199 {
200 	struct lru_iter *it;
201 
202 	list_for_each_entry(it, &lru->iterators, list) {
203 		/* Move it->e forwards if necessary. */
204 		if (it->e == e) {
205 			it->e = to_le(it->e->list.next);
206 			if (it->e == e)
207 				it->e = NULL;
208 		}
209 
210 		/* Move it->stop backwards if necessary. */
211 		if (it->stop == e) {
212 			it->stop = to_le(it->stop->list.prev);
213 			if (it->stop == e)
214 				it->stop = NULL;
215 		}
216 	}
217 }
218 
219 /*--------------*/
220 
221 /*
222  * Remove a specific entry from the lru.
223  */
224 static void lru_remove(struct lru *lru, struct lru_entry *le)
225 {
226 	lru_iter_invalidate(lru, le);
227 	if (lru->count == 1) {
228 		lru->cursor = NULL;
229 	} else {
230 		if (lru->cursor == &le->list)
231 			lru->cursor = lru->cursor->next;
232 		list_del(&le->list);
233 	}
234 	lru->count--;
235 }
236 
237 /*
238  * Mark as referenced.
239  */
240 static inline void lru_reference(struct lru_entry *le)
241 {
242 	atomic_set(&le->referenced, 1);
243 }
244 
245 /*--------------*/
246 
247 /*
248  * Remove the (approximately) least recently used entry that passes the predicate.
249  * Returns NULL on failure.
250  */
251 enum evict_result {
252 	ER_EVICT,
253 	ER_DONT_EVICT,
254 	ER_STOP, /* stop looking for something to evict */
255 };
256 
257 typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
258 
259 static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
260 {
261 	unsigned long tested = 0;
262 	struct list_head *h = lru->cursor;
263 	struct lru_entry *le;
264 
265 	if (!h)
266 		return NULL;
267 	/*
268 	 * In the worst case we have to loop around twice. Once to clear
269 	 * the reference flags, and then again to discover the predicate
270 	 * fails for all entries.
271 	 */
272 	while (tested < lru->count) {
273 		le = container_of(h, struct lru_entry, list);
274 
275 		if (atomic_read(&le->referenced)) {
276 			atomic_set(&le->referenced, 0);
277 		} else {
278 			tested++;
279 			switch (pred(le, context)) {
280 			case ER_EVICT:
281 				/*
282 				 * Adjust the cursor, so we start the next
283 				 * search from here.
284 				 */
285 				lru->cursor = le->list.next;
286 				lru_remove(lru, le);
287 				return le;
288 
289 			case ER_DONT_EVICT:
290 				break;
291 
292 			case ER_STOP:
293 				lru->cursor = le->list.next;
294 				return NULL;
295 			}
296 		}
297 
298 		h = h->next;
299 
300 		if (!no_sleep)
301 			cond_resched();
302 	}
303 
304 	return NULL;
305 }
306 
307 /*--------------------------------------------------------------*/
308 
309 /*
310  * Buffer state bits.
311  */
312 #define B_READING	0
313 #define B_WRITING	1
314 #define B_DIRTY		2
315 
316 /*
317  * Describes how the block was allocated:
318  * kmem_cache_alloc(), kmalloc(), __get_free_pages() or vmalloc().
319  * See the comment at alloc_buffer_data.
320  */
321 enum data_mode {
322 	DATA_MODE_SLAB = 0,
323 	DATA_MODE_KMALLOC = 1,
324 	DATA_MODE_GET_FREE_PAGES = 2,
325 	DATA_MODE_VMALLOC = 3,
326 	DATA_MODE_LIMIT = 4
327 };
328 
329 struct dm_buffer {
330 	/* protected by the locks in dm_buffer_cache */
331 	struct rb_node node;
332 
333 	/* immutable, so don't need protecting */
334 	sector_t block;
335 	void *data;
336 	unsigned char data_mode;		/* DATA_MODE_* */
337 
338 	/*
339 	 * These two fields are used in isolation, so do not need
340 	 * a surrounding lock.
341 	 */
342 	atomic_t hold_count;
343 	unsigned long last_accessed;
344 
345 	/*
346 	 * Everything else is protected by the mutex in
347 	 * dm_bufio_client
348 	 */
349 	unsigned long state;
350 	struct lru_entry lru;
351 	unsigned char list_mode;		/* LIST_* */
352 	blk_status_t read_error;
353 	blk_status_t write_error;
354 	unsigned int dirty_start;
355 	unsigned int dirty_end;
356 	unsigned int write_start;
357 	unsigned int write_end;
358 	struct list_head write_list;
359 	struct dm_bufio_client *c;
360 	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
361 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
362 #define MAX_STACK 10
363 	unsigned int stack_len;
364 	unsigned long stack_entries[MAX_STACK];
365 #endif
366 };
367 
368 /*--------------------------------------------------------------*/
369 
370 /*
371  * The buffer cache manages buffers, particularly:
372  *  - inc/dec of holder count
373  *  - setting the last_accessed field
374  *  - maintains clean/dirty state along with lru
375  *  - selecting buffers that match predicates
376  *
377  * It does *not* handle:
378  *  - allocation/freeing of buffers.
379  *  - IO
380  *  - Eviction or cache sizing.
381  *
382  * cache_get() and cache_put() are threadsafe; you do not need to
383  * protect these calls with a surrounding mutex.  All the other
384  * methods are not threadsafe; they do use locking primitives, but
385  * only enough to ensure get/put are threadsafe.
386  */
387 
388 struct buffer_tree {
389 	union {
390 		struct rw_semaphore lock;
391 		rwlock_t spinlock;
392 	} u;
393 	struct rb_root root;
394 } ____cacheline_aligned_in_smp;
395 
396 struct dm_buffer_cache {
397 	struct lru lru[LIST_SIZE];
398 	/*
399 	 * We spread entries across multiple trees to reduce contention
400 	 * on the locks.
401 	 */
402 	unsigned int num_locks;
403 	bool no_sleep;
404 	struct buffer_tree trees[];
405 };
406 
407 static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
408 
409 static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
410 {
411 	return dm_hash_locks_index(block, num_locks);
412 }
413 
414 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
415 {
416 	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
417 		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
418 	else
419 		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
420 }
421 
422 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
423 {
424 	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
425 		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
426 	else
427 		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
428 }
429 
430 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
431 {
432 	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
433 		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
434 	else
435 		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
436 }
437 
438 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
439 {
440 	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
441 		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
442 	else
443 		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
444 }
445 
446 /*
447  * Sometimes we want to repeatedly get and drop locks as part of an iteration.
448  * This struct helps avoid redundant drops and takes of the same lock.
449  */
450 struct lock_history {
451 	struct dm_buffer_cache *cache;
452 	bool write;
453 	unsigned int previous;
454 	unsigned int no_previous;
455 };
456 
457 static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
458 {
459 	lh->cache = cache;
460 	lh->write = write;
461 	lh->no_previous = cache->num_locks;
462 	lh->previous = lh->no_previous;
463 }
464 
465 static void __lh_lock(struct lock_history *lh, unsigned int index)
466 {
467 	if (lh->write) {
468 		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
469 			write_lock_bh(&lh->cache->trees[index].u.spinlock);
470 		else
471 			down_write(&lh->cache->trees[index].u.lock);
472 	} else {
473 		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
474 			read_lock_bh(&lh->cache->trees[index].u.spinlock);
475 		else
476 			down_read(&lh->cache->trees[index].u.lock);
477 	}
478 }
479 
480 static void __lh_unlock(struct lock_history *lh, unsigned int index)
481 {
482 	if (lh->write) {
483 		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
484 			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
485 		else
486 			up_write(&lh->cache->trees[index].u.lock);
487 	} else {
488 		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
489 			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
490 		else
491 			up_read(&lh->cache->trees[index].u.lock);
492 	}
493 }
494 
495 /*
496  * Make sure you call this since it will unlock the final lock.
497  */
498 static void lh_exit(struct lock_history *lh)
499 {
500 	if (lh->previous != lh->no_previous) {
501 		__lh_unlock(lh, lh->previous);
502 		lh->previous = lh->no_previous;
503 	}
504 }
505 
506 /*
507  * Named 'next' because there is no corresponding
508  * 'up/unlock' call since it's done automatically.
509  */
510 static void lh_next(struct lock_history *lh, sector_t b)
511 {
512 	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
513 
514 	if (lh->previous != lh->no_previous) {
515 		if (lh->previous != index) {
516 			__lh_unlock(lh, lh->previous);
517 			__lh_lock(lh, index);
518 			lh->previous = index;
519 		}
520 	} else {
521 		__lh_lock(lh, index);
522 		lh->previous = index;
523 	}
524 }
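
/*
 * Typical use (see cache_evict() and cache_iterate() below):
 *
 *	lh_init(&lh, bc, true);
 *	for each buffer visited:
 *		lh_next(&lh, b->block);	-- takes or keeps the tree lock for that block
 *	lh_exit(&lh);			-- drops whatever lock is still held
 */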
525 
526 static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
527 {
528 	return container_of(le, struct dm_buffer, lru);
529 }
530 
531 static struct dm_buffer *list_to_buffer(struct list_head *l)
532 {
533 	struct lru_entry *le = list_entry(l, struct lru_entry, list);
534 
535 	return le_to_buffer(le);
536 }
537 
538 static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
539 {
540 	unsigned int i;
541 
542 	bc->num_locks = num_locks;
543 	bc->no_sleep = no_sleep;
544 
545 	for (i = 0; i < bc->num_locks; i++) {
546 		if (no_sleep)
547 			rwlock_init(&bc->trees[i].u.spinlock);
548 		else
549 			init_rwsem(&bc->trees[i].u.lock);
550 		bc->trees[i].root = RB_ROOT;
551 	}
552 
553 	lru_init(&bc->lru[LIST_CLEAN]);
554 	lru_init(&bc->lru[LIST_DIRTY]);
555 }
556 
557 static void cache_destroy(struct dm_buffer_cache *bc)
558 {
559 	unsigned int i;
560 
561 	for (i = 0; i < bc->num_locks; i++)
562 		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
563 
564 	lru_destroy(&bc->lru[LIST_CLEAN]);
565 	lru_destroy(&bc->lru[LIST_DIRTY]);
566 }
567 
568 /*--------------*/
569 
570 /*
571  * not threadsafe, or racy depending on how you look at it
572  */
573 static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
574 {
575 	return bc->lru[list_mode].count;
576 }
577 
578 static inline unsigned long cache_total(struct dm_buffer_cache *bc)
579 {
580 	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
581 }
582 
583 /*--------------*/
584 
585 /*
586  * Gets a specific buffer, indexed by block.
587  * If the buffer is found then its holder count will be incremented and
588  * lru_reference will be called.
589  *
590  * threadsafe
591  */
592 static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
593 {
594 	struct rb_node *n = root->rb_node;
595 	struct dm_buffer *b;
596 
597 	while (n) {
598 		b = container_of(n, struct dm_buffer, node);
599 
600 		if (b->block == block)
601 			return b;
602 
603 		n = block < b->block ? n->rb_left : n->rb_right;
604 	}
605 
606 	return NULL;
607 }
608 
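/* Take another reference on the buffer and refresh its access timestamp. */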
609 static void __cache_inc_buffer(struct dm_buffer *b)
610 {
611 	atomic_inc(&b->hold_count);
612 	WRITE_ONCE(b->last_accessed, jiffies);
613 }
614 
615 static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
616 {
617 	struct dm_buffer *b;
618 
619 	cache_read_lock(bc, block);
620 	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
621 	if (b) {
622 		lru_reference(&b->lru);
623 		__cache_inc_buffer(b);
624 	}
625 	cache_read_unlock(bc, block);
626 
627 	return b;
628 }
629 
630 /*--------------*/
631 
632 /*
633  * Returns true if the hold count hits zero.
634  * threadsafe
635  */
636 static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
637 {
638 	bool r;
639 
640 	cache_read_lock(bc, b->block);
641 	BUG_ON(!atomic_read(&b->hold_count));
642 	r = atomic_dec_and_test(&b->hold_count);
643 	cache_read_unlock(bc, b->block);
644 
645 	return r;
646 }
647 
648 /*--------------*/
649 
650 typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
651 
652 /*
653  * Evicts a buffer based on a predicate.  The oldest buffer that
654  * matches the predicate will be selected.  In addition to matching
655  * the predicate, the selected buffer must have a hold_count of zero.
656  */
657 struct evict_wrapper {
658 	struct lock_history *lh;
659 	b_predicate pred;
660 	void *context;
661 };
662 
663 /*
664  * Wraps the buffer predicate turning it into an lru predicate.  Adds
665  * extra test for hold_count.
666  */
667 static enum evict_result __evict_pred(struct lru_entry *le, void *context)
668 {
669 	struct evict_wrapper *w = context;
670 	struct dm_buffer *b = le_to_buffer(le);
671 
672 	lh_next(w->lh, b->block);
673 
674 	if (atomic_read(&b->hold_count))
675 		return ER_DONT_EVICT;
676 
677 	return w->pred(b, w->context);
678 }
679 
680 static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
681 				       b_predicate pred, void *context,
682 				       struct lock_history *lh)
683 {
684 	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
685 	struct lru_entry *le;
686 	struct dm_buffer *b;
687 
688 	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
689 	if (!le)
690 		return NULL;
691 
692 	b = le_to_buffer(le);
693 	/* __evict_pred will have locked the appropriate tree. */
694 	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
695 
696 	return b;
697 }
698 
699 static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
700 				     b_predicate pred, void *context)
701 {
702 	struct dm_buffer *b;
703 	struct lock_history lh;
704 
705 	lh_init(&lh, bc, true);
706 	b = __cache_evict(bc, list_mode, pred, context, &lh);
707 	lh_exit(&lh);
708 
709 	return b;
710 }
711 
712 /*--------------*/
713 
714 /*
715  * Mark a buffer as clean or dirty. Not threadsafe.
716  */
717 static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
718 {
719 	cache_write_lock(bc, b->block);
720 	if (list_mode != b->list_mode) {
721 		lru_remove(&bc->lru[b->list_mode], &b->lru);
722 		b->list_mode = list_mode;
723 		lru_insert(&bc->lru[b->list_mode], &b->lru);
724 	}
725 	cache_write_unlock(bc, b->block);
726 }
727 
728 /*--------------*/
729 
730 /*
731  * Runs through the lru associated with 'old_mode'; buffers that match the
732  * predicate are moved to 'new_mode'.  Not threadsafe.
733  */
734 static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
735 			      b_predicate pred, void *context, struct lock_history *lh)
736 {
737 	struct lru_entry *le;
738 	struct dm_buffer *b;
739 	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
740 
741 	while (true) {
742 		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
743 		if (!le)
744 			break;
745 
746 		b = le_to_buffer(le);
747 		b->list_mode = new_mode;
748 		lru_insert(&bc->lru[b->list_mode], &b->lru);
749 	}
750 }
751 
752 static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
753 			    b_predicate pred, void *context)
754 {
755 	struct lock_history lh;
756 
757 	lh_init(&lh, bc, true);
758 	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
759 	lh_exit(&lh);
760 }
761 
762 /*--------------*/
763 
764 /*
765  * Iterates through all clean or dirty entries calling a function for each
766  * entry.  The callback may terminate the iteration early.  Not threadsafe.
767  */
768 
769 /*
770  * Iterator functions should return one of these actions to indicate
771  * how the iteration should proceed.
772  */
773 enum it_action {
774 	IT_NEXT,
775 	IT_COMPLETE,
776 };
777 
778 typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
779 
780 static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
781 			    iter_fn fn, void *context, struct lock_history *lh)
782 {
783 	struct lru *lru = &bc->lru[list_mode];
784 	struct lru_entry *le, *first;
785 
786 	if (!lru->cursor)
787 		return;
788 
789 	first = le = to_le(lru->cursor);
790 	do {
791 		struct dm_buffer *b = le_to_buffer(le);
792 
793 		lh_next(lh, b->block);
794 
795 		switch (fn(b, context)) {
796 		case IT_NEXT:
797 			break;
798 
799 		case IT_COMPLETE:
800 			return;
801 		}
802 		cond_resched();
803 
804 		le = to_le(le->list.next);
805 	} while (le != first);
806 }
807 
808 static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
809 			  iter_fn fn, void *context)
810 {
811 	struct lock_history lh;
812 
813 	lh_init(&lh, bc, false);
814 	__cache_iterate(bc, list_mode, fn, context, &lh);
815 	lh_exit(&lh);
816 }
817 
818 /*--------------*/
819 
820 /*
821  * Passes ownership of the buffer to the cache. Returns false if the
822  * buffer was already present (in which case ownership does not pass).
823  * e.g. because of a race with another thread.
824  *
825  * Holder count should be 1 on insertion.
826  *
827  * Not threadsafe.
828  */
829 static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
830 {
831 	struct rb_node **new = &root->rb_node, *parent = NULL;
832 	struct dm_buffer *found;
833 
834 	while (*new) {
835 		found = container_of(*new, struct dm_buffer, node);
836 
837 		if (found->block == b->block)
838 			return false;
839 
840 		parent = *new;
841 		new = b->block < found->block ?
842 			&found->node.rb_left : &found->node.rb_right;
843 	}
844 
845 	rb_link_node(&b->node, parent, new);
846 	rb_insert_color(&b->node, root);
847 
848 	return true;
849 }
850 
851 static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
852 {
853 	bool r;
854 
855 	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
856 		return false;
857 
858 	cache_write_lock(bc, b->block);
859 	BUG_ON(atomic_read(&b->hold_count) != 1);
860 	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
861 	if (r)
862 		lru_insert(&bc->lru[b->list_mode], &b->lru);
863 	cache_write_unlock(bc, b->block);
864 
865 	return r;
866 }
867 
868 /*--------------*/
869 
870 /*
871  * Removes the buffer from the cache; ownership of the buffer passes back to the caller.
872  * Fails unless the hold_count is one (i.e. the caller holds the only reference).
873  *
874  * Not threadsafe.
875  */
876 static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
877 {
878 	bool r;
879 
880 	cache_write_lock(bc, b->block);
881 
882 	if (atomic_read(&b->hold_count) != 1) {
883 		r = false;
884 	} else {
885 		r = true;
886 		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
887 		lru_remove(&bc->lru[b->list_mode], &b->lru);
888 	}
889 
890 	cache_write_unlock(bc, b->block);
891 
892 	return r;
893 }
894 
895 /*--------------*/
896 
897 typedef void (*b_release)(struct dm_buffer *);
898 
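/*
 * Return the buffer with the lowest block number that is >= 'block', or
 * NULL if there is no such buffer.  Used by __remove_range() to walk a
 * range of blocks in the tree.
 */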
899 static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
900 {
901 	struct rb_node *n = root->rb_node;
902 	struct dm_buffer *b;
903 	struct dm_buffer *best = NULL;
904 
905 	while (n) {
906 		b = container_of(n, struct dm_buffer, node);
907 
908 		if (b->block == block)
909 			return b;
910 
911 		if (block <= b->block) {
912 			n = n->rb_left;
913 			best = b;
914 		} else {
915 			n = n->rb_right;
916 		}
917 	}
918 
919 	return best;
920 }
921 
922 static void __remove_range(struct dm_buffer_cache *bc,
923 			   struct rb_root *root,
924 			   sector_t begin, sector_t end,
925 			   b_predicate pred, b_release release)
926 {
927 	struct dm_buffer *b;
928 
929 	while (true) {
930 		cond_resched();
931 
932 		b = __find_next(root, begin);
933 		if (!b || (b->block >= end))
934 			break;
935 
936 		begin = b->block + 1;
937 
938 		if (atomic_read(&b->hold_count))
939 			continue;
940 
941 		if (pred(b, NULL) == ER_EVICT) {
942 			rb_erase(&b->node, root);
943 			lru_remove(&bc->lru[b->list_mode], &b->lru);
944 			release(b);
945 		}
946 	}
947 }
948 
949 static void cache_remove_range(struct dm_buffer_cache *bc,
950 			       sector_t begin, sector_t end,
951 			       b_predicate pred, b_release release)
952 {
953 	unsigned int i;
954 
955 	BUG_ON(bc->no_sleep);
956 	for (i = 0; i < bc->num_locks; i++) {
957 		down_write(&bc->trees[i].u.lock);
958 		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
959 		up_write(&bc->trees[i].u.lock);
960 	}
961 }
962 
963 /*----------------------------------------------------------------*/
964 
965 /*
966  * Linking of buffers:
967  *	All buffers are linked to buffer_cache with their node field.
968  *
969  *	Clean buffers that are not being written (B_WRITING not set)
970  *	are linked to lru[LIST_CLEAN] with their lru field.
971  *
972  *	Dirty and clean buffers that are being written are linked to
973  *	lru[LIST_DIRTY] with their lru field. When the write
974  *	finishes, the buffer cannot be relinked immediately (because we
975  *	are in an interrupt context and relinking requires process
976  *	context), so some clean-not-writing buffers can be held on
977  *	dirty_lru too.  They are later added to lru in the process
978  *	context.
979  */
980 struct dm_bufio_client {
981 	struct block_device *bdev;
982 	unsigned int block_size;
983 	s8 sectors_per_block_bits;
984 
985 	bool no_sleep;
986 	struct mutex lock;
987 	spinlock_t spinlock;
988 
989 	int async_write_error;
990 
991 	void (*alloc_callback)(struct dm_buffer *buf);
992 	void (*write_callback)(struct dm_buffer *buf);
993 	struct kmem_cache *slab_buffer;
994 	struct kmem_cache *slab_cache;
995 	struct dm_io_client *dm_io;
996 
997 	struct list_head reserved_buffers;
998 	unsigned int need_reserved_buffers;
999 
1000 	unsigned int minimum_buffers;
1001 
1002 	sector_t start;
1003 
1004 	struct shrinker *shrinker;
1005 	struct work_struct shrink_work;
1006 	atomic_long_t need_shrink;
1007 
1008 	wait_queue_head_t free_buffer_wait;
1009 
1010 	struct list_head client_list;
1011 
1012 	/*
1013 	 * Used by global_cleanup to sort the clients list.
1014 	 */
1015 	unsigned long oldest_buffer;
1016 
1017 	struct dm_buffer_cache cache; /* must be last member */
1018 };
1019 
1020 /*----------------------------------------------------------------*/
1021 
1022 #define dm_bufio_in_request()	(!!current->bio_list)
1023 
1024 static void dm_bufio_lock(struct dm_bufio_client *c)
1025 {
1026 	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1027 		spin_lock_bh(&c->spinlock);
1028 	else
1029 		mutex_lock_nested(&c->lock, dm_bufio_in_request());
1030 }
1031 
1032 static void dm_bufio_unlock(struct dm_bufio_client *c)
1033 {
1034 	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1035 		spin_unlock_bh(&c->spinlock);
1036 	else
1037 		mutex_unlock(&c->lock);
1038 }
1039 
1040 /*----------------------------------------------------------------*/
1041 
1042 /*
1043  * Default cache size: available memory divided by the ratio.
1044  */
1045 static unsigned long dm_bufio_default_cache_size;
1046 
1047 /*
1048  * Total cache size set by the user.
1049  */
1050 static unsigned long dm_bufio_cache_size;
1051 
1052 /*
1053  * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
1054  * at any time.  If it disagrees, the user has changed cache size.
1055  */
1056 static unsigned long dm_bufio_cache_size_latch;
1057 
1058 static DEFINE_SPINLOCK(global_spinlock);
1059 
1060 /*
1061  * Buffers are freed after this timeout
1062  */
1063 static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
1064 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1065 
1066 static unsigned long dm_bufio_peak_allocated;
1067 static unsigned long dm_bufio_allocated_kmem_cache;
1068 static unsigned long dm_bufio_allocated_kmalloc;
1069 static unsigned long dm_bufio_allocated_get_free_pages;
1070 static unsigned long dm_bufio_allocated_vmalloc;
1071 static unsigned long dm_bufio_current_allocated;
1072 
1073 /*----------------------------------------------------------------*/
1074 
1075 /*
1076  * The current number of clients.
1077  */
1078 static int dm_bufio_client_count;
1079 
1080 /*
1081  * The list of all clients.
1082  */
1083 static LIST_HEAD(dm_bufio_all_clients);
1084 
1085 /*
1086  * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
1087  */
1088 static DEFINE_MUTEX(dm_bufio_clients_lock);
1089 
1090 static struct workqueue_struct *dm_bufio_wq;
1091 static struct delayed_work dm_bufio_cleanup_old_work;
1092 static struct work_struct dm_bufio_replacement_work;
1093 
1094 
1095 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1096 static void buffer_record_stack(struct dm_buffer *b)
1097 {
1098 	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
1099 }
1100 #endif
1101 
1102 /*----------------------------------------------------------------*/
1103 
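/*
 * Account a buffer's data against the global allocation counters.  Called
 * with unlink == false when the buffer is allocated and unlink == true when
 * it is freed.  If the total goes over dm_bufio_cache_size, the global
 * replacement work is queued.
 */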
1104 static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1105 {
1106 	unsigned char data_mode;
1107 	long diff;
1108 
1109 	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
1110 		&dm_bufio_allocated_kmem_cache,
1111 		&dm_bufio_allocated_kmalloc,
1112 		&dm_bufio_allocated_get_free_pages,
1113 		&dm_bufio_allocated_vmalloc,
1114 	};
1115 
1116 	data_mode = b->data_mode;
1117 	diff = (long)b->c->block_size;
1118 	if (unlink)
1119 		diff = -diff;
1120 
1121 	spin_lock(&global_spinlock);
1122 
1123 	*class_ptr[data_mode] += diff;
1124 
1125 	dm_bufio_current_allocated += diff;
1126 
1127 	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
1128 		dm_bufio_peak_allocated = dm_bufio_current_allocated;
1129 
1130 	if (!unlink) {
1131 		if (dm_bufio_current_allocated > dm_bufio_cache_size)
1132 			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
1133 	}
1134 
1135 	spin_unlock(&global_spinlock);
1136 }
1137 
1138 /*
1139  * Recalculate the cache size limit; called whenever the number of clients changes.
1140  */
1141 static void __cache_size_refresh(void)
1142 {
1143 	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
1144 		return;
1145 	if (WARN_ON(dm_bufio_client_count < 0))
1146 		return;
1147 
1148 	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
1149 
1150 	/*
1151 	 * Use default if set to 0 and report the actual cache size used.
1152 	 */
1153 	if (!dm_bufio_cache_size_latch) {
1154 		(void)cmpxchg(&dm_bufio_cache_size, 0,
1155 			      dm_bufio_default_cache_size);
1156 		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
1157 	}
1158 }
1159 
1160 /*
1161  * Allocating buffer data.
1162  *
1163  * Small buffers are allocated with kmem_cache, to use space optimally.
1164  *
1165  * For large buffers, we choose between get_free_pages and vmalloc.
1166  * Each has advantages and disadvantages.
1167  *
1168  * __get_free_pages can randomly fail if the memory is fragmented.
1169  * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
1170  * as low as 128M) so using it for caching is not appropriate.
1171  *
1172  * If the allocation may fail we use __get_free_pages. Memory fragmentation
1173  * won't have a fatal effect here, but it just causes flushes of some other
1174  * buffers and more I/O will be performed. Don't use __get_free_pages if it
1175  * always fails (i.e. order > MAX_PAGE_ORDER).
1176  *
1177  * If the allocation shouldn't fail we use __vmalloc. This is only for the
1178  * initial reserve allocation, so there's no risk of wasting all vmalloc
1179  * space.
1180  */
1181 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1182 			       unsigned char *data_mode)
1183 {
1184 	if (unlikely(c->slab_cache != NULL)) {
1185 		*data_mode = DATA_MODE_SLAB;
1186 		return kmem_cache_alloc(c->slab_cache, gfp_mask);
1187 	}
1188 
1189 	if (unlikely(c->block_size < PAGE_SIZE)) {
1190 		*data_mode = DATA_MODE_KMALLOC;
1191 		return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE);
1192 	}
1193 
1194 	if (c->block_size <= KMALLOC_MAX_SIZE &&
1195 	    gfp_mask & __GFP_NORETRY) {
1196 		*data_mode = DATA_MODE_GET_FREE_PAGES;
1197 		return (void *)__get_free_pages(gfp_mask,
1198 						c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1199 	}
1200 
1201 	*data_mode = DATA_MODE_VMALLOC;
1202 
1203 	return __vmalloc(c->block_size, gfp_mask);
1204 }
1205 
1206 /*
1207  * Free buffer's data.
1208  */
1209 static void free_buffer_data(struct dm_bufio_client *c,
1210 			     void *data, unsigned char data_mode)
1211 {
1212 	switch (data_mode) {
1213 	case DATA_MODE_SLAB:
1214 		kmem_cache_free(c->slab_cache, data);
1215 		break;
1216 
1217 	case DATA_MODE_KMALLOC:
1218 		kfree(data);
1219 		break;
1220 
1221 	case DATA_MODE_GET_FREE_PAGES:
1222 		free_pages((unsigned long)data,
1223 			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1224 		break;
1225 
1226 	case DATA_MODE_VMALLOC:
1227 		vfree(data);
1228 		break;
1229 
1230 	default:
1231 		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
1232 		       data_mode);
1233 		BUG();
1234 	}
1235 }
1236 
1237 /*
1238  * Allocate buffer and its data.
1239  */
1240 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
1241 {
1242 	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1243 
1244 	if (!b)
1245 		return NULL;
1246 
1247 	b->c = c;
1248 
1249 	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1250 	if (!b->data) {
1251 		kmem_cache_free(c->slab_buffer, b);
1252 		return NULL;
1253 	}
1254 	adjust_total_allocated(b, false);
1255 
1256 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1257 	b->stack_len = 0;
1258 #endif
1259 	return b;
1260 }
1261 
1262 /*
1263  * Free buffer and its data.
1264  */
1265 static void free_buffer(struct dm_buffer *b)
1266 {
1267 	struct dm_bufio_client *c = b->c;
1268 
1269 	adjust_total_allocated(b, true);
1270 	free_buffer_data(c, b->data, b->data_mode);
1271 	kmem_cache_free(c->slab_buffer, b);
1272 }
1273 
1274 /*
1275  *--------------------------------------------------------------------------
1276  * Submit I/O on the buffer.
1277  *
1278  * Bio interface is faster but it has some problems:
1279  *	the vector list is limited (increasing this limit increases
1280  *	memory-consumption per buffer, so it is not viable);
1281  *
1282  *	the memory must be direct-mapped, not vmalloced;
1283  *
1284  * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
1285  * it is not vmalloced, try using the bio interface.
1286  *
1287  * If the buffer is big, if it is vmalloced or if the underlying device
1288  * rejects the bio because it is too large, use dm-io layer to do the I/O.
1289  * The dm-io layer splits the I/O into multiple requests, avoiding the above
1290  * shortcomings.
1291  *--------------------------------------------------------------------------
1292  */
1293 
1294 /*
1295  * dm-io completion routine. It just calls b->end_io, pretending
1296  * that the request was handled directly with the bio interface.
1297  */
1298 static void dmio_complete(unsigned long error, void *context)
1299 {
1300 	struct dm_buffer *b = context;
1301 
1302 	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
1303 }
1304 
1305 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1306 		     unsigned int n_sectors, unsigned int offset,
1307 		     unsigned short ioprio)
1308 {
1309 	int r;
1310 	struct dm_io_request io_req = {
1311 		.bi_opf = op,
1312 		.notify.fn = dmio_complete,
1313 		.notify.context = b,
1314 		.client = b->c->dm_io,
1315 	};
1316 	struct dm_io_region region = {
1317 		.bdev = b->c->bdev,
1318 		.sector = sector,
1319 		.count = n_sectors,
1320 	};
1321 
1322 	if (b->data_mode != DATA_MODE_VMALLOC) {
1323 		io_req.mem.type = DM_IO_KMEM;
1324 		io_req.mem.ptr.addr = (char *)b->data + offset;
1325 	} else {
1326 		io_req.mem.type = DM_IO_VMA;
1327 		io_req.mem.ptr.vma = (char *)b->data + offset;
1328 	}
1329 
1330 	r = dm_io(&io_req, 1, &region, NULL, ioprio);
1331 	if (unlikely(r))
1332 		b->end_io(b, errno_to_blk_status(r));
1333 }
1334 
1335 static void bio_complete(struct bio *bio)
1336 {
1337 	struct dm_buffer *b = bio->bi_private;
1338 	blk_status_t status = bio->bi_status;
1339 
1340 	bio_uninit(bio);
1341 	kfree(bio);
1342 	b->end_io(b, status);
1343 }
1344 
1345 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
1346 		    unsigned int n_sectors, unsigned int offset,
1347 		    unsigned short ioprio)
1348 {
1349 	struct bio *bio;
1350 	char *ptr;
1351 	unsigned int len;
1352 
1353 	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
1354 	if (!bio) {
1355 		use_dmio(b, op, sector, n_sectors, offset, ioprio);
1356 		return;
1357 	}
1358 	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
1359 	bio->bi_iter.bi_sector = sector;
1360 	bio->bi_end_io = bio_complete;
1361 	bio->bi_private = b;
1362 	bio->bi_ioprio = ioprio;
1363 
1364 	ptr = (char *)b->data + offset;
1365 	len = n_sectors << SECTOR_SHIFT;
1366 
1367 	__bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
1368 
1369 	submit_bio(bio);
1370 }
1371 
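/*
 * Convert a block number to a sector on the underlying device.  For example,
 * with 4KiB blocks (sectors_per_block_bits == 3), block 10 maps to sector
 * (10 << 3) + c->start.
 */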
1372 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
1373 {
1374 	sector_t sector;
1375 
1376 	if (likely(c->sectors_per_block_bits >= 0))
1377 		sector = block << c->sectors_per_block_bits;
1378 	else
1379 		sector = block * (c->block_size >> SECTOR_SHIFT);
1380 	sector += c->start;
1381 
1382 	return sector;
1383 }
1384 
1385 static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
1386 		      void (*end_io)(struct dm_buffer *, blk_status_t))
1387 {
1388 	unsigned int n_sectors;
1389 	sector_t sector;
1390 	unsigned int offset, end;
1391 
1392 	b->end_io = end_io;
1393 
1394 	sector = block_to_sector(b->c, b->block);
1395 
1396 	if (op != REQ_OP_WRITE) {
1397 		n_sectors = b->c->block_size >> SECTOR_SHIFT;
1398 		offset = 0;
1399 	} else {
1400 		if (b->c->write_callback)
1401 			b->c->write_callback(b);
1402 		offset = b->write_start;
1403 		end = b->write_end;
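		/*
		 * Round the dirty byte range out to DM_BUFIO_WRITE_ALIGN
		 * boundaries: e.g. a dirty range of 100..200 is widened to
		 * 0..4096 (and then clamped to the block size).
		 */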
1404 		offset &= -DM_BUFIO_WRITE_ALIGN;
1405 		end += DM_BUFIO_WRITE_ALIGN - 1;
1406 		end &= -DM_BUFIO_WRITE_ALIGN;
1407 		if (unlikely(end > b->c->block_size))
1408 			end = b->c->block_size;
1409 
1410 		sector += offset >> SECTOR_SHIFT;
1411 		n_sectors = (end - offset) >> SECTOR_SHIFT;
1412 	}
1413 
1414 	if (b->data_mode != DATA_MODE_VMALLOC)
1415 		use_bio(b, op, sector, n_sectors, offset, ioprio);
1416 	else
1417 		use_dmio(b, op, sector, n_sectors, offset, ioprio);
1418 }
1419 
1420 /*
1421  *--------------------------------------------------------------
1422  * Writing dirty buffers
1423  *--------------------------------------------------------------
1424  */
1425 
1426 /*
1427  * The endio routine for write.
1428  *
1429  * Set the error, clear B_WRITING bit and wake anyone who was waiting on
1430  * it.
1431  */
1432 static void write_endio(struct dm_buffer *b, blk_status_t status)
1433 {
1434 	b->write_error = status;
1435 	if (unlikely(status)) {
1436 		struct dm_bufio_client *c = b->c;
1437 
1438 		(void)cmpxchg(&c->async_write_error, 0,
1439 				blk_status_to_errno(status));
1440 	}
1441 
1442 	BUG_ON(!test_bit(B_WRITING, &b->state));
1443 
1444 	smp_mb__before_atomic();
1445 	clear_bit(B_WRITING, &b->state);
1446 	smp_mb__after_atomic();
1447 
1448 	wake_up_bit(&b->state, B_WRITING);
1449 }
1450 
1451 /*
1452  * Initiate a write on a dirty buffer, but don't wait for it.
1453  *
1454  * - If the buffer is not dirty, exit.
1455  * - If there is a previous write going on, wait for it to finish (we can't
1456  *   have two writes on the same buffer simultaneously).
1457  * - Submit our write and don't wait on it. We set B_WRITING indicating
1458  *   that there is a write in progress.
1459  */
1460 static void __write_dirty_buffer(struct dm_buffer *b,
1461 				 struct list_head *write_list)
1462 {
1463 	if (!test_bit(B_DIRTY, &b->state))
1464 		return;
1465 
1466 	clear_bit(B_DIRTY, &b->state);
1467 	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1468 
1469 	b->write_start = b->dirty_start;
1470 	b->write_end = b->dirty_end;
1471 
1472 	if (!write_list)
1473 		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1474 	else
1475 		list_add_tail(&b->write_list, write_list);
1476 }
1477 
1478 static void __flush_write_list(struct list_head *write_list)
1479 {
1480 	struct blk_plug plug;
1481 
1482 	blk_start_plug(&plug);
1483 	while (!list_empty(write_list)) {
1484 		struct dm_buffer *b =
1485 			list_entry(write_list->next, struct dm_buffer, write_list);
1486 		list_del(&b->write_list);
1487 		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1488 		cond_resched();
1489 	}
1490 	blk_finish_plug(&plug);
1491 }
1492 
1493 /*
1494  * Wait until any activity on the buffer finishes.  Possibly write the
1495  * buffer if it is dirty.  When this function finishes, there is no I/O
1496  * running on the buffer and the buffer is not dirty.
1497  */
1498 static void __make_buffer_clean(struct dm_buffer *b)
1499 {
1500 	BUG_ON(atomic_read(&b->hold_count));
1501 
1502 	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1503 	if (!smp_load_acquire(&b->state))	/* fast case */
1504 		return;
1505 
1506 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1507 	__write_dirty_buffer(b, NULL);
1508 	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1509 }
1510 
1511 static enum evict_result is_clean(struct dm_buffer *b, void *context)
1512 {
1513 	struct dm_bufio_client *c = context;
1514 
1515 	/* These should never happen */
1516 	if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1517 		return ER_DONT_EVICT;
1518 	if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1519 		return ER_DONT_EVICT;
1520 	if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1521 		return ER_DONT_EVICT;
1522 
1523 	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1524 	    unlikely(test_bit(B_READING, &b->state)))
1525 		return ER_DONT_EVICT;
1526 
1527 	return ER_EVICT;
1528 }
1529 
1530 static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1531 {
1532 	/* These should never happen */
1533 	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1534 		return ER_DONT_EVICT;
1535 	if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1536 		return ER_DONT_EVICT;
1537 
1538 	return ER_EVICT;
1539 }
1540 
1541 /*
1542  * Find some buffer that is not held by anybody, clean it, unlink it and
1543  * return it.
1544  */
1545 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1546 {
1547 	struct dm_buffer *b;
1548 
1549 	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1550 	if (b) {
1551 		/* this also waits for pending reads */
1552 		__make_buffer_clean(b);
1553 		return b;
1554 	}
1555 
1556 	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1557 		return NULL;
1558 
1559 	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1560 	if (b) {
1561 		__make_buffer_clean(b);
1562 		return b;
1563 	}
1564 
1565 	return NULL;
1566 }
1567 
1568 /*
1569  * Wait until some other threads free some buffer or release hold count on
1570  * some buffer.
1571  *
1572  * This function is entered with c->lock held, drops it and regains it
1573  * before exiting.
1574  */
1575 static void __wait_for_free_buffer(struct dm_bufio_client *c)
1576 {
1577 	DECLARE_WAITQUEUE(wait, current);
1578 
1579 	add_wait_queue(&c->free_buffer_wait, &wait);
1580 	set_current_state(TASK_UNINTERRUPTIBLE);
1581 	dm_bufio_unlock(c);
1582 
1583 	/*
1584 	 * It's possible to miss a wake up event since we don't always
1585 	 * hold c->lock when wake_up is called.  So we have a timeout here,
1586 	 * just in case.
1587 	 */
1588 	io_schedule_timeout(5 * HZ);
1589 
1590 	remove_wait_queue(&c->free_buffer_wait, &wait);
1591 
1592 	dm_bufio_lock(c);
1593 }
1594 
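/*
 * How a buffer is being acquired:
 *	NF_FRESH    - a brand new buffer; the caller will overwrite the data,
 *		      so it is not read from disk
 *	NF_READ     - read the block from disk if it is not already cached
 *	NF_GET      - only succeed if the block is already in the cache
 *	NF_PREFETCH - best-effort read-ahead; never sleeps waiting for memory
 */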
1595 enum new_flag {
1596 	NF_FRESH = 0,
1597 	NF_READ = 1,
1598 	NF_GET = 2,
1599 	NF_PREFETCH = 3
1600 };
1601 
1602 /*
1603  * Allocate a new buffer. If the allocation is not possible, wait until
1604  * some other thread frees a buffer.
1605  *
1606  * May drop the lock and regain it.
1607  */
1608 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1609 {
1610 	struct dm_buffer *b;
1611 	bool tried_noio_alloc = false;
1612 
1613 	/*
1614 	 * dm-bufio is resistant to allocation failures (it just keeps
1615 	 * one buffer reserved in case all the allocations fail).
1616 	 * So set flags to not try too hard:
1617 	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1618 	 *		    mutex and wait ourselves.
1619 	 *	__GFP_NORETRY: don't retry and rather return failure
1620 	 *	__GFP_NOMEMALLOC: don't use emergency reserves
1621 	 *	__GFP_NOWARN: don't print a warning in case of failure
1622 	 *
1623 	 * For debugging, if we set the cache size to 1, no new buffers will
1624 	 * be allocated.
1625 	 */
1626 	while (1) {
1627 		if (dm_bufio_cache_size_latch != 1) {
1628 			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1629 			if (b)
1630 				return b;
1631 		}
1632 
1633 		if (nf == NF_PREFETCH)
1634 			return NULL;
1635 
1636 		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1637 			dm_bufio_unlock(c);
1638 			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1639 			dm_bufio_lock(c);
1640 			if (b)
1641 				return b;
1642 			tried_noio_alloc = true;
1643 		}
1644 
1645 		if (!list_empty(&c->reserved_buffers)) {
1646 			b = list_to_buffer(c->reserved_buffers.next);
1647 			list_del(&b->lru.list);
1648 			c->need_reserved_buffers++;
1649 
1650 			return b;
1651 		}
1652 
1653 		b = __get_unclaimed_buffer(c);
1654 		if (b)
1655 			return b;
1656 
1657 		__wait_for_free_buffer(c);
1658 	}
1659 }
1660 
1661 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1662 {
1663 	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1664 
1665 	if (!b)
1666 		return NULL;
1667 
1668 	if (c->alloc_callback)
1669 		c->alloc_callback(b);
1670 
1671 	return b;
1672 }
1673 
1674 /*
1675  * Free a buffer and wake other threads waiting for free buffers.
1676  */
1677 static void __free_buffer_wake(struct dm_buffer *b)
1678 {
1679 	struct dm_bufio_client *c = b->c;
1680 
1681 	b->block = -1;
1682 	if (!c->need_reserved_buffers)
1683 		free_buffer(b);
1684 	else {
1685 		list_add(&b->lru.list, &c->reserved_buffers);
1686 		c->need_reserved_buffers--;
1687 	}
1688 
1689 	/*
1690 	 * We hold the bufio lock here, so no one can add entries to the
1691 	 * wait queue anyway.
1692 	 */
1693 	if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1694 		wake_up(&c->free_buffer_wait);
1695 }
1696 
1697 static enum evict_result cleaned(struct dm_buffer *b, void *context)
1698 {
1699 	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1700 		return ER_DONT_EVICT; /* should never happen */
1701 
1702 	if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1703 		return ER_DONT_EVICT;
1704 	else
1705 		return ER_EVICT;
1706 }
1707 
1708 static void __move_clean_buffers(struct dm_bufio_client *c)
1709 {
1710 	cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1711 }
1712 
1713 struct write_context {
1714 	int no_wait;
1715 	struct list_head *write_list;
1716 };
1717 
1718 static enum it_action write_one(struct dm_buffer *b, void *context)
1719 {
1720 	struct write_context *wc = context;
1721 
1722 	if (wc->no_wait && test_bit(B_WRITING, &b->state))
1723 		return IT_COMPLETE;
1724 
1725 	__write_dirty_buffer(b, wc->write_list);
1726 	return IT_NEXT;
1727 }
1728 
1729 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1730 					struct list_head *write_list)
1731 {
1732 	struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1733 
1734 	__move_clean_buffers(c);
1735 	cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1736 }
1737 
1738 /*
1739  * Check if we're over the writeback watermark.
1740  * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the
1741  * number of clean buffers, start writing dirty buffers back asynchronously.
1742  */
1743 static void __check_watermark(struct dm_bufio_client *c,
1744 			      struct list_head *write_list)
1745 {
1746 	if (cache_count(&c->cache, LIST_DIRTY) >
1747 	    cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1748 		__write_dirty_buffers_async(c, 1, write_list);
1749 }
1750 
1751 /*
1752  *--------------------------------------------------------------
1753  * Getting a buffer
1754  *--------------------------------------------------------------
1755  */
1756 
1757 static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1758 {
1759 	/*
1760 	 * Relying on waitqueue_active() is racy, but we sleep
1761 	 * with schedule_timeout anyway.
1762 	 */
1763 	if (cache_put(&c->cache, b) &&
1764 	    unlikely(waitqueue_active(&c->free_buffer_wait)))
1765 		wake_up(&c->free_buffer_wait);
1766 }
1767 
1768 /*
1769  * This assumes you have already checked the cache to see if the buffer
1770  * is already present (it will recheck after dropping the lock for allocation).
1771  */
1772 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1773 				     enum new_flag nf, int *need_submit,
1774 				     struct list_head *write_list)
1775 {
1776 	struct dm_buffer *b, *new_b = NULL;
1777 
1778 	*need_submit = 0;
1779 
1780 	/* This can't be called with NF_GET */
1781 	if (WARN_ON_ONCE(nf == NF_GET))
1782 		return NULL;
1783 
1784 	new_b = __alloc_buffer_wait(c, nf);
1785 	if (!new_b)
1786 		return NULL;
1787 
1788 	/*
1789 	 * We've had a period where the mutex was unlocked, so need to
1790 	 * recheck the buffer tree.
1791 	 */
1792 	b = cache_get(&c->cache, block);
1793 	if (b) {
1794 		__free_buffer_wake(new_b);
1795 		goto found_buffer;
1796 	}
1797 
1798 	__check_watermark(c, write_list);
1799 
1800 	b = new_b;
1801 	atomic_set(&b->hold_count, 1);
1802 	WRITE_ONCE(b->last_accessed, jiffies);
1803 	b->block = block;
1804 	b->read_error = 0;
1805 	b->write_error = 0;
1806 	b->list_mode = LIST_CLEAN;
1807 
1808 	if (nf == NF_FRESH)
1809 		b->state = 0;
1810 	else {
1811 		b->state = 1 << B_READING;
1812 		*need_submit = 1;
1813 	}
1814 
1815 	/*
1816 	 * We mustn't insert into the cache until the B_READING state
1817 	 * is set.  Otherwise another thread could get it and use
1818 	 * it before it had been read.
1819 	 */
1820 	cache_insert(&c->cache, b);
1821 
1822 	return b;
1823 
1824 found_buffer:
1825 	if (nf == NF_PREFETCH) {
1826 		cache_put_and_wake(c, b);
1827 		return NULL;
1828 	}
1829 
1830 	/*
1831 	 * Note: it is essential that we don't wait for the buffer to be
1832 	 * read if dm_bufio_get function is used. Both dm_bufio_get and
1833 	 * dm_bufio_prefetch can be used in the driver request routine.
1834 	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1835 	 * the same buffer, it would deadlock if we waited.
1836 	 */
1837 	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1838 		cache_put_and_wake(c, b);
1839 		return NULL;
1840 	}
1841 
1842 	return b;
1843 }
1844 
1845 /*
1846  * The endio routine for reading: set the error, clear the bit and wake up
1847  * anyone waiting on the buffer.
1848  */
1849 static void read_endio(struct dm_buffer *b, blk_status_t status)
1850 {
1851 	b->read_error = status;
1852 
1853 	BUG_ON(!test_bit(B_READING, &b->state));
1854 
1855 	smp_mb__before_atomic();
1856 	clear_bit(B_READING, &b->state);
1857 	smp_mb__after_atomic();
1858 
1859 	wake_up_bit(&b->state, B_READING);
1860 }
1861 
1862 /*
1863  * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1864  * functions is similar except that dm_bufio_new doesn't read the
1865  * buffer from the disk (assuming that the caller overwrites all the data
1866  * and uses dm_bufio_mark_buffer_dirty to write new data back).
1867  */
1868 static void *new_read(struct dm_bufio_client *c, sector_t block,
1869 		      enum new_flag nf, struct dm_buffer **bp,
1870 		      unsigned short ioprio)
1871 {
1872 	int need_submit = 0;
1873 	struct dm_buffer *b;
1874 
1875 	LIST_HEAD(write_list);
1876 
1877 	*bp = NULL;
1878 
1879 	/*
1880 	 * Fast path, hopefully the block is already in the cache.  No need
1881 	 * to get the client lock for this.
1882 	 */
1883 	b = cache_get(&c->cache, block);
1884 	if (b) {
1885 		if (nf == NF_PREFETCH) {
1886 			cache_put_and_wake(c, b);
1887 			return NULL;
1888 		}
1889 
1890 		/*
1891 		 * Note: it is essential that we don't wait for the buffer to be
1892 		 * read if the dm_bufio_get function is used. Both dm_bufio_get and
1893 		 * dm_bufio_prefetch can be used in the driver request routine.
1894 		 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1895 		 * the same buffer, it would deadlock if we waited.
1896 		 */
1897 		if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1898 			cache_put_and_wake(c, b);
1899 			return NULL;
1900 		}
1901 	}
1902 
1903 	if (!b) {
1904 		if (nf == NF_GET)
1905 			return NULL;
1906 
1907 		dm_bufio_lock(c);
1908 		b = __bufio_new(c, block, nf, &need_submit, &write_list);
1909 		dm_bufio_unlock(c);
1910 	}
1911 
1912 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1913 	if (b && (atomic_read(&b->hold_count) == 1))
1914 		buffer_record_stack(b);
1915 #endif
1916 
1917 	__flush_write_list(&write_list);
1918 
1919 	if (!b)
1920 		return NULL;
1921 
1922 	if (need_submit)
1923 		submit_io(b, REQ_OP_READ, ioprio, read_endio);
1924 
1925 	if (nf != NF_GET)	/* we already tested this condition above */
1926 		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1927 
1928 	if (b->read_error) {
1929 		int error = blk_status_to_errno(b->read_error);
1930 
1931 		dm_bufio_release(b);
1932 
1933 		return ERR_PTR(error);
1934 	}
1935 
1936 	*bp = b;
1937 
1938 	return b->data;
1939 }
1940 
1941 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1942 		   struct dm_buffer **bp)
1943 {
1944 	return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
1945 }
1946 EXPORT_SYMBOL_GPL(dm_bufio_get);
1947 
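/*
 * Example (illustrative; 'c', 'block' and use_cached_data() are hypothetical
 * caller state): dm_bufio_get() never issues I/O, so a caller that only
 * wants data that is already resident might do:
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_get(c, block, &buf);
 *
 *	if (data && !IS_ERR(data)) {
 *		use_cached_data(data);
 *		dm_bufio_release(buf);
 *	}
 *
 * A NULL return means the block is not cached (or is still being read in),
 * not that an error occurred.
 */
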
1948 static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1949 			struct dm_buffer **bp, unsigned short ioprio)
1950 {
1951 	if (WARN_ON_ONCE(dm_bufio_in_request()))
1952 		return ERR_PTR(-EINVAL);
1953 
1954 	return new_read(c, block, NF_READ, bp, ioprio);
1955 }
1956 
1957 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1958 		    struct dm_buffer **bp)
1959 {
1960 	return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
1961 }
1962 EXPORT_SYMBOL_GPL(dm_bufio_read);
1963 
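/*
 * Example (illustrative; 'c', 'block' and parse_metadata() are hypothetical):
 * the usual read path pairs every successful dm_bufio_read() with a
 * dm_bufio_release():
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(c, block, &buf);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	parse_metadata(data);
 *	dm_bufio_release(buf);
 */
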
1964 void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
1965 				struct dm_buffer **bp, unsigned short ioprio)
1966 {
1967 	return __dm_bufio_read(c, block, bp, ioprio);
1968 }
1969 EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
1970 
1971 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1972 		   struct dm_buffer **bp)
1973 {
1974 	if (WARN_ON_ONCE(dm_bufio_in_request()))
1975 		return ERR_PTR(-EINVAL);
1976 
1977 	return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
1978 }
1979 EXPORT_SYMBOL_GPL(dm_bufio_new);
1980 
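/*
 * Example (illustrative; 'c' and 'block' are hypothetical): because
 * dm_bufio_new() skips the read, the caller is expected to overwrite the
 * whole block and then mark it dirty:
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_new(c, block, &buf);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *
 * Nothing reaches the device until the dirty buffers are written back, e.g.
 * by dm_bufio_write_dirty_buffers().
 */
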
1981 static void __dm_bufio_prefetch(struct dm_bufio_client *c,
1982 			sector_t block, unsigned int n_blocks,
1983 			unsigned short ioprio)
1984 {
1985 	struct blk_plug plug;
1986 
1987 	LIST_HEAD(write_list);
1988 
1989 	if (WARN_ON_ONCE(dm_bufio_in_request()))
1990 		return; /* should never happen */
1991 
1992 	blk_start_plug(&plug);
1993 
1994 	for (; n_blocks--; block++) {
1995 		int need_submit;
1996 		struct dm_buffer *b;
1997 
1998 		b = cache_get(&c->cache, block);
1999 		if (b) {
2000 			/* already in cache */
2001 			cache_put_and_wake(c, b);
2002 			continue;
2003 		}
2004 
2005 		dm_bufio_lock(c);
2006 		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
2007 				&write_list);
2008 		if (unlikely(!list_empty(&write_list))) {
2009 			dm_bufio_unlock(c);
2010 			blk_finish_plug(&plug);
2011 			__flush_write_list(&write_list);
2012 			blk_start_plug(&plug);
2013 			dm_bufio_lock(c);
2014 		}
2015 		if (unlikely(b != NULL)) {
2016 			dm_bufio_unlock(c);
2017 
2018 			if (need_submit)
2019 				submit_io(b, REQ_OP_READ, ioprio, read_endio);
2020 			dm_bufio_release(b);
2021 
2022 			cond_resched();
2023 
2024 			if (!n_blocks)
2025 				goto flush_plug;
2026 			dm_bufio_lock(c);
2027 		}
2028 		dm_bufio_unlock(c);
2029 	}
2030 
2031 flush_plug:
2032 	blk_finish_plug(&plug);
2033 }
2034 
2035 void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
2036 {
2037 	return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
2038 }
2039 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
2040 
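/*
 * Example (illustrative; 'c', 'first_block', 'buf' and 'data' are
 * hypothetical): a caller that knows it will soon need a run of blocks can
 * start the reads early and pick the buffers up later:
 *
 *	dm_bufio_prefetch(c, first_block, 16);
 *	...
 *	data = dm_bufio_read(c, first_block, &buf);
 *
 * The prefetch itself never returns a buffer; it only queues reads, so the
 * later dm_bufio_read() will usually find the block already resident.
 */
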
2041 void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
2042 				unsigned int n_blocks, unsigned short ioprio)
2043 {
2044 	return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
2045 }
2046 EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
2047 
2048 void dm_bufio_release(struct dm_buffer *b)
2049 {
2050 	struct dm_bufio_client *c = b->c;
2051 
2052 	/*
2053 	 * If there were errors on the buffer, and the buffer is not
2054 	 * to be written, free the buffer. There is no point in caching
2055 	 * an invalid buffer.
2056 	 */
2057 	if ((b->read_error || b->write_error) &&
2058 	    !test_bit_acquire(B_READING, &b->state) &&
2059 	    !test_bit(B_WRITING, &b->state) &&
2060 	    !test_bit(B_DIRTY, &b->state)) {
2061 		dm_bufio_lock(c);
2062 
2063 		/* cache remove can fail if there are other holders */
2064 		if (cache_remove(&c->cache, b)) {
2065 			__free_buffer_wake(b);
2066 			dm_bufio_unlock(c);
2067 			return;
2068 		}
2069 
2070 		dm_bufio_unlock(c);
2071 	}
2072 
2073 	cache_put_and_wake(c, b);
2074 }
2075 EXPORT_SYMBOL_GPL(dm_bufio_release);
2076 
2077 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2078 					unsigned int start, unsigned int end)
2079 {
2080 	struct dm_bufio_client *c = b->c;
2081 
2082 	BUG_ON(start >= end);
2083 	BUG_ON(end > b->c->block_size);
2084 
2085 	dm_bufio_lock(c);
2086 
2087 	BUG_ON(test_bit(B_READING, &b->state));
2088 
2089 	if (!test_and_set_bit(B_DIRTY, &b->state)) {
2090 		b->dirty_start = start;
2091 		b->dirty_end = end;
2092 		cache_mark(&c->cache, b, LIST_DIRTY);
2093 	} else {
2094 		if (start < b->dirty_start)
2095 			b->dirty_start = start;
2096 		if (end > b->dirty_end)
2097 			b->dirty_end = end;
2098 	}
2099 
2100 	dm_bufio_unlock(c);
2101 }
2102 EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
2103 
2104 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2105 {
2106 	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2107 }
2108 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
2109 
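/*
 * Example (illustrative; 'c', 'block', 'r' and update_record() are
 * hypothetical): a typical modify-and-commit sequence:
 *
 *	data = dm_bufio_read(c, block, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	update_record(data);
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *	r = dm_bufio_write_dirty_buffers(c);
 *
 * Marking a buffer dirty only moves it to the dirty list; it is
 * dm_bufio_write_dirty_buffers() that actually writes it out and flushes
 * the disk cache.
 */
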
2110 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2111 {
2112 	LIST_HEAD(write_list);
2113 
2114 	if (WARN_ON_ONCE(dm_bufio_in_request()))
2115 		return; /* should never happen */
2116 
2117 	dm_bufio_lock(c);
2118 	__write_dirty_buffers_async(c, 0, &write_list);
2119 	dm_bufio_unlock(c);
2120 	__flush_write_list(&write_list);
2121 }
2122 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2123 
2124 /*
2125  * For performance, it is essential that the buffers are written asynchronously
2126  * and simultaneously (so that the block layer can merge the writes) and then
2127  * waited upon.
2128  *
2129  * Finally, we flush the hardware disk cache.
2130  */
2131 static bool is_writing(struct lru_entry *e, void *context)
2132 {
2133 	struct dm_buffer *b = le_to_buffer(e);
2134 
2135 	return test_bit(B_WRITING, &b->state);
2136 }
2137 
2138 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2139 {
2140 	int a, f;
2141 	unsigned long nr_buffers;
2142 	struct lru_entry *e;
2143 	struct lru_iter it;
2144 
2145 	LIST_HEAD(write_list);
2146 
2147 	dm_bufio_lock(c);
2148 	__write_dirty_buffers_async(c, 0, &write_list);
2149 	dm_bufio_unlock(c);
2150 	__flush_write_list(&write_list);
2151 	dm_bufio_lock(c);
2152 
2153 	nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2154 	lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2155 	while ((e = lru_iter_next(&it, is_writing, c))) {
2156 		struct dm_buffer *b = le_to_buffer(e);
2157 		__cache_inc_buffer(b);
2158 
2159 		BUG_ON(test_bit(B_READING, &b->state));
2160 
2161 		if (nr_buffers) {
2162 			nr_buffers--;
2163 			dm_bufio_unlock(c);
2164 			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2165 			dm_bufio_lock(c);
2166 		} else {
2167 			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2168 		}
2169 
2170 		if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2171 			cache_mark(&c->cache, b, LIST_CLEAN);
2172 
2173 		cache_put_and_wake(c, b);
2174 
2175 		cond_resched();
2176 	}
2177 	lru_iter_end(&it);
2178 
2179 	wake_up(&c->free_buffer_wait);
2180 	dm_bufio_unlock(c);
2181 
2182 	a = xchg(&c->async_write_error, 0);
2183 	f = dm_bufio_issue_flush(c);
2184 	if (a)
2185 		return a;
2186 
2187 	return f;
2188 }
2189 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
2190 
2191 /*
2192  * Use dm-io to send an empty flush request to flush the device.
2193  */
2194 int dm_bufio_issue_flush(struct dm_bufio_client *c)
2195 {
2196 	struct dm_io_request io_req = {
2197 		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2198 		.mem.type = DM_IO_KMEM,
2199 		.mem.ptr.addr = NULL,
2200 		.client = c->dm_io,
2201 	};
2202 	struct dm_io_region io_reg = {
2203 		.bdev = c->bdev,
2204 		.sector = 0,
2205 		.count = 0,
2206 	};
2207 
2208 	if (WARN_ON_ONCE(dm_bufio_in_request()))
2209 		return -EINVAL;
2210 
2211 	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2212 }
2213 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2214 
2215 /*
2216  * Use dm-io to send a discard request to the device.
2217  */
2218 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2219 {
2220 	struct dm_io_request io_req = {
2221 		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2222 		.mem.type = DM_IO_KMEM,
2223 		.mem.ptr.addr = NULL,
2224 		.client = c->dm_io,
2225 	};
2226 	struct dm_io_region io_reg = {
2227 		.bdev = c->bdev,
2228 		.sector = block_to_sector(c, block),
2229 		.count = block_to_sector(c, count),
2230 	};
2231 
2232 	if (WARN_ON_ONCE(dm_bufio_in_request()))
2233 		return -EINVAL; /* discards are optional */
2234 
2235 	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2236 }
2237 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
2238 
2239 static void forget_buffer(struct dm_bufio_client *c, sector_t block)
2240 {
2241 	struct dm_buffer *b;
2242 
2243 	b = cache_get(&c->cache, block);
2244 	if (b) {
2245 		if (likely(!smp_load_acquire(&b->state))) {
2246 			if (cache_remove(&c->cache, b))
2247 				__free_buffer_wake(b);
2248 			else
2249 				cache_put_and_wake(c, b);
2250 		} else {
2251 			cache_put_and_wake(c, b);
2252 		}
2253 	}
2254 }
2255 
2256 /*
2257  * Free the given buffer.
2258  *
2259  * This is just a hint: if the buffer is in use or dirty, this function
2260  * does nothing.
2261  */
2262 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2263 {
2264 	dm_bufio_lock(c);
2265 	forget_buffer(c, block);
2266 	dm_bufio_unlock(c);
2267 }
2268 EXPORT_SYMBOL_GPL(dm_bufio_forget);
2269 
2270 static enum evict_result idle(struct dm_buffer *b, void *context)
2271 {
2272 	return b->state ? ER_DONT_EVICT : ER_EVICT;
2273 }
2274 
2275 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2276 {
2277 	dm_bufio_lock(c);
2278 	cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2279 	dm_bufio_unlock(c);
2280 }
2281 EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
2282 
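/*
 * Example (illustrative; 'c', 'block' and 'n_blocks' are hypothetical): a
 * caller that has invalidated an on-disk region can ask bufio to drop any
 * cached copies so that stale data is not kept around:
 *
 *	dm_bufio_forget_buffers(c, block, n_blocks);
 *
 * Like dm_bufio_forget(), this is only a hint: buffers that are held,
 * under I/O or dirty are left in the cache.
 */
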
2283 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2284 {
2285 	c->minimum_buffers = n;
2286 }
2287 EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2288 
2289 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2290 {
2291 	return c->block_size;
2292 }
2293 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2294 
2295 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2296 {
2297 	sector_t s = bdev_nr_sectors(c->bdev);
2298 
2299 	if (s >= c->start)
2300 		s -= c->start;
2301 	else
2302 		s = 0;
2303 	if (likely(c->sectors_per_block_bits >= 0))
2304 		s >>= c->sectors_per_block_bits;
2305 	else
2306 		sector_div(s, c->block_size >> SECTOR_SHIFT);
2307 	return s;
2308 }
2309 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
2310 
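/*
 * Worked example (numbers are illustrative): with a 4KiB block size,
 * sectors_per_block_bits is 12 - SECTOR_SHIFT = 3, so a 1GiB device
 * (2097152 sectors) with no start offset reports 2097152 >> 3 = 262144
 * blocks.  Non-power-of-two block sizes take the sector_div() path instead.
 */
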
2311 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2312 {
2313 	return c->dm_io;
2314 }
2315 EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2316 
2317 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2318 {
2319 	return b->block;
2320 }
2321 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2322 
2323 void *dm_bufio_get_block_data(struct dm_buffer *b)
2324 {
2325 	return b->data;
2326 }
2327 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2328 
2329 void *dm_bufio_get_aux_data(struct dm_buffer *b)
2330 {
2331 	return b + 1;
2332 }
2333 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
2334 
2335 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2336 {
2337 	return b->c;
2338 }
2339 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2340 
2341 static enum it_action warn_leak(struct dm_buffer *b, void *context)
2342 {
2343 	bool *warned = context;
2344 
2345 	WARN_ON(!(*warned));
2346 	*warned = true;
2347 	DMERR("leaked buffer %llx, hold count %u, list %d",
2348 	      (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2349 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2350 	stack_trace_print(b->stack_entries, b->stack_len, 1);
2351 	/* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2352 	atomic_set(&b->hold_count, 0);
2353 #endif
2354 	return IT_NEXT;
2355 }
2356 
2357 static void drop_buffers(struct dm_bufio_client *c)
2358 {
2359 	int i;
2360 	struct dm_buffer *b;
2361 
2362 	if (WARN_ON(dm_bufio_in_request()))
2363 		return; /* should never happen */
2364 
2365 	/*
2366 	 * An optimization so that the buffers are not written one-by-one.
2367 	 */
2368 	dm_bufio_write_dirty_buffers_async(c);
2369 
2370 	dm_bufio_lock(c);
2371 
2372 	while ((b = __get_unclaimed_buffer(c)))
2373 		__free_buffer_wake(b);
2374 
2375 	for (i = 0; i < LIST_SIZE; i++) {
2376 		bool warned = false;
2377 
2378 		cache_iterate(&c->cache, i, warn_leak, &warned);
2379 	}
2380 
2381 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2382 	while ((b = __get_unclaimed_buffer(c)))
2383 		__free_buffer_wake(b);
2384 #endif
2385 
2386 	for (i = 0; i < LIST_SIZE; i++)
2387 		WARN_ON(cache_count(&c->cache, i));
2388 
2389 	dm_bufio_unlock(c);
2390 }
2391 
2392 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2393 {
2394 	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2395 
2396 	if (likely(c->sectors_per_block_bits >= 0))
2397 		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2398 	else
2399 		retain_bytes /= c->block_size;
2400 
2401 	return retain_bytes;
2402 }
2403 
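/*
 * Worked example (numbers are illustrative): with the default
 * dm_bufio_retain_bytes of 256KiB and a 4KiB block size,
 * sectors_per_block_bits is 3, so the retain target is
 * 262144 >> (3 + SECTOR_SHIFT) = 262144 >> 12 = 64 buffers.
 */
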
2404 static void __scan(struct dm_bufio_client *c)
2405 {
2406 	int l;
2407 	struct dm_buffer *b;
2408 	unsigned long freed = 0;
2409 	unsigned long retain_target = get_retain_buffers(c);
2410 	unsigned long count = cache_total(&c->cache);
2411 
2412 	for (l = 0; l < LIST_SIZE; l++) {
2413 		while (true) {
2414 			if (count - freed <= retain_target)
2415 				atomic_long_set(&c->need_shrink, 0);
2416 			if (!atomic_long_read(&c->need_shrink))
2417 				break;
2418 
2419 			b = cache_evict(&c->cache, l,
2420 					l == LIST_CLEAN ? is_clean : is_dirty, c);
2421 			if (!b)
2422 				break;
2423 
2424 			__make_buffer_clean(b);
2425 			__free_buffer_wake(b);
2426 
2427 			atomic_long_dec(&c->need_shrink);
2428 			freed++;
2429 
2430 			if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
2431 				dm_bufio_unlock(c);
2432 				cond_resched();
2433 				dm_bufio_lock(c);
2434 			}
2435 		}
2436 	}
2437 }
2438 
2439 static void shrink_work(struct work_struct *w)
2440 {
2441 	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2442 
2443 	dm_bufio_lock(c);
2444 	__scan(c);
2445 	dm_bufio_unlock(c);
2446 }
2447 
2448 static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2449 {
2450 	struct dm_bufio_client *c;
2451 
2452 	c = shrink->private_data;
2453 	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2454 	queue_work(dm_bufio_wq, &c->shrink_work);
2455 
2456 	return sc->nr_to_scan;
2457 }
2458 
2459 static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2460 {
2461 	struct dm_bufio_client *c = shrink->private_data;
2462 	unsigned long count = cache_total(&c->cache);
2463 	unsigned long retain_target = get_retain_buffers(c);
2464 	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2465 
2466 	if (unlikely(count < retain_target))
2467 		count = 0;
2468 	else
2469 		count -= retain_target;
2470 
2471 	if (unlikely(count < queued_for_cleanup))
2472 		count = 0;
2473 	else
2474 		count -= queued_for_cleanup;
2475 
2476 	return count;
2477 }
2478 
2479 /*
2480  * Create the buffering interface
2481  */
2482 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2483 					       unsigned int reserved_buffers, unsigned int aux_size,
2484 					       void (*alloc_callback)(struct dm_buffer *),
2485 					       void (*write_callback)(struct dm_buffer *),
2486 					       unsigned int flags)
2487 {
2488 	int r;
2489 	unsigned int num_locks;
2490 	struct dm_bufio_client *c;
2491 	char slab_name[64];
2492 	static atomic_t seqno = ATOMIC_INIT(0);
2493 
2494 	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2495 		DMERR("%s: block size not specified or is not a multiple of 512b", __func__);
2496 		r = -EINVAL;
2497 		goto bad_client;
2498 	}
2499 
2500 	num_locks = dm_num_hash_locks();
2501 	c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2502 	if (!c) {
2503 		r = -ENOMEM;
2504 		goto bad_client;
2505 	}
2506 	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2507 
2508 	c->bdev = bdev;
2509 	c->block_size = block_size;
2510 	if (is_power_of_2(block_size))
2511 		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2512 	else
2513 		c->sectors_per_block_bits = -1;
2514 
2515 	c->alloc_callback = alloc_callback;
2516 	c->write_callback = write_callback;
2517 
2518 	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2519 		c->no_sleep = true;
2520 		static_branch_inc(&no_sleep_enabled);
2521 	}
2522 
2523 	mutex_init(&c->lock);
2524 	spin_lock_init(&c->spinlock);
2525 	INIT_LIST_HEAD(&c->reserved_buffers);
2526 	c->need_reserved_buffers = reserved_buffers;
2527 
2528 	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2529 
2530 	init_waitqueue_head(&c->free_buffer_wait);
2531 	c->async_write_error = 0;
2532 
2533 	c->dm_io = dm_io_client_create();
2534 	if (IS_ERR(c->dm_io)) {
2535 		r = PTR_ERR(c->dm_io);
2536 		goto bad_dm_io;
2537 	}
2538 
2539 	if (block_size <= KMALLOC_MAX_SIZE && !is_power_of_2(block_size)) {
2540 		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2541 
2542 		snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u",
2543 					block_size, atomic_inc_return(&seqno));
2544 		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2545 						  SLAB_RECLAIM_ACCOUNT, NULL);
2546 		if (!c->slab_cache) {
2547 			r = -ENOMEM;
2548 			goto bad;
2549 		}
2550 	}
2551 	if (aux_size)
2552 		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u",
2553 					aux_size, atomic_inc_return(&seqno));
2554 	else
2555 		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u",
2556 					atomic_inc_return(&seqno));
2557 	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2558 					   0, SLAB_RECLAIM_ACCOUNT, NULL);
2559 	if (!c->slab_buffer) {
2560 		r = -ENOMEM;
2561 		goto bad;
2562 	}
2563 
2564 	while (c->need_reserved_buffers) {
2565 		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2566 
2567 		if (!b) {
2568 			r = -ENOMEM;
2569 			goto bad;
2570 		}
2571 		__free_buffer_wake(b);
2572 	}
2573 
2574 	INIT_WORK(&c->shrink_work, shrink_work);
2575 	atomic_long_set(&c->need_shrink, 0);
2576 
2577 	c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
2578 				     MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2579 	if (!c->shrinker) {
2580 		r = -ENOMEM;
2581 		goto bad;
2582 	}
2583 
2584 	c->shrinker->count_objects = dm_bufio_shrink_count;
2585 	c->shrinker->scan_objects = dm_bufio_shrink_scan;
2586 	c->shrinker->seeks = 1;
2587 	c->shrinker->batch = 0;
2588 	c->shrinker->private_data = c;
2589 
2590 	shrinker_register(c->shrinker);
2591 
2592 	mutex_lock(&dm_bufio_clients_lock);
2593 	dm_bufio_client_count++;
2594 	list_add(&c->client_list, &dm_bufio_all_clients);
2595 	__cache_size_refresh();
2596 	mutex_unlock(&dm_bufio_clients_lock);
2597 
2598 	return c;
2599 
2600 bad:
2601 	while (!list_empty(&c->reserved_buffers)) {
2602 		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2603 
2604 		list_del(&b->lru.list);
2605 		free_buffer(b);
2606 	}
2607 	kmem_cache_destroy(c->slab_cache);
2608 	kmem_cache_destroy(c->slab_buffer);
2609 	dm_io_client_destroy(c->dm_io);
2610 bad_dm_io:
2611 	mutex_destroy(&c->lock);
2612 	if (c->no_sleep)
2613 		static_branch_dec(&no_sleep_enabled);
2614 	kfree(c);
2615 bad_client:
2616 	return ERR_PTR(r);
2617 }
2618 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
2619 
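/*
 * Example (illustrative; 'dev' stands in for a hypothetical target's
 * dm_dev): a constructor might create a client with 4KiB blocks, one
 * reserved buffer, no per-buffer aux data and no callbacks:
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(dev->bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 *
 * Real users choose reserved_buffers and aux_size to match their metadata
 * format.
 */
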
2620 /*
2621  * Free the buffering interface.
2622  * It is required that there are no references on any buffers.
2623  */
2624 void dm_bufio_client_destroy(struct dm_bufio_client *c)
2625 {
2626 	unsigned int i;
2627 
2628 	drop_buffers(c);
2629 
2630 	shrinker_free(c->shrinker);
2631 	flush_work(&c->shrink_work);
2632 
2633 	mutex_lock(&dm_bufio_clients_lock);
2634 
2635 	list_del(&c->client_list);
2636 	dm_bufio_client_count--;
2637 	__cache_size_refresh();
2638 
2639 	mutex_unlock(&dm_bufio_clients_lock);
2640 
2641 	WARN_ON(c->need_reserved_buffers);
2642 
2643 	while (!list_empty(&c->reserved_buffers)) {
2644 		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2645 
2646 		list_del(&b->lru.list);
2647 		free_buffer(b);
2648 	}
2649 
2650 	for (i = 0; i < LIST_SIZE; i++)
2651 		if (cache_count(&c->cache, i))
2652 			DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2653 
2654 	for (i = 0; i < LIST_SIZE; i++)
2655 		WARN_ON(cache_count(&c->cache, i));
2656 
2657 	cache_destroy(&c->cache);
2658 	kmem_cache_destroy(c->slab_cache);
2659 	kmem_cache_destroy(c->slab_buffer);
2660 	dm_io_client_destroy(c->dm_io);
2661 	mutex_destroy(&c->lock);
2662 	if (c->no_sleep)
2663 		static_branch_dec(&no_sleep_enabled);
2664 	kfree(c);
2665 }
2666 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
2667 
2668 void dm_bufio_client_reset(struct dm_bufio_client *c)
2669 {
2670 	drop_buffers(c);
2671 	flush_work(&c->shrink_work);
2672 }
2673 EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
2674 
2675 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2676 {
2677 	c->start = start;
2678 }
2679 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2680 
2681 /*--------------------------------------------------------------*/
2682 
2683 static unsigned int get_max_age_hz(void)
2684 {
2685 	unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2686 
2687 	if (max_age > UINT_MAX / HZ)
2688 		max_age = UINT_MAX / HZ;
2689 
2690 	return max_age * HZ;
2691 }
2692 
2693 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2694 {
2695 	return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2696 }
2697 
2698 struct evict_params {
2699 	gfp_t gfp;
2700 	unsigned long age_hz;
2701 
2702 	/*
2703 	 * This gets updated with the largest last_accessed (ie. most
2704 	 * recently used) of the evicted buffers.  It will not be reinitialised
2705 	 * by __evict_many(), so you can use it across multiple invocations.
2706 	 */
2707 	unsigned long last_accessed;
2708 };
2709 
2710 /*
2711  * We may not be able to evict this buffer if IO is pending or the client
2712  * is still using it.
2713  *
2714  * And if GFP_NOFS is used, we must not do any I/O because we hold
2715  * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2716  * rerouted to a different bufio client.
2717  */
2718 static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2719 {
2720 	struct evict_params *params = context;
2721 
2722 	if (!(params->gfp & __GFP_FS) ||
2723 	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2724 		if (test_bit_acquire(B_READING, &b->state) ||
2725 		    test_bit(B_WRITING, &b->state) ||
2726 		    test_bit(B_DIRTY, &b->state))
2727 			return ER_DONT_EVICT;
2728 	}
2729 
2730 	return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2731 }
2732 
2733 static unsigned long __evict_many(struct dm_bufio_client *c,
2734 				  struct evict_params *params,
2735 				  int list_mode, unsigned long max_count)
2736 {
2737 	unsigned long count;
2738 	unsigned long last_accessed;
2739 	struct dm_buffer *b;
2740 
2741 	for (count = 0; count < max_count; count++) {
2742 		b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2743 		if (!b)
2744 			break;
2745 
2746 		last_accessed = READ_ONCE(b->last_accessed);
2747 		if (time_after_eq(params->last_accessed, last_accessed))
2748 			params->last_accessed = last_accessed;
2749 
2750 		__make_buffer_clean(b);
2751 		__free_buffer_wake(b);
2752 
2753 		cond_resched();
2754 	}
2755 
2756 	return count;
2757 }
2758 
2759 static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2760 {
2761 	struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2762 	unsigned long retain = get_retain_buffers(c);
2763 	unsigned long count;
2764 	LIST_HEAD(write_list);
2765 
2766 	dm_bufio_lock(c);
2767 
2768 	__check_watermark(c, &write_list);
2769 	if (unlikely(!list_empty(&write_list))) {
2770 		dm_bufio_unlock(c);
2771 		__flush_write_list(&write_list);
2772 		dm_bufio_lock(c);
2773 	}
2774 
2775 	count = cache_total(&c->cache);
2776 	if (count > retain)
2777 		__evict_many(c, &params, LIST_CLEAN, count - retain);
2778 
2779 	dm_bufio_unlock(c);
2780 }
2781 
2782 static void cleanup_old_buffers(void)
2783 {
2784 	unsigned long max_age_hz = get_max_age_hz();
2785 	struct dm_bufio_client *c;
2786 
2787 	mutex_lock(&dm_bufio_clients_lock);
2788 
2789 	__cache_size_refresh();
2790 
2791 	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2792 		evict_old_buffers(c, max_age_hz);
2793 
2794 	mutex_unlock(&dm_bufio_clients_lock);
2795 }
2796 
2797 static void work_fn(struct work_struct *w)
2798 {
2799 	cleanup_old_buffers();
2800 
2801 	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2802 			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2803 }
2804 
2805 /*--------------------------------------------------------------*/
2806 
2807 /*
2808  * Global cleanup tries to evict the oldest buffers from across _all_
2809  * the clients.  It does this by repeatedly evicting a few buffers from
2810  * the client that holds the oldest buffer.  It's approximate, but hopefully
2811  * good enough.
2812  */
2813 static struct dm_bufio_client *__pop_client(void)
2814 {
2815 	struct list_head *h;
2816 
2817 	if (list_empty(&dm_bufio_all_clients))
2818 		return NULL;
2819 
2820 	h = dm_bufio_all_clients.next;
2821 	list_del(h);
2822 	return container_of(h, struct dm_bufio_client, client_list);
2823 }
2824 
2825 /*
2826  * Inserts the client in the global client list based on its
2827  * 'oldest_buffer' field.
2828  */
2829 static void __insert_client(struct dm_bufio_client *new_client)
2830 {
2831 	struct dm_bufio_client *c;
2832 	struct list_head *h = dm_bufio_all_clients.next;
2833 
2834 	while (h != &dm_bufio_all_clients) {
2835 		c = container_of(h, struct dm_bufio_client, client_list);
2836 		if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2837 			break;
2838 		h = h->next;
2839 	}
2840 
2841 	list_add_tail(&new_client->client_list, h);
2842 }
2843 
2844 static unsigned long __evict_a_few(unsigned long nr_buffers)
2845 {
2846 	unsigned long count;
2847 	struct dm_bufio_client *c;
2848 	struct evict_params params = {
2849 		.gfp = GFP_KERNEL,
2850 		.age_hz = 0,
2851 		/* set to jiffies in case there are no buffers in this client */
2852 		.last_accessed = jiffies
2853 	};
2854 
2855 	c = __pop_client();
2856 	if (!c)
2857 		return 0;
2858 
2859 	dm_bufio_lock(c);
2860 	count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2861 	dm_bufio_unlock(c);
2862 
2863 	if (count)
2864 		c->oldest_buffer = params.last_accessed;
2865 	__insert_client(c);
2866 
2867 	return count;
2868 }
2869 
2870 static void check_watermarks(void)
2871 {
2872 	LIST_HEAD(write_list);
2873 	struct dm_bufio_client *c;
2874 
2875 	mutex_lock(&dm_bufio_clients_lock);
2876 	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2877 		dm_bufio_lock(c);
2878 		__check_watermark(c, &write_list);
2879 		dm_bufio_unlock(c);
2880 	}
2881 	mutex_unlock(&dm_bufio_clients_lock);
2882 
2883 	__flush_write_list(&write_list);
2884 }
2885 
2886 static void evict_old(void)
2887 {
2888 	unsigned long threshold = dm_bufio_cache_size -
2889 		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
2890 
2891 	mutex_lock(&dm_bufio_clients_lock);
2892 	while (dm_bufio_current_allocated > threshold) {
2893 		if (!__evict_a_few(64))
2894 			break;
2895 		cond_resched();
2896 	}
2897 	mutex_unlock(&dm_bufio_clients_lock);
2898 }
2899 
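/*
 * Worked example (numbers are illustrative): with a 100MiB global cache
 * limit, the threshold is 100MiB - 100MiB / 16, so eviction keeps running,
 * 64 clean buffers at a time, until the global allocation drops back under
 * roughly 93.75% of the limit.
 */
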
2900 static void do_global_cleanup(struct work_struct *w)
2901 {
2902 	check_watermarks();
2903 	evict_old();
2904 }
2905 
2906 /*
2907  *--------------------------------------------------------------
2908  * Module setup
2909  *--------------------------------------------------------------
2910  */
2911 
2912 /*
2913  * This is called only once for the whole dm_bufio module.
2914  * It initializes the memory limit.
2915  */
2916 static int __init dm_bufio_init(void)
2917 {
2918 	__u64 mem;
2919 
2920 	dm_bufio_allocated_kmem_cache = 0;
2921 	dm_bufio_allocated_kmalloc = 0;
2922 	dm_bufio_allocated_get_free_pages = 0;
2923 	dm_bufio_allocated_vmalloc = 0;
2924 	dm_bufio_current_allocated = 0;
2925 
2926 	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2927 			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2928 
2929 	if (mem > ULONG_MAX)
2930 		mem = ULONG_MAX;
2931 
2932 #ifdef CONFIG_MMU
2933 	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2934 		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2935 #endif
2936 
2937 	dm_bufio_default_cache_size = mem;
2938 
2939 	mutex_lock(&dm_bufio_clients_lock);
2940 	__cache_size_refresh();
2941 	mutex_unlock(&dm_bufio_clients_lock);
2942 
2943 	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2944 	if (!dm_bufio_wq)
2945 		return -ENOMEM;
2946 
2947 	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2948 	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2949 	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2950 			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2951 
2952 	return 0;
2953 }
2954 
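/*
 * Worked example (numbers are illustrative): on a 64-bit machine with 16GiB
 * of RAM and no highmem, the default limit is 2% of 16GiB, about 328MiB;
 * if 25% of the vmalloc area were smaller than that, the smaller value
 * would be used instead.
 */
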
2955 /*
2956  * This is called once when unloading the dm_bufio module.
2957  */
2958 static void __exit dm_bufio_exit(void)
2959 {
2960 	int bug = 0;
2961 
2962 	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2963 	destroy_workqueue(dm_bufio_wq);
2964 
2965 	if (dm_bufio_client_count) {
2966 		DMCRIT("%s: dm_bufio_client_count leaked: %d",
2967 			__func__, dm_bufio_client_count);
2968 		bug = 1;
2969 	}
2970 
2971 	if (dm_bufio_current_allocated) {
2972 		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2973 			__func__, dm_bufio_current_allocated);
2974 		bug = 1;
2975 	}
2976 
2977 	if (dm_bufio_allocated_get_free_pages) {
2978 		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2979 		       __func__, dm_bufio_allocated_get_free_pages);
2980 		bug = 1;
2981 	}
2982 
2983 	if (dm_bufio_allocated_vmalloc) {
2984 		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2985 		       __func__, dm_bufio_allocated_vmalloc);
2986 		bug = 1;
2987 	}
2988 
2989 	WARN_ON(bug); /* leaks are not worth crashing the system */
2990 }
2991 
2992 module_init(dm_bufio_init)
2993 module_exit(dm_bufio_exit)
2994 
2995 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2996 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2997 
2998 module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2999 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
3000 
3001 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
3002 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
3003 
3004 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
3005 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
3006 
3007 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
3008 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
3009 
3010 module_param_named(allocated_kmalloc_bytes, dm_bufio_allocated_kmalloc, ulong, 0444);
3011 MODULE_PARM_DESC(allocated_kmalloc_bytes, "Memory allocated with kmalloc");
3012 
3013 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
3014 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
3015 
3016 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
3017 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
3018 
3019 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
3020 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
3021 
3022 MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
3023 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
3024 MODULE_LICENSE("GPL");
3025