xref: /linux/mm/page-writeback.c (revision 66d64899eae85dc9b96c5433933787cdcd9b21e4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/page-writeback.c
4  *
5  * Copyright (C) 2002, Linus Torvalds.
6  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
7  *
8  * Contains functions related to writing back dirty pages at the
9  * address_space level.
10  *
11  * 10Apr2002	Andrew Morton
12  *		Initial version
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/math64.h>
17 #include <linux/export.h>
18 #include <linux/spinlock.h>
19 #include <linux/fs.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/slab.h>
23 #include <linux/pagemap.h>
24 #include <linux/writeback.h>
25 #include <linux/init.h>
26 #include <linux/backing-dev.h>
27 #include <linux/task_io_accounting_ops.h>
28 #include <linux/blkdev.h>
29 #include <linux/mpage.h>
30 #include <linux/rmap.h>
31 #include <linux/percpu.h>
32 #include <linux/smp.h>
33 #include <linux/sysctl.h>
34 #include <linux/cpu.h>
35 #include <linux/syscalls.h>
36 #include <linux/pagevec.h>
37 #include <linux/timer.h>
38 #include <linux/sched/rt.h>
39 #include <linux/sched/signal.h>
40 #include <linux/mm_inline.h>
41 #include <linux/shmem_fs.h>
42 #include <trace/events/writeback.h>
43 
44 #include "internal.h"
45 
46 /*
47  * Sleep at most 200ms at a time in balance_dirty_pages().
48  */
49 #define MAX_PAUSE		max(HZ/5, 1)
50 
51 /*
52  * Try to keep the interval between balance_dirty_pages() calls above this many
53  * dirtied pages by raising the pause time to max_pause when it falls below it.
54  */
55 #define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
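/*
 * For illustration, assuming 4 KiB pages (PAGE_SHIFT == 12): this evaluates
 * to 128 >> 2 == 32 pages, i.e. about 128 KiB; with 64 KiB pages it is
 * 2 pages, still about 128 KiB. The threshold therefore corresponds to a
 * roughly constant amount of data regardless of page size.
 */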
56 
57 /*
58  * Estimate write bandwidth or update dirty limit at 200ms intervals.
59  */
60 #define BANDWIDTH_INTERVAL	max(HZ/5, 1)
61 
62 #define RATELIMIT_CALC_SHIFT	10
63 
64 /*
65  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
66  * will look to see if it needs to force writeback or throttling.
67  */
68 static long ratelimit_pages = 32;
69 
70 /* The following parameters are exported via /proc/sys/vm */
71 
72 /*
73  * Start background writeback (via writeback threads) at this percentage
74  */
75 static int dirty_background_ratio = 10;
76 
77 /*
78  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
79  * dirty_background_ratio * the amount of dirtyable memory
80  */
81 static unsigned long dirty_background_bytes;
82 
83 /*
84  * free highmem will not be subtracted from the total free memory
85  * for calculating free ratios if vm_highmem_is_dirtyable is true
86  */
87 static int vm_highmem_is_dirtyable;
88 
89 /*
90  * The generator of dirty data starts writeback at this percentage
91  */
92 static int vm_dirty_ratio = 20;
93 
94 /*
95  * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
96  * vm_dirty_ratio * the amount of dirtyable memory
97  */
98 static unsigned long vm_dirty_bytes;
99 
100 /*
101  * The interval between `kupdate'-style writebacks
102  */
103 unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
104 
105 EXPORT_SYMBOL_GPL(dirty_writeback_interval);
106 
107 /*
108  * The longest time for which data is allowed to remain dirty
109  */
110 unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
111 
112 /* End of sysctl-exported parameters */
113 
114 struct wb_domain global_wb_domain;
115 
116 /*
117  * Length of period for aging writeout fractions of bdis. This is an
118  * arbitrarily chosen number. The longer the period, the more slowly the
119  * fractions will reflect changes in the current writeout rate.
120  */
121 #define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
122 
123 #ifdef CONFIG_CGROUP_WRITEBACK
124 
125 #define GDTC_INIT(__wb)		.wb = (__wb),				\
126 				.dom = &global_wb_domain,		\
127 				.wb_completions = &(__wb)->completions
128 
129 #define GDTC_INIT_NO_WB		.dom = &global_wb_domain
130 
131 #define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
132 				.dom = mem_cgroup_wb_domain(__wb),	\
133 				.wb_completions = &(__wb)->memcg_completions, \
134 				.gdtc = __gdtc
135 
136 static bool mdtc_valid(struct dirty_throttle_control *dtc)
137 {
138 	return dtc->dom;
139 }
140 
141 static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
142 {
143 	return dtc->dom;
144 }
145 
146 static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
147 {
148 	return mdtc->gdtc;
149 }
150 
151 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
152 {
153 	return &wb->memcg_completions;
154 }
155 
156 static void wb_min_max_ratio(struct bdi_writeback *wb,
157 			     unsigned long *minp, unsigned long *maxp)
158 {
159 	unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
160 	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
161 	unsigned long long min = wb->bdi->min_ratio;
162 	unsigned long long max = wb->bdi->max_ratio;
163 
164 	/*
165 	 * @wb may already be clean by the time control reaches here and
166 	 * the total may not include its bw.
167 	 */
168 	if (this_bw < tot_bw) {
169 		if (min) {
170 			min *= this_bw;
171 			min = div64_ul(min, tot_bw);
172 		}
173 		if (max < 100 * BDI_RATIO_SCALE) {
174 			max *= this_bw;
175 			max = div64_ul(max, tot_bw);
176 		}
177 	}
178 
179 	*minp = min;
180 	*maxp = max;
181 }
182 
183 #else	/* CONFIG_CGROUP_WRITEBACK */
184 
185 #define GDTC_INIT(__wb)		.wb = (__wb),                           \
186 				.wb_completions = &(__wb)->completions
187 #define GDTC_INIT_NO_WB
188 #define MDTC_INIT(__wb, __gdtc)
189 
190 static bool mdtc_valid(struct dirty_throttle_control *dtc)
191 {
192 	return false;
193 }
194 
195 static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
196 {
197 	return &global_wb_domain;
198 }
199 
200 static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
201 {
202 	return NULL;
203 }
204 
205 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
206 {
207 	return NULL;
208 }
209 
210 static void wb_min_max_ratio(struct bdi_writeback *wb,
211 			     unsigned long *minp, unsigned long *maxp)
212 {
213 	*minp = wb->bdi->min_ratio;
214 	*maxp = wb->bdi->max_ratio;
215 }
216 
217 #endif	/* CONFIG_CGROUP_WRITEBACK */
218 
219 /*
220  * In a memory zone, there is a certain number of pages we consider
221  * available for the page cache, which is essentially the number of
222  * free and reclaimable pages, minus some zone reserves to protect
223  * lowmem and the ability to uphold the zone's watermarks without
224  * requiring writeback.
225  *
226  * This number of dirtyable pages is the base value to which the
227  * user-configurable dirty ratio is applied to get the effective number
228  * of pages that may actually be dirtied, either per individual zone or
229  * globally using the sum of dirtyable pages over all zones.
230  *
231  * Because the user is allowed to specify the dirty limit globally as
232  * absolute number of bytes, calculating the per-zone dirty limit can
233  * require translating the configured limit into a percentage of
234  * global dirtyable memory first.
235  */
236 
237 /**
238  * node_dirtyable_memory - number of dirtyable pages in a node
239  * @pgdat: the node
240  *
241  * Return: the node's number of pages potentially available for dirty
242  * page cache.  This is the base value for the per-node dirty limits.
243  */
244 static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
245 {
246 	unsigned long nr_pages = 0;
247 	int z;
248 
249 	for (z = 0; z < MAX_NR_ZONES; z++) {
250 		struct zone *zone = pgdat->node_zones + z;
251 
252 		if (!populated_zone(zone))
253 			continue;
254 
255 		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
256 	}
257 
258 	/*
259 	 * Pages reserved for the kernel should not be considered
260 	 * dirtyable, to prevent a situation where reclaim has to
261 	 * clean pages in order to balance the zones.
262 	 */
263 	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
264 
265 	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
266 	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
267 
268 	return nr_pages;
269 }
270 
271 static unsigned long highmem_dirtyable_memory(unsigned long total)
272 {
273 #ifdef CONFIG_HIGHMEM
274 	int node;
275 	unsigned long x = 0;
276 	int i;
277 
278 	for_each_node_state(node, N_HIGH_MEMORY) {
279 		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
280 			struct zone *z;
281 			unsigned long nr_pages;
282 
283 			if (!is_highmem_idx(i))
284 				continue;
285 
286 			z = &NODE_DATA(node)->node_zones[i];
287 			if (!populated_zone(z))
288 				continue;
289 
290 			nr_pages = zone_page_state(z, NR_FREE_PAGES);
291 			/* watch for underflows */
292 			nr_pages -= min(nr_pages, high_wmark_pages(z));
293 			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
294 			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
295 			x += nr_pages;
296 		}
297 	}
298 
299 	/*
300 	 * Make sure that the number of highmem pages is never larger
301 	 * than the number of the total dirtyable memory. This can only
302 	 * occur in very strange VM situations but we want to make sure
303 	 * that this does not occur.
304 	 */
305 	return min(x, total);
306 #else
307 	return 0;
308 #endif
309 }
310 
311 /**
312  * global_dirtyable_memory - number of globally dirtyable pages
313  *
314  * Return: the global number of pages potentially available for dirty
315  * page cache.  This is the base value for the global dirty limits.
316  */
317 static unsigned long global_dirtyable_memory(void)
318 {
319 	unsigned long x;
320 
321 	x = global_zone_page_state(NR_FREE_PAGES);
322 	/*
323 	 * Pages reserved for the kernel should not be considered
324 	 * dirtyable, to prevent a situation where reclaim has to
325 	 * clean pages in order to balance the zones.
326 	 */
327 	x -= min(x, totalreserve_pages);
328 
329 	x += global_node_page_state(NR_INACTIVE_FILE);
330 	x += global_node_page_state(NR_ACTIVE_FILE);
331 
332 	if (!vm_highmem_is_dirtyable)
333 		x -= highmem_dirtyable_memory(x);
334 
335 	return x + 1;	/* Ensure that we never return 0 */
336 }
337 
338 /**
339  * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
340  * @dtc: dirty_throttle_control of interest
341  *
342  * Calculate @dtc->thresh and ->bg_thresh considering
343  * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
344  * must ensure that @dtc->avail is set before calling this function.  The
345  * dirty limits will be lifted by 1/4 for real-time tasks.
346  */
347 static void domain_dirty_limits(struct dirty_throttle_control *dtc)
348 {
349 	const unsigned long available_memory = dtc->avail;
350 	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
351 	unsigned long bytes = vm_dirty_bytes;
352 	unsigned long bg_bytes = dirty_background_bytes;
353 	/* convert ratios to per-PAGE_SIZE for higher precision */
354 	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
355 	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
356 	unsigned long thresh;
357 	unsigned long bg_thresh;
358 	struct task_struct *tsk;
359 
360 	/* gdtc is !NULL iff @dtc is for memcg domain */
361 	if (gdtc) {
362 		unsigned long global_avail = gdtc->avail;
363 
364 		/*
365 		 * The byte settings can't be applied directly to memcg
366 		 * domains.  Convert them to ratios by scaling against
367 		 * globally available memory.  As the ratios are in
368 		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
369 		 * number of pages.
370 		 */
371 		if (bytes)
372 			ratio = min(DIV_ROUND_UP(bytes, global_avail),
373 				    PAGE_SIZE);
374 		if (bg_bytes)
375 			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
376 				       PAGE_SIZE);
377 		bytes = bg_bytes = 0;
378 	}
379 
380 	if (bytes)
381 		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
382 	else
383 		thresh = (ratio * available_memory) / PAGE_SIZE;
384 
385 	if (bg_bytes)
386 		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
387 	else
388 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
389 
390 	tsk = current;
391 	if (rt_or_dl_task(tsk)) {
392 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
393 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
394 	}
395 	/*
396 	 * Dirty throttling logic assumes the limits in page units fit into
397 	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
398 	 */
399 	if (thresh > UINT_MAX)
400 		thresh = UINT_MAX;
401 	/* This makes sure bg_thresh is within 32-bits as well */
402 	if (bg_thresh >= thresh)
403 		bg_thresh = thresh / 2;
404 	dtc->thresh = thresh;
405 	dtc->bg_thresh = bg_thresh;
406 
407 	/* we should eventually report the domain in the TP */
408 	if (!gdtc)
409 		trace_global_dirty_state(bg_thresh, thresh);
410 }
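/*
 * Rough example (no byte limits configured, non-realtime task): with the
 * default vm_dirty_ratio = 20 and dirty_background_ratio = 10, thresh works
 * out to about 20% of dtc->avail pages and bg_thresh to about 10%. Setting
 * vm_dirty_bytes or dirty_background_bytes overrides the corresponding
 * ratio; for a memcg domain the byte values are first converted to ratios
 * against globally available memory, as done above.
 */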
411 
412 /**
413  * global_dirty_limits - background-writeback and dirty-throttling thresholds
414  * @pbackground: out parameter for bg_thresh
415  * @pdirty: out parameter for thresh
416  *
417  * Calculate bg_thresh and thresh for global_wb_domain.  See
418  * domain_dirty_limits() for details.
419  */
420 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
421 {
422 	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
423 
424 	gdtc.avail = global_dirtyable_memory();
425 	domain_dirty_limits(&gdtc);
426 
427 	*pbackground = gdtc.bg_thresh;
428 	*pdirty = gdtc.thresh;
429 }
430 
431 /**
432  * node_dirty_limit - maximum number of dirty pages allowed in a node
433  * @pgdat: the node
434  *
435  * Return: the maximum number of dirty pages allowed in a node, based
436  * on the node's dirtyable memory.
437  */
438 static unsigned long node_dirty_limit(struct pglist_data *pgdat)
439 {
440 	unsigned long node_memory = node_dirtyable_memory(pgdat);
441 	struct task_struct *tsk = current;
442 	unsigned long dirty;
443 
444 	if (vm_dirty_bytes)
445 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
446 			node_memory / global_dirtyable_memory();
447 	else
448 		dirty = vm_dirty_ratio * node_memory / 100;
449 
450 	if (rt_or_dl_task(tsk))
451 		dirty += dirty / 4;
452 
453 	/*
454 	 * Dirty throttling logic assumes the limits in page units fit into
455 	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
456 	 */
457 	return min_t(unsigned long, dirty, UINT_MAX);
458 }
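/*
 * For illustration: with vm_dirty_bytes set, a node that holds a quarter of
 * the global dirtyable memory gets a quarter of the global page limit; with
 * the default vm_dirty_ratio = 20 it gets 20% of its own dirtyable pages.
 * Realtime tasks get a further 1/4 boost on top of either value.
 */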
459 
460 /**
461  * node_dirty_ok - tells whether a node is within its dirty limits
462  * @pgdat: the node to check
463  *
464  * Return: %true when the dirty pages in @pgdat are within the node's
465  * dirty limit, %false if the limit is exceeded.
466  */
467 bool node_dirty_ok(struct pglist_data *pgdat)
468 {
469 	unsigned long limit = node_dirty_limit(pgdat);
470 	unsigned long nr_pages = 0;
471 
472 	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
473 	nr_pages += node_page_state(pgdat, NR_WRITEBACK);
474 
475 	return nr_pages <= limit;
476 }
477 
478 #ifdef CONFIG_SYSCTL
479 static int dirty_background_ratio_handler(const struct ctl_table *table, int write,
480 		void *buffer, size_t *lenp, loff_t *ppos)
481 {
482 	int ret;
483 
484 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
485 	if (ret == 0 && write)
486 		dirty_background_bytes = 0;
487 	return ret;
488 }
489 
490 static int dirty_background_bytes_handler(const struct ctl_table *table, int write,
491 		void *buffer, size_t *lenp, loff_t *ppos)
492 {
493 	int ret;
494 	unsigned long old_bytes = dirty_background_bytes;
495 
496 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
497 	if (ret == 0 && write) {
498 		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
499 								UINT_MAX) {
500 			dirty_background_bytes = old_bytes;
501 			return -ERANGE;
502 		}
503 		dirty_background_ratio = 0;
504 	}
505 	return ret;
506 }
507 
508 static int dirty_ratio_handler(const struct ctl_table *table, int write, void *buffer,
509 		size_t *lenp, loff_t *ppos)
510 {
511 	int old_ratio = vm_dirty_ratio;
512 	int ret;
513 
514 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
515 	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
516 		vm_dirty_bytes = 0;
517 		writeback_set_ratelimit();
518 	}
519 	return ret;
520 }
521 
522 static int dirty_bytes_handler(const struct ctl_table *table, int write,
523 		void *buffer, size_t *lenp, loff_t *ppos)
524 {
525 	unsigned long old_bytes = vm_dirty_bytes;
526 	int ret;
527 
528 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
529 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
530 		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
531 			vm_dirty_bytes = old_bytes;
532 			return -ERANGE;
533 		}
534 		writeback_set_ratelimit();
535 		vm_dirty_ratio = 0;
536 	}
537 	return ret;
538 }
539 #endif
540 
541 static unsigned long wp_next_time(unsigned long cur_time)
542 {
543 	cur_time += VM_COMPLETIONS_PERIOD_LEN;
544 	/* 0 has a special meaning... */
545 	if (!cur_time)
546 		return 1;
547 	return cur_time;
548 }
549 
550 static void wb_domain_writeout_add(struct wb_domain *dom,
551 				   struct fprop_local_percpu *completions,
552 				   unsigned int max_prop_frac, long nr)
553 {
554 	__fprop_add_percpu_max(&dom->completions, completions,
555 			       max_prop_frac, nr);
556 	/* First event after period switching was turned off? */
557 	if (unlikely(!dom->period_time)) {
558 		/*
559 		 * We can race with other wb_domain_writeout_add calls here but
560 		 * it does not cause any harm since the resulting time when
561 		 * timer will fire and what is in writeout_period_time will be
562 		 * roughly the same.
563 		 */
564 		dom->period_time = wp_next_time(jiffies);
565 		mod_timer(&dom->period_timer, dom->period_time);
566 	}
567 }
568 
569 /*
570  * Increment @wb's writeout completion count and the global writeout
571  * completion count. Called from __folio_end_writeback().
572  */
573 static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
574 {
575 	struct wb_domain *cgdom;
576 
577 	wb_stat_mod(wb, WB_WRITTEN, nr);
578 	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
579 			       wb->bdi->max_prop_frac, nr);
580 
581 	cgdom = mem_cgroup_wb_domain(wb);
582 	if (cgdom)
583 		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
584 				       wb->bdi->max_prop_frac, nr);
585 }
586 
587 void wb_writeout_inc(struct bdi_writeback *wb)
588 {
589 	unsigned long flags;
590 
591 	local_irq_save(flags);
592 	__wb_writeout_add(wb, 1);
593 	local_irq_restore(flags);
594 }
595 EXPORT_SYMBOL_GPL(wb_writeout_inc);
596 
597 /*
598  * On an idle system, we can be called long after we were scheduled because we
599  * use deferred timers, so account for the missed periods.
600  */
601 static void writeout_period(struct timer_list *t)
602 {
603 	struct wb_domain *dom = timer_container_of(dom, t, period_timer);
604 	int miss_periods = (jiffies - dom->period_time) /
605 						 VM_COMPLETIONS_PERIOD_LEN;
606 
607 	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
608 		dom->period_time = wp_next_time(dom->period_time +
609 				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
610 		mod_timer(&dom->period_timer, dom->period_time);
611 	} else {
612 		/*
613 		 * Aging has zeroed all fractions. Stop wasting CPU on period
614 		 * updates.
615 		 */
616 		dom->period_time = 0;
617 	}
618 }
619 
620 int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
621 {
622 	memset(dom, 0, sizeof(*dom));
623 
624 	spin_lock_init(&dom->lock);
625 
626 	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
627 
628 	dom->dirty_limit_tstamp = jiffies;
629 
630 	return fprop_global_init(&dom->completions, gfp);
631 }
632 
633 #ifdef CONFIG_CGROUP_WRITEBACK
634 void wb_domain_exit(struct wb_domain *dom)
635 {
636 	timer_delete_sync(&dom->period_timer);
637 	fprop_global_destroy(&dom->completions);
638 }
639 #endif
640 
641 /*
642  * bdi_min_ratio keeps the sum of the minimum dirty shares of all
643  * registered backing devices, which, for obvious reasons, can not
644  * exceed 100%.
645  */
646 static unsigned int bdi_min_ratio;
647 
648 static int bdi_check_pages_limit(unsigned long pages)
649 {
650 	unsigned long max_dirty_pages = global_dirtyable_memory();
651 
652 	if (pages > max_dirty_pages)
653 		return -EINVAL;
654 
655 	return 0;
656 }
657 
658 static unsigned long bdi_ratio_from_pages(unsigned long pages)
659 {
660 	unsigned long background_thresh;
661 	unsigned long dirty_thresh;
662 	unsigned long ratio;
663 
664 	global_dirty_limits(&background_thresh, &dirty_thresh);
665 	if (!dirty_thresh)
666 		return -EINVAL;
667 	ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);
668 
669 	return ratio;
670 }
671 
672 static u64 bdi_get_bytes(unsigned int ratio)
673 {
674 	unsigned long background_thresh;
675 	unsigned long dirty_thresh;
676 	u64 bytes;
677 
678 	global_dirty_limits(&background_thresh, &dirty_thresh);
679 	bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;
680 
681 	return bytes;
682 }
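/*
 * Note that bdi_ratio_from_pages() and bdi_get_bytes() are approximate
 * inverses: both convert via the current global dirty threshold, so a
 * min/max limit configured in bytes is stored internally as a scaled
 * percentage of that threshold rather than as an absolute page count.
 */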
683 
684 static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
685 {
686 	unsigned int delta;
687 	int ret = 0;
688 
689 	if (min_ratio > 100 * BDI_RATIO_SCALE)
690 		return -EINVAL;
691 
692 	spin_lock_bh(&bdi_lock);
693 	if (min_ratio > bdi->max_ratio) {
694 		ret = -EINVAL;
695 	} else {
696 		if (min_ratio < bdi->min_ratio) {
697 			delta = bdi->min_ratio - min_ratio;
698 			bdi_min_ratio -= delta;
699 			bdi->min_ratio = min_ratio;
700 		} else {
701 			delta = min_ratio - bdi->min_ratio;
702 			if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
703 				bdi_min_ratio += delta;
704 				bdi->min_ratio = min_ratio;
705 			} else {
706 				ret = -EINVAL;
707 			}
708 		}
709 	}
710 	spin_unlock_bh(&bdi_lock);
711 
712 	return ret;
713 }
714 
715 static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
716 {
717 	int ret = 0;
718 
719 	if (max_ratio > 100 * BDI_RATIO_SCALE)
720 		return -EINVAL;
721 
722 	spin_lock_bh(&bdi_lock);
723 	if (bdi->min_ratio > max_ratio) {
724 		ret = -EINVAL;
725 	} else {
726 		bdi->max_ratio = max_ratio;
727 		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
728 						(100 * BDI_RATIO_SCALE);
729 	}
730 	spin_unlock_bh(&bdi_lock);
731 
732 	return ret;
733 }
734 
735 int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio)
736 {
737 	return __bdi_set_min_ratio(bdi, min_ratio);
738 }
739 
740 int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio)
741 {
742 	return __bdi_set_max_ratio(bdi, max_ratio);
743 }
744 
745 int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
746 {
747 	return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE);
748 }
749 
750 int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
751 {
752 	return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
753 }
754 EXPORT_SYMBOL(bdi_set_max_ratio);
755 
756 u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
757 {
758 	return bdi_get_bytes(bdi->min_ratio);
759 }
760 
761 int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
762 {
763 	int ret;
764 	unsigned long pages = min_bytes >> PAGE_SHIFT;
765 	long min_ratio;
766 
767 	ret = bdi_check_pages_limit(pages);
768 	if (ret)
769 		return ret;
770 
771 	min_ratio = bdi_ratio_from_pages(pages);
772 	if (min_ratio < 0)
773 		return min_ratio;
774 	return __bdi_set_min_ratio(bdi, min_ratio);
775 }
776 
777 u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
778 {
779 	return bdi_get_bytes(bdi->max_ratio);
780 }
781 
782 int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
783 {
784 	int ret;
785 	unsigned long pages = max_bytes >> PAGE_SHIFT;
786 	long max_ratio;
787 
788 	ret = bdi_check_pages_limit(pages);
789 	if (ret)
790 		return ret;
791 
792 	max_ratio = bdi_ratio_from_pages(pages);
793 	if (max_ratio < 0)
794 		return max_ratio;
795 	return __bdi_set_max_ratio(bdi, max_ratio);
796 }
797 
798 int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
799 {
800 	if (strict_limit > 1)
801 		return -EINVAL;
802 
803 	spin_lock_bh(&bdi_lock);
804 	if (strict_limit)
805 		bdi->capabilities |= BDI_CAP_STRICTLIMIT;
806 	else
807 		bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
808 	spin_unlock_bh(&bdi_lock);
809 
810 	return 0;
811 }
812 
813 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
814 					   unsigned long bg_thresh)
815 {
816 	return (thresh + bg_thresh) / 2;
817 }
818 
819 static unsigned long hard_dirty_limit(struct wb_domain *dom,
820 				      unsigned long thresh)
821 {
822 	return max(thresh, dom->dirty_limit);
823 }
824 
825 /*
826  * Memory which can be further allocated to a memcg domain is capped by
827  * system-wide clean memory excluding the amount being used in the domain.
828  */
829 static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
830 			    unsigned long filepages, unsigned long headroom)
831 {
832 	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
833 	unsigned long clean = filepages - min(filepages, mdtc->dirty);
834 	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
835 	unsigned long other_clean = global_clean - min(global_clean, clean);
836 
837 	mdtc->avail = filepages + min(headroom, other_clean);
838 }
839 
840 static inline bool dtc_is_global(struct dirty_throttle_control *dtc)
841 {
842 	return mdtc_gdtc(dtc) == NULL;
843 }
844 
845 /*
846  * Dirty background will ignore pages being written as we're trying to
847  * decide whether to put more under writeback.
848  */
849 static void domain_dirty_avail(struct dirty_throttle_control *dtc,
850 			       bool include_writeback)
851 {
852 	if (dtc_is_global(dtc)) {
853 		dtc->avail = global_dirtyable_memory();
854 		dtc->dirty = global_node_page_state(NR_FILE_DIRTY);
855 		if (include_writeback)
856 			dtc->dirty += global_node_page_state(NR_WRITEBACK);
857 	} else {
858 		unsigned long filepages = 0, headroom = 0, writeback = 0;
859 
860 		mem_cgroup_wb_stats(dtc->wb, &filepages, &headroom, &dtc->dirty,
861 				    &writeback);
862 		if (include_writeback)
863 			dtc->dirty += writeback;
864 		mdtc_calc_avail(dtc, filepages, headroom);
865 	}
866 }
867 
868 /**
869  * __wb_calc_thresh - @wb's share of dirty threshold
870  * @dtc: dirty_throttle_context of interest
871  * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc
872  *
873  * Note that balance_dirty_pages() will only seriously take dirty throttling
874  * threshold as a hard limit when sleeping max_pause per page is not enough
875  * to keep the dirty pages under control. For example, when the device is
876  * completely stalled due to some error conditions, or when there are 1000
877  * dd tasks writing to a slow 10MB/s USB key.
878  * In other normal situations, it acts more gently by throttling the tasks
879  * more (rather than completely blocking them) when the wb dirty pages go high.
880  *
881  * It allocates high/low dirty limits to fast/slow devices, in order to prevent
882  * - starving fast devices
883  * - piling up dirty pages (that will take long time to sync) on slow devices
884  *
885  * The wb's share of dirty limit will be adapting to its throughput and
886  * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
887  *
888  * Return: @wb's dirty limit in pages. For dirty throttling limit, the term
889  * "dirty" in the context of dirty balancing includes all PG_dirty and
890  * PG_writeback pages.
891  */
892 static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
893 				      unsigned long thresh)
894 {
895 	struct wb_domain *dom = dtc_dom(dtc);
896 	struct bdi_writeback *wb = dtc->wb;
897 	u64 wb_thresh;
898 	u64 wb_max_thresh;
899 	unsigned long numerator, denominator;
900 	unsigned long wb_min_ratio, wb_max_ratio;
901 
902 	/*
903 	 * Calculate this wb's share of the thresh ratio.
904 	 */
905 	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
906 			      &numerator, &denominator);
907 
908 	wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
909 	wb_thresh *= numerator;
910 	wb_thresh = div64_ul(wb_thresh, denominator);
911 
912 	wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio);
913 
914 	wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
915 
916 	/*
917 	 * It's very possible that wb_thresh is close to 0 not because the
918 	 * device is slow, but because it has remained inactive for a long time.
919 	 * Grant such devices a reasonably good (hopefully IO efficient)
920 	 * threshold, so that occasional writes won't be blocked and active
921 	 * writes can ramp up the threshold quickly.
922 	 */
923 	if (thresh > dtc->dirty) {
924 		if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT))
925 			wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100);
926 		else
927 			wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8);
928 	}
929 
930 	wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
931 	if (wb_thresh > wb_max_thresh)
932 		wb_thresh = wb_max_thresh;
933 
934 	return wb_thresh;
935 }
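/*
 * In short: the wb gets a share of @thresh proportional to its recent
 * fraction of writeout completions, adjusted by bdi->min_ratio and capped
 * by bdi->max_ratio. While the domain still has headroom (thresh > dirty),
 * an inactive wb is granted at least 1/8 of that headroom (1/100 for
 * strictlimit bdis) so occasional writers aren't stalled behind a
 * near-zero threshold.
 */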
936 
937 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
938 {
939 	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
940 
941 	domain_dirty_avail(&gdtc, true);
942 	return __wb_calc_thresh(&gdtc, thresh);
943 }
944 
945 unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
946 {
947 	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
948 	struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
949 
950 	domain_dirty_avail(&gdtc, true);
951 	domain_dirty_avail(&mdtc, true);
952 	domain_dirty_limits(&mdtc);
953 
954 	return __wb_calc_thresh(&mdtc, mdtc.thresh);
955 }
956 
957 /*
958  *                           setpoint - dirty 3
959  *        f(dirty) := 1.0 + (----------------)
960  *                           limit - setpoint
961  *
962  * it's a 3rd order polynomial that subjects to
963  *
964  * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
965  * (2) f(setpoint) = 1.0 => the balance point
966  * (3) f(limit)    = 0   => the hard limit
967  * (4) df/dx      <= 0	 => negative feedback control
968  * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
969  *     => fast response on large errors; small oscillation near setpoint
970  */
971 static long long pos_ratio_polynom(unsigned long setpoint,
972 					  unsigned long dirty,
973 					  unsigned long limit)
974 {
975 	long long pos_ratio;
976 	long x;
977 
978 	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
979 		      (limit - setpoint) | 1);
980 	pos_ratio = x;
981 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
982 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
983 	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
984 
985 	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
986 }
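/*
 * Worked example with the setpoint halfway between freerun and limit (as
 * used by wb_position_ratio()): at dirty == freerun, x == +1.0 and
 * f == 2.0; at dirty == setpoint, x == 0 and f == 1.0; at dirty == limit,
 * x == -1.0 and f == 0. The values are kept in RATELIMIT_CALC_SHIFT fixed
 * point, hence the shifts above.
 */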
987 
988 /*
989  * Dirty position control.
990  *
991  * (o) global/bdi setpoints
992  *
993  * We want the dirty pages be balanced around the global/wb setpoints.
994  * When the number of dirty pages is higher/lower than the setpoint, the
995  * dirty position control ratio (and hence task dirty ratelimit) will be
996  * decreased/increased to bring the dirty pages back to the setpoint.
997  *
998  *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
999  *
1000  *     if (dirty < setpoint) scale up   pos_ratio
1001  *     if (dirty > setpoint) scale down pos_ratio
1002  *
1003  *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
1004  *     if (wb_dirty > wb_setpoint) scale down pos_ratio
1005  *
1006  *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
1007  *
1008  * (o) global control line
1009  *
1010  *     ^ pos_ratio
1011  *     |
1012  *     |            |<===== global dirty control scope ======>|
1013  * 2.0  * * * * * * *
1014  *     |            .*
1015  *     |            . *
1016  *     |            .   *
1017  *     |            .     *
1018  *     |            .        *
1019  *     |            .            *
1020  * 1.0 ................................*
1021  *     |            .                  .     *
1022  *     |            .                  .          *
1023  *     |            .                  .              *
1024  *     |            .                  .                 *
1025  *     |            .                  .                    *
1026  *   0 +------------.------------------.----------------------*------------->
1027  *           freerun^          setpoint^                 limit^   dirty pages
1028  *
1029  * (o) wb control line
1030  *
1031  *     ^ pos_ratio
1032  *     |
1033  *     |            *
1034  *     |              *
1035  *     |                *
1036  *     |                  *
1037  *     |                    * |<=========== span ============>|
1038  * 1.0 .......................*
1039  *     |                      . *
1040  *     |                      .   *
1041  *     |                      .     *
1042  *     |                      .       *
1043  *     |                      .         *
1044  *     |                      .           *
1045  *     |                      .             *
1046  *     |                      .               *
1047  *     |                      .                 *
1048  *     |                      .                   *
1049  *     |                      .                     *
1050  * 1/4 ...............................................* * * * * * * * * * * *
1051  *     |                      .                         .
1052  *     |                      .                           .
1053  *     |                      .                             .
1054  *   0 +----------------------.-------------------------------.------------->
1055  *                wb_setpoint^                    x_intercept^
1056  *
1057  * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
1058  * be smoothly throttled down to normal if it starts high in situations like
1059  * - start writing to a slow SD card and a fast disk at the same time. The SD
1060  *   card's wb_dirty may rush to many times higher than wb_setpoint.
1061  * - the wb dirty thresh drops quickly due to change of JBOD workload
1062  */
1063 static void wb_position_ratio(struct dirty_throttle_control *dtc)
1064 {
1065 	struct bdi_writeback *wb = dtc->wb;
1066 	unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
1067 	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1068 	unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1069 	unsigned long wb_thresh = dtc->wb_thresh;
1070 	unsigned long x_intercept;
1071 	unsigned long setpoint;		/* dirty pages' target balance point */
1072 	unsigned long wb_setpoint;
1073 	unsigned long span;
1074 	long long pos_ratio;		/* for scaling up/down the rate limit */
1075 	long x;
1076 
1077 	dtc->pos_ratio = 0;
1078 
1079 	if (unlikely(dtc->dirty >= limit))
1080 		return;
1081 
1082 	/*
1083 	 * global setpoint
1084 	 *
1085 	 * See comment for pos_ratio_polynom().
1086 	 */
1087 	setpoint = (freerun + limit) / 2;
1088 	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
1089 
1090 	/*
1091 	 * The strictlimit feature is a tool preventing mistrusted filesystems
1092 	 * from growing a large number of dirty pages before throttling. For
1093 	 * such filesystems balance_dirty_pages always checks wb counters
1094 	 * against wb limits. Even if global "nr_dirty" is under "freerun".
1095 	 * This is especially important for fuse which sets bdi->max_ratio to
1096 	 * 1% by default.
1097 	 *
1098 	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
1099 	 * two values: wb_dirty and wb_thresh. Let's consider an example:
1100 	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
1101 	 * limits are set by default to 10% and 20% (background and throttle).
1102 	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
1103 	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
1104 	 * about ~6K pages (as the average of background and throttle wb
1105 	 * limits). The 3rd order polynomial will provide positive feedback if
1106 	 * wb_dirty is under wb_setpoint and vice versa.
1107 	 *
1108 	 * Note, that we cannot use global counters in these calculations
1109 	 * because we want to throttle process writing to a strictlimit wb
1110 	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
1111 	 * in the example above).
1112 	 */
1113 	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1114 		long long wb_pos_ratio;
1115 
1116 		if (dtc->wb_dirty >= wb_thresh)
1117 			return;
1118 
1119 		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
1120 						    dtc->wb_bg_thresh);
1121 
1122 		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
1123 			return;
1124 
1125 		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
1126 						 wb_thresh);
1127 
1128 		/*
1129 		 * Typically, for strictlimit case, wb_setpoint << setpoint
1130 		 * and pos_ratio >> wb_pos_ratio. In other words, the global
1131 		 * state ("dirty") is not the limiting factor and we have to
1132 		 * make decision based on wb counters. But there is an
1133 		 * important case when global pos_ratio should get precedence:
1134 		 * global limits are exceeded (e.g. due to activities on other
1135 		 * wb's) while given strictlimit wb is below limit.
1136 		 *
1137 		 * "pos_ratio * wb_pos_ratio" would work for the case above,
1138 		 * but it would look too non-natural for the case of all
1139 		 * activity in the system coming from a single strictlimit wb
1140 		 * with bdi->max_ratio == 100%.
1141 		 *
1142 		 * Note that min() below somewhat changes the dynamics of the
1143 		 * control system. Normally, pos_ratio value can be well over 3
1144 		 * (when globally we are at freerun and wb is well below wb
1145 		 * setpoint). Now the maximum pos_ratio in the same situation
1146 		 * is 2. We might want to tweak this if we observe the control
1147 		 * system is too slow to adapt.
1148 		 */
1149 		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
1150 		return;
1151 	}
1152 
1153 	/*
1154 	 * We have computed basic pos_ratio above based on global situation. If
1155 	 * the wb is over/under its share of dirty pages, we want to scale
1156 	 * pos_ratio further down/up. That is done by the following mechanism.
1157 	 */
1158 
1159 	/*
1160 	 * wb setpoint
1161 	 *
1162 	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
1163 	 *
1164 	 *                        x_intercept - wb_dirty
1165 	 *                     := --------------------------
1166 	 *                        x_intercept - wb_setpoint
1167 	 *
1168 	 * The main wb control line is a linear function that subjects to
1169 	 *
1170 	 * (1) f(wb_setpoint) = 1.0
1171 	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
1172 	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
1173 	 *
1174 	 * For single wb case, the dirty pages are observed to fluctuate
1175 	 * regularly within range
1176 	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
1177 	 * for various filesystems, where (2) can yield in a reasonable 12.5%
1178 	 * fluctuation range for pos_ratio.
1179 	 *
1180 	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
1181 	 * own size, so move the slope over accordingly and choose a slope that
1182 	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
1183 	 */
1184 	if (unlikely(wb_thresh > dtc->thresh))
1185 		wb_thresh = dtc->thresh;
1186 	/*
1187 	 * scale global setpoint to wb's:
1188 	 *	wb_setpoint = setpoint * wb_thresh / thresh
1189 	 */
1190 	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1191 	wb_setpoint = setpoint * (u64)x >> 16;
1192 	/*
1193 	 * Use span=(8*write_bw) in single wb case as indicated by
1194 	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
1195 	 *
1196 	 *        wb_thresh                    thresh - wb_thresh
1197 	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
1198 	 *         thresh                           thresh
1199 	 */
1200 	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1201 	x_intercept = wb_setpoint + span;
1202 
1203 	if (dtc->wb_dirty < x_intercept - span / 4) {
1204 		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1205 				      (x_intercept - wb_setpoint) | 1);
1206 	} else
1207 		pos_ratio /= 4;
1208 
1209 	/*
1210 	 * wb reserve area, safeguard against dirty pool underrun and disk idle
1211 	 * It may push the desired control point of global dirty pages higher
1212 	 * than setpoint.
1213 	 */
1214 	x_intercept = wb_thresh / 2;
1215 	if (dtc->wb_dirty < x_intercept) {
1216 		if (dtc->wb_dirty > x_intercept / 8)
1217 			pos_ratio = div_u64(pos_ratio * x_intercept,
1218 					    dtc->wb_dirty);
1219 		else
1220 			pos_ratio *= 8;
1221 	}
1222 
1223 	dtc->pos_ratio = pos_ratio;
1224 }
1225 
1226 static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1227 				      unsigned long elapsed,
1228 				      unsigned long written)
1229 {
1230 	const unsigned long period = roundup_pow_of_two(3 * HZ);
1231 	unsigned long avg = wb->avg_write_bandwidth;
1232 	unsigned long old = wb->write_bandwidth;
1233 	u64 bw;
1234 
1235 	/*
1236 	 * bw = written * HZ / elapsed
1237 	 *
1238 	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
1239 	 * write_bandwidth = ---------------------------------------------------
1240 	 *                                          period
1241 	 *
1242 	 * @written may have decreased due to folio_redirty_for_writepage().
1243 	 * Avoid underflowing @bw calculation.
1244 	 */
1245 	bw = written - min(written, wb->written_stamp);
1246 	bw *= HZ;
1247 	if (unlikely(elapsed > period)) {
1248 		bw = div64_ul(bw, elapsed);
1249 		avg = bw;
1250 		goto out;
1251 	}
1252 	bw += (u64)wb->write_bandwidth * (period - elapsed);
1253 	bw >>= ilog2(period);
1254 
1255 	/*
1256 	 * one more level of smoothing, for filtering out sudden spikes
1257 	 */
1258 	if (avg > old && old >= (unsigned long)bw)
1259 		avg -= (avg - old) >> 3;
1260 
1261 	if (avg < old && old <= (unsigned long)bw)
1262 		avg += (old - avg) >> 3;
1263 
1264 out:
1265 	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1266 	avg = max(avg, 1LU);
1267 	if (wb_has_dirty_io(wb)) {
1268 		long delta = avg - wb->avg_write_bandwidth;
1269 		WARN_ON_ONCE(atomic_long_add_return(delta,
1270 					&wb->bdi->tot_write_bandwidth) <= 0);
1271 	}
1272 	wb->write_bandwidth = bw;
1273 	WRITE_ONCE(wb->avg_write_bandwidth, avg);
1274 }
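/*
 * In other words, write_bandwidth is a moving average in pages per second
 * over a window of roundup_pow_of_two(3 * HZ) jiffies (roughly three
 * seconds), and avg_write_bandwidth follows it while moving at most 1/8 of
 * the remaining gap per update, which filters out sudden spikes.
 */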
1275 
1276 static void update_dirty_limit(struct dirty_throttle_control *dtc)
1277 {
1278 	struct wb_domain *dom = dtc_dom(dtc);
1279 	unsigned long thresh = dtc->thresh;
1280 	unsigned long limit = dom->dirty_limit;
1281 
1282 	/*
1283 	 * Follow up in one step.
1284 	 */
1285 	if (limit < thresh) {
1286 		limit = thresh;
1287 		goto update;
1288 	}
1289 
1290 	/*
1291 	 * Follow down slowly. Use the higher one as the target, because thresh
1292 	 * may drop below dirty. This is exactly the reason to introduce
1293 	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1294 	 */
1295 	thresh = max(thresh, dtc->dirty);
1296 	if (limit > thresh) {
1297 		limit -= (limit - thresh) >> 5;
1298 		goto update;
1299 	}
1300 	return;
1301 update:
1302 	dom->dirty_limit = limit;
1303 }
1304 
1305 static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
1306 				      unsigned long now)
1307 {
1308 	struct wb_domain *dom = dtc_dom(dtc);
1309 
1310 	/*
1311 	 * check locklessly first to optimize away locking for the most time
1312 	 */
1313 	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1314 		return;
1315 
1316 	spin_lock(&dom->lock);
1317 	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
1318 		update_dirty_limit(dtc);
1319 		dom->dirty_limit_tstamp = now;
1320 	}
1321 	spin_unlock(&dom->lock);
1322 }
1323 
1324 /*
1325  * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1326  *
1327  * Normal wb tasks will be curbed at or below it in long term.
1328  * Obviously it should be around (write_bw / N) when there are N dd tasks.
1329  */
1330 static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1331 				      unsigned long dirtied,
1332 				      unsigned long elapsed)
1333 {
1334 	struct bdi_writeback *wb = dtc->wb;
1335 	unsigned long dirty = dtc->dirty;
1336 	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1337 	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1338 	unsigned long setpoint = (freerun + limit) / 2;
1339 	unsigned long write_bw = wb->avg_write_bandwidth;
1340 	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1341 	unsigned long dirty_rate;
1342 	unsigned long task_ratelimit;
1343 	unsigned long balanced_dirty_ratelimit;
1344 	unsigned long step;
1345 	unsigned long x;
1346 	unsigned long shift;
1347 
1348 	/*
1349 	 * The dirty rate will match the writeout rate in long term, except
1350 	 * when dirty pages are truncated by userspace or re-dirtied by FS.
1351 	 */
1352 	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1353 
1354 	/*
1355 	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1356 	 */
1357 	task_ratelimit = (u64)dirty_ratelimit *
1358 					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1359 	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1360 
1361 	/*
1362 	 * A linear estimation of the "balanced" throttle rate. The theory is,
1363 	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1364 	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1365 	 * formula will yield the balanced rate limit (write_bw / N).
1366 	 *
1367 	 * Note that the expanded form is not a pure rate feedback:
1368 	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
1369 	 * but also takes pos_ratio into account:
1370 	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
1371 	 *
1372 	 * (1) is not realistic because pos_ratio also takes part in balancing
1373 	 * the dirty rate.  Consider the state
1374 	 *	pos_ratio = 0.5						     (3)
1375 	 *	rate = 2 * (write_bw / N)				     (4)
1376 	 * If (1) is used, it will get stuck in that state! Because each dd will
1377 	 * be throttled at
1378 	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
1379 	 * yielding
1380 	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
1381 	 * put (6) into (1) we get
1382 	 *	rate_(i+1) = rate_(i)					     (7)
1383 	 *
1384 	 * So we end up using (2) to always keep
1385 	 *	rate_(i+1) ~= (write_bw / N)				     (8)
1386 	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1387 	 * pos_ratio is able to drive itself to 1.0, which is not only where
1388 	 * the dirty count meets the setpoint, but also where the slope of
1389 	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
1390 	 */
1391 	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1392 					   dirty_rate | 1);
1393 	/*
1394 	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1395 	 */
1396 	if (unlikely(balanced_dirty_ratelimit > write_bw))
1397 		balanced_dirty_ratelimit = write_bw;
1398 
1399 	/*
1400 	 * We could safely do this and return immediately:
1401 	 *
1402 	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
1403 	 *
1404 	 * However to get a more stable dirty_ratelimit, the below elaborated
1405 	 * code makes use of task_ratelimit to filter out singular points and
1406 	 * limit the step size.
1407 	 *
1408 	 * The below code essentially only uses the relative value of
1409 	 *
1410 	 *	task_ratelimit - dirty_ratelimit
1411 	 *	= (pos_ratio - 1) * dirty_ratelimit
1412 	 *
1413 	 * which reflects the direction and size of dirty position error.
1414 	 */
1415 
1416 	/*
1417 	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1418 	 * task_ratelimit is on the same side of dirty_ratelimit, too.
1419 	 * For example, when
1420 	 * - dirty_ratelimit > balanced_dirty_ratelimit
1421 	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1422 	 * lowering dirty_ratelimit will help meet both the position and rate
1423 	 * control targets. Otherwise, don't update dirty_ratelimit if it will
1424 	 * only help meet the rate target. After all, what the users ultimately
1425 	 * feel and care are stable dirty rate and small position error.
1426 	 *
1427 	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1428 	 * and filter out the singular points of balanced_dirty_ratelimit, which
1429 	 * keeps jumping around randomly and can even leap far away at times
1430 	 * due to the small 200ms estimation period of dirty_rate (we want to
1431 	 * keep that period small to reduce time lags).
1432 	 */
1433 	step = 0;
1434 
1435 	/*
1436 	 * For strictlimit case, calculations above were based on wb counters
1437 	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
1438 	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1439 	 * Hence, to calculate "step" properly, we have to use wb_dirty as
1440 	 * "dirty" and wb_setpoint as "setpoint".
1441 	 */
1442 	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1443 		dirty = dtc->wb_dirty;
1444 		setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1445 	}
1446 
1447 	if (dirty < setpoint) {
1448 		x = min3(wb->balanced_dirty_ratelimit,
1449 			 balanced_dirty_ratelimit, task_ratelimit);
1450 		if (dirty_ratelimit < x)
1451 			step = x - dirty_ratelimit;
1452 	} else {
1453 		x = max3(wb->balanced_dirty_ratelimit,
1454 			 balanced_dirty_ratelimit, task_ratelimit);
1455 		if (dirty_ratelimit > x)
1456 			step = dirty_ratelimit - x;
1457 	}
1458 
1459 	/*
1460 	 * Don't pursue 100% rate matching. It's impossible since the balanced
1461 	 * rate itself is constantly fluctuating. So decrease the tracking speed
1462 	 * when it gets close to the target. Helps eliminate pointless tremors.
1463 	 */
1464 	shift = dirty_ratelimit / (2 * step + 1);
1465 	if (shift < BITS_PER_LONG)
1466 		step = DIV_ROUND_UP(step >> shift, 8);
1467 	else
1468 		step = 0;
1469 
1470 	if (dirty_ratelimit < balanced_dirty_ratelimit)
1471 		dirty_ratelimit += step;
1472 	else
1473 		dirty_ratelimit -= step;
1474 
1475 	WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
1476 	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1477 
1478 	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1479 }
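/*
 * Rough example: four dd tasks writing to a device with a measured write
 * bandwidth of 100 MB/s will each see dirty_ratelimit converge towards
 * about 25 MB/s (write_bw / N), with pos_ratio then providing the fine
 * position correction around the setpoint.
 */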
1480 
1481 static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1482 				  struct dirty_throttle_control *mdtc,
1483 				  bool update_ratelimit)
1484 {
1485 	struct bdi_writeback *wb = gdtc->wb;
1486 	unsigned long now = jiffies;
1487 	unsigned long elapsed;
1488 	unsigned long dirtied;
1489 	unsigned long written;
1490 
1491 	spin_lock(&wb->list_lock);
1492 
1493 	/*
1494 	 * Lockless checks for elapsed time are racy and delayed update after
1495 	 * IO completion doesn't do it at all (to make sure written pages are
1496 	 * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
1497 	 * division errors.
1498 	 */
1499 	elapsed = max(now - wb->bw_time_stamp, 1UL);
1500 	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1501 	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1502 
1503 	if (update_ratelimit) {
1504 		domain_update_dirty_limit(gdtc, now);
1505 		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1506 
1507 		/*
1508 		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1509 		 * compiler has no way to figure that out.  Help it.
1510 		 */
1511 		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
1512 			domain_update_dirty_limit(mdtc, now);
1513 			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1514 		}
1515 	}
1516 	wb_update_write_bandwidth(wb, elapsed, written);
1517 
1518 	wb->dirtied_stamp = dirtied;
1519 	wb->written_stamp = written;
1520 	WRITE_ONCE(wb->bw_time_stamp, now);
1521 	spin_unlock(&wb->list_lock);
1522 }
1523 
1524 void wb_update_bandwidth(struct bdi_writeback *wb)
1525 {
1526 	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1527 
1528 	__wb_update_bandwidth(&gdtc, NULL, false);
1529 }
1530 
1531 /* Interval after which we consider wb idle and don't estimate bandwidth */
1532 #define WB_BANDWIDTH_IDLE_JIF (HZ)
1533 
1534 static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1535 {
1536 	unsigned long now = jiffies;
1537 	unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1538 
1539 	if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
1540 	    !atomic_read(&wb->writeback_inodes)) {
1541 		spin_lock(&wb->list_lock);
1542 		wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1543 		wb->written_stamp = wb_stat(wb, WB_WRITTEN);
1544 		WRITE_ONCE(wb->bw_time_stamp, now);
1545 		spin_unlock(&wb->list_lock);
1546 	}
1547 }
1548 
1549 /*
1550  * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1551  * will look to see if it needs to start dirty throttling.
1552  *
1553  * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1554  * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
1555  * (the number of pages we may dirty without exceeding the dirty limits).
1556  */
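/*
 * For illustration (example numbers, not taken from the code): with a safety
 * margin of 65536 pages, ilog2(65536) = 16 and the task polls again after
 * 1 << 8 = 256 dirtied pages; once the margin shrinks to 1024 pages, it
 * polls every 1 << 5 = 32 pages.
 */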
1557 static unsigned long dirty_poll_interval(unsigned long dirty,
1558 					 unsigned long thresh)
1559 {
1560 	if (thresh > dirty)
1561 		return 1UL << (ilog2(thresh - dirty) >> 1);
1562 
1563 	return 1;
1564 }
1565 
1566 static unsigned long wb_max_pause(struct bdi_writeback *wb,
1567 				  unsigned long wb_dirty)
1568 {
1569 	unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
1570 	unsigned long t;
1571 
1572 	/*
1573 	 * Limit pause time for small memory systems. If we sleep for too long,
1574 	 * a small pool of dirty/writeback pages may go empty and the disk may go
1575 	 * idle.
1576 	 *
1577 	 * 8 serves as the safety ratio.
1578 	 */
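	/*
	 * For illustration, assuming HZ=1000, 4KiB pages and an average write
	 * bandwidth of 25600 pages/s (~100 MB/s): roundup_pow_of_two(1 + HZ / 8)
	 * is 128, so bw / 128 = 200 and t = wb_dirty / 201 + 1.  With
	 * wb_dirty = 4020 that is 21 jiffies (~21ms), well under MAX_PAUSE.
	 */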
1579 	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1580 	t++;
1581 
1582 	return min_t(unsigned long, t, MAX_PAUSE);
1583 }
1584 
1585 static long wb_min_pause(struct bdi_writeback *wb,
1586 			 long max_pause,
1587 			 unsigned long task_ratelimit,
1588 			 unsigned long dirty_ratelimit,
1589 			 int *nr_dirtied_pause)
1590 {
1591 	long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
1592 	long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
1593 	long t;		/* target pause */
1594 	long pause;	/* estimated next pause */
1595 	int pages;	/* target nr_dirtied_pause */
1596 
1597 	/* target for 10ms pause on 1-dd case */
1598 	t = max(1, HZ / 100);
1599 
1600 	/*
1601 	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1602 	 * overheads.
1603 	 *
1604 	 * (N * 10ms) on 2^N concurrent tasks.
1605 	 */
1606 	if (hi > lo)
1607 		t += (hi - lo) * (10 * HZ) / 1024;
1608 
1609 	/*
1610 	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1611 	 * on the much more stable dirty_ratelimit. However the next pause time
1612 	 * will be computed based on task_ratelimit and the two rate limits may
1613 	 * depart considerably at some time. Especially if task_ratelimit goes
1614 	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1615 	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
1616 	 * result task_ratelimit won't be executed faithfully, which could
1617 	 * eventually bring down dirty_ratelimit.
1618 	 *
1619 	 * We apply two rules to fix it up:
1620 	 * 1) try to estimate the next pause time and if necessary, use a lower
1621 	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
1622 	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
1623 	 * 2) limit the target pause time to max_pause/2, so that the normal
1624 	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
1625 	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
1626 	 */
1627 	t = min(t, 1 + max_pause / 2);
1628 	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1629 
1630 	/*
1631 	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1632 	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1633 	 * When the 16 consecutive reads are often interrupted by some dirty
1634 	 * throttling pause during the async writes, cfq will go idle
1635 	 * (deadline is fine). So push nr_dirtied_pause as high as possible
1636 	 * until it reaches DIRTY_POLL_THRESH=32 pages.
1637 	 */
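	/*
	 * Worked example (illustrative, assuming HZ=1000): with
	 * dirty_ratelimit = 1000 pages/s and max_pause = 200 jiffies, taking
	 * t = max_pause gives pages = 1000 * 200 / 1024 = 195 > 32, so pages
	 * is clamped to DIRTY_POLL_THRESH and t = 1000 * 32 / 1000 = 32 jiffies.
	 */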
1638 	if (pages < DIRTY_POLL_THRESH) {
1639 		t = max_pause;
1640 		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1641 		if (pages > DIRTY_POLL_THRESH) {
1642 			pages = DIRTY_POLL_THRESH;
1643 			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1644 		}
1645 	}
1646 
1647 	pause = HZ * pages / (task_ratelimit + 1);
1648 	if (pause > max_pause) {
1649 		t = max_pause;
1650 		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1651 	}
1652 
1653 	*nr_dirtied_pause = pages;
1654 	/*
1655 	 * The minimal pause time will normally be half the target pause time.
1656 	 */
1657 	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1658 }
1659 
1660 static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
1661 {
1662 	struct bdi_writeback *wb = dtc->wb;
1663 	unsigned long wb_reclaimable;
1664 
1665 	/*
1666 	 * wb_thresh is not treated as a hard limiting factor in the way
1667 	 * dirty_thresh is, for the following reasons:
1668 	 * - in JBOD setup, wb_thresh can fluctuate a lot
1669 	 * - in a system with HDD and USB key, the USB key may somehow
1670 	 *   go into state (wb_dirty >> wb_thresh) either because
1671 	 *   wb_dirty starts high, or because wb_thresh drops low.
1672 	 *   In this case we don't want to hard throttle the USB key
1673 	 *   dirtiers for 100 seconds until wb_dirty drops under
1674 	 *   wb_thresh. Instead the auxiliary wb control line in
1675 	 *   wb_position_ratio() will let the dirtier task progress
1676 	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
1677 	 */
1678 	dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
1679 	dtc->wb_bg_thresh = dtc->thresh ?
1680 		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
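	/*
	 * wb_bg_thresh is simply wb_thresh scaled by the global
	 * bg_thresh/thresh ratio, e.g. (illustrative numbers) with
	 * thresh = 200000, bg_thresh = 100000 and wb_thresh = 40000 pages,
	 * wb_bg_thresh comes out as 20000 pages.
	 */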
1681 
1682 	/*
1683 	 * In order to avoid the stacked BDI deadlock we need
1684 	 * to ensure we accurately count the 'dirty' pages when
1685 	 * the threshold is low.
1686 	 *
1687 	 * Otherwise it would be possible to get thresh+n pages
1688 	 * reported dirty, even though there are thresh-m pages
1689 	 * actually dirty; with m+n sitting in the percpu
1690 	 * deltas.
1691 	 */
1692 	if (dtc->wb_thresh < 2 * wb_stat_error()) {
1693 		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1694 		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1695 	} else {
1696 		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1697 		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1698 	}
1699 }
1700 
1701 static unsigned long domain_poll_intv(struct dirty_throttle_control *dtc,
1702 				      bool strictlimit)
1703 {
1704 	unsigned long dirty, thresh;
1705 
1706 	if (strictlimit) {
1707 		dirty = dtc->wb_dirty;
1708 		thresh = dtc->wb_thresh;
1709 	} else {
1710 		dirty = dtc->dirty;
1711 		thresh = dtc->thresh;
1712 	}
1713 
1714 	return dirty_poll_interval(dirty, thresh);
1715 }
1716 
1717 /*
1718  * Throttle it only when the background writeback cannot catch up. This avoids
1719  * (excessively) small writeouts when the wb limits are ramping up in case of
1720  * !strictlimit.
1721  *
1722  * In the strictlimit case, make the decision based on the wb counters and
1723  * limits. Small writeouts when the wb limits are ramping up are the price we
1724  * consciously pay for strictlimit-ing.
1725  */
1726 static void domain_dirty_freerun(struct dirty_throttle_control *dtc,
1727 				 bool strictlimit)
1728 {
1729 	unsigned long dirty, thresh, bg_thresh;
1730 
1731 	if (unlikely(strictlimit)) {
1732 		wb_dirty_limits(dtc);
1733 		dirty = dtc->wb_dirty;
1734 		thresh = dtc->wb_thresh;
1735 		bg_thresh = dtc->wb_bg_thresh;
1736 	} else {
1737 		dirty = dtc->dirty;
1738 		thresh = dtc->thresh;
1739 		bg_thresh = dtc->bg_thresh;
1740 	}
1741 	dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh);
1742 }
1743 
1744 static void balance_domain_limits(struct dirty_throttle_control *dtc,
1745 				  bool strictlimit)
1746 {
1747 	domain_dirty_avail(dtc, true);
1748 	domain_dirty_limits(dtc);
1749 	domain_dirty_freerun(dtc, strictlimit);
1750 }
1751 
1752 static void wb_dirty_freerun(struct dirty_throttle_control *dtc,
1753 			     bool strictlimit)
1754 {
1755 	dtc->freerun = false;
1756 
1757 	/* was already handled in domain_dirty_freerun */
1758 	if (strictlimit)
1759 		return;
1760 
1761 	wb_dirty_limits(dtc);
1762 	/*
1763 	 * LOCAL_THROTTLE tasks must not be throttled when below the per-wb
1764 	 * freerun ceiling.
1765 	 */
1766 	if (!(current->flags & PF_LOCAL_THROTTLE))
1767 		return;
1768 
1769 	dtc->freerun = dtc->wb_dirty <
1770 		       dirty_freerun_ceiling(dtc->wb_thresh, dtc->wb_bg_thresh);
1771 }
1772 
1773 static inline void wb_dirty_exceeded(struct dirty_throttle_control *dtc,
1774 				     bool strictlimit)
1775 {
1776 	dtc->dirty_exceeded = (dtc->wb_dirty > dtc->wb_thresh) &&
1777 		((dtc->dirty > dtc->thresh) || strictlimit);
1778 }
1779 
1780 /*
1781  * The limits fields dirty_exceeded and pos_ratio won't be updated if wb is
1782  * in freerun state. Please don't use these invalid fields in freerun case.
1783  */
1784 static void balance_wb_limits(struct dirty_throttle_control *dtc,
1785 			      bool strictlimit)
1786 {
1787 	wb_dirty_freerun(dtc, strictlimit);
1788 	if (dtc->freerun)
1789 		return;
1790 
1791 	wb_dirty_exceeded(dtc, strictlimit);
1792 	wb_position_ratio(dtc);
1793 }
1794 
1795 /*
1796  * balance_dirty_pages() must be called by processes which are generating dirty
1797  * data.  It looks at the number of dirty pages in the machine and will force
1798  * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
1799  * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
1800  * perform some writeout.
1801  */
1802 static int balance_dirty_pages(struct bdi_writeback *wb,
1803 			       unsigned long pages_dirtied, unsigned int flags)
1804 {
1805 	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1806 	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1807 	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1808 	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1809 						     &mdtc_stor : NULL;
1810 	struct dirty_throttle_control *sdtc;
1811 	unsigned long nr_dirty;
1812 	long period;
1813 	long pause;
1814 	long max_pause;
1815 	long min_pause;
1816 	int nr_dirtied_pause;
1817 	unsigned long task_ratelimit;
1818 	unsigned long dirty_ratelimit;
1819 	struct backing_dev_info *bdi = wb->bdi;
1820 	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1821 	unsigned long start_time = jiffies;
1822 	int ret = 0;
1823 
1824 	for (;;) {
1825 		unsigned long now = jiffies;
1826 
1827 		nr_dirty = global_node_page_state(NR_FILE_DIRTY);
1828 
1829 		balance_domain_limits(gdtc, strictlimit);
1830 		if (mdtc) {
1831 			/*
1832 			 * If @wb belongs to !root memcg, repeat the same
1833 			 * basic calculations for the memcg domain.
1834 			 */
1835 			balance_domain_limits(mdtc, strictlimit);
1836 		}
1837 
1838 		if (nr_dirty > gdtc->bg_thresh && !writeback_in_progress(wb))
1839 			wb_start_background_writeback(wb);
1840 
1841 		/*
1842 		 * If memcg domain is in effect, @dirty should be under
1843 		 * both global and memcg freerun ceilings.
1844 		 */
1845 		if (gdtc->freerun && (!mdtc || mdtc->freerun)) {
1846 			unsigned long intv;
1847 			unsigned long m_intv;
1848 
1849 free_running:
1850 			intv = domain_poll_intv(gdtc, strictlimit);
1851 			m_intv = ULONG_MAX;
1852 
1853 			current->dirty_paused_when = now;
1854 			current->nr_dirtied = 0;
1855 			if (mdtc)
1856 				m_intv = domain_poll_intv(mdtc, strictlimit);
1857 			current->nr_dirtied_pause = min(intv, m_intv);
1858 			break;
1859 		}
1860 
1861 		/*
1862 		 * Unconditionally start background writeback if it's not
1863 		 * already in progress. We need to do this because the global
1864 		 * dirty threshold check above (nr_dirty > gdtc->bg_thresh)
1865 		 * doesn't account for these cases:
1866 		 *
1867 		 * a) strictlimit BDIs: throttling is calculated using per-wb
1868 		 * thresholds. The per-wb threshold can be exceeded even when
1869 		 * nr_dirty < gdtc->bg_thresh
1870 		 *
1871 		 * b) memcg-based throttling: memcg uses its own dirty count and
1872 		 * thresholds and can trigger throttling even when global
1873 		 * nr_dirty < gdtc->bg_thresh
1874 		 *
1875 		 * Writeback needs to be started else the writer stalls in the
1876 		 * throttle loop waiting for dirty pages to be written back
1877 		 * while no writeback is running.
1878 		 */
1879 		if (unlikely(!writeback_in_progress(wb)))
1880 			wb_start_background_writeback(wb);
1881 
1882 		mem_cgroup_flush_foreign(wb);
1883 
1884 		/*
1885 		 * Calculate global domain's pos_ratio and select the
1886 		 * global dtc by default.
1887 		 */
1888 		balance_wb_limits(gdtc, strictlimit);
1889 		if (gdtc->freerun)
1890 			goto free_running;
1891 		sdtc = gdtc;
1892 
1893 		if (mdtc) {
1894 			/*
1895 			 * If memcg domain is in effect, calculate its
1896 			 * pos_ratio.  @wb should satisfy constraints from
1897 			 * both global and memcg domains.  Choose the one
1898 			 * w/ lower pos_ratio.
1899 			 */
1900 			balance_wb_limits(mdtc, strictlimit);
1901 			if (mdtc->freerun)
1902 				goto free_running;
1903 			if (mdtc->pos_ratio < gdtc->pos_ratio)
1904 				sdtc = mdtc;
1905 		}
1906 
1907 		wb->dirty_exceeded = gdtc->dirty_exceeded ||
1908 				     (mdtc && mdtc->dirty_exceeded);
1909 		if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
1910 					   BANDWIDTH_INTERVAL))
1911 			__wb_update_bandwidth(gdtc, mdtc, true);
1912 
1913 		/* throttle according to the chosen dtc */
1914 		dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
1915 		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1916 							RATELIMIT_CALC_SHIFT;
1917 		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1918 		min_pause = wb_min_pause(wb, max_pause,
1919 					 task_ratelimit, dirty_ratelimit,
1920 					 &nr_dirtied_pause);
1921 
1922 		if (unlikely(task_ratelimit == 0)) {
1923 			period = max_pause;
1924 			pause = max_pause;
1925 			goto pause;
1926 		}
1927 		period = HZ * pages_dirtied / task_ratelimit;
1928 		pause = period;
1929 		if (current->dirty_paused_when)
1930 			pause -= now - current->dirty_paused_when;
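		/*
		 * Illustrative numbers: with HZ=1000, pages_dirtied = 32 and
		 * task_ratelimit = 1600 pages/s, period = 1000 * 32 / 1600 = 20
		 * jiffies; if the task already "thought" for 5 jiffies since its
		 * last pause, only 15 jiffies of sleep are left to take now.
		 */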
1931 		/*
1932 		 * For less than 1s think time (ext3/4 may block the dirtier
1933 		 * for up to 800ms from time to time on 1-HDD; so does xfs,
1934 		 * though much less frequently), try to compensate for it in
1935 		 * future periods by updating the virtual time; otherwise just
1936 		 * do a reset, as it may be a light dirtier.
1937 		 */
1938 		if (pause < min_pause) {
1939 			trace_balance_dirty_pages(wb,
1940 						  sdtc,
1941 						  dirty_ratelimit,
1942 						  task_ratelimit,
1943 						  pages_dirtied,
1944 						  period,
1945 						  min(pause, 0L),
1946 						  start_time);
1947 			if (pause < -HZ) {
1948 				current->dirty_paused_when = now;
1949 				current->nr_dirtied = 0;
1950 			} else if (period) {
1951 				current->dirty_paused_when += period;
1952 				current->nr_dirtied = 0;
1953 			} else if (current->nr_dirtied_pause <= pages_dirtied)
1954 				current->nr_dirtied_pause += pages_dirtied;
1955 			break;
1956 		}
1957 		if (unlikely(pause > max_pause)) {
1958 			/* for occasional dropped task_ratelimit */
1959 			now += min(pause - max_pause, max_pause);
1960 			pause = max_pause;
1961 		}
1962 
1963 pause:
1964 		trace_balance_dirty_pages(wb,
1965 					  sdtc,
1966 					  dirty_ratelimit,
1967 					  task_ratelimit,
1968 					  pages_dirtied,
1969 					  period,
1970 					  pause,
1971 					  start_time);
1972 		if (flags & BDP_ASYNC) {
1973 			ret = -EAGAIN;
1974 			break;
1975 		}
1976 		__set_current_state(TASK_KILLABLE);
1977 		bdi->last_bdp_sleep = jiffies;
1978 		io_schedule_timeout(pause);
1979 
1980 		current->dirty_paused_when = now + pause;
1981 		current->nr_dirtied = 0;
1982 		current->nr_dirtied_pause = nr_dirtied_pause;
1983 
1984 		/*
1985 		 * This is typically equal to (dirty < thresh) and can also
1986 		 * keep "1000+ dd on a slow USB stick" under control.
1987 		 */
1988 		if (task_ratelimit)
1989 			break;
1990 
1991 		/*
1992 		 * If an NFS server is unresponsive and its dirty pages exceed
1993 		 * dirty_thresh, give the other, healthy wb's a pipe to go through,
1994 		 * so that tasks on them still remain responsive.
1995 		 *
1996 		 * In theory 1 page is enough to keep the consumer-producer
1997 		 * pipe going: the flusher cleans 1 page => the task dirties 1
1998 		 * more page. However wb_dirty has accounting errors.  So use
1999 		 * the larger and more IO friendly wb_stat_error.
2000 		 */
2001 		if (sdtc->wb_dirty <= wb_stat_error())
2002 			break;
2003 
2004 		if (fatal_signal_pending(current))
2005 			break;
2006 	}
2007 	return ret;
2008 }
2009 
2010 static DEFINE_PER_CPU(int, bdp_ratelimits);
2011 
2012 /*
2013  * Normal tasks are throttled by
2014  *	loop {
2015  *		dirty tsk->nr_dirtied_pause pages;
2016  *		take a snap in balance_dirty_pages();
2017  *	}
2018  * However there is a worst case. If every task exits immediately after dirtying
2019  * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
2020  * called to throttle the page dirties. The solution is to save the not yet
2021  * throttled page dirties in dirty_throttle_leaks on task exit and charge them
2022  * randomly into the running tasks. This works well for the above worst case,
2023  * as the new task will pick up and accumulate the old task's leaked dirty
2024  * count and eventually get throttled.
2025  */
2026 DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
2027 
2028 /**
2029  * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
2030  * @mapping: address_space which was dirtied.
2031  * @flags: BDP flags.
2032  *
2033  * Processes which are dirtying memory should call in here once for each page
2034  * which was newly dirtied.  The function will periodically check the system's
2035  * dirty state and will initiate writeback if needed.
2036  *
2037  * See balance_dirty_pages_ratelimited() for details.
2038  *
2039  * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
2040  * indicate that memory is out of balance and the caller must wait
2041  * for I/O to complete.  Otherwise, it will return 0 to indicate
2042  * that either memory was already in balance, or it was able to sleep
2043  * until the amount of dirty memory returned to balance.
2044  */
2045 int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
2046 					unsigned int flags)
2047 {
2048 	struct inode *inode = mapping->host;
2049 	struct backing_dev_info *bdi = inode_to_bdi(inode);
2050 	struct bdi_writeback *wb = NULL;
2051 	int ratelimit;
2052 	int ret = 0;
2053 	int *p;
2054 
2055 	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
2056 		return ret;
2057 
2058 	if (inode_cgwb_enabled(inode))
2059 		wb = wb_get_create_current(bdi, GFP_KERNEL);
2060 	if (!wb)
2061 		wb = &bdi->wb;
2062 
2063 	ratelimit = current->nr_dirtied_pause;
2064 	if (wb->dirty_exceeded)
2065 		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
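	/* With 4KiB pages (PAGE_SHIFT == 12) this caps the poll interval at 8 pages. */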
2066 
2067 	preempt_disable();
2068 	/*
2069 	 * This prevents one CPU from accumulating too many dirtied pages without
2070 	 * calling into balance_dirty_pages(), which can happen when there are
2071 	 * 1000+ tasks that all start dirtying pages at exactly the same time
2072 	 * and hence all honour an initial task->nr_dirtied_pause that is too large.
2073 	 */
2074 	p =  this_cpu_ptr(&bdp_ratelimits);
2075 	if (unlikely(current->nr_dirtied >= ratelimit))
2076 		*p = 0;
2077 	else if (unlikely(*p >= ratelimit_pages)) {
2078 		*p = 0;
2079 		ratelimit = 0;
2080 	}
2081 	/*
2082 	 * Pick up the dirtied pages left behind by exited tasks. This avoids lots of
2083 	 * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
2084 	 * dirty throttling and livelocking other long-running dirtiers.
2085 	 */
2086 	p = this_cpu_ptr(&dirty_throttle_leaks);
2087 	if (*p > 0 && current->nr_dirtied < ratelimit) {
2088 		unsigned long nr_pages_dirtied;
2089 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
2090 		*p -= nr_pages_dirtied;
2091 		current->nr_dirtied += nr_pages_dirtied;
2092 	}
2093 	preempt_enable();
2094 
2095 	if (unlikely(current->nr_dirtied >= ratelimit))
2096 		ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
2097 
2098 	wb_put(wb);
2099 	return ret;
2100 }
2101 EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
2102 
2103 /**
2104  * balance_dirty_pages_ratelimited - balance dirty memory state.
2105  * @mapping: address_space which was dirtied.
2106  *
2107  * Processes which are dirtying memory should call in here once for each page
2108  * which was newly dirtied.  The function will periodically check the system's
2109  * dirty state and will initiate writeback if needed.
2110  *
2111  * Once we're over the dirty memory limit we decrease the ratelimiting
2112  * by a lot, to prevent individual processes from overshooting the limit
2113  * by (ratelimit_pages) each.
2114  */
2115 void balance_dirty_pages_ratelimited(struct address_space *mapping)
2116 {
2117 	balance_dirty_pages_ratelimited_flags(mapping, 0);
2118 }
2119 EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
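/*
 * Illustrative usage only (not part of this file): a buffered write path
 * typically calls this once per folio it dirties, e.g.
 *
 *	while (more_data_to_copy(iter)) {
 *		folio = my_get_and_dirty_folio(mapping, pos);	// hypothetical helpers
 *		balance_dirty_pages_ratelimited(mapping);
 *	}
 *
 * Non-blocking callers can use balance_dirty_pages_ratelimited_flags() with
 * BDP_ASYNC instead and treat -EAGAIN as "stop and wait for writeback before
 * retrying".
 */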
2120 
2121 /*
2122  * Similar to wb_dirty_limits, wb_bg_dirty_limits also calculates dirty
2123  * and thresh, but it's for background writeback.
2124  */
2125 static void wb_bg_dirty_limits(struct dirty_throttle_control *dtc)
2126 {
2127 	struct bdi_writeback *wb = dtc->wb;
2128 
2129 	dtc->wb_bg_thresh = __wb_calc_thresh(dtc, dtc->bg_thresh);
2130 	if (dtc->wb_bg_thresh < 2 * wb_stat_error())
2131 		dtc->wb_dirty = wb_stat_sum(wb, WB_RECLAIMABLE);
2132 	else
2133 		dtc->wb_dirty = wb_stat(wb, WB_RECLAIMABLE);
2134 }
2135 
2136 static bool domain_over_bg_thresh(struct dirty_throttle_control *dtc)
2137 {
2138 	domain_dirty_avail(dtc, false);
2139 	domain_dirty_limits(dtc);
2140 	if (dtc->dirty > dtc->bg_thresh)
2141 		return true;
2142 
2143 	wb_bg_dirty_limits(dtc);
2144 	if (dtc->wb_dirty > dtc->wb_bg_thresh)
2145 		return true;
2146 
2147 	return false;
2148 }
2149 
2150 /**
2151  * wb_over_bg_thresh - does @wb need to be written back?
2152  * @wb: bdi_writeback of interest
2153  *
2154  * Determines whether background writeback should keep writing @wb or it's
2155  * clean enough.
2156  *
2157  * Return: %true if writeback should continue.
2158  */
2159 bool wb_over_bg_thresh(struct bdi_writeback *wb)
2160 {
2161 	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
2162 	struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
2163 
2164 	if (domain_over_bg_thresh(&gdtc))
2165 		return true;
2166 
2167 	if (mdtc_valid(&mdtc))
2168 		return domain_over_bg_thresh(&mdtc);
2169 
2170 	return false;
2171 }
2172 
2173 #ifdef CONFIG_SYSCTL
2174 /*
2175  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
2176  */
2177 static int dirty_writeback_centisecs_handler(const struct ctl_table *table, int write,
2178 		void *buffer, size_t *length, loff_t *ppos)
2179 {
2180 	unsigned int old_interval = dirty_writeback_interval;
2181 	int ret;
2182 
2183 	ret = proc_dointvec(table, write, buffer, length, ppos);
2184 
2185 	/*
2186 	 * Writing 0 to dirty_writeback_interval will disable periodic writeback
2187 	 * and a different non-zero value will wake up the writeback threads.
2188 	 * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2189 	 * iterate over all bdis and wbs.
2190 	 * The reason we do this is to make the change take effect immediately.
2191 	 */
2192 	if (!ret && write && dirty_writeback_interval &&
2193 		dirty_writeback_interval != old_interval)
2194 		wakeup_flusher_threads(WB_REASON_PERIODIC);
2195 
2196 	return ret;
2197 }
2198 #endif
2199 
2200 /*
2201  * If ratelimit_pages is too high then we can get into dirty-data overload
2202  * if a large number of processes all perform writes at the same time.
2203  *
2204  * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2205  * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
2206  * thresholds.
2207  */
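/*
 * Illustrative numbers: with a dirty threshold of 524288 pages (2GiB of 4KiB
 * pages) and 16 online CPUs, ratelimit_pages = 524288 / (16 * 32) = 1024, so
 * each CPU may dirty up to 1024 pages between forced balance checks and the
 * worst-case collective overshoot stays around 1/32 of the threshold.
 */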
2208 
2209 void writeback_set_ratelimit(void)
2210 {
2211 	struct wb_domain *dom = &global_wb_domain;
2212 	unsigned long background_thresh;
2213 	unsigned long dirty_thresh;
2214 
2215 	global_dirty_limits(&background_thresh, &dirty_thresh);
2216 	dom->dirty_limit = dirty_thresh;
2217 	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2218 	if (ratelimit_pages < 16)
2219 		ratelimit_pages = 16;
2220 }
2221 
2222 static int page_writeback_cpu_online(unsigned int cpu)
2223 {
2224 	writeback_set_ratelimit();
2225 	return 0;
2226 }
2227 
2228 #ifdef CONFIG_SYSCTL
2229 
2230 static int laptop_mode;
2231 static int laptop_mode_handler(const struct ctl_table *table, int write,
2232 			       void *buffer, size_t *lenp, loff_t *ppos)
2233 {
2234 	int ret = proc_dointvec_jiffies(table, write, buffer, lenp, ppos);
2235 
2236 	if (!ret && write)
2237 		pr_warn("%s: vm.laptop_mode is deprecated. Ignoring setting.\n",
2238 			current->comm);
2239 
2240 	return ret;
2241 }
2242 
2243 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
2244 static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
2245 
2246 static const struct ctl_table vm_page_writeback_sysctls[] = {
2247 	{
2248 		.procname   = "dirty_background_ratio",
2249 		.data       = &dirty_background_ratio,
2250 		.maxlen     = sizeof(dirty_background_ratio),
2251 		.mode       = 0644,
2252 		.proc_handler   = dirty_background_ratio_handler,
2253 		.extra1     = SYSCTL_ZERO,
2254 		.extra2     = SYSCTL_ONE_HUNDRED,
2255 	},
2256 	{
2257 		.procname   = "dirty_background_bytes",
2258 		.data       = &dirty_background_bytes,
2259 		.maxlen     = sizeof(dirty_background_bytes),
2260 		.mode       = 0644,
2261 		.proc_handler   = dirty_background_bytes_handler,
2262 		.extra1     = SYSCTL_LONG_ONE,
2263 	},
2264 	{
2265 		.procname   = "dirty_ratio",
2266 		.data       = &vm_dirty_ratio,
2267 		.maxlen     = sizeof(vm_dirty_ratio),
2268 		.mode       = 0644,
2269 		.proc_handler   = dirty_ratio_handler,
2270 		.extra1     = SYSCTL_ZERO,
2271 		.extra2     = SYSCTL_ONE_HUNDRED,
2272 	},
2273 	{
2274 		.procname   = "dirty_bytes",
2275 		.data       = &vm_dirty_bytes,
2276 		.maxlen     = sizeof(vm_dirty_bytes),
2277 		.mode       = 0644,
2278 		.proc_handler   = dirty_bytes_handler,
2279 		.extra1     = (void *)&dirty_bytes_min,
2280 	},
2281 	{
2282 		.procname   = "dirty_writeback_centisecs",
2283 		.data       = &dirty_writeback_interval,
2284 		.maxlen     = sizeof(dirty_writeback_interval),
2285 		.mode       = 0644,
2286 		.proc_handler   = dirty_writeback_centisecs_handler,
2287 	},
2288 	{
2289 		.procname   = "dirty_expire_centisecs",
2290 		.data       = &dirty_expire_interval,
2291 		.maxlen     = sizeof(dirty_expire_interval),
2292 		.mode       = 0644,
2293 		.proc_handler   = proc_dointvec_minmax,
2294 		.extra1     = SYSCTL_ZERO,
2295 	},
2296 #ifdef CONFIG_HIGHMEM
2297 	{
2298 		.procname	= "highmem_is_dirtyable",
2299 		.data		= &vm_highmem_is_dirtyable,
2300 		.maxlen		= sizeof(vm_highmem_is_dirtyable),
2301 		.mode		= 0644,
2302 		.proc_handler	= proc_dointvec_minmax,
2303 		.extra1		= SYSCTL_ZERO,
2304 		.extra2		= SYSCTL_ONE,
2305 	},
2306 #endif
2307 	{
2308 		.procname	= "laptop_mode",
2309 		.data		= &laptop_mode,
2310 		.maxlen		= sizeof(laptop_mode),
2311 		.mode		= 0644,
2312 		.proc_handler	= laptop_mode_handler,
2313 	},
2314 };
2315 #endif
2316 
2317 /*
2318  * Called early on to tune the page writeback dirty limits.
2319  *
2320  * We used to scale dirty pages according to how total memory
2321  * related to pages that could be allocated for buffers.
2322  *
2323  * However, that was when we used "dirty_ratio" to scale with
2324  * all memory, and we don't do that any more. "dirty_ratio"
2325  * is now applied to total non-HIGHMEM memory, and as such we can't
2326  * get into the old insane situation any more where we had
2327  * large amounts of dirty pages compared to a small amount of
2328  * non-HIGHMEM memory.
2329  *
2330  * But we might still want to scale the dirty_ratio by how
2331  * much memory the box has..
2332  */
2333 void __init page_writeback_init(void)
2334 {
2335 	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2336 
2337 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2338 			  page_writeback_cpu_online, NULL);
2339 	cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2340 			  page_writeback_cpu_online);
2341 #ifdef CONFIG_SYSCTL
2342 	register_sysctl_init("vm", vm_page_writeback_sysctls);
2343 #endif
2344 }
2345 
2346 /**
2347  * tag_pages_for_writeback - tag pages to be written by writeback
2348  * @mapping: address space structure to write
2349  * @start: starting page index
2350  * @end: ending page index (inclusive)
2351  *
2352  * This function scans the page range from @start to @end (inclusive) and tags
2353  * all pages that have DIRTY tag set with a special TOWRITE tag.  The caller
2354  * can then use the TOWRITE tag to identify pages eligible for writeback.
2355  * This mechanism is used to avoid livelocking of writeback by a process
2356  * steadily creating new dirty pages in the file (thus it is important for this
2357  * function to be quick so that it can tag pages faster than a dirtying process
2358  * can create them).
2359  */
2360 void tag_pages_for_writeback(struct address_space *mapping,
2361 			     pgoff_t start, pgoff_t end)
2362 {
2363 	XA_STATE(xas, &mapping->i_pages, start);
2364 	unsigned int tagged = 0;
2365 	void *page;
2366 
2367 	xas_lock_irq(&xas);
2368 	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2369 		xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2370 		if (++tagged % XA_CHECK_SCHED)
2371 			continue;
2372 
2373 		xas_pause(&xas);
2374 		xas_unlock_irq(&xas);
2375 		cond_resched();
2376 		xas_lock_irq(&xas);
2377 	}
2378 	xas_unlock_irq(&xas);
2379 }
2380 EXPORT_SYMBOL(tag_pages_for_writeback);
2381 
2382 static bool folio_prepare_writeback(struct address_space *mapping,
2383 		struct writeback_control *wbc, struct folio *folio)
2384 {
2385 	/*
2386 	 * Folio truncated or invalidated. We can freely skip it then,
2387 	 * even for data integrity operations: the folio has disappeared
2388 	 * concurrently, so there could be no real expectation of this
2389 	 * data integrity operation even if there is now a new, dirty
2390 	 * folio at the same pagecache index.
2391 	 */
2392 	if (unlikely(folio->mapping != mapping))
2393 		return false;
2394 
2395 	/*
2396 	 * Did somebody else write it for us?
2397 	 */
2398 	if (!folio_test_dirty(folio))
2399 		return false;
2400 
2401 	if (folio_test_writeback(folio)) {
2402 		if (wbc->sync_mode == WB_SYNC_NONE)
2403 			return false;
2404 		folio_wait_writeback(folio);
2405 	}
2406 	BUG_ON(folio_test_writeback(folio));
2407 
2408 	if (!folio_clear_dirty_for_io(folio))
2409 		return false;
2410 
2411 	return true;
2412 }
2413 
2414 
2415 static pgoff_t wbc_end(struct writeback_control *wbc)
2416 {
2417 	if (wbc->range_cyclic)
2418 		return -1;
2419 	return wbc->range_end >> PAGE_SHIFT;
2420 }
2421 
2422 static struct folio *writeback_get_folio(struct address_space *mapping,
2423 		struct writeback_control *wbc)
2424 {
2425 	struct folio *folio;
2426 
2427 retry:
2428 	folio = folio_batch_next(&wbc->fbatch);
2429 	if (!folio) {
2430 		folio_batch_release(&wbc->fbatch);
2431 		cond_resched();
2432 		filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
2433 				wbc_to_tag(wbc), &wbc->fbatch);
2434 		folio = folio_batch_next(&wbc->fbatch);
2435 		if (!folio)
2436 			return NULL;
2437 	}
2438 
2439 	folio_lock(folio);
2440 	if (unlikely(!folio_prepare_writeback(mapping, wbc, folio))) {
2441 		folio_unlock(folio);
2442 		goto retry;
2443 	}
2444 
2445 	trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2446 	return folio;
2447 }
2448 
2449 /**
2450  * writeback_iter - iterate folios of a mapping for writeback
2451  * @mapping: address space structure to write
2452  * @wbc: writeback context
2453  * @folio: previously iterated folio (%NULL to start)
2454  * @error: in-out pointer for writeback errors (see below)
2455  *
2456  * This function returns the next folio for the writeback operation described by
2457  * @wbc on @mapping and should be called in a while loop in the ->writepages
2458  * implementation.
2459  *
2460  * To start the writeback operation, %NULL is passed in the @folio argument, and
2461  * for every subsequent iteration the folio returned previously should be passed
2462  * back in.
2463  *
2464  * If there was an error in the per-folio writeback inside the writeback_iter()
2465  * loop, @error should be set to the error value.
2466  *
2467  * Once the writeback described in @wbc has finished, this function will return
2468  * %NULL and, if there was an error in any iteration, restore it to @error.
2469  *
2470  * Note: callers should not manually break out of the loop using break or goto
2471  * but must keep calling writeback_iter() until it returns %NULL.
2472  *
2473  * Return: the folio to write or %NULL if the loop is done.
2474  */
2475 struct folio *writeback_iter(struct address_space *mapping,
2476 		struct writeback_control *wbc, struct folio *folio, int *error)
2477 {
2478 	if (!folio) {
2479 		folio_batch_init(&wbc->fbatch);
2480 		wbc->saved_err = *error = 0;
2481 
2482 		/*
2483 		 * For range cyclic writeback we remember where we stopped so
2484 		 * that we can continue where we stopped.
2485 		 *
2486 		 * For non-cyclic writeback we always start at the beginning of
2487 		 * the passed in range.
2488 		 */
2489 		if (wbc->range_cyclic)
2490 			wbc->index = mapping->writeback_index;
2491 		else
2492 			wbc->index = wbc->range_start >> PAGE_SHIFT;
2493 
2494 		/*
2495 		 * To avoid livelocks when other processes dirty new pages, we
2496 		 * first tag pages which should be written back and only then
2497 		 * start writing them.
2498 		 *
2499 		 * For data-integrity writeback we have to be careful so that we
2500 		 * do not miss some pages (e.g., because some other process has
2501 		 * cleared the TOWRITE tag we set).  The rule we follow is that
2502 		 * TOWRITE tag can be cleared only by the process clearing the
2503 		 * DIRTY tag (and submitting the page for I/O).
2504 		 */
2505 		if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2506 			tag_pages_for_writeback(mapping, wbc->index,
2507 					wbc_end(wbc));
2508 	} else {
2509 		wbc->nr_to_write -= folio_nr_pages(folio);
2510 
2511 		WARN_ON_ONCE(*error > 0);
2512 
2513 		/*
2514 		 * For integrity writeback we have to keep going until we have
2515 		 * written all the folios we tagged for writeback above, even if
2516 		 * we run past wbc->nr_to_write or encounter errors.
2517 		 * We stash away the first error we encounter in wbc->saved_err
2518 		 * so that it can be retrieved when we're done.  This is because
2519 		 * the file system may still have state to clear for each folio.
2520 		 *
2521 		 * For background writeback we exit as soon as we run past
2522 		 * wbc->nr_to_write or encounter the first error.
2523 		 */
2524 		if (wbc->sync_mode == WB_SYNC_ALL) {
2525 			if (*error && !wbc->saved_err)
2526 				wbc->saved_err = *error;
2527 		} else {
2528 			if (*error || wbc->nr_to_write <= 0)
2529 				goto done;
2530 		}
2531 	}
2532 
2533 	folio = writeback_get_folio(mapping, wbc);
2534 	if (!folio) {
2535 		/*
2536 		 * To avoid deadlocks between range_cyclic writeback and callers
2537 		 * that hold folios in writeback to aggregate I/O until
2538 		 * the writeback iteration finishes, we do not loop back to the
2539 		 * start of the file.  Doing so causes a folio lock/folio
2540 		 * writeback access order inversion - we should only ever lock
2541 		 * multiple folios in ascending folio->index order, and looping
2542 		 * back to the start of the file violates that rule and causes
2543 		 * deadlocks.
2544 		 */
2545 		if (wbc->range_cyclic)
2546 			mapping->writeback_index = 0;
2547 
2548 		/*
2549 		 * Return the first error we encountered (if there was any) to
2550 		 * the caller.
2551 		 */
2552 		*error = wbc->saved_err;
2553 	}
2554 	return folio;
2555 
2556 done:
2557 	if (wbc->range_cyclic)
2558 		mapping->writeback_index = folio_next_index(folio);
2559 	folio_batch_release(&wbc->fbatch);
2560 	return NULL;
2561 }
2562 EXPORT_SYMBOL_GPL(writeback_iter);
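/*
 * Illustrative sketch (not part of this file) of a ->writepages
 * implementation driving writeback_iter(); my_write_folio() stands in for
 * the filesystem's per-folio writeback and is expected to unlock the folio:
 *
 *	static int my_writepages(struct address_space *mapping,
 *				 struct writeback_control *wbc)
 *	{
 *		struct folio *folio = NULL;
 *		int error = 0;
 *
 *		while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 *			error = my_write_folio(folio, wbc);
 *		return error;
 *	}
 */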
2563 
2564 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2565 {
2566 	int ret;
2567 	struct bdi_writeback *wb;
2568 
2569 	if (wbc->nr_to_write <= 0)
2570 		return 0;
2571 	wb = inode_to_wb_wbc(mapping->host, wbc);
2572 	wb_bandwidth_estimate_start(wb);
2573 	while (1) {
2574 		if (mapping->a_ops->writepages)
2575 			ret = mapping->a_ops->writepages(mapping, wbc);
2576 		else
2577 			/* deal with chardevs and other special files */
2578 			ret = 0;
2579 		if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
2580 			break;
2581 
2582 		/*
2583 		 * Lacking an allocation context or the locality or writeback
2584 		 * state of any of the inode's pages, throttle based on
2585 		 * writeback activity on the local node. It's as good a
2586 		 * guess as any.
2587 		 */
2588 		reclaim_throttle(NODE_DATA(numa_node_id()),
2589 			VMSCAN_THROTTLE_WRITEBACK);
2590 	}
2591 	/*
2592 	 * Usually few pages are written by now from those we've just submitted
2593 	 * but if there's constant writeback being submitted, this makes sure
2594 	 * writeback bandwidth is updated once in a while.
2595 	 */
2596 	if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
2597 				   BANDWIDTH_INTERVAL))
2598 		wb_update_bandwidth(wb);
2599 	return ret;
2600 }
2601 
2602 /*
2603  * For address_spaces which do not use buffers nor write back.
2604  */
2605 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
2606 {
2607 	if (!folio_test_dirty(folio))
2608 		return !folio_test_set_dirty(folio);
2609 	return false;
2610 }
2611 EXPORT_SYMBOL(noop_dirty_folio);
2612 
2613 /*
2614  * Helper function for set_page_dirty family.
2615  *
2616  * NOTE: This relies on being atomic wrt interrupts.
2617  */
2618 static void folio_account_dirtied(struct folio *folio,
2619 		struct address_space *mapping)
2620 {
2621 	struct inode *inode = mapping->host;
2622 
2623 	trace_writeback_dirty_folio(folio, mapping);
2624 
2625 	if (mapping_can_writeback(mapping)) {
2626 		struct bdi_writeback *wb;
2627 		long nr = folio_nr_pages(folio);
2628 
2629 		inode_attach_wb(inode, folio);
2630 		wb = inode_to_wb(inode);
2631 
2632 		lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2633 		__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2634 		__node_stat_mod_folio(folio, NR_DIRTIED, nr);
2635 		wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2636 		wb_stat_mod(wb, WB_DIRTIED, nr);
2637 		task_io_account_write(nr * PAGE_SIZE);
2638 		current->nr_dirtied += nr;
2639 		__this_cpu_add(bdp_ratelimits, nr);
2640 
2641 		mem_cgroup_track_foreign_dirty(folio, wb);
2642 	}
2643 }
2644 
2645 /*
2646  * Helper function for de-accounting a dirty page without writeback.
2647  *
2648  */
2649 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2650 {
2651 	long nr = folio_nr_pages(folio);
2652 
2653 	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2654 	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2655 	wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2656 	task_io_account_cancelled_write(nr * PAGE_SIZE);
2657 }
2658 
2659 /*
2660  * Mark the folio dirty, and set it dirty in the page cache.
2661  *
2662  * If warn is true, then emit a warning if the folio is not uptodate and has
2663  * not been truncated.
2664  *
2665  * It is the caller's responsibility to prevent the folio from being truncated
2666  * while this function is in progress, although it may have been truncated
2667  * before this function is called.  Most callers have the folio locked.
2668  * A few have the folio blocked from truncation through other means (e.g.
2669  * zap_vma_pages() has it mapped and is holding the page table lock).
2670  * When called from mark_buffer_dirty(), the filesystem should hold a
2671  * reference to the buffer_head that is being marked dirty, which causes
2672  * try_to_free_buffers() to fail.
2673  */
2674 void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
2675 			     int warn)
2676 {
2677 	unsigned long flags;
2678 
2679 	/*
2680 	 * Shmem writeback relies on swap, and swap writeback is LRU based,
2681 	 * not using the dirty mark.
2682 	 */
2683 	VM_WARN_ON_ONCE(folio_test_swapcache(folio) || shmem_mapping(mapping));
2684 
2685 	xa_lock_irqsave(&mapping->i_pages, flags);
2686 	if (folio->mapping) {	/* Race with truncate? */
2687 		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2688 		folio_account_dirtied(folio, mapping);
2689 		__xa_set_mark(&mapping->i_pages, folio->index,
2690 			      PAGECACHE_TAG_DIRTY);
2691 	}
2692 	xa_unlock_irqrestore(&mapping->i_pages, flags);
2693 }
2694 
2695 /**
2696  * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
2697  * @mapping: Address space this folio belongs to.
2698  * @folio: Folio to be marked as dirty.
2699  *
2700  * Filesystems which do not use buffer heads should call this function
2701  * from their dirty_folio address space operation.  It ignores the
2702  * contents of folio_get_private(), so if the filesystem marks individual
2703  * blocks as dirty, the filesystem should handle that itself.
2704  *
2705  * This is also sometimes used by filesystems which use buffer_heads when
2706  * a single buffer is being dirtied: we want to set the folio dirty in
2707  * that case, but not all the buffers.  This is a "bottom-up" dirtying,
2708  * whereas block_dirty_folio() is a "top-down" dirtying.
2709  *
2710  * The caller must ensure this doesn't race with truncation.  Most will
2711  * simply hold the folio lock, but e.g. zap_pte_range() calls with the
2712  * folio mapped and the pte lock held, which also locks out truncation.
2713  */
2714 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
2715 {
2716 	if (folio_test_set_dirty(folio))
2717 		return false;
2718 
2719 	__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
2720 
2721 	if (mapping->host) {
2722 		/* !PageAnon && !swapper_space */
2723 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2724 	}
2725 	return true;
2726 }
2727 EXPORT_SYMBOL(filemap_dirty_folio);
2728 
2729 /**
2730  * folio_redirty_for_writepage - Decline to write a dirty folio.
2731  * @wbc: The writeback control.
2732  * @folio: The folio.
2733  *
2734  * When a writepage implementation decides that it doesn't want to write
2735  * @folio for some reason, it should call this function, unlock @folio and
2736  * return 0.
2737  *
2738  * Return: True if we redirtied the folio.  False if someone else dirtied
2739  * it first.
2740  */
2741 bool folio_redirty_for_writepage(struct writeback_control *wbc,
2742 		struct folio *folio)
2743 {
2744 	struct address_space *mapping = folio->mapping;
2745 	long nr = folio_nr_pages(folio);
2746 	bool ret;
2747 
2748 	wbc->pages_skipped += nr;
2749 	ret = filemap_dirty_folio(mapping, folio);
2750 	if (mapping && mapping_can_writeback(mapping)) {
2751 		struct inode *inode = mapping->host;
2752 		struct bdi_writeback *wb;
2753 		struct wb_lock_cookie cookie = {};
2754 
2755 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2756 		current->nr_dirtied -= nr;
2757 		node_stat_mod_folio(folio, NR_DIRTIED, -nr);
2758 		wb_stat_mod(wb, WB_DIRTIED, -nr);
2759 		unlocked_inode_to_wb_end(inode, &cookie);
2760 	}
2761 	return ret;
2762 }
2763 EXPORT_SYMBOL(folio_redirty_for_writepage);
2764 
2765 /**
2766  * folio_mark_dirty - Mark a folio as being modified.
2767  * @folio: The folio.
2768  *
2769  * The folio may not be truncated while this function is running.
2770  * Holding the folio lock is sufficient to prevent truncation, but some
2771  * callers cannot acquire a sleeping lock.  These callers instead hold
2772  * the page table lock for a page table which contains at least one page
2773  * in this folio.  Truncation will block on the page table lock as it
2774  * unmaps pages before removing the folio from its mapping.
2775  *
2776  * Return: True if the folio was newly dirtied, false if it was already dirty.
2777  */
2778 bool folio_mark_dirty(struct folio *folio)
2779 {
2780 	struct address_space *mapping = folio_mapping(folio);
2781 
2782 	if (likely(mapping)) {
2783 		/*
2784 		 * A folio can retain PG_readahead/PG_reclaim set by readahead or
2785 		 * folio_deactivate() due to a race with folio_end_writeback().
2786 		 * For readahead, if the folio is written, the flags will be
2787 		 * reset. So no problem.
2788 		 * For folio_deactivate(), if the folio is redirtied,
2789 		 * the flag will be reset. So no problem. But if the
2790 		 * folio is used by readahead, it will confuse readahead
2791 		 * and make it restart the size ramp-up process. That is
2792 		 * only a trivial problem, though.
2793 		 */
2794 		if (folio_test_reclaim(folio))
2795 			folio_clear_reclaim(folio);
2796 		return mapping->a_ops->dirty_folio(mapping, folio);
2797 	}
2798 
2799 	return noop_dirty_folio(mapping, folio);
2800 }
2801 EXPORT_SYMBOL(folio_mark_dirty);
2802 
2803 /*
2804  * folio_mark_dirty() is racy if the caller has no reference against
2805  * folio->mapping->host, and if the folio is unlocked.  This is because another
2806  * CPU could truncate the folio off the mapping and then free the mapping.
2807  *
2808  * Usually, the folio _is_ locked, or the caller is a user-space process which
2809  * holds a reference on the inode by having an open file.
2810  *
2811  * In other cases, the folio should be locked before running folio_mark_dirty().
2812  */
2813 bool folio_mark_dirty_lock(struct folio *folio)
2814 {
2815 	bool ret;
2816 
2817 	folio_lock(folio);
2818 	ret = folio_mark_dirty(folio);
2819 	folio_unlock(folio);
2820 	return ret;
2821 }
2822 EXPORT_SYMBOL(folio_mark_dirty_lock);
2823 
2824 /*
2825  * This cancels just the dirty bit on the kernel page itself, it does NOT
2826  * actually remove dirty bits on any mmap's that may be around. It also
2827  * leaves the page tagged dirty, so any sync activity will still find it on
2828  * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2829  * look at the dirty bits in the VM.
2830  *
2831  * This should *normally* only ever be done when a page is truncated,
2832  * and is not actually mapped anywhere at all. However, fs/buffer.c does
2833  * this when it notices that somebody has cleaned out all the buffers on a
2834  * page without actually doing it through the VM. Can you say "ext3 is
2835  * horribly ugly"? Thought you could.
2836  */
2837 void __folio_cancel_dirty(struct folio *folio)
2838 {
2839 	struct address_space *mapping = folio_mapping(folio);
2840 
2841 	if (mapping_can_writeback(mapping)) {
2842 		struct inode *inode = mapping->host;
2843 		struct bdi_writeback *wb;
2844 		struct wb_lock_cookie cookie = {};
2845 
2846 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2847 
2848 		if (folio_test_clear_dirty(folio))
2849 			folio_account_cleaned(folio, wb);
2850 
2851 		unlocked_inode_to_wb_end(inode, &cookie);
2852 	} else {
2853 		folio_clear_dirty(folio);
2854 	}
2855 }
2856 EXPORT_SYMBOL(__folio_cancel_dirty);
2857 
2858 /*
2859  * Clear a folio's dirty flag, while caring for dirty memory accounting.
2860  * Returns true if the folio was previously dirty.
2861  *
2862  * This is for preparing to put the folio under writeout.  We leave
2863  * the folio tagged as dirty in the xarray so that a concurrent
2864  * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
2865  * The ->writepage implementation will run either folio_start_writeback()
2866  * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
2867  * and xarray dirty tag back into sync.
2868  *
2869  * This incoherency between the folio's dirty flag and xarray tag is
2870  * unfortunate, but it only exists while the folio is locked.
2871  */
2872 bool folio_clear_dirty_for_io(struct folio *folio)
2873 {
2874 	struct address_space *mapping = folio_mapping(folio);
2875 	bool ret = false;
2876 
2877 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2878 
2879 	if (mapping && mapping_can_writeback(mapping)) {
2880 		struct inode *inode = mapping->host;
2881 		struct bdi_writeback *wb;
2882 		struct wb_lock_cookie cookie = {};
2883 
2884 		/*
2885 		 * Yes, Virginia, this is indeed insane.
2886 		 *
2887 		 * We use this sequence to make sure that
2888 		 *  (a) we account for dirty stats properly
2889 		 *  (b) we tell the low-level filesystem to
2890 		 *      mark the whole folio dirty if it was
2891 		 *      dirty in a pagetable. Only to then
2892 		 *  (c) clean the folio again and return 1 to
2893 		 *      cause the writeback.
2894 		 *
2895 		 * This way we avoid all nasty races with the
2896 		 * dirty bit in multiple places and clearing
2897 		 * them concurrently from different threads.
2898 		 *
2899 		 * Note! Normally the "folio_mark_dirty(folio)"
2900 		 * has no effect on the actual dirty bit - since
2901 		 * that will already usually be set. But we
2902 		 * need the side effects, and it can help us
2903 		 * avoid races.
2904 		 *
2905 		 * We basically use the folio "master dirty bit"
2906 		 * as a serialization point for all the different
2907 		 * threads doing their things.
2908 		 */
2909 		if (folio_mkclean(folio))
2910 			folio_mark_dirty(folio);
2911 		/*
2912 		 * We carefully synchronise fault handlers against
2913 		 * installing a dirty pte and marking the folio dirty
2914 		 * at this point.  We do this by having them hold the
2915 		 * page lock while dirtying the folio, and folios are
2916 		 * always locked coming in here, so we get the desired
2917 		 * exclusion.
2918 		 */
2919 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2920 		if (folio_test_clear_dirty(folio)) {
2921 			long nr = folio_nr_pages(folio);
2922 			lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2923 			zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2924 			wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2925 			ret = true;
2926 		}
2927 		unlocked_inode_to_wb_end(inode, &cookie);
2928 		return ret;
2929 	}
2930 	return folio_test_clear_dirty(folio);
2931 }
2932 EXPORT_SYMBOL(folio_clear_dirty_for_io);
2933 
2934 static void wb_inode_writeback_start(struct bdi_writeback *wb)
2935 {
2936 	atomic_inc(&wb->writeback_inodes);
2937 }
2938 
2939 static void wb_inode_writeback_end(struct bdi_writeback *wb)
2940 {
2941 	unsigned long flags;
2942 	atomic_dec(&wb->writeback_inodes);
2943 	/*
2944 	 * Make sure estimate of writeback throughput gets updated after
2945 	 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
2946 	 * (which is the interval other bandwidth updates use for batching) so
2947 	 * that if multiple inodes end writeback at a similar time, they get
2948 	 * batched into one bandwidth update.
2949 	 */
2950 	spin_lock_irqsave(&wb->work_lock, flags);
2951 	if (test_bit(WB_registered, &wb->state))
2952 		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
2953 	spin_unlock_irqrestore(&wb->work_lock, flags);
2954 }
2955 
2956 bool __folio_end_writeback(struct folio *folio)
2957 {
2958 	long nr = folio_nr_pages(folio);
2959 	struct address_space *mapping = folio_mapping(folio);
2960 	bool ret;
2961 
2962 	if (mapping && mapping_use_writeback_tags(mapping)) {
2963 		struct inode *inode = mapping->host;
2964 		struct bdi_writeback *wb;
2965 		unsigned long flags;
2966 
2967 		xa_lock_irqsave(&mapping->i_pages, flags);
2968 		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
2969 		__xa_clear_mark(&mapping->i_pages, folio->index,
2970 					PAGECACHE_TAG_WRITEBACK);
2971 
2972 		wb = inode_to_wb(inode);
2973 		wb_stat_mod(wb, WB_WRITEBACK, -nr);
2974 		__wb_writeout_add(wb, nr);
2975 		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
2976 			wb_inode_writeback_end(wb);
2977 			if (mapping->host)
2978 				sb_clear_inode_writeback(mapping->host);
2979 		}
2980 
2981 		xa_unlock_irqrestore(&mapping->i_pages, flags);
2982 	} else {
2983 		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
2984 	}
2985 
2986 	lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
2987 	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2988 	node_stat_mod_folio(folio, NR_WRITTEN, nr);
2989 
2990 	return ret;
2991 }
2992 
2993 void __folio_start_writeback(struct folio *folio, bool keep_write)
2994 {
2995 	long nr = folio_nr_pages(folio);
2996 	struct address_space *mapping = folio_mapping(folio);
2997 	int access_ret;
2998 
2999 	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
3000 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3001 
3002 	if (mapping && mapping_use_writeback_tags(mapping)) {
3003 		XA_STATE(xas, &mapping->i_pages, folio->index);
3004 		struct inode *inode = mapping->host;
3005 		struct bdi_writeback *wb;
3006 		unsigned long flags;
3007 		bool on_wblist;
3008 
3009 		xas_lock_irqsave(&xas, flags);
3010 		xas_load(&xas);
3011 		folio_test_set_writeback(folio);
3012 
3013 		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
3014 
3015 		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
3016 		wb = inode_to_wb(inode);
3017 		wb_stat_mod(wb, WB_WRITEBACK, nr);
3018 		if (!on_wblist) {
3019 			wb_inode_writeback_start(wb);
3020 			/*
3021 			 * We can come through here when swapping anonymous
3022 			 * folios, so we don't necessarily have an inode to
3023 			 * track for sync.
3024 			 */
3025 			if (mapping->host)
3026 				sb_mark_inode_writeback(mapping->host);
3027 		}
3028 
3029 		if (!folio_test_dirty(folio))
3030 			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
3031 		if (!keep_write)
3032 			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
3033 		xas_unlock_irqrestore(&xas, flags);
3034 	} else {
3035 		folio_test_set_writeback(folio);
3036 	}
3037 
3038 	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3039 	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
3040 
3041 	access_ret = arch_make_folio_accessible(folio);
3042 	/*
3043 	 * If writeback has been triggered on a page that cannot be made
3044 	 * accessible, it is too late to recover here.
3045 	 */
3046 	VM_BUG_ON_FOLIO(access_ret != 0, folio);
3047 }
3048 EXPORT_SYMBOL(__folio_start_writeback);
3049 
3050 /**
3051  * folio_wait_writeback - Wait for a folio to finish writeback.
3052  * @folio: The folio to wait for.
3053  *
3054  * If the folio is currently being written back to storage, wait for the
3055  * I/O to complete.
3056  *
3057  * Context: Sleeps.  Must be called in process context and with
3058  * no spinlocks held.  Caller should hold a reference on the folio.
3059  * If the folio is not locked, writeback may start again after writeback
3060  * has finished.
3061  */
3062 void folio_wait_writeback(struct folio *folio)
3063 {
3064 	while (folio_test_writeback(folio)) {
3065 		trace_folio_wait_writeback(folio, folio_mapping(folio));
3066 		folio_wait_bit(folio, PG_writeback);
3067 	}
3068 }
3069 EXPORT_SYMBOL_GPL(folio_wait_writeback);
3070 
3071 /**
3072  * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3073  * @folio: The folio to wait for.
3074  *
3075  * If the folio is currently being written back to storage, wait for the
3076  * I/O to complete or a fatal signal to arrive.
3077  *
3078  * Context: Sleeps.  Must be called in process context and with
3079  * no spinlocks held.  Caller should hold a reference on the folio.
3080  * If the folio is not locked, writeback may start again after writeback
3081  * has finished.
3082  * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
3083  */
3084 int folio_wait_writeback_killable(struct folio *folio)
3085 {
3086 	while (folio_test_writeback(folio)) {
3087 		trace_folio_wait_writeback(folio, folio_mapping(folio));
3088 		if (folio_wait_bit_killable(folio, PG_writeback))
3089 			return -EINTR;
3090 	}
3091 
3092 	return 0;
3093 }
3094 EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
3095 
3096 /**
3097  * folio_wait_stable() - wait for writeback to finish, if necessary.
3098  * @folio: The folio to wait on.
3099  *
3100  * This function determines if the given folio is related to a backing
3101  * device that requires folio contents to be held stable during writeback.
3102  * If so, then it will wait for any pending writeback to complete.
3103  *
3104  * Context: Sleeps.  Must be called in process context and with
3105  * no spinlocks held.  Caller should hold a reference on the folio.
3106  * If the folio is not locked, writeback may start again after writeback
3107  * has finished.
3108  */
3109 void folio_wait_stable(struct folio *folio)
3110 {
3111 	if (mapping_stable_writes(folio_mapping(folio)))
3112 		folio_wait_writeback(folio);
3113 }
3114 EXPORT_SYMBOL_GPL(folio_wait_stable);
3115