// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_owner.h>
#include <linux/sched/isolation.h>

#include "internal.h"

#ifdef CONFIG_PROC_FS
#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
static int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;

/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
{
	int item, cpu;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
		atomic_long_set(&zone->vm_numa_event[item], 0);
		for_each_online_cpu(cpu) {
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
						= 0;
		}
	}
}

/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zero_zone_numa_counters(zone);
}

/* zero global numa counters */
static void zero_global_numa_counters(void)
{
	int item;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		atomic_long_set(&vm_numa_event[item], 0);
}

static void invalid_numa_statistics(void)
{
	zero_zones_numa_counters();
	zero_global_numa_counters();
}

static DEFINE_MUTEX(vm_numa_stat_lock);

static int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret, oldval;

	mutex_lock(&vm_numa_stat_lock);
	if (write)
		oldval = sysctl_vm_numa_stat;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret || !write)
		goto out;

	if (oldval == sysctl_vm_numa_stat)
		goto out;
	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
		static_branch_enable(&vm_numa_stat_key);
		pr_info("enable numa statistics\n");
	} else {
		static_branch_disable(&vm_numa_stat_key);
		invalid_numa_statistics();
		pr_info("disable numa statistics, and clear numa counters\n");
	}

out:
	mutex_unlock(&vm_numa_stat_lock);
	return ret;
}
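
/*
 * Illustrative usage (not part of this file): the handler above backs the
 * vm.numa_stat sysctl, so NUMA event accounting can be toggled from
 * userspace, e.g. "sysctl vm.numa_stat=0" or writing to
 * /proc/sys/vm/numa_stat. Disabling flips the vm_numa_stat_key static
 * branch off and zeroes all per-cpu and global NUMA counters, so a later
 * re-enable starts counting from zero rather than from stale values.
 */
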
#endif
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	cpus_read_lock();
	sum_vm_events(ret);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_NUMA
static void fold_vm_zone_numa_events(struct zone *zone)
{
	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
	int cpu;
	enum numa_stat_item item;

	for_each_online_cpu(cpu) {
		struct per_cpu_zonestat *pzstats;

		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
	}

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		zone_numa_event_add(zone_numa_events[item], zone, item);
}

void fold_vm_numa_events(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		fold_vm_zone_numa_events(zone);
}
#endif

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
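
/*
 * Worked example (illustrative numbers, not taken from any real zone): with
 * low and min watermarks of 1280 and 1024 pages, the watermark distance is
 * 256 pages. On a 16-CPU machine the pressure threshold becomes
 * max(1, 256 / 16) = 16, so even if every CPU holds a full 16-page
 * differential, the combined drift (16 * 16 = 256) cannot push the real
 * free count below min while the estimate still reads "low is ok".
 */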

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem)+1
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
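
/*
 * Worked example (illustrative, assuming a hypothetical configuration):
 * a 1 GB zone holds 8 units of 128 MB, so fls(mem) = fls(8) = 4. On a
 * 4-CPU machine fls(num_online_cpus()) = fls(4) = 3, giving
 *
 *	threshold = 2 * 3 * (1 + 4) = 30
 *
 * which is below the 125 cap and matches the "30 / 4 processors" row in
 * the table above. Only when the product exceeds 125 (very large zones on
 * many CPUs) does the clamp take effect.
 */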

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
							= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
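
/*
 * Worked example (illustrative numbers): with 64 online CPUs and a per-cpu
 * threshold of 125, max_drift is 64 * 125 = 8000 pages. If the gap between
 * the low and min watermarks is only 2000 pages, the estimated NR_FREE_PAGES
 * could overstate reality by more than that gap, so percpu_drift_mark is set
 * to high_wmark + 8000; watermark checks below that mark are expected to
 * fall back to the slower but precise per-cpu sum (zone_page_state_snapshot()).
 */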

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	/*
	 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
	 * atomicity is provided by IRQs being disabled -- either explicitly
	 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
	 * CPU migrations and preemption potentially corrupts a counter so
	 * disable preemption.
	 */
	preempt_disable_nested();

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	preempt_enable_nested();
}
EXPORT_SYMBOL(__mod_zone_page_state);
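
/*
 * Illustrative trace (hypothetical values): with a per-cpu threshold t = 32
 * and a stored differential of 30, __mod_zone_page_state(zone, item, 5)
 * computes x = 35. Since |35| > 32, the full 35 is folded into the zone and
 * global counters via zone_page_state_add() and the per-cpu differential is
 * reset to 0; a smaller delta would simply have been buffered locally with
 * no traffic on the shared cachelines.
 */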

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			   long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	/* See __mod_zone_page_state */
	preempt_disable_nested();

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	preempt_enable_nested();
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}

	preempt_enable_nested();
}
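
/*
 * Worked example of the overstep heuristic (hypothetical values): with
 * threshold t = 32, overstep = t >> 1 = 16. When the per-cpu counter v
 * reaches 33, the sum 33 + 16 = 49 is added to the zone and global counters
 * and the local differential is rewound to -16. A steadily incrementing
 * counter therefore needs 49 further increments before the next fold,
 * instead of 33, and a counter oscillating around the threshold no longer
 * folds on every single step.
 */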

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}

	preempt_enable_nested();
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}

	preempt_enable_nested();
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}

	preempt_enable_nested();
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *        0       No overstepping
 *        1       Overstepping half of threshold
 *        -1      Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone,
	enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long n, t, z;
	s8 o;

	o = this_cpu_read(*p);
	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		n = delta + (long)o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (!this_cpu_try_cmpxchg(*p, &o, n));

	if (z)
		zone_page_state_add(z, zone, item);
}
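
/*
 * Worked example (hypothetical values): inc_zone_page_state() below calls
 * mod_zone_state(zone, item, 1, 1). With threshold t = 32 and a stored
 * differential o = 32, n becomes 33; since |33| > 32 and overstep_mode is 1,
 * os = 16, so z = 49 is published to the zone/global counters and the
 * cmpxchg installs n = -16 locally. If another update raced in between,
 * this_cpu_try_cmpxchg() fails, o is reloaded and the computation retries --
 * no IRQ disabling is needed on this path.
 */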

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
	enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long n, t, z;
	s8 o;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	o = this_cpu_read(*p);
	do {
		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		n = delta + (long)o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (!this_cpu_try_cmpxchg(*p, &o, n));

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	mod_node_state(pgdat, item, 1, 1);
}

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
		struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(pcp->expire, 3);
#endif
			}
		}

		if (do_pagesets) {
			cond_resched();

			changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
#ifdef CONFIG_NUMA
			/*
			 * Deal with draining the remote pageset of this
			 * processor
			 *
			 * Check if there are pages remaining in this pageset
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(pcp->expire) ||
			       !__this_cpu_read(pcp->count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(pcp->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(pcp->expire)) {
				changes++;
				continue;
			}

			if (__this_cpu_read(pcp->count)) {
				drain_zone_pages(zone, this_cpu_ptr(pcp));
				changes++;
			}
#endif
		}
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

	changes += fold_diff(global_zone_diff, global_node_diff);
	return changes;
}
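
/*
 * Illustrative timeline for the remote-pageset expiry above (assuming the
 * vmstat worker runs roughly once per sysctl_stat_interval, i.e. about one
 * second): whenever a zone counter update is folded, pcp->expire is re-armed
 * to 3. On each subsequent quiet pass the remote pageset's expire field
 * counts down 3 -> 2 -> 1 -> 0; once it hits zero and pages are still
 * cached, drain_zone_pages() returns them to the remote zone's buddy lists.
 * Pages cached for a remote NUMA node therefore survive about three quiet
 * intervals before being flushed.
 */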

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats;

		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			if (pzstats->vm_stat_diff[i]) {
				int v;

				v = pzstats->vm_stat_diff[i];
				pzstats->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}
		}
#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
			if (pzstats->vm_numa_event[i]) {
				unsigned long v;

				v = pzstats->vm_numa_event[i];
				pzstats->vm_numa_event[i] = 0;
				zone_numa_event_add(v, zone, i);
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

	fold_diff(global_zone_diff, global_node_diff);
}

/*
 * this is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
{
	unsigned long v;
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		if (pzstats->vm_stat_diff[i]) {
			v = pzstats->vm_stat_diff[i];
			pzstats->vm_stat_diff[i] = 0;
			zone_page_state_add(v, zone, i);
		}
	}

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
		if (pzstats->vm_numa_event[i]) {
			v = pzstats->vm_numa_event[i];
			pzstats->vm_numa_event[i] = 0;
			zone_numa_event_add(v, zone, i);
		}
	}
#endif
}
#endif

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/* Determine the per node value of a numa stat item. */
unsigned long sum_zone_numa_event_state(int node,
				 enum numa_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	unsigned long count = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_numa_event_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state_pages(struct pglist_data *pgdat,
				    enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return node_page_state_pages(pgdat, item);
}
#endif

/*
 * Count number of pages "struct page" and "struct page_ext" consume.
 * nr_memmap_boot_pages: # of pages allocated by boot allocator
 * nr_memmap_pages: # of pages that were allocated by buddy allocator
 */
static atomic_long_t nr_memmap_boot_pages = ATOMIC_LONG_INIT(0);
static atomic_long_t nr_memmap_pages = ATOMIC_LONG_INIT(0);

void memmap_boot_pages_add(long delta)
{
	atomic_long_add(delta, &nr_memmap_boot_pages);
}

void memmap_pages_add(long delta)
{
	atomic_long_add(delta, &nr_memmap_pages);
}

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < NR_PAGE_ORDERS; order++) {
		unsigned long blocks;

		/*
		 * Count number of free blocks.
		 *
		 * Access to nr_free is lockless as nr_free is used only for
		 * diagnostic purposes. Use data_race to avoid KCSAN warning.
		 */
		blocks = data_race(zone->free_area[order].nr_free);
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}
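
/*
 * Worked example (hypothetical free lists): suppose a zone has 4 free
 * order-0 blocks, 2 free order-1 blocks and 1 free order-3 block, and the
 * caller asks about suitable_order = 1. Then free_blocks_total = 7,
 * free_pages = 4 + 2*2 + 1*8 = 16, and free_blocks_suitable counts each
 * larger block as the number of order-1 chunks it could be split into:
 * 2 + (1 << (3 - 1)) = 2 + 4 = 6.
 */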

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
		return 0;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + (div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
}
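
/*
 * Worked example (hypothetical numbers): for order = 3 (requested = 8
 * pages), imagine 64 free pages that exist only as 64 separate order-0
 * blocks, so free_blocks_suitable = 0 and the request would fail. Then
 *
 *	index = 1000 - (1000 + 64 * 1000 / 8) / 64
 *	      = 1000 - 9000 / 64 = 1000 - 140 = 860
 *
 * i.e. 0.860: plenty of memory but badly fragmented, so compaction is the
 * better remedy. With only a single free order-0 block the same formula
 * gives 1000 - 1125 = -125, at or below 0, signalling a plain lack of
 * memory and steering the caller towards reclaim instead.
 */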

/*
 * Calculates external fragmentation within a zone wrt the given order.
 * It is defined as the percentage of pages found in blocks of size
 * less than 1 << order. It returns values in range [0, 100].
 */
unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	if (info.free_pages == 0)
		return 0;

	return div_u64((info.free_pages -
			(info.free_blocks_suitable << order)) * 100,
			info.free_pages);
}
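
/*
 * Worked example (reusing the hypothetical zone from the
 * fill_contig_page_info() comment above): 16 free pages, of which 6 order-1
 * chunks (12 pages) sit in blocks of order >= 1. For order = 1,
 * extfrag = (16 - 6 * 2) * 100 / 16 = 25, meaning a quarter of the free
 * memory is unusable for order-1 requests without compaction.
 */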

/* Same as __fragmentation_index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#ifdef CONFIG_ZONE_DEVICE
#define TEXT_FOR_DEVICE(xx) xx "_device",
#else
#define TEXT_FOR_DEVICE(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
			    TEXT_FOR_HIGHMEM(xx) xx "_movable", \
			    TEXT_FOR_DEVICE(xx)

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_free_pages",
	"nr_free_pages_blocks",
	"nr_zone_inactive_anon",
	"nr_zone_active_anon",
	"nr_zone_inactive_file",
	"nr_zone_active_file",
	"nr_zone_unevictable",
	"nr_zone_write_pending",
	"nr_mlock",
	"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
	"nr_zspages",
#endif
	"nr_free_cma",
#ifdef CONFIG_UNACCEPTED_MEMORY
	"nr_unaccepted",
#endif

	/* enum numa_stat_item counters */
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

	/* enum node_stat_item counters */
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_isolated_anon",
	"nr_isolated_file",
	"workingset_nodes",
	"workingset_refault_anon",
	"workingset_refault_file",
	"workingset_activate_anon",
	"workingset_activate_file",
	"workingset_restore_anon",
	"workingset_restore_file",
	"workingset_nodereclaim",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_writeback_temp",
	"nr_shmem",
	"nr_shmem_hugepages",
	"nr_shmem_pmdmapped",
	"nr_file_hugepages",
	"nr_file_pmdmapped",
	"nr_anon_transparent_hugepages",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_dirtied",
	"nr_written",
	"nr_throttled_written",
	"nr_kernel_misc_reclaimable",
	"nr_foll_pin_acquired",
	"nr_foll_pin_released",
	"nr_kernel_stack",
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	"nr_shadow_call_stack",
#endif
	"nr_page_table_pages",
	"nr_sec_page_table_pages",
#ifdef CONFIG_IOMMU_SUPPORT
	"nr_iommu_pages",
#endif
#ifdef CONFIG_SWAP
	"nr_swapcached",
#endif
#ifdef CONFIG_NUMA_BALANCING
	"pgpromote_success",
	"pgpromote_candidate",
#endif
	"pgdemote_kswapd",
	"pgdemote_direct",
	"pgdemote_khugepaged",
	"pgdemote_proactive",
#ifdef CONFIG_HUGETLB_PAGE
	"nr_hugetlb",
#endif
	"nr_balloon_pages",
	/* system-wide enum vm_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",
	"nr_memmap_pages",
	"nr_memmap_boot_pages",

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
	/* enum vm_event_item counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")
	TEXTS_FOR_ZONES("allocstall")
	TEXTS_FOR_ZONES("pgskip")

	"pgfree",
	"pgactivate",
	"pgdeactivate",
	"pglazyfree",

	"pgfault",
	"pgmajfault",
	"pglazyfreed",

	"pgrefill",
	"pgreuse",
	"pgsteal_kswapd",
	"pgsteal_direct",
	"pgsteal_khugepaged",
	"pgsteal_proactive",
	"pgscan_kswapd",
	"pgscan_direct",
	"pgscan_khugepaged",
	"pgscan_proactive",
	"pgscan_direct_throttle",
	"pgscan_anon",
	"pgscan_file",
	"pgsteal_anon",
	"pgsteal_file",

#ifdef CONFIG_NUMA
	"zone_reclaim_success",
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"pageoutrun",

	"pgrotated",

	"drop_pagecache",
	"drop_slab",
	"oom_kill",

#ifdef CONFIG_NUMA_BALANCING
	"numa_pte_updates",
	"numa_huge_pte_updates",
	"numa_hint_faults",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
	"thp_migration_success",
	"thp_migration_fail",
	"thp_migration_split",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_isolated",
	"compact_stall",
	"compact_fail",
	"compact_success",
	"compact_daemon_wake",
	"compact_daemon_migrate_scanned",
	"compact_daemon_free_scanned",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
#ifdef CONFIG_CMA
	"cma_alloc_success",
	"cma_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_fault_fallback_charge",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_file_alloc",
	"thp_file_fallback",
	"thp_file_fallback_charge",
	"thp_file_mapped",
	"thp_split_page",
	"thp_split_page_failed",
	"thp_deferred_split_page",
	"thp_underused_split_page",
	"thp_split_pmd",
	"thp_scan_exceed_none_pte",
	"thp_scan_exceed_swap_pte",
	"thp_scan_exceed_share_pte",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	"thp_split_pud",
#endif
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
	"thp_swpout",
	"thp_swpout_fallback",
#endif
#ifdef CONFIG_MEMORY_BALLOON
	"balloon_inflate",
	"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
	"balloon_migrate",
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_SWAP
	"swap_ra",
	"swap_ra_hit",
	"swpin_zero",
	"swpout_zero",
#ifdef CONFIG_KSM
	"ksm_swpin_copy",
#endif
#endif
#ifdef CONFIG_KSM
	"cow_ksm",
#endif
#ifdef CONFIG_ZSWAP
	"zswpin",
	"zswpout",
	"zswpwb",
#endif
#ifdef CONFIG_X86
	"direct_map_level2_splits",
	"direct_map_level3_splits",
	"direct_map_level2_collapses",
	"direct_map_level3_collapses",
#endif
#ifdef CONFIG_PER_VMA_LOCK_STATS
	"vma_lock_success",
	"vma_lock_abort",
	"vma_lock_retry",
	"vma_lock_miss",
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
	"kstack_1k",
#if THREAD_SIZE > 1024
	"kstack_2k",
#endif
#if THREAD_SIZE > 2048
	"kstack_4k",
#endif
#if THREAD_SIZE > 4096
	"kstack_8k",
#endif
#if THREAD_SIZE > 8192
	"kstack_16k",
#endif
#if THREAD_SIZE > 16384
	"kstack_32k",
#endif
#if THREAD_SIZE > 32768
	"kstack_64k",
#endif
#if THREAD_SIZE > 65536
	"kstack_rest",
#endif
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
			spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order)
		/*
		 * Access to nr_free is lockless as nr_free is used only for
		 * printing purposes. Use data_race to avoid KCSAN warning.
		 */
		seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < NR_PAGE_ORDERS; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;
			bool overflow = false;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype]) {
				/*
				 * Cap the free_list iteration because it might
				 * be really large and we are under a spinlock
				 * so a long time spent here could trigger a
				 * hard lockup detector. Anyway this is a
				 * debugging tool so knowing there is a handful
				 * of pages of this order should be more than
				 * sufficient.
				 */
				if (++freecount >= 100000) {
					overflow = true;
					break;
				}
			}
			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
			spin_unlock_irq(&zone->lock);
			cond_resched();
			spin_lock_irq(&zone->lock);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < NR_PAGE_ORDERS; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *compare = &pgdat->node_zones[zid];

		if (populated_zone(compare))
			return zone == compare;
	}

	return false;
}

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n  per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			unsigned long pages = node_page_state_pages(pgdat, i);

			if (vmstat_item_print_in_thp(i))
				pages /= HPAGE_PMD_NR;
			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
				   pages);
		}
	}
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        boost    %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        promo    %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu"
		   "\n        cma      %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   zone->watermark_boost,
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   promo_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone_managed_pages(zone),
		   zone_cma_pages(zone));

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_putc(m, ')');

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {
		seq_putc(m, '\n');
		return;
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
			   zone_page_state(zone, i));

#ifdef CONFIG_NUMA
	fold_vm_zone_numa_events(zone);
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
			   zone_numa_event_state(zone, i));
#endif

	seq_printf(m, "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pages *pcp;
		struct per_cpu_zonestat __maybe_unused *pzstats;

		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i"
			   "\n              high_min: %i"
			   "\n              high_max: %i",
			   i,
			   pcp->count,
			   pcp->high,
			   pcp->batch,
			   pcp->high_min,
			   pcp->high_max);
#ifdef CONFIG_SMP
		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
		seq_printf(m, "\n  vm stats threshold: %d",
			   pzstats->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  node_unreclaimable:  %u"
		   "\n  start_pfn:           %lu",
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat. All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
			 NR_VM_NUMA_EVENT_ITEMS + \
			 NR_VM_NODE_STAT_ITEMS + \
			 NR_VM_STAT_ITEMS + \
			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
			  NR_VM_EVENT_ITEMS : 0))

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i;

	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;

	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
	fold_vm_numa_events();
	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_zone_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		v[i] = global_numa_event_state(i);
	v += NR_VM_NUMA_EVENT_ITEMS;
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		v[i] = global_node_page_state_pages(i);
		if (vmstat_item_print_in_thp(i))
			v[i] /= HPAGE_PMD_NR;
	}
	v += NR_VM_NODE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v[NR_MEMMAP_PAGES] = atomic_long_read(&nr_memmap_pages);
	v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages);
	v += NR_VM_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_puts(m, vmstat_text[off]);
	seq_put_decimal_ull(m, " ", *l);
	seq_putc(m, '\n');

	if (off == NR_VMSTAT_ITEMS - 1) {
		/*
		 * We've come to the end - add any deprecated counters to avoid
		 * breaking userspace which might depend on them being present.
		 */
		seq_puts(m, "nr_unstable 0\n");
	}
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
static int sysctl_stat_interval __read_mostly = HZ;
static int vmstat_late_init_done;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}

static int vmstat_refresh(const struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_zone_page_state() etc. are so careful to hide
	 * transiently negative values, log a warning here if any of the
	 * stats is negative, so we know to go looking for imbalance.
	 */
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_ZONE_WRITE_PENDING:
		case NR_FREE_CMA_PAGES:
			continue;
		}
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, zone_stat_name(i), val);
		}
	}
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_WRITEBACK:
			continue;
		}
		val = atomic_long_read(&vm_node_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, node_stat_name(i), val);
		}
	}
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}
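
/*
 * Typical use (root only; a sketch, not the only way to drive it):
 *
 *	echo 1 > /proc/sys/vm/stat_refresh	# flush per-cpu diffs now
 *	grep nr_shmem /proc/vmstat		# then read a fresh value
 *
 * Reading the file (e.g. cat /proc/sys/vm/stat_refresh) triggers the same
 * refresh; the data written or read is ignored (reads return nothing).
 */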
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));
	}
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	pg_data_t *last_pgdat = NULL;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		struct per_cpu_nodestat *n;

		/*
		 * The fast way of checking if there are any vmstat diffs.
		 */
		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
			return true;

		if (last_pgdat == zone->zone_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
			return true;
	}
	return false;
}

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
{
	if (system_state != SYSTEM_RUNNING)
		return;

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
		return;

	if (!need_update(smp_processor_id()))
		return;

	/*
	 * Just refresh counters and do not care about the pending delayed
	 * vmstat_update. It does not fire often enough to matter, and
	 * cancelling it would be too expensive from this path.
	 * vmstat_shepherd will take care of that for us.
	 */
	refresh_cpu_vm_stats(false);
}

/*
 * Shepherd worker that checks the differentials of processors whose
 * vm statistics update workers have been disabled because of inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	cpus_read_lock();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		/*
		 * In-kernel users of vmstat counters either require the
		 * precise value (and use the zone_page_state_snapshot()
		 * interface) or can live with some imprecision, as the
		 * regular flushing can happen at arbitrary times and the
		 * cumulative error can grow (see calculate_normal_threshold()).
		 *
		 * From that POV the regular flushing can be postponed for
		 * CPUs that have been isolated from kernel interference
		 * without critical infrastructure ever noticing. Skip
		 * regular flushing from vmstat_shepherd for all isolated
		 * CPUs to avoid interference with the isolated workload.
		 */
		if (cpu_is_isolated(cpu))
			continue;

		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);

		cond_resched();
	}
	cpus_read_unlock();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
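
/*
 * Note: the shepherd re-arms itself every sysctl_stat_interval jiffies
 * (HZ by default, i.e. roughly once per second), so a CPU whose
 * vmstat_update worker has gone idle sees its pending diffs folded
 * within about one interval of them appearing.
 */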

static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

		/*
		 * For secondary CPUs during CPU hotplug scenarios,
		 * vmstat_cpu_online() will enable the work.
		 * mm/vmstat:online enables and disables vmstat_work
		 * symmetrically during CPU hotplug events.
		 */
		if (!cpu_online(cpu))
			disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	}

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init init_cpu_node_state(void)
{
	int node;

	for_each_online_node(node) {
		if (!cpumask_empty(cpumask_of_node(node)))
			node_set_state(node, N_CPU);
	}
}

static int vmstat_cpu_online(unsigned int cpu)
{
	if (vmstat_late_init_done)
		refresh_zone_stat_thresholds();

	if (!node_state(cpu_to_node(cpu), N_CPU))
		node_set_state(cpu_to_node(cpu), N_CPU);

	enable_delayed_work(&per_cpu(vmstat_work, cpu));

	return 0;
}

static int vmstat_cpu_down_prep(unsigned int cpu)
{
	disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	return 0;
}

static int vmstat_cpu_dead(unsigned int cpu)
{
	const struct cpumask *node_cpus;
	int node;

	node = cpu_to_node(cpu);

	refresh_zone_stat_thresholds();
	node_cpus = cpumask_of_node(node);
	if (!cpumask_empty(node_cpus))
		return 0;

	node_clear_state(node, N_CPU);

	return 0;
}

static int __init vmstat_late_init(void)
{
	refresh_zone_stat_thresholds();
	vmstat_late_init_done = 1;

	return 0;
}
late_initcall(vmstat_late_init);
#endif

#ifdef CONFIG_PROC_FS
static const struct ctl_table vmstat_table[] = {
#ifdef CONFIG_SMP
	{
		.procname	= "stat_interval",
		.data		= &sysctl_stat_interval,
		.maxlen		= sizeof(sysctl_stat_interval),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "stat_refresh",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0600,
		.proc_handler	= vmstat_refresh,
	},
#endif
#ifdef CONFIG_NUMA
	{
		.procname	= "numa_stat",
		.data		= &sysctl_vm_numa_stat,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sysctl_vm_numa_stat_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif
};
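
/*
 * Once registered via register_sysctl_init("vm", ...) below, these rows
 * appear under /proc/sys/vm. A sketch of typical root usage:
 *
 *	sysctl vm.stat_interval=10	# fold per-cpu diffs every 10 seconds
 *	sysctl vm.numa_stat=0		# NUMA builds only: toggle numa stats
 */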
#endif

struct workqueue_struct *mm_percpu_wq;

void __init init_mm_internals(void)
{
	int ret __maybe_unused;

	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);

#ifdef CONFIG_SMP
	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
					NULL, vmstat_cpu_dead);
	if (ret < 0)
		pr_err("vmstat: failed to register 'dead' hotplug state\n");

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
					vmstat_cpu_online,
					vmstat_cpu_down_prep);
	if (ret < 0)
		pr_err("vmstat: failed to register 'online' hotplug state\n");

	cpus_read_lock();
	init_cpu_node_state();
	cpus_read_unlock();

	start_shepherd_timer();
#endif
#ifdef CONFIG_PROC_FS
	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
	register_sysctl_init("vm", vmstat_table);
#endif
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages -
			(info->free_blocks_suitable << order)) * 1000ULL,
		       info->free_pages);
}
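
/*
 * Worked example (hypothetical numbers): for order 4 with
 * info->free_pages = 1000 and info->free_blocks_suitable = 31, the suitable
 * blocks cover 31 << 4 = 496 pages, so the index is
 * (1000 - 496) * 1000 / 1000 = 504, which unusable_show_print() below
 * renders as 0.504.
 */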

static void unusable_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}
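
/*
 * A line of the resulting debugfs file then looks roughly like (values are
 * illustrative):
 *
 *	Node 0, zone   Normal 0.000 0.012 0.504 ...
 *
 * with one column per page order; 0.000 means every free page is usable
 * for an allocation of that order.
 */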

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of the free memory
 * is unusable and, by implication, the worse the external fragmentation is.
 * This can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_sops = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

DEFINE_SEQ_ATTRIBUTE(unusable);

static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}
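
/*
 * As with unusable_show_print() above, one "%2d.%03d" column is emitted per
 * page order. Values come from __fragmentation_index(), so (in current
 * kernels) -1.000 indicates that an allocation of that order should succeed,
 * while values approaching 1.000 suggest failure due to external
 * fragmentation rather than a plain shortage of free memory.
 */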

/*
 * Display the fragmentation index for orders for which allocations would fail.
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_sops = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= extfrag_show,
};

DEFINE_SEQ_ATTRIBUTE(extfrag);

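/*
 * Registers the two files above under debugfs, conventionally mounted at
 * /sys/kernel/debug, yielding:
 *
 *	/sys/kernel/debug/extfrag/unusable_index
 *	/sys/kernel/debug/extfrag/extfrag_index
 */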
static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);

	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
			    &unusable_fops);

	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
			    &extfrag_fops);

	return 0;
}

module_init(extfrag_debug_init);

#endif