// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <cl@gentwo.org>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_owner.h>
#include <linux/sched/isolation.h>

#include "internal.h"

#ifdef CONFIG_PROC_FS
#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
static int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;

/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
{
	int item, cpu;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
		atomic_long_set(&zone->vm_numa_event[item], 0);
		for_each_online_cpu(cpu) {
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
						= 0;
		}
	}
}

/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zero_zone_numa_counters(zone);
}

/* zero global numa counters */
static void zero_global_numa_counters(void)
{
	int item;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		atomic_long_set(&vm_numa_event[item], 0);
}

static void invalid_numa_statistics(void)
{
	zero_zones_numa_counters();
	zero_global_numa_counters();
}

static DEFINE_MUTEX(vm_numa_stat_lock);

static int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret, oldval;

	mutex_lock(&vm_numa_stat_lock);
	if (write)
		oldval = sysctl_vm_numa_stat;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret || !write)
		goto out;

	if (oldval == sysctl_vm_numa_stat)
		goto out;
	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
		static_branch_enable(&vm_numa_stat_key);
		pr_info("enable numa statistics\n");
	} else {
		static_branch_disable(&vm_numa_stat_key);
		invalid_numa_statistics();
		pr_info("disable numa statistics, and clear numa counters\n");
	}

out:
	mutex_unlock(&vm_numa_stat_lock);
	return ret;
}
#endif
#endif /* CONFIG_PROC_FS */
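
/*
 * Usage sketch (illustrative, not an API guarantee): the handler above backs
 * the vm.numa_stat sysctl, so NUMA statistics collection can be toggled at
 * runtime, e.g.:
 *
 *	echo 0 > /proc/sys/vm/numa_stat		# disable, zero all counters
 *	echo 1 > /proc/sys/vm/numa_stat		# re-enable collection
 *
 * Disabling clears every NUMA counter, so values observed after re-enabling
 * start again from zero.
 */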

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	cpus_read_lock();
	sum_vm_events(ret);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_NUMA
static void fold_vm_zone_numa_events(struct zone *zone)
{
	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
	int cpu;
	enum numa_stat_item item;

	for_each_online_cpu(cpu) {
		struct per_cpu_zonestat *pzstats;

		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
	}

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		zone_numa_event_add(zone_numa_events[item], zone, item);
}

void fold_vm_numa_events(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		fold_vm_zone_numa_events(zone);
}
#endif

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
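
/*
 * Worked example (illustrative): with a low-to-min watermark gap of 1024
 * pages and 8 online CPUs, watermark_distance / num_online_cpus() gives 128,
 * which the cap then reduces to 125. With 256 CPUs the same gap yields
 * max(1, 4) = 4, keeping the worst-case drift (num_cpus * threshold) at or
 * below the gap.
 */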

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem)+1
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
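
/*
 * Worked example (illustrative): for a 2GB zone, mem = 2GB / 128MB = 16, so
 * fls(mem) = 5. With 4 online CPUs, fls(4) = 3 and the threshold becomes
 * 2 * 3 * (1 + 5) = 36, well under the 125 cap. The table above lists sample
 * values of the same scaling across a range of machine sizes.
 */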

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
							= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	/*
	 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
	 * atomicity is provided by IRQs being disabled -- either explicitly
	 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
	 * CPU migrations and preemption potentially corrupts a counter so
	 * disable preemption.
	 */
	preempt_disable_nested();

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	preempt_enable_nested();
}
EXPORT_SYMBOL(__mod_zone_page_state);
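
/*
 * Usage sketch (illustrative): callers must satisfy the context rules in the
 * comment above. For example, with interrupts already disabled, a page
 * allocator path could account freed pages as
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *
 * A negative delta works the same way and decrements the counter.
 */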

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			   long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	/* See __mod_zone_page_state() */
	preempt_disable_nested();

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	preempt_enable_nested();
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_zone_page_state() */
	preempt_disable_nested();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}

	preempt_enable_nested();
}
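
/*
 * Overstep walkthrough (illustrative): with a threshold t = 32 the overstep
 * is 16. When the per-cpu diff reaches 33, 33 + 16 = 49 is folded into the
 * zone counter and the diff is reset to -16, so roughly 48 further
 * increments can be absorbed locally before the next fold. The decrement
 * paths below mirror this in the opposite direction.
 */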

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	/* See __mod_zone_page_state() */
	preempt_disable_nested();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}

	preempt_enable_nested();
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_zone_page_state() */
	preempt_disable_nested();

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}

	preempt_enable_nested();
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	/* See __mod_zone_page_state() */
	preempt_disable_nested();

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}

	preempt_enable_nested();
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_try_cmpxchg().
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *	0	No overstepping
 *	1	Overstepping half of threshold
 *	-1	Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone,
	enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long n, t, z;
	s8 o;

	o = this_cpu_read(*p);
	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyway
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		n = delta + (long)o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (!this_cpu_try_cmpxchg(*p, &o, n));

	if (z)
		zone_page_state_add(z, zone, item);
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
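
/*
 * A note on the cmpxchg loop above (a summary of the semantics, not new
 * behaviour): this_cpu_try_cmpxchg() refreshes 'o' with the current per-cpu
 * value when it fails, so each retry recomputes 'n' and 'z' from fresh
 * state. The loop therefore never publishes a stale overflow into the zone
 * counters, at the cost of a possible extra iteration under contention.
 */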

static inline void mod_node_state(struct pglist_data *pgdat,
	enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long n, t, z;
	s8 o;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	o = this_cpu_read(*p);
	do {
		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyway
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		n = delta + (long)o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (!this_cpu_try_cmpxchg(*p, &o, n));

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns whether counters were updated.
 */
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	bool changed = false;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changed = true;
		}
	}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changed = true;
		}
	}

	return changed;
}
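
/*
 * The boolean result matters to the caller below: refresh_cpu_vm_stats()
 * propagates it so the vmstat machinery can tell whether this CPU still had
 * differentials to fold, which (in the shepherd/worker code elsewhere)
 * feeds the decision to quiesce periodic updates. This is a summary of
 * caller behaviour, not a contract enforced here.
 */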

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These can cause remote node cacheline
 * bouncing, so they are only touched when necessary.
 *
 * The function returns whether global counters were updated.
 */
static bool refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	bool changed = false;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
		struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(pcp->expire, 3);
#endif
			}
		}

		if (do_pagesets) {
			cond_resched();

			if (decay_pcp_high(zone, this_cpu_ptr(pcp)))
				changed = true;
#ifdef CONFIG_NUMA
			/*
			 * Deal with draining the remote pageset of this
			 * processor.
			 *
			 * Check if there are pages remaining in this pageset;
			 * if not, there is nothing to expire.
			 */
			if (!__this_cpu_read(pcp->expire) ||
			    !__this_cpu_read(pcp->count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(pcp->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(pcp->expire)) {
				changed = true;
				continue;
			}

			if (__this_cpu_read(pcp->count)) {
				drain_zone_pages(zone, this_cpu_ptr(pcp));
				changed = true;
			}
#endif
		}
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

	if (fold_diff(global_zone_diff, global_node_diff))
		changed = true;
	return changed;
}

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats;

		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			if (pzstats->vm_stat_diff[i]) {
				int v;

				v = pzstats->vm_stat_diff[i];
				pzstats->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}
		}
#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
			if (pzstats->vm_numa_event[i]) {
				unsigned long v;

				v = pzstats->vm_numa_event[i];
				pzstats->vm_numa_event[i] = 0;
				zone_numa_event_add(v, zone, i);
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

	fold_diff(global_zone_diff, global_node_diff);
}

/*
 * This is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
{
	unsigned long v;
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		if (pzstats->vm_stat_diff[i]) {
			v = pzstats->vm_stat_diff[i];
			pzstats->vm_stat_diff[i] = 0;
			zone_page_state_add(v, zone, i);
		}
	}

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
		if (pzstats->vm_numa_event[i]) {
			v = pzstats->vm_numa_event[i];
			pzstats->vm_numa_event[i] = 0;
			zone_numa_event_add(v, zone, i);
		}
	}
#endif
}
#endif

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				       enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/* Determine the per node value of a numa stat item. */
unsigned long sum_zone_numa_event_state(int node,
					enum numa_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	unsigned long count = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_numa_event_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state_pages(struct pglist_data *pgdat,
				    enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return node_page_state_pages(pgdat, item);
}
#endif

/*
 * Count the number of pages consumed by "struct page" and "struct page_ext".
 * nr_memmap_boot_pages: # of pages allocated by the boot allocator
 * nr_memmap_pages: # of pages allocated by the buddy allocator
 */
static atomic_long_t nr_memmap_boot_pages = ATOMIC_LONG_INIT(0);
static atomic_long_t nr_memmap_pages = ATOMIC_LONG_INIT(0);

void memmap_boot_pages_add(long delta)
{
	atomic_long_add(delta, &nr_memmap_boot_pages);
}

void memmap_pages_add(long delta)
{
	atomic_long_add(delta, &nr_memmap_pages);
}

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace.
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < NR_PAGE_ORDERS; order++) {
		unsigned long blocks;

		/*
		 * Count number of free blocks.
		 *
		 * Access to nr_free is lockless as nr_free is used only for
		 * diagnostic purposes. Use data_race to avoid KCSAN warning.
		 */
		blocks = data_race(zone->free_area[order].nr_free);
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used.
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
		return 0;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}
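
/*
 * Worked example (illustrative): for order = 4 (requested = 16 pages),
 * suppose info->free_pages = 160 spread over free_blocks_total = 40 small
 * blocks with free_blocks_suitable = 0. Then:
 *
 *	index = 1000 - (1000 + 160 * 1000 / 16) / 40
 *	      = 1000 - 11000 / 40 = 725
 *
 * i.e. plenty of free memory in aggregate, so the failure is mostly due to
 * external fragmentation (values near 1000 mean fragmentation, values near
 * 0 mean lack of memory).
 */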

/*
 * Calculates external fragmentation within a zone wrt the given order.
 * It is defined as the percentage of pages found in blocks of size
 * less than 1 << order. It returns values in range [0, 100].
 */
unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	if (info.free_pages == 0)
		return 0;

	return div_u64((info.free_pages -
			(info.free_blocks_suitable << order)) * 100,
			info.free_pages);
}
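
/*
 * Worked example (illustrative): with info.free_pages = 1000 at order = 3
 * and info.free_blocks_suitable = 50, the suitable blocks cover
 * 50 << 3 = 400 pages, so extfrag = (1000 - 400) * 100 / 1000 = 60, i.e.
 * 60% of the free pages sit in blocks too small for the request.
 */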

/* Same as __fragmentation_index() but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx, yy) [xx##_DMA] = yy "_dma",
#else
#define TEXT_FOR_DMA(xx, yy)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx, yy) [xx##_DMA32] = yy "_dma32",
#else
#define TEXT_FOR_DMA32(xx, yy)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx, yy) [xx##_HIGH] = yy "_high",
#else
#define TEXT_FOR_HIGHMEM(xx, yy)
#endif

#ifdef CONFIG_ZONE_DEVICE
#define TEXT_FOR_DEVICE(xx, yy) [xx##_DEVICE] = yy "_device",
#else
#define TEXT_FOR_DEVICE(xx, yy)
#endif

#define TEXTS_FOR_ZONES(xx, yy) \
	TEXT_FOR_DMA(xx, yy) \
	TEXT_FOR_DMA32(xx, yy) \
	[xx##_NORMAL] = yy "_normal", \
	TEXT_FOR_HIGHMEM(xx, yy) \
	[xx##_MOVABLE] = yy "_movable", \
	TEXT_FOR_DEVICE(xx, yy)
const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
#define I(x) (x)
	[I(NR_FREE_PAGES)] = "nr_free_pages",
	[I(NR_FREE_PAGES_BLOCKS)] = "nr_free_pages_blocks",
	[I(NR_ZONE_INACTIVE_ANON)] = "nr_zone_inactive_anon",
	[I(NR_ZONE_ACTIVE_ANON)] = "nr_zone_active_anon",
	[I(NR_ZONE_INACTIVE_FILE)] = "nr_zone_inactive_file",
	[I(NR_ZONE_ACTIVE_FILE)] = "nr_zone_active_file",
	[I(NR_ZONE_UNEVICTABLE)] = "nr_zone_unevictable",
	[I(NR_ZONE_WRITE_PENDING)] = "nr_zone_write_pending",
	[I(NR_MLOCK)] = "nr_mlock",
#if IS_ENABLED(CONFIG_ZSMALLOC)
	[I(NR_ZSPAGES)] = "nr_zspages",
#endif
	[I(NR_FREE_CMA_PAGES)] = "nr_free_cma",
#ifdef CONFIG_UNACCEPTED_MEMORY
	[I(NR_UNACCEPTED)] = "nr_unaccepted",
#endif
#undef I

	/* enum numa_stat_item counters */
#define I(x) (NR_VM_ZONE_STAT_ITEMS + x)
#ifdef CONFIG_NUMA
	[I(NUMA_HIT)] = "numa_hit",
	[I(NUMA_MISS)] = "numa_miss",
	[I(NUMA_FOREIGN)] = "numa_foreign",
	[I(NUMA_INTERLEAVE_HIT)] = "numa_interleave",
	[I(NUMA_LOCAL)] = "numa_local",
	[I(NUMA_OTHER)] = "numa_other",
#endif
#undef I

	/* enum node_stat_item counters */
#define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + x)
	[I(NR_INACTIVE_ANON)] = "nr_inactive_anon",
	[I(NR_ACTIVE_ANON)] = "nr_active_anon",
	[I(NR_INACTIVE_FILE)] = "nr_inactive_file",
	[I(NR_ACTIVE_FILE)] = "nr_active_file",
	[I(NR_UNEVICTABLE)] = "nr_unevictable",
	[I(NR_SLAB_RECLAIMABLE_B)] = "nr_slab_reclaimable",
	[I(NR_SLAB_UNRECLAIMABLE_B)] = "nr_slab_unreclaimable",
	[I(NR_ISOLATED_ANON)] = "nr_isolated_anon",
	[I(NR_ISOLATED_FILE)] = "nr_isolated_file",
	[I(WORKINGSET_NODES)] = "workingset_nodes",
	[I(WORKINGSET_REFAULT_ANON)] = "workingset_refault_anon",
	[I(WORKINGSET_REFAULT_FILE)] = "workingset_refault_file",
	[I(WORKINGSET_ACTIVATE_ANON)] = "workingset_activate_anon",
	[I(WORKINGSET_ACTIVATE_FILE)] = "workingset_activate_file",
	[I(WORKINGSET_RESTORE_ANON)] = "workingset_restore_anon",
	[I(WORKINGSET_RESTORE_FILE)] = "workingset_restore_file",
	[I(WORKINGSET_NODERECLAIM)] = "workingset_nodereclaim",
	[I(NR_ANON_MAPPED)] = "nr_anon_pages",
	[I(NR_FILE_MAPPED)] = "nr_mapped",
	[I(NR_FILE_PAGES)] = "nr_file_pages",
	[I(NR_FILE_DIRTY)] = "nr_dirty",
	[I(NR_WRITEBACK)] = "nr_writeback",
	[I(NR_SHMEM)] = "nr_shmem",
	[I(NR_SHMEM_THPS)] = "nr_shmem_hugepages",
	[I(NR_SHMEM_PMDMAPPED)] = "nr_shmem_pmdmapped",
	[I(NR_FILE_THPS)] = "nr_file_hugepages",
	[I(NR_FILE_PMDMAPPED)] = "nr_file_pmdmapped",
	[I(NR_ANON_THPS)] = "nr_anon_transparent_hugepages",
	[I(NR_VMSCAN_WRITE)] = "nr_vmscan_write",
	[I(NR_VMSCAN_IMMEDIATE)] = "nr_vmscan_immediate_reclaim",
	[I(NR_DIRTIED)] = "nr_dirtied",
	[I(NR_WRITTEN)] = "nr_written",
	[I(NR_THROTTLED_WRITTEN)] = "nr_throttled_written",
	[I(NR_KERNEL_MISC_RECLAIMABLE)] = "nr_kernel_misc_reclaimable",
	[I(NR_FOLL_PIN_ACQUIRED)] = "nr_foll_pin_acquired",
	[I(NR_FOLL_PIN_RELEASED)] = "nr_foll_pin_released",
	[I(NR_VMALLOC)] = "nr_vmalloc",
	[I(NR_KERNEL_STACK_KB)] = "nr_kernel_stack",
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	[I(NR_KERNEL_SCS_KB)] = "nr_shadow_call_stack",
#endif
	[I(NR_PAGETABLE)] = "nr_page_table_pages",
	[I(NR_SECONDARY_PAGETABLE)] = "nr_sec_page_table_pages",
#ifdef CONFIG_IOMMU_SUPPORT
	[I(NR_IOMMU_PAGES)] = "nr_iommu_pages",
#endif
#ifdef CONFIG_SWAP
	[I(NR_SWAPCACHE)] = "nr_swapcached",
#endif
#ifdef CONFIG_NUMA_BALANCING
	[I(PGPROMOTE_SUCCESS)] = "pgpromote_success",
	[I(PGPROMOTE_CANDIDATE)] = "pgpromote_candidate",
	[I(PGPROMOTE_CANDIDATE_NRL)] = "pgpromote_candidate_nrl",
#endif
	[I(PGDEMOTE_KSWAPD)] = "pgdemote_kswapd",
	[I(PGDEMOTE_DIRECT)] = "pgdemote_direct",
	[I(PGDEMOTE_KHUGEPAGED)] = "pgdemote_khugepaged",
	[I(PGDEMOTE_PROACTIVE)] = "pgdemote_proactive",
	[I(PGSTEAL_KSWAPD)] = "pgsteal_kswapd",
	[I(PGSTEAL_DIRECT)] = "pgsteal_direct",
	[I(PGSTEAL_KHUGEPAGED)] = "pgsteal_khugepaged",
	[I(PGSTEAL_PROACTIVE)] = "pgsteal_proactive",
	[I(PGSTEAL_ANON)] = "pgsteal_anon",
	[I(PGSTEAL_FILE)] = "pgsteal_file",
	[I(PGSCAN_KSWAPD)] = "pgscan_kswapd",
	[I(PGSCAN_DIRECT)] = "pgscan_direct",
	[I(PGSCAN_KHUGEPAGED)] = "pgscan_khugepaged",
	[I(PGSCAN_PROACTIVE)] = "pgscan_proactive",
	[I(PGSCAN_ANON)] = "pgscan_anon",
	[I(PGSCAN_FILE)] = "pgscan_file",
	[I(PGREFILL)] = "pgrefill",
#ifdef CONFIG_HUGETLB_PAGE
	[I(NR_HUGETLB)] = "nr_hugetlb",
#endif
	[I(NR_BALLOON_PAGES)] = "nr_balloon_pages",
	[I(NR_KERNEL_FILE_PAGES)] = "nr_kernel_file_pages",
	[I(NR_GPU_ACTIVE)] = "nr_gpu_active",
	[I(NR_GPU_RECLAIM)] = "nr_gpu_reclaim",
#undef I

	/* system-wide enum vm_stat_item counters */
#define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
	      NR_VM_NODE_STAT_ITEMS + x)
	[I(NR_DIRTY_THRESHOLD)] = "nr_dirty_threshold",
	[I(NR_DIRTY_BG_THRESHOLD)] = "nr_dirty_background_threshold",
	[I(NR_MEMMAP_PAGES)] = "nr_memmap_pages",
	[I(NR_MEMMAP_BOOT_PAGES)] = "nr_memmap_boot_pages",
#undef I

#if defined(CONFIG_VM_EVENT_COUNTERS)
	/* enum vm_event_item counters */
#define I(x) (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
	      NR_VM_NODE_STAT_ITEMS + NR_VM_STAT_ITEMS + x)

	[I(PGPGIN)] = "pgpgin",
	[I(PGPGOUT)] = "pgpgout",
	[I(PSWPIN)] = "pswpin",
	[I(PSWPOUT)] = "pswpout",

#define OFF (NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + \
	     NR_VM_NODE_STAT_ITEMS + NR_VM_STAT_ITEMS)
	TEXTS_FOR_ZONES(OFF+PGALLOC, "pgalloc")
	TEXTS_FOR_ZONES(OFF+ALLOCSTALL, "allocstall")
	TEXTS_FOR_ZONES(OFF+PGSCAN_SKIP, "pgskip")
#undef OFF

	[I(PGFREE)] = "pgfree",
	[I(PGACTIVATE)] = "pgactivate",
	[I(PGDEACTIVATE)] = "pgdeactivate",
	[I(PGLAZYFREE)] = "pglazyfree",

	[I(PGFAULT)] = "pgfault",
	[I(PGMAJFAULT)] = "pgmajfault",
	[I(PGLAZYFREED)] = "pglazyfreed",

	[I(PGREUSE)] = "pgreuse",
	[I(PGSCAN_DIRECT_THROTTLE)] = "pgscan_direct_throttle",

#ifdef CONFIG_NUMA
	[I(PGSCAN_ZONE_RECLAIM_SUCCESS)] = "zone_reclaim_success",
	[I(PGSCAN_ZONE_RECLAIM_FAILED)] = "zone_reclaim_failed",
#endif
	[I(PGINODESTEAL)] = "pginodesteal",
	[I(SLABS_SCANNED)] = "slabs_scanned",
	[I(KSWAPD_INODESTEAL)] = "kswapd_inodesteal",
	[I(KSWAPD_LOW_WMARK_HIT_QUICKLY)] = "kswapd_low_wmark_hit_quickly",
	[I(KSWAPD_HIGH_WMARK_HIT_QUICKLY)] = "kswapd_high_wmark_hit_quickly",
	[I(PAGEOUTRUN)] = "pageoutrun",

	[I(PGROTATED)] = "pgrotated",

	[I(DROP_PAGECACHE)] = "drop_pagecache",
	[I(DROP_SLAB)] = "drop_slab",
	[I(OOM_KILL)] = "oom_kill",

#ifdef CONFIG_NUMA_BALANCING
	[I(NUMA_PTE_UPDATES)] = "numa_pte_updates",
	[I(NUMA_HUGE_PTE_UPDATES)] = "numa_huge_pte_updates",
	[I(NUMA_HINT_FAULTS)] = "numa_hint_faults",
	[I(NUMA_HINT_FAULTS_LOCAL)] = "numa_hint_faults_local",
	[I(NUMA_PAGE_MIGRATE)] = "numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	[I(PGMIGRATE_SUCCESS)] = "pgmigrate_success",
	[I(PGMIGRATE_FAIL)] = "pgmigrate_fail",
	[I(THP_MIGRATION_SUCCESS)] = "thp_migration_success",
	[I(THP_MIGRATION_FAIL)] = "thp_migration_fail",
	[I(THP_MIGRATION_SPLIT)] = "thp_migration_split",
#endif
#ifdef CONFIG_COMPACTION
	[I(COMPACTMIGRATE_SCANNED)] = "compact_migrate_scanned",
	[I(COMPACTFREE_SCANNED)] = "compact_free_scanned",
	[I(COMPACTISOLATED)] = "compact_isolated",
	[I(COMPACTSTALL)] = "compact_stall",
	[I(COMPACTFAIL)] = "compact_fail",
	[I(COMPACTSUCCESS)] = "compact_success",
	[I(KCOMPACTD_WAKE)] = "compact_daemon_wake",
	[I(KCOMPACTD_MIGRATE_SCANNED)] = "compact_daemon_migrate_scanned",
	[I(KCOMPACTD_FREE_SCANNED)] = "compact_daemon_free_scanned",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	[I(HTLB_BUDDY_PGALLOC)] = "htlb_buddy_alloc_success",
	[I(HTLB_BUDDY_PGALLOC_FAIL)] = "htlb_buddy_alloc_fail",
#endif
#ifdef CONFIG_CMA
	[I(CMA_ALLOC_SUCCESS)] = "cma_alloc_success",
	[I(CMA_ALLOC_FAIL)] = "cma_alloc_fail",
#endif
	[I(UNEVICTABLE_PGCULLED)] = "unevictable_pgs_culled",
	[I(UNEVICTABLE_PGSCANNED)] = "unevictable_pgs_scanned",
	[I(UNEVICTABLE_PGRESCUED)] = "unevictable_pgs_rescued",
	[I(UNEVICTABLE_PGMLOCKED)] = "unevictable_pgs_mlocked",
	[I(UNEVICTABLE_PGMUNLOCKED)] = "unevictable_pgs_munlocked",
	[I(UNEVICTABLE_PGCLEARED)] = "unevictable_pgs_cleared",
	[I(UNEVICTABLE_PGSTRANDED)] = "unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[I(THP_FAULT_ALLOC)] = "thp_fault_alloc",
	[I(THP_FAULT_FALLBACK)] = "thp_fault_fallback",
	[I(THP_FAULT_FALLBACK_CHARGE)] = "thp_fault_fallback_charge",
	[I(THP_COLLAPSE_ALLOC)] = "thp_collapse_alloc",
	[I(THP_COLLAPSE_ALLOC_FAILED)] = "thp_collapse_alloc_failed",
	[I(THP_FILE_ALLOC)] = "thp_file_alloc",
	[I(THP_FILE_FALLBACK)] = "thp_file_fallback",
	[I(THP_FILE_FALLBACK_CHARGE)] = "thp_file_fallback_charge",
	[I(THP_FILE_MAPPED)] = "thp_file_mapped",
	[I(THP_SPLIT_PAGE)] = "thp_split_page",
	[I(THP_SPLIT_PAGE_FAILED)] = "thp_split_page_failed",
	[I(THP_DEFERRED_SPLIT_PAGE)] = "thp_deferred_split_page",
	[I(THP_UNDERUSED_SPLIT_PAGE)] = "thp_underused_split_page",
	[I(THP_SPLIT_PMD)] = "thp_split_pmd",
	[I(THP_SCAN_EXCEED_NONE_PTE)] = "thp_scan_exceed_none_pte",
	[I(THP_SCAN_EXCEED_SWAP_PTE)] = "thp_scan_exceed_swap_pte",
	[I(THP_SCAN_EXCEED_SHARED_PTE)] = "thp_scan_exceed_share_pte",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	[I(THP_SPLIT_PUD)] = "thp_split_pud",
#endif
	[I(THP_ZERO_PAGE_ALLOC)] = "thp_zero_page_alloc",
	[I(THP_ZERO_PAGE_ALLOC_FAILED)] = "thp_zero_page_alloc_failed",
	[I(THP_SWPOUT)] = "thp_swpout",
	[I(THP_SWPOUT_FALLBACK)] = "thp_swpout_fallback",
#endif
#ifdef CONFIG_BALLOON
	[I(BALLOON_INFLATE)] = "balloon_inflate",
	[I(BALLOON_DEFLATE)] = "balloon_deflate",
#ifdef CONFIG_BALLOON_MIGRATION
	[I(BALLOON_MIGRATE)] = "balloon_migrate",
#endif /* CONFIG_BALLOON_MIGRATION */
#endif /* CONFIG_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
	[I(NR_TLB_REMOTE_FLUSH)] = "nr_tlb_remote_flush",
	[I(NR_TLB_REMOTE_FLUSH_RECEIVED)] = "nr_tlb_remote_flush_received",
	[I(NR_TLB_LOCAL_FLUSH_ALL)] = "nr_tlb_local_flush_all",
	[I(NR_TLB_LOCAL_FLUSH_ONE)] = "nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_SWAP
	[I(SWAP_RA)] = "swap_ra",
	[I(SWAP_RA_HIT)] = "swap_ra_hit",
	[I(SWPIN_ZERO)] = "swpin_zero",
	[I(SWPOUT_ZERO)] = "swpout_zero",
#ifdef CONFIG_KSM
	[I(KSM_SWPIN_COPY)] = "ksm_swpin_copy",
#endif
#endif
#ifdef CONFIG_KSM
	[I(COW_KSM)] = "cow_ksm",
#endif
#ifdef CONFIG_ZSWAP
	[I(ZSWPIN)] = "zswpin",
	[I(ZSWPOUT)] = "zswpout",
	[I(ZSWPWB)] = "zswpwb",
#endif
#ifdef CONFIG_X86
	[I(DIRECT_MAP_LEVEL2_SPLIT)] = "direct_map_level2_splits",
	[I(DIRECT_MAP_LEVEL3_SPLIT)] = "direct_map_level3_splits",
	[I(DIRECT_MAP_LEVEL2_COLLAPSE)] = "direct_map_level2_collapses",
	[I(DIRECT_MAP_LEVEL3_COLLAPSE)] = "direct_map_level3_collapses",
#endif
#ifdef CONFIG_PER_VMA_LOCK_STATS
	[I(VMA_LOCK_SUCCESS)] = "vma_lock_success",
	[I(VMA_LOCK_ABORT)] = "vma_lock_abort",
	[I(VMA_LOCK_RETRY)] = "vma_lock_retry",
	[I(VMA_LOCK_MISS)] = "vma_lock_miss",
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
	[I(KSTACK_1K)] = "kstack_1k",
#if THREAD_SIZE > 1024
	[I(KSTACK_2K)] = "kstack_2k",
#endif
#if THREAD_SIZE > 2048
	[I(KSTACK_4K)] = "kstack_4k",
#endif
#if THREAD_SIZE > 4096
	[I(KSTACK_8K)] = "kstack_8k",
#endif
#if THREAD_SIZE > 8192
	[I(KSTACK_16K)] = "kstack_16k",
#endif
#if THREAD_SIZE > 16384
	[I(KSTACK_32K)] = "kstack_32k",
#endif
#if THREAD_SIZE > 32768
	[I(KSTACK_64K)] = "kstack_64k",
#endif
#if THREAD_SIZE > 65536
	[I(KSTACK_REST)] = "kstack_rest",
#endif
#endif
#undef I
#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
    defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
			spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif
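
/*
 * Usage note for walk_zones_in_node() (a summary, not new behaviour): the
 * print callbacks below run under zone->lock with IRQs disabled unless
 * @nolock is true, so they must not sleep. pagetypeinfo_showmixedcount()
 * passes nolock = true for its potentially expensive PAGE_OWNER walk.
 */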

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order)
		/*
		 * Access to nr_free is lockless as nr_free is used only for
		 * printing purposes. Use data_race to avoid KCSAN warning.
		 */
		seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < NR_PAGE_ORDERS; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;
			bool overflow = false;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype]) {
				/*
				 * Cap the free_list iteration because it might
				 * be really large and we are under a spinlock
				 * so a long time spent here could trigger a
				 * hard lockup detector. Anyway this is a
				 * debugging tool so knowing there is a handful
				 * of pages of this order should be more than
				 * sufficient.
				 */
				if (++freecount >= 100000) {
					overflow = true;
					break;
				}
			}
			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
			spin_unlock_irq(&zone->lock);
			cond_resched();
			spin_lock_irq(&zone->lock);
		}
		seq_putc(m, '\n');
	}
}
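
/*
 * Note on the lock juggling above (a summary): zone->lock is dropped around
 * cond_resched() between orders, so the per-order counts form a best-effort
 * snapshot rather than a consistent view of the free lists. For a debug
 * interface that is an acceptable trade-off against hard-lockup warnings.
 */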

/* Print out the free pages at each order for each migratetype */
static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < NR_PAGE_ORDERS; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on.
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};
1739
is_zone_first_populated(pg_data_t * pgdat,struct zone * zone)1740 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1741 {
1742 int zid;
1743
1744 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1745 struct zone *compare = &pgdat->node_zones[zid];
1746
1747 if (populated_zone(compare))
1748 return zone == compare;
1749 }
1750
1751 return false;
1752 }

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;

	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n  per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			unsigned long pages = node_page_state_pages(pgdat, i);

			if (vmstat_item_print_in_thp(i))
				pages /= HPAGE_PMD_NR;
			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
				   pages);
		}
	}
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        boost    %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        promo    %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu"
		   "\n        cma      %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   zone->watermark_boost,
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   promo_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone_managed_pages(zone),
		   zone_cma_pages(zone));

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_putc(m, ')');

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {
		seq_putc(m, '\n');
		return;
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
			   zone_page_state(zone, i));

#ifdef CONFIG_NUMA
	fold_vm_zone_numa_events(zone);
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
			   zone_numa_event_state(zone, i));
#endif

	seq_printf(m, "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pages *pcp;
		struct per_cpu_zonestat __maybe_unused *pzstats;

		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count:    %i"
			   "\n              high:     %i"
			   "\n              batch:    %i"
			   "\n              high_min: %i"
			   "\n              high_max: %i",
			   i,
			   pcp->count,
			   pcp->high,
			   pcp->batch,
			   pcp->high_min,
			   pcp->high_max);
#ifdef CONFIG_SMP
		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
		seq_printf(m, "\n  vm stats threshold: %d",
			   pzstats->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  node_unreclaimable:  %u"
		   "\n  start_pfn:           %lu"
		   "\n  reserved_highatomic: %lu"
		   "\n  free_highatomic:     %lu",
		   kswapd_test_hopeless(pgdat),
		   zone->zone_start_pfn,
		   zone->nr_reserved_highatomic,
		   zone->nr_free_highatomic);
	seq_putc(m, '\n');
}
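
/*
 * Illustrative excerpt only (the numbers are made up) of what the above
 * renders as in /proc/zoneinfo:
 *
 *   Node 0, zone   Normal
 *     pages free     86724
 *           min      11360
 *           ...
 *     pagesets
 *       cpu: 0
 *                 count:    331
 *                 ...
 *     node_unreclaimable:  0
 *     start_pfn:           1048576
 */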

/*
 * Output information about zones in @pgdat.  All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
			 NR_VM_NUMA_EVENT_ITEMS + \
			 NR_VM_NODE_STAT_ITEMS + \
			 NR_VM_STAT_ITEMS + \
			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
			  NR_VM_EVENT_ITEMS : 0))
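
/*
 * vmstat_start() below snapshots all counters into one flat array whose
 * layout mirrors vmstat_text[]:
 *
 *   [0 .. NR_VM_ZONE_STAT_ITEMS)    global zone counters
 *   [.. + NR_VM_NUMA_EVENT_ITEMS)   NUMA events (CONFIG_NUMA only)
 *   [.. + NR_VM_NODE_STAT_ITEMS)    global node counters
 *   [.. + NR_VM_STAT_ITEMS)         dirty thresholds and memmap pages
 *   [.. + NR_VM_EVENT_ITEMS)        VM events (CONFIG_VM_EVENT_COUNTERS only)
 */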

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i;

	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;

	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) != NR_VMSTAT_ITEMS);
	fold_vm_numa_events();
	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_zone_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		v[i] = global_numa_event_state(i);
	v += NR_VM_NUMA_EVENT_ITEMS;
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		v[i] = global_node_page_state_pages(i);
		if (vmstat_item_print_in_thp(i))
			v[i] /= HPAGE_PMD_NR;
	}
	v += NR_VM_NODE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v[NR_MEMMAP_PAGES] = atomic_long_read(&nr_memmap_pages);
	v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages);
	v += NR_VM_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* accounted in 512-byte sectors -> KiB */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_puts(m, vmstat_text[off]);
	seq_put_decimal_ull(m, " ", *l);
	seq_putc(m, '\n');

	if (off == NR_VMSTAT_ITEMS - 1) {
		/*
		 * We've come to the end - add any deprecated counters to avoid
		 * breaking userspace which might depend on them being present.
		 */
		seq_puts(m, "nr_unstable 0\n");
	}
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
static int sysctl_stat_interval __read_mostly = HZ;
static int vmstat_late_init_done;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}

static int vmstat_refresh(const struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_zone_page_state() etc. are so careful to hide
	 * transiently negative values, print a warning here if any of the
	 * stats are negative, so we know to go looking for imbalance.
	 */
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_ZONE_WRITE_PENDING:
		case NR_FREE_CMA_PAGES:
			continue;
		}
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, zone_stat_name(i), val);
		}
	}
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_WRITEBACK:
			continue;
		}
		val = atomic_long_read(&vm_node_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, node_stat_name(i), val);
		}
	}
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}
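
/*
 * Example usage (as root), per the comment in vmstat_refresh() above;
 * either form forces the per-CPU deltas to be folded in before the stats
 * are read:
 *
 *   # echo 1 > /proc/sys/vm/stat_refresh
 *   # cat /proc/sys/vm/stat_refresh; cat /proc/vmstat
 */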
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));
	}
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	pg_data_t *last_pgdat = NULL;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		struct per_cpu_nodestat *n;

		/*
		 * The fast way of checking if there are any vmstat diffs.
		 */
		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
			return true;

		if (last_pgdat == zone->zone_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
			return true;
	}
	return false;
}
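
/*
 * memchr_inv(p, c, n) returns NULL iff all n bytes at p equal c, so the
 * checks above are a fast "is this diff array all zeroes?" test.  A naive
 * sketch of the equivalent (slower) open-coded loop, assuming the s8
 * element type of the diff arrays (see struct per_cpu_zonestat):
 *
 *	static bool any_nonzero(const s8 *diff, size_t n)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < n; i++)
 *			if (diff[i])
 *				return true;
 *		return false;
 *	}
 */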

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
{
	if (system_state != SYSTEM_RUNNING)
		return;

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
		return;

	if (!need_update(smp_processor_id()))
		return;

	/*
	 * Just refresh the counters and do not care about the pending delayed
	 * vmstat_update. It does not fire often enough to matter, and
	 * cancelling it would be too expensive from this path.
	 * vmstat_shepherd will take care of it for us.
	 */
	refresh_cpu_vm_stats(false);
}
2104
2105 /*
2106 * Shepherd worker thread that checks the
2107 * differentials of processors that have their worker
2108 * threads for vm statistics updates disabled because of
2109 * inactivity.
2110 */
2111 static void vmstat_shepherd(struct work_struct *w);
2112
2113 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
2114
vmstat_flush_workqueue(void)2115 void vmstat_flush_workqueue(void)
2116 {
2117 flush_workqueue(mm_percpu_wq);
2118 }

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	cpus_read_lock();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		/*
		 * In-kernel users of vmstat counters either require the
		 * precise value, in which case they use the
		 * zone_page_state_snapshot() interface, or they can live with
		 * imprecision, as the regular flushing can happen at an
		 * arbitrary time and the cumulative error can grow (see
		 * calculate_normal_threshold()).
		 *
		 * From that POV the regular flushing can be postponed for
		 * CPUs that have been isolated from kernel interference,
		 * without critical infrastructure ever noticing. Skip regular
		 * flushing from vmstat_shepherd for all isolated CPUs to
		 * avoid interference with the isolated workload.
		 */
		scoped_guard(rcu) {
			if (cpu_is_isolated(cpu))
				continue;

			if (!delayed_work_pending(dw) && need_update(cpu))
				queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
		}

		cond_resched();
	}
	cpus_read_unlock();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

		/*
		 * For secondary CPUs during CPU hotplug scenarios,
		 * vmstat_cpu_online() will enable the work.
		 * mm/vmstat:online enables and disables vmstat_work
		 * symmetrically during CPU hotplug events.
		 */
		if (!cpu_online(cpu))
			disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	}

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init init_cpu_node_state(void)
{
	int node;

	for_each_online_node(node) {
		if (!cpumask_empty(cpumask_of_node(node)))
			node_set_state(node, N_CPU);
	}
}

static int vmstat_cpu_online(unsigned int cpu)
{
	if (vmstat_late_init_done)
		refresh_zone_stat_thresholds();

	if (!node_state(cpu_to_node(cpu), N_CPU))
		node_set_state(cpu_to_node(cpu), N_CPU);

	enable_delayed_work(&per_cpu(vmstat_work, cpu));

	return 0;
}

static int vmstat_cpu_down_prep(unsigned int cpu)
{
	disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	return 0;
}

static int vmstat_cpu_dead(unsigned int cpu)
{
	const struct cpumask *node_cpus;
	int node;

	node = cpu_to_node(cpu);

	refresh_zone_stat_thresholds();
	node_cpus = cpumask_of_node(node);
	if (!cpumask_empty(node_cpus))
		return 0;

	node_clear_state(node, N_CPU);

	return 0;
}

static int __init vmstat_late_init(void)
{
	refresh_zone_stat_thresholds();
	vmstat_late_init_done = 1;

	return 0;
}
late_initcall(vmstat_late_init);
#endif

#ifdef CONFIG_PROC_FS
static const struct ctl_table vmstat_table[] = {
#ifdef CONFIG_SMP
	{
		.procname	= "stat_interval",
		.data		= &sysctl_stat_interval,
		.maxlen		= sizeof(sysctl_stat_interval),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "stat_refresh",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0600,
		.proc_handler	= vmstat_refresh,
	},
#endif
#ifdef CONFIG_NUMA
	{
		.procname	= "numa_stat",
		.data		= &sysctl_vm_numa_stat,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sysctl_vm_numa_stat_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif
};
#endif
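
/*
 * register_sysctl_init("vm", vmstat_table) in init_mm_internals() below
 * exposes these knobs as /proc/sys/vm/stat_interval, /proc/sys/vm/stat_refresh
 * and, on NUMA builds, /proc/sys/vm/numa_stat.  For example:
 *
 *   # echo 2 > /proc/sys/vm/stat_interval     (flush every 2 seconds)
 *
 * stat_interval is handled by proc_dointvec_jiffies, so userspace reads and
 * writes it in seconds while the kernel stores jiffies.
 */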

struct workqueue_struct *mm_percpu_wq;

void __init init_mm_internals(void)
{
	int ret __maybe_unused;

	mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
				       WQ_MEM_RECLAIM | WQ_PERCPU, 0);

#ifdef CONFIG_SMP
	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
					NULL, vmstat_cpu_dead);
	if (ret < 0)
		pr_err("vmstat: failed to register 'dead' hotplug state\n");

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
					vmstat_cpu_online,
					vmstat_cpu_down_prep);
	if (ret < 0)
		pr_err("vmstat: failed to register 'online' hotplug state\n");

	cpus_read_lock();
	init_cpu_node_state();
	cpus_read_unlock();

	start_shepherd_timer();
#endif
#ifdef CONFIG_PROC_FS
	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
	register_sysctl_init("vm", vmstat_table);
#endif
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * The index is a value between 0 and 1, returned to 3 decimal places
	 * (i.e. scaled by 1000).
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages -
			(info->free_blocks_suitable << order)) * 1000ULL,
		       info->free_pages);
}
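
/*
 * Worked example with made-up numbers: for an order-4 request with
 * info->free_pages == 10000 and info->free_blocks_suitable == 500, the
 * suitable blocks cover 500 << 4 == 8000 pages, so the index is
 * (10000 - 8000) * 1000 / 10000 == 200, which unusable_show_print() below
 * renders as "0.200": 20% of the free memory is unusable at that order.
 */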

static void unusable_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
		   pgdat->node_id,
		   zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display the unusable free space index.
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of the free memory
 * is unusable and, by implication, the worse the external fragmentation is.
 * It can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_sops = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

DEFINE_SEQ_ATTRIBUTE(unusable);

static void extfrag_show_print(struct seq_file *m,
			       pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
		   pgdat->node_id,
		   zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display the fragmentation index for orders at which allocations would fail.
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_sops = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= extfrag_show,
};

DEFINE_SEQ_ATTRIBUTE(extfrag);

static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);

	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
			    &unusable_fops);

	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
			    &extfrag_fops);

	return 0;
}
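
/*
 * Assuming debugfs is mounted at the conventional /sys/kernel/debug, the two
 * files created above can be read as:
 *
 *   # cat /sys/kernel/debug/extfrag/unusable_index
 *   # cat /sys/kernel/debug/extfrag/extfrag_index
 *
 * DEFINE_SEQ_ATTRIBUTE(unusable) and DEFINE_SEQ_ATTRIBUTE(extfrag) above
 * generate the unusable_fops and extfrag_fops used here.
 */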

module_init(extfrag_debug_init);

#endif