// SPDX-License-Identifier: GPL-2.0
/*
 * access_tracking_perf_test
 *
 * Copyright (C) 2021, Google, Inc.
 *
 * This test measures the performance effects of KVM's access tracking.
 * Access tracking is driven by the MMU notifiers test_young, clear_young, and
 * clear_flush_young. These notifiers do not have a direct userspace API;
 * however, the clear_young notifier can be triggered either by
 * 1. marking pages as idle in /sys/kernel/mm/page_idle/bitmap OR
 * 2. adding a new MGLRU generation using the lru_gen debugfs file.
 * This test leverages page_idle to enable access tracking on guest memory
 * unless MGLRU is enabled, in which case MGLRU is used.
 *
 * To measure performance this test runs a VM with a configurable number of
 * vCPUs that each touch every page in disjoint regions of memory. Performance
 * is measured as the time it takes all vCPUs to finish touching their
 * predefined region.
 *
 * Note that a deterministic correctness test of access tracking is not
 * possible using page_idle or MGLRU aging as they exist today. This is for a
 * few reasons:
 *
 * 1. page_idle and MGLRU only issue clear_young notifiers, which lack a TLB
 *    flush. This means subsequent guest accesses are not guaranteed to see
 *    page table updates made by KVM until some time in the future.
 *
 * 2. page_idle only operates on LRU pages. Newly allocated pages are not
 *    immediately added to LRU lists. Instead they are held in a "pagevec",
 *    which is drained to LRU lists some time in the future. There is no
 *    userspace API to force this drain to occur.
 *
 * These limitations are worked around in this test by using a large enough
 * region of memory for each vCPU such that the number of translations cached
 * in the TLB and the number of pages held in pagevecs are a small fraction of
 * the overall workload. If either of those conditions is not true (for
 * example when nesting, where the effective TLB size is unbounded), this test
 * prints a warning rather than silently passing.
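 *
 * Example invocation (illustrative values; see help() below for the flags):
 *
 *   ./access_tracking_perf_test -v 4 -b 1G
 *
 * This runs 4 vCPUs, each touching 1GiB of its own memory region, and reports
 * the access times both before and after the memory is marked idle.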
 */
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"
#include "processor.h"
#include "ucall_common.h"

#include "cgroup_util.h"
#include "lru_gen_util.h"

static const char *TEST_MEMCG_NAME = "access_tracking_perf_test";

/* Global variable used to synchronize all of the vCPU threads. */
static int iteration;

/* The cgroup memory controller root. Needed for lru_gen-based aging. */
char cgroup_root[PATH_MAX];

/* Defines what vCPU threads should do during a given iteration. */
static enum {
	/* Run the vCPU to access all its memory. */
	ITERATION_ACCESS_MEMORY,
	/* Mark the vCPU's memory idle in page_idle. */
	ITERATION_MARK_IDLE,
} iteration_work;

/* The iteration that was last completed by each vCPU. */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

/* Whether to overlap the regions of memory vCPUs access. */
static bool overlap_memory_access;

/*
 * Whether the test should only warn if there are too many idle pages (i.e.,
 * the condition is expected).
 * -1: Not yet set.
 *  0: We do not expect too many idle pages, so FAIL if too many idle pages.
 *  1: Having too many idle pages is expected, so merely print a warning if
 *     too many idle pages are found.
 */
static int idle_pages_warn_only = -1;

/* Whether or not to use MGLRU instead of page_idle for access tracking */
static bool use_lru_gen;

/* Total number of pages to expect in the memcg after touching everything */
static long test_pages;

/* Last generation we found the pages in */
static int lru_gen_last_gen = -1;

struct test_params {
	/* The backing source for the region of memory. */
	enum vm_mem_backing_src_type backing_src;

	/* The amount of memory to allocate for each vCPU. */
	uint64_t vcpu_memory_bytes;

	/* The number of vCPUs to create in the VM. */
	int nr_vcpus;
};

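/* Read one 64-bit word at @index from the file backing @fd (e.g. pagemap). */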
static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
{
	uint64_t value;
	off_t offset = index * sizeof(value);

	TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
		    "pread from %s offset 0x%" PRIx64 " failed!",
		    filename, offset);

	return value;
}

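/*
 * Each /proc/<pid>/pagemap entry is a 64-bit word: bit 63 is set if the page
 * is present and bits 0-54 hold the PFN (see
 * Documentation/admin-guide/mm/pagemap.rst).
 */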
#define PAGEMAP_PRESENT (1ULL << 63)
#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)

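/* Translate a guest virtual address to a host PFN via /proc/self/pagemap. */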
static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
{
	uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
	uint64_t entry;
	uint64_t pfn;

	entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
	if (!(entry & PAGEMAP_PRESENT))
		return 0;

	pfn = entry & PAGEMAP_PFN_MASK;
	__TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");

	return pfn;
}

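/*
 * The page_idle bitmap exposes one bit per PFN, packed into 64-bit words.
 * The kernel clears a page's idle bit when the page is accessed.
 */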
static bool is_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);

	return !!((bits >> (pfn % 64)) & 1);
}

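/* Set the idle bit for @pfn; writes to the bitmap only set bits, never clear. */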
static void mark_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = 1ULL << (pfn % 64);

	TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
		    "Set page_idle bits for PFN 0x%" PRIx64, pfn);
}

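/*
 * Report that too many pages stayed idle: fatal by default, a warning when
 * idle_pages_warn_only is set.
 */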
static void too_many_idle_pages(long idle_pages, long total_pages, int vcpu_idx)
{
	char prefix[18] = {};

	if (vcpu_idx >= 0)
		snprintf(prefix, sizeof(prefix), "vCPU%d: ", vcpu_idx);

	TEST_ASSERT(idle_pages_warn_only,
		    "%sToo many pages still idle (%lu out of %lu)",
		    prefix, idle_pages, total_pages);

	printf("WARNING: %sToo many pages still idle (%lu out of %lu), "
	       "this will affect performance results.\n",
	       prefix, idle_pages, total_pages);
}

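/*
 * Mark this vCPU's memory idle via page_idle, counting along the way how many
 * pages are still idle (i.e. were never observed as re-accessed) from the
 * previous marking pass.
 */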
static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm,
					   struct memstress_vcpu_args *vcpu_args)
{
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t base_gva = vcpu_args->gva;
	uint64_t pages = vcpu_args->pages;
	uint64_t page;
	uint64_t still_idle = 0;
	uint64_t no_pfn = 0;
	int page_idle_fd;
	int pagemap_fd;

	/* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
	if (overlap_memory_access && vcpu_idx)
		return;

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	TEST_ASSERT(page_idle_fd >= 0, "Failed to open page_idle.");

	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	TEST_ASSERT(pagemap_fd >= 0, "Failed to open pagemap.");

	for (page = 0; page < pages; page++) {
		uint64_t gva = base_gva + page * memstress_args.guest_page_size;
		uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);

		if (!pfn) {
			no_pfn++;
			continue;
		}

		if (is_page_idle(page_idle_fd, pfn)) {
			still_idle++;
			continue;
		}

		mark_page_idle(page_idle_fd, pfn);
	}

	/*
	 * Assumption: Less than 1% of pages are going to be swapped out from
	 * under us during this test.
	 */
	TEST_ASSERT(no_pfn < pages / 100,
		    "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
		    vcpu_idx, no_pfn, pages);

	/*
	 * Check that the guest's accesses cleared the idle bit on at least 90%
	 * of its memory (the rest might still be idle because the pages had
	 * not yet made it to an LRU list when they were last marked, or the
	 * translations were still cached in the TLB). 90% is arbitrary; high
	 * enough to ensure most memory accesses went through access tracking,
	 * but low enough as to not make the test too brittle over time and
	 * across architectures.
	 */
	if (still_idle >= pages / 10)
		too_many_idle_pages(still_idle, pages,
				    overlap_memory_access ? -1 : vcpu_idx);

	close(page_idle_fd);
	close(pagemap_fd);
}

static int find_generation(struct memcg_stats *stats, long total_pages)
{
	/*
	 * For finding the generation that contains our pages, use the same
	 * 90% threshold that page_idle uses.
	 */
	int gen = lru_gen_find_generation(stats, total_pages * 9 / 10);

	if (gen >= 0)
		return gen;

	if (!idle_pages_warn_only) {
		TEST_FAIL("Could not find a generation with 90%% of guest memory (%ld pages).",
			  total_pages * 9 / 10);
		return gen;
	}

	/*
	 * We couldn't find a generation with 90% of guest memory, which can
	 * happen if access tracking is unreliable. Simply look for a majority
	 * of pages.
	 */
	puts("WARNING: Couldn't find a generation with 90% of guest memory. "
	     "Performance results may not be accurate.");
	gen = lru_gen_find_generation(stats, total_pages / 2);
	TEST_ASSERT(gen >= 0,
		    "Could not find a generation with 50%% of guest memory (%ld pages).",
		    total_pages / 2);
	return gen;
}

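/*
 * Age the memcg with lru_gen and check that the test pages moved to a newer
 * generation, warning or failing if they did not.
 */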
static void lru_gen_mark_memory_idle(struct kvm_vm *vm)
{
	struct timespec ts_start;
	struct timespec ts_elapsed;
	struct memcg_stats stats;
	int new_gen;

	/* Make a new generation */
	clock_gettime(CLOCK_MONOTONIC, &ts_start);
	lru_gen_do_aging(&stats, TEST_MEMCG_NAME);
	ts_elapsed = timespec_elapsed(ts_start);

	/* Check which generation the pages ended up in */
	new_gen = find_generation(&stats, test_pages);

	/*
	 * This function should only be invoked with newly-accessed pages,
	 * so pages should always move to a newer generation.
	 */
	if (new_gen <= lru_gen_last_gen) {
		/* We did not move to a newer generation. */
		long idle_pages = lru_gen_sum_memcg_stats_for_gen(lru_gen_last_gen,
								  &stats);

		too_many_idle_pages(min_t(long, idle_pages, test_pages),
				    test_pages, -1);
	}
	pr_info("%-30s: %ld.%09lds\n",
		"Mark memory idle (lru_gen)", ts_elapsed.tv_sec,
		ts_elapsed.tv_nsec);
	lru_gen_last_gen = new_gen;
}

static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
{
	struct ucall uc;
	uint64_t actual_ucall = get_ucall(vcpu, &uc);

	TEST_ASSERT(expected_ucall == actual_ucall,
		    "Guest exited unexpectedly (expected ucall %" PRIu64
		    ", got %" PRIu64 ")",
		    expected_ucall, actual_ucall);
}

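/*
 * Spin until the main thread advances the global iteration counter (return
 * true) or asks the vCPU threads to stop (return false).
 */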
static bool spin_wait_for_next_iteration(int *current_iteration)
{
	int last_iteration = *current_iteration;

	do {
		if (READ_ONCE(memstress_args.stop_vcpus))
			return false;

		*current_iteration = READ_ONCE(iteration);
	} while (last_iteration == *current_iteration);

	return true;
}

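/*
 * Worker loop for each vCPU thread: on every iteration, either run the vCPU
 * until the guest's UCALL_SYNC or mark the vCPU's memory idle, then publish
 * the completed iteration number for the main thread to observe.
 */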
static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	struct kvm_vm *vm = memstress_args.vm;
	int vcpu_idx = vcpu_args->vcpu_idx;
	int current_iteration = 0;

	while (spin_wait_for_next_iteration(&current_iteration)) {
		switch (READ_ONCE(iteration_work)) {
		case ITERATION_ACCESS_MEMORY:
			vcpu_run(vcpu);
			assert_ucall(vcpu, UCALL_SYNC);
			break;
		case ITERATION_MARK_IDLE:
			pageidle_mark_vcpu_memory_idle(vm, vcpu_args);
			break;
		}

		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
	}
}

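/* Busy-wait until the given vCPU has finished @target_iteration. */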
static void spin_wait_for_vcpu(int vcpu_idx, int target_iteration)
{
	while (READ_ONCE(vcpu_last_completed_iteration[vcpu_idx]) !=
	       target_iteration) {
		continue;
	}
}

/* The type of memory accesses to perform in the VM. */
enum access_type {
	ACCESS_READ,
	ACCESS_WRITE,
};

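/* Run one timed iteration: bump the counter, wait for every vCPU to finish. */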
static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *description)
{
	struct timespec ts_start;
	struct timespec ts_elapsed;
	int next_iteration, i;

	/* Kick off the vCPUs by incrementing iteration. */
	next_iteration = ++iteration;

	clock_gettime(CLOCK_MONOTONIC, &ts_start);

	/* Wait for all vCPUs to finish the iteration. */
	for (i = 0; i < nr_vcpus; i++)
		spin_wait_for_vcpu(i, next_iteration);

	ts_elapsed = timespec_elapsed(ts_start);
	pr_info("%-30s: %ld.%09lds\n",
		description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
}

static void access_memory(struct kvm_vm *vm, int nr_vcpus,
			  enum access_type access, const char *description)
{
	memstress_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
	iteration_work = ITERATION_ACCESS_MEMORY;
	run_iteration(vm, nr_vcpus, description);
}

static void mark_memory_idle(struct kvm_vm *vm, int nr_vcpus)
{
	if (use_lru_gen)
		return lru_gen_mark_memory_idle(vm);

	/*
	 * Even though this parallelizes the work across vCPUs, this is still a
	 * very slow operation because page_idle forces the test to mark one pfn
	 * at a time and the clear_young notifier may serialize on the KVM MMU
	 * lock.
	 */
	pr_debug("Marking VM memory idle (slow)...\n");
	iteration_work = ITERATION_MARK_IDLE;
	run_iteration(vm, nr_vcpus, "Mark memory idle (page_idle)");
}

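/*
 * The test body: create the VM, populate guest memory, then time accesses to
 * populated memory as a control before timing accesses to idle-marked memory.
 */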
static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *params = arg;
	struct kvm_vm *vm;
	int nr_vcpus = params->nr_vcpus;

	vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
				 params->backing_src, !overlap_memory_access);

	/*
	 * If guest_page_size is larger than the host's page size, the
	 * guest (memstress) will only fault in a subset of the host's pages.
	 */
	test_pages = params->nr_vcpus * params->vcpu_memory_bytes /
		     max(memstress_args.guest_page_size,
			 (uint64_t)getpagesize());

	memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);

	pr_info("\n");
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");

	if (use_lru_gen) {
		struct memcg_stats stats;

		/*
		 * Do a page table scan now. Following initial population, aging
		 * may not cause the pages to move to a newer generation. Do
		 * an aging pass now so that future aging passes always move
		 * pages to a newer generation.
		 */
		printf("Initial aging pass (lru_gen)\n");
		lru_gen_do_aging(&stats, TEST_MEMCG_NAME);
		TEST_ASSERT(lru_gen_sum_memcg_stats(&stats) >= test_pages,
			    "Not all pages accounted for (looking for %ld). "
			    "Was the memcg set up correctly?", test_pages);
		access_memory(vm, nr_vcpus, ACCESS_WRITE, "Re-populating memory");
		lru_gen_read_memcg_stats(&stats, TEST_MEMCG_NAME);
		lru_gen_last_gen = find_generation(&stats, test_pages);
	}

	/* As a control, read and write to the populated memory first. */
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to populated memory");
	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from populated memory");

	/* Repeat on memory that has been marked as idle. */
	mark_memory_idle(vm, nr_vcpus);
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to idle memory");
	mark_memory_idle(vm, nr_vcpus);
	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");

	memstress_join_vcpu_threads(nr_vcpus);
	memstress_destroy_vm(vm);
}

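/*
 * Decide whether too many idle pages should only produce a warning: idle page
 * counts are unreliable when running nested or with NUMA balancing enabled.
 */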
static int access_tracking_unreliable(void)
{
#ifdef __x86_64__
	/*
	 * When running nested, the TLB size may be effectively unlimited (for
	 * example, this is the case when running on KVM L0), and KVM doesn't
	 * explicitly flush the TLB when aging SPTEs. As a result, more pages
	 * are cached and the guest won't see the "idle" bit cleared.
	 */
	if (this_cpu_has(X86_FEATURE_HYPERVISOR)) {
		puts("Skipping idle page count sanity check, because the test is run nested");
		return 1;
	}
#endif
	/*
	 * When NUMA balancing is enabled, guest memory will be unmapped to get
	 * NUMA faults, dropping the Accessed bits.
	 */
	if (is_numa_balancing_enabled()) {
		puts("Skipping idle page count sanity check, because NUMA balancing is enabled");
		return 1;
	}
	return 0;
}

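/* Thin cg_run() callback; the cgroup argument is unused by the test itself. */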
static int run_test_for_each_guest_mode(const char *cgroup, void *arg)
{
	for_each_guest_mode(run_test, arg);
	return 0;
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type] [-w warn_mode]\n",
	       name);
	puts("");
	printf(" -h: Display this help message.\n");
	guest_modes_help();
	printf(" -b: Specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: Specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	printf(" -w: Control whether the test warns or fails if more than 10%%\n"
	       "     of pages are still seen as idle/old after accessing guest\n"
	       "     memory. >0 == warn only, 0 == fail, <0 == auto. For auto\n"
	       "     mode, the test fails by default, but switches to warn only\n"
	       "     if NUMA balancing is enabled or the test detects it's running\n"
	       "     in a VM.\n");
	backing_src_help("-s");
	puts("");
	exit(0);
}

void destroy_cgroup(char *cg)
{
	printf("Destroying cgroup: %s\n", cg);
}

int main(int argc, char *argv[])
{
	struct test_params params = {
		.backing_src = DEFAULT_VM_MEM_SRC,
		.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
		.nr_vcpus = 1,
	};
	char *new_cg = NULL;
	int page_idle_fd;
	int opt;

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "hm:b:v:os:w:")) != -1) {
		switch (opt) {
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'b':
			params.vcpu_memory_bytes = parse_size(optarg);
			break;
		case 'v':
			params.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			break;
		case 'o':
			overlap_memory_access = true;
			break;
		case 's':
			params.backing_src = parse_backing_src_type(optarg);
			break;
		case 'w':
			idle_pages_warn_only =
				atoi_non_negative("Idle pages warning",
						  optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	if (idle_pages_warn_only == -1)
		idle_pages_warn_only = access_tracking_unreliable();

	if (lru_gen_usable()) {
		bool cg_created = true;
		int ret;

		puts("Using lru_gen for aging");
		use_lru_gen = true;

		if (cg_find_controller_root(cgroup_root, sizeof(cgroup_root), "memory"))
			ksft_exit_skip("Cannot find memory cgroup controller\n");

		new_cg = cg_name(cgroup_root, TEST_MEMCG_NAME);
		printf("Creating cgroup: %s\n", new_cg);
		if (cg_create(new_cg)) {
			if (errno == EEXIST) {
				printf("Found existing cgroup\n");
				cg_created = false;
			} else {
				ksft_exit_skip("Could not create new cgroup: %s\n", new_cg);
			}
		}

		/*
		 * This will fork off a new process to run the test within
		 * a new memcg, so we need to properly propagate the return
		 * value up.
		 */
		ret = cg_run(new_cg, &run_test_for_each_guest_mode, &params);
		if (cg_created)
			cg_destroy(new_cg);
		if (ret < 0)
			TEST_FAIL("child did not spawn or was abnormally killed");
		if (ret)
			return ret;
	} else {
		page_idle_fd = __open_path_or_exit("/sys/kernel/mm/page_idle/bitmap", O_RDWR,
						   "Is CONFIG_IDLE_PAGE_TRACKING enabled?");
		close(page_idle_fd);

		puts("Using page_idle for aging");
		run_test_for_each_guest_mode(NULL, &params);
	}

	return 0;
}