1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * HMM stands for Heterogeneous Memory Management, it is a helper layer inside
4 * the linux kernel to help device drivers mirror a process address space in
5 * the device. This allows the device to use the same address space which
6 * makes communication and data exchange a lot easier.
7 *
8 * This framework's sole purpose is to exercise various code paths inside
9 * the kernel to make sure that HMM performs as expected and to flush out any
10 * bugs.
11 */
12
13 #include "../kselftest_harness.h"
14
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>
#include <time.h>
#include <pthread.h>
#include <hugetlbfs.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
29
30 /*
31 * This is a private UAPI to the kernel test module so it isn't exported
32 * in the usual include/uapi/... directory.
33 */
34 #include "../../../../lib/test_hmm_uapi.h"
35
/*
 * One test allocation: a CPU mapping under test plus a scratch area the
 * simulated device reads from / writes to through the dmirror ioctls.
 */
struct hmm_buffer {
	void *ptr;		/* CPU-visible mapping being mirrored */
	void *mirror;		/* device-side staging area (malloc'ed) */
	unsigned long size;	/* size of both areas, in bytes */
	int fd;			/* backing file, or -1 for anonymous maps */
	uint64_t cpages;	/* pages processed, reported by the driver */
	uint64_t faults;	/* faults taken, reported by the driver */
};
44
/* Size of a transparent huge page mapping (2 MiB). */
#define TWOMEG		(1 << 21)
/* Default test buffer size: 1024 x 4KiB pages (4 MiB). */
#define HMM_BUFFER_SIZE (1024 << 12)
/* Maximum length of the device path built by hmm_open(). */
#define HMM_PATH_MAX    64
/* Iteration count for the *_multiple and teardown stress tests. */
#define NTIMES		10

/*
 * Round x up to the next multiple of the power-of-two a.
 * Both uses of 'a' are fully parenthesized; the original expanded the
 * first one as (a - 1), which mis-parses when a low-precedence
 * expression (e.g. a conditional) is passed as the alignment.
 */
#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
51
/* Per-test state for single-device tests. */
FIXTURE(hmm)
{
	int fd;				/* open fd of /dev/hmm_dmirror0 */
	unsigned int page_size;		/* system page size in bytes */
	unsigned int page_shift;	/* log2(page_size) */
};
58
/* Per-test state for two-device tests (device private migration). */
FIXTURE(hmm2)
{
	int fd0;			/* open fd of /dev/hmm_dmirror0 */
	int fd1;			/* open fd of /dev/hmm_dmirror1 */
	unsigned int page_size;		/* system page size in bytes */
	unsigned int page_shift;	/* log2(page_size) */
};
66
hmm_open(int unit)67 static int hmm_open(int unit)
68 {
69 char pathname[HMM_PATH_MAX];
70 int fd;
71
72 snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
73 fd = open(pathname, O_RDWR, 0);
74 if (fd < 0)
75 fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
76 pathname);
77 return fd;
78 }
79
/* Cache the page geometry and open the single dmirror device. */
FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* ffs() returns the 1-based index of the lowest set bit. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(0);
	ASSERT_GE(self->fd, 0);
}
88
/* Cache the page geometry and open both dmirror devices. */
FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* ffs() returns the 1-based index of the lowest set bit. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(0);
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(1);
	ASSERT_GE(self->fd1, 0);
}
99
/* Close the device fd and poison it so reuse is caught. */
FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}
107
/* Close both device fds and poison them so reuse is caught. */
FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}
119
hmm_dmirror_cmd(int fd,unsigned long request,struct hmm_buffer * buffer,unsigned long npages)120 static int hmm_dmirror_cmd(int fd,
121 unsigned long request,
122 struct hmm_buffer *buffer,
123 unsigned long npages)
124 {
125 struct hmm_dmirror_cmd cmd;
126 int ret;
127
128 /* Simulate a device reading system memory. */
129 cmd.addr = (__u64)buffer->ptr;
130 cmd.ptr = (__u64)buffer->mirror;
131 cmd.npages = npages;
132
133 for (;;) {
134 ret = ioctl(fd, request, &cmd);
135 if (ret == 0)
136 break;
137 if (errno == EINTR)
138 continue;
139 return -errno;
140 }
141 buffer->cpages = cmd.cpages;
142 buffer->faults = cmd.faults;
143
144 return 0;
145 }
146
hmm_buffer_free(struct hmm_buffer * buffer)147 static void hmm_buffer_free(struct hmm_buffer *buffer)
148 {
149 if (buffer == NULL)
150 return;
151
152 if (buffer->ptr)
153 munmap(buffer->ptr, buffer->size);
154 free(buffer->mirror);
155 free(buffer);
156 }
157
158 /*
159 * Create a temporary file that will be deleted on close.
160 */
hmm_create_file(unsigned long size)161 static int hmm_create_file(unsigned long size)
162 {
163 char path[HMM_PATH_MAX];
164 int fd;
165
166 strcpy(path, "/tmp");
167 fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
168 if (fd >= 0) {
169 int r;
170
171 do {
172 r = ftruncate(fd, size);
173 } while (r == -1 && errno == EINTR);
174 if (!r)
175 return fd;
176 close(fd);
177 }
178 return -1;
179 }
180
181 /*
182 * Return a random unsigned number.
183 */
/*
 * Return a random unsigned number read from /dev/urandom.
 * Returns ~0U if /dev/urandom cannot be opened or fully read; callers
 * only use the result as a jitter source, so that fallback is harmless.
 * The fd is opened once and intentionally kept open for reuse.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
				__FILE__, __LINE__);
			return ~0U;
		}
	}
	/*
	 * The original ignored read()'s result; a short or failed read
	 * would have returned a (partly) uninitialized value.
	 */
	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return ~0U;
	return r;
}
200
/*
 * Sleep for n nanoseconds.
 * tv_nsec must be < 1E9 or nanosleep() fails with EINVAL; the original
 * stored n directly, which silently skipped the sleep for n >= 1s.
 * Splitting the count makes any 32-bit value work.
 */
static void hmm_nanosleep(unsigned int n)
{
	struct timespec t;

	t.tv_sec = n / 1000000000U;
	t.tv_nsec = n % 1000000000U;
	nanosleep(&t, NULL);
}
209
210 /*
211 * Simple NULL test of device open/close.
212 */
TEST_F(hmm, open_close)
{
	/* Empty on purpose: fixture setup/teardown exercise open/close. */
}
216
217 /*
218 * Read private anonymous memory.
219 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * zero (pte_none and pfn_zero).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table with a special zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	/* The driver is expected to report one fault for the whole pass. */
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read: zeroes first, then the pattern. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
279
280 /*
281 * Read private anonymous memory which has been protected with
282 * mprotect() PROT_NONE.
283 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* A device read of a PROT_NONE range must fail with EFAULT. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow CPU to read the buffer so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* The mirror must still hold its sentinel values (nothing read). */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}
339
340 /*
341 * Write private anonymous memory.
342 */
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
386
387 /*
388 * Write private anonymous memory which has been protected with
389 * mprotect() PROT_READ.
390 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Map read-only so the first device write is rejected. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading a zero page of memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Writing a read-only mapping must fail with EPERM. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check that the device wrote nothing (still all zero). */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
452
453 /*
454 * Check that a device writing an anonymous private mapping
455 * will copy-on-write if a child process inherits the mapping.
456 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* MAP_PRIVATE: child writes must trigger copy-on-write. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		/* Force a failure: fork() returned an error. */
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		/* Parent: wait for the child, then verify isolation. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Child: check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote (only the child's copy changed). */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
530
531 /*
532 * Check that a device writing an anonymous shared mapping
533 * will not copy-on-write if a child process inherits the mapping.
534 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* MAP_SHARED: child writes must be visible to the parent. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		/* Force a failure: fork() returned an error. */
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		/* Parent: wait for the child, then verify sharing. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Child: check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
608
609 /*
610 * Write private anonymous huge page.
611 */
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	/* Over-allocate so a 2MB-aligned subrange is guaranteed inside. */
	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Use the aligned 2MB window and ask for a transparent huge page. */
	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	/* Remember the mmap base so the whole region can be freed later. */
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/*
	 * NOTE(review): buffer->size is still 2 * TWOMEG here, so restoring
	 * old_ptr before freeing unmaps the full original mapping.
	 */
	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}
664
665 /*
666 * Write huge TLBFS page.
667 */
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	long pagesizes[4];
	int n, idx;

	/* Skip test if we can't allocate a hugetlbfs page. */

	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		SKIP(return, "Huge page size could not be determined");
	/* Pick the index of the smallest reported huge page size. */
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* libhugetlbfs regions need their own free; clear ptr so that
	 * hmm_buffer_free() doesn't munmap() it a second time.
	 */
	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}
723
724 /*
725 * Read mmap'ed file memory.
726 */
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write initial contents of the file. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	/* Clear the mirror so any data seen later came from the device. */
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
778
779 /*
780 * Write mmap'ed file memory.
781 */
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote to the mapping. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device also wrote the file (MAP_SHARED). */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
836
837 /*
838 * Migrate anonymous memory to device private memory.
839 */
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read during migration. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
882
883 /*
884 * Migrate anonymous memory to device private memory and fault some of it back
885 * to system memory, then try migrating the resulting mix of system and device
886 * private memory to the device.
887 */
TEST_F(hmm, migrate_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* CPU touches fault half the pages back to system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate the mix of system and device pages to the device again. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
943
944 /*
945 * Migrate anonymous shared memory to device private memory.
946 */
TEST_F(hmm, migrate_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Migrating a shared mapping is expected to fail with ENOENT. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, -ENOENT);

	hmm_buffer_free(buffer);
}
978
979 /*
980 * Try to migrate various memory types to device private memory.
981 */
TEST_F(hmm2,migrate_mixed)982 TEST_F(hmm2, migrate_mixed)
983 {
984 struct hmm_buffer *buffer;
985 unsigned long npages;
986 unsigned long size;
987 int *ptr;
988 unsigned char *p;
989 int ret;
990 int val;
991
992 npages = 6;
993 size = npages << self->page_shift;
994
995 buffer = malloc(sizeof(*buffer));
996 ASSERT_NE(buffer, NULL);
997
998 buffer->fd = -1;
999 buffer->size = size;
1000 buffer->mirror = malloc(size);
1001 ASSERT_NE(buffer->mirror, NULL);
1002
1003 /* Reserve a range of addresses. */
1004 buffer->ptr = mmap(NULL, size,
1005 PROT_NONE,
1006 MAP_PRIVATE | MAP_ANONYMOUS,
1007 buffer->fd, 0);
1008 ASSERT_NE(buffer->ptr, MAP_FAILED);
1009 p = buffer->ptr;
1010
1011 /* Migrating a protected area should be an error. */
1012 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
1013 ASSERT_EQ(ret, -EINVAL);
1014
1015 /* Punch a hole after the first page address. */
1016 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1017 ASSERT_EQ(ret, 0);
1018
1019 /* We expect an error if the vma doesn't cover the range. */
1020 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
1021 ASSERT_EQ(ret, -EINVAL);
1022
1023 /* Page 2 will be a read-only zero page. */
1024 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1025 PROT_READ);
1026 ASSERT_EQ(ret, 0);
1027 ptr = (int *)(buffer->ptr + 2 * self->page_size);
1028 val = *ptr + 3;
1029 ASSERT_EQ(val, 3);
1030
1031 /* Page 3 will be read-only. */
1032 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1033 PROT_READ | PROT_WRITE);
1034 ASSERT_EQ(ret, 0);
1035 ptr = (int *)(buffer->ptr + 3 * self->page_size);
1036 *ptr = val;
1037 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1038 PROT_READ);
1039 ASSERT_EQ(ret, 0);
1040
1041 /* Page 4-5 will be read-write. */
1042 ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
1043 PROT_READ | PROT_WRITE);
1044 ASSERT_EQ(ret, 0);
1045 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1046 *ptr = val;
1047 ptr = (int *)(buffer->ptr + 5 * self->page_size);
1048 *ptr = val;
1049
1050 /* Now try to migrate pages 2-5 to device 1. */
1051 buffer->ptr = p + 2 * self->page_size;
1052 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
1053 ASSERT_EQ(ret, 0);
1054 ASSERT_EQ(buffer->cpages, 4);
1055
1056 /* Page 5 won't be migrated to device 0 because it's on device 1. */
1057 buffer->ptr = p + 5 * self->page_size;
1058 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
1059 ASSERT_EQ(ret, -ENOENT);
1060 buffer->ptr = p;
1061
1062 buffer->ptr = p;
1063 hmm_buffer_free(buffer);
1064 }
1065
1066 /*
1067 * Migrate anonymous memory to device private memory and fault it back to system
1068 * memory multiple times.
1069 */
TEST_F(hmm, migrate_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	/* Repeat the whole migrate/fault-back cycle NTIMES. */
	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i;

		/* Migrate memory to device. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		/* Fault pages back to system memory and check them. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		hmm_buffer_free(buffer);
	}
}
1120
1121 /*
1122 * Read anonymous memory multiple times.
1123 */
TEST_F(hmm, anon_read_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	/* Repeat the whole device-read cycle NTIMES. */
	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Vary the pattern per iteration with the loop counter. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Simulate a device reading system memory. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);
		ASSERT_EQ(buffer->faults, 1);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i + c);

		hmm_buffer_free(buffer);
	}
}
1171
unmap_buffer(void * p)1172 void *unmap_buffer(void *p)
1173 {
1174 struct hmm_buffer *buffer = p;
1175
1176 /* Delay for a bit and then unmap buffer while it is being read. */
1177 hmm_nanosleep(hmm_random() % 32000);
1178 munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1179 buffer->ptr = NULL;
1180
1181 return NULL;
1182 }
1183
1184 /*
1185 * Try reading anonymous memory while it is being unmapped.
1186 */
TEST_F(hmm,anon_teardown)1187 TEST_F(hmm, anon_teardown)
1188 {
1189 unsigned long npages;
1190 unsigned long size;
1191 unsigned long c;
1192 void *ret;
1193
1194 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1195 ASSERT_NE(npages, 0);
1196 size = npages << self->page_shift;
1197
1198 for (c = 0; c < NTIMES; ++c) {
1199 pthread_t thread;
1200 struct hmm_buffer *buffer;
1201 unsigned long i;
1202 int *ptr;
1203 int rc;
1204
1205 buffer = malloc(sizeof(*buffer));
1206 ASSERT_NE(buffer, NULL);
1207
1208 buffer->fd = -1;
1209 buffer->size = size;
1210 buffer->mirror = malloc(size);
1211 ASSERT_NE(buffer->mirror, NULL);
1212
1213 buffer->ptr = mmap(NULL, size,
1214 PROT_READ | PROT_WRITE,
1215 MAP_PRIVATE | MAP_ANONYMOUS,
1216 buffer->fd, 0);
1217 ASSERT_NE(buffer->ptr, MAP_FAILED);
1218
1219 /* Initialize buffer in system memory. */
1220 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1221 ptr[i] = i + c;
1222
1223 rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
1224 ASSERT_EQ(rc, 0);
1225
1226 /* Simulate a device reading system memory. */
1227 rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1228 npages);
1229 if (rc == 0) {
1230 ASSERT_EQ(buffer->cpages, npages);
1231 ASSERT_EQ(buffer->faults, 1);
1232
1233 /* Check what the device read. */
1234 for (i = 0, ptr = buffer->mirror;
1235 i < size / sizeof(*ptr);
1236 ++i)
1237 ASSERT_EQ(ptr[i], i + c);
1238 }
1239
1240 pthread_join(thread, &ret);
1241 hmm_buffer_free(buffer);
1242 }
1243 }
1244
/*
 * Test memory snapshot without faulting in pages accessed by the device.
 *
 * Seven consecutive pages are each put into a different CPU mapping
 * state (unmapped, hole, zero page, read-only, read-write, migrated to
 * device 0, migrated to device 1), then device 0 snapshots the range and
 * the per-page protection bytes it reports are checked.
 */
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* The snapshot yields one protection byte per page. */
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2 will be read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	/* A read fault (never written) leaves the page backed by the
	 * shared zero page, so the value read is 0. */
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	/* Write first so the page is populated, then drop write access. */
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Page 4-6 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5 will be migrated to device 0. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6 will be migrated to device 1. */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate a device snapshotting CPU pagetables. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	/* Pages 0 (PROT_NONE) and 1 (unmapped hole) report an error. */
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	/* Page 5 is in the snapshotting device's own private memory. */
	ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
			HMM_DMIRROR_PROT_WRITE);
	/* Page 6 was migrated to the other device, so device 0 sees none. */
	ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);

	hmm_buffer_free(buffer);
}
1338
1339 /*
1340 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
1341 * should be mapped by a large page table entry.
1342 */
TEST_F(hmm,compound)1343 TEST_F(hmm, compound)
1344 {
1345 struct hmm_buffer *buffer;
1346 unsigned long npages;
1347 unsigned long size;
1348 int *ptr;
1349 unsigned char *m;
1350 int ret;
1351 long pagesizes[4];
1352 int n, idx;
1353 unsigned long i;
1354
1355 /* Skip test if we can't allocate a hugetlbfs page. */
1356
1357 n = gethugepagesizes(pagesizes, 4);
1358 if (n <= 0)
1359 return;
1360 for (idx = 0; --n > 0; ) {
1361 if (pagesizes[n] < pagesizes[idx])
1362 idx = n;
1363 }
1364 size = ALIGN(TWOMEG, pagesizes[idx]);
1365 npages = size >> self->page_shift;
1366
1367 buffer = malloc(sizeof(*buffer));
1368 ASSERT_NE(buffer, NULL);
1369
1370 buffer->ptr = get_hugepage_region(size, GHR_STRICT);
1371 if (buffer->ptr == NULL) {
1372 free(buffer);
1373 return;
1374 }
1375
1376 buffer->size = size;
1377 buffer->mirror = malloc(npages);
1378 ASSERT_NE(buffer->mirror, NULL);
1379
1380 /* Initialize the pages the device will snapshot in buffer->ptr. */
1381 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1382 ptr[i] = i;
1383
1384 /* Simulate a device snapshotting CPU pagetables. */
1385 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1386 ASSERT_EQ(ret, 0);
1387 ASSERT_EQ(buffer->cpages, npages);
1388
1389 /* Check what the device saw. */
1390 m = buffer->mirror;
1391 for (i = 0; i < npages; ++i)
1392 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
1393 HMM_DMIRROR_PROT_PMD);
1394
1395 /* Make the region read-only. */
1396 ret = mprotect(buffer->ptr, size, PROT_READ);
1397 ASSERT_EQ(ret, 0);
1398
1399 /* Simulate a device snapshotting CPU pagetables. */
1400 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1401 ASSERT_EQ(ret, 0);
1402 ASSERT_EQ(buffer->cpages, npages);
1403
1404 /* Check what the device saw. */
1405 m = buffer->mirror;
1406 for (i = 0; i < npages; ++i)
1407 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
1408 HMM_DMIRROR_PROT_PMD);
1409
1410 free_hugepage_region(buffer->ptr);
1411 buffer->ptr = NULL;
1412 hmm_buffer_free(buffer);
1413 }
1414
1415 /*
1416 * Test two devices reading the same memory (double mapped).
1417 */
TEST_F(hmm2,double_map)1418 TEST_F(hmm2, double_map)
1419 {
1420 struct hmm_buffer *buffer;
1421 unsigned long npages;
1422 unsigned long size;
1423 unsigned long i;
1424 int *ptr;
1425 int ret;
1426
1427 npages = 6;
1428 size = npages << self->page_shift;
1429
1430 buffer = malloc(sizeof(*buffer));
1431 ASSERT_NE(buffer, NULL);
1432
1433 buffer->fd = -1;
1434 buffer->size = size;
1435 buffer->mirror = malloc(npages);
1436 ASSERT_NE(buffer->mirror, NULL);
1437
1438 /* Reserve a range of addresses. */
1439 buffer->ptr = mmap(NULL, size,
1440 PROT_READ | PROT_WRITE,
1441 MAP_PRIVATE | MAP_ANONYMOUS,
1442 buffer->fd, 0);
1443 ASSERT_NE(buffer->ptr, MAP_FAILED);
1444
1445 /* Initialize buffer in system memory. */
1446 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1447 ptr[i] = i;
1448
1449 /* Make region read-only. */
1450 ret = mprotect(buffer->ptr, size, PROT_READ);
1451 ASSERT_EQ(ret, 0);
1452
1453 /* Simulate device 0 reading system memory. */
1454 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1455 ASSERT_EQ(ret, 0);
1456 ASSERT_EQ(buffer->cpages, npages);
1457 ASSERT_EQ(buffer->faults, 1);
1458
1459 /* Check what the device read. */
1460 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1461 ASSERT_EQ(ptr[i], i);
1462
1463 /* Simulate device 1 reading system memory. */
1464 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1465 ASSERT_EQ(ret, 0);
1466 ASSERT_EQ(buffer->cpages, npages);
1467 ASSERT_EQ(buffer->faults, 1);
1468
1469 /* Check what the device read. */
1470 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1471 ASSERT_EQ(ptr[i], i);
1472
1473 /* Punch a hole after the first page address. */
1474 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1475 ASSERT_EQ(ret, 0);
1476
1477 hmm_buffer_free(buffer);
1478 }
1479
1480 TEST_HARNESS_MAIN
1481