#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Please run as root

# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4

count_total=0
count_pass=0
count_fail=0
count_skip=0
exitcode=0

usage() {
	cat <<EOF
usage: ${BASH_SOURCE[0]:-$0} [ options ]

  -a: run all tests, including extra ones (other than destructive ones)
  -t: specify specific categories of tests to run
  -h: display this message
  -n: disable TAP output
  -d: run destructive tests

The default behavior is to run required tests only.  If -a is specified,
all tests will be run.

Alternatively, specific groups of tests can be run by passing a string
to the -t argument containing one or more of the following categories
separated by spaces:
- mmap
	tests for mmap(2)
- gup_test
	tests for gup
- userfaultfd
	tests for userfaultfd(2)
- compaction
	a test for the patch "Allow compaction of unevictable pages"
- mlock
	tests for mlock(2)
- mremap
	tests for mremap(2)
- hugevm
	tests for very large virtual address space
- vmalloc
	vmalloc smoke tests
- hmm
	hmm smoke tests
- madv_guard
	test madvise(2) MADV_GUARD_INSTALL and MADV_GUARD_REMOVE options
- madv_populate
	test madvise(2) MADV_POPULATE_{READ,WRITE} options
- memfd_secret
	test memfd_secret(2)
- process_mrelease
	test process_mrelease(2)
- ksm
	ksm tests that do not require >=2 NUMA nodes
- ksm_numa
	ksm tests that require >=2 NUMA nodes
- pkey
	memory protection key tests
- soft_dirty
	test soft dirty page bit semantics
- pagemap
	test pagemap_scan IOCTL
- cow
	test copy-on-write semantics
- thp
	test transparent huge pages
- hugetlb
	test hugetlbfs huge pages
- migration
	invoke move_pages(2) to exercise the migration entry code
	paths in the kernel
- mkdirty
	test handling of code that might set PTE/PMD dirty in
	read-only VMAs
- mdwe
	test prctl(PR_SET_MDWE, ...)
- page_frag
	test handling of page fragment allocation and freeing

example: ./run_vmtests.sh -t "hmm mmap ksm"
EOF
	exit 0
}
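# Example invocations (illustrative only; all options are described above):
#   ./run_vmtests.sh                                # required tests only
#   ./run_vmtests.sh -a -d                          # all tests, including destructive ones
#   ./run_vmtests.sh -n -t "hugetlb userfaultfd"    # two categories, TAP output disabled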

RUN_ALL=false
RUN_DESTRUCTIVE=false
TAP_PREFIX="# "

while getopts "adht:n" OPT; do
	case ${OPT} in
		"a") RUN_ALL=true ;;
		"h") usage ;;
		"t") VM_SELFTEST_ITEMS=${OPTARG} ;;
		"n") TAP_PREFIX= ;;
		"d") RUN_DESTRUCTIVE=true ;;
	esac
done
shift $((OPTIND -1))

# default behavior: run tests from all categories
VM_SELFTEST_ITEMS=${VM_SELFTEST_ITEMS:-default}

test_selected() {
	if [ "$VM_SELFTEST_ITEMS" == "default" ]; then
		# If no VM_SELFTEST_ITEMS are specified, run all tests
		return 0
	fi
	# If the selected category argument is one of the test items
	if [[ " ${VM_SELFTEST_ITEMS[*]} " =~ " ${1} " ]]; then
		return 0
	else
		return 1
	fi
}
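# For example, with VM_SELFTEST_ITEMS="hmm mmap":
#   test_selected "mmap"  # -> 0 (selected)
#   test_selected "mm"    # -> 1 (no match: the list is padded with spaces,
#                         #       so only whole, space-separated words match)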

run_gup_matrix() {
    # -t: thp=on, -T: thp=off, -H: hugetlb=on
    local hugetlb_mb=$(( needmem_KB / 1024 ))

    for huge in -t -T "-H -m $hugetlb_mb"; do
        # -u: gup-fast, -U: gup-basic, -a: pin-fast, -b: pin-basic, -L: pin-longterm
        for test_cmd in -u -U -a -b -L; do
            # -w: write=1, -W: write=0
            for write in -w -W; do
                # -S: shared
                for share in -S " "; do
                    # -n: how many pages to fetch together?  512 is special
                    # because it is the default THP size in pages (2M on x86
                    # with 4K base pages); 123 exercises partial gup when
                    # hitting a huge page in whatever form.
                    for num in "-n 1" "-n 512" "-n 123"; do
                        CATEGORY="gup_test" run_test ./gup_test \
                                $huge $test_cmd $write $share $num
                    done
                done
            done
        done
    done
}
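# Note: the matrix above expands to 3 (huge page modes) x 5 (gup/pin variants)
# x 2 (write) x 2 (shared) x 3 (page counts) = 180 gup_test invocations, so it
# is only run when -a is given (see the $RUN_ALL check further down).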

# get huge pagesize and freepages from /proc/meminfo
while read -r name size unit; do
	if [ "$name" = "HugePages_Free:" ]; then
		freepgs="$size"
	fi
	if [ "$name" = "Hugepagesize:" ]; then
		hpgsize_KB="$size"
	fi
done < /proc/meminfo
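# The loop above parses /proc/meminfo lines such as (values illustrative):
#   HugePages_Free:       64
#   Hugepagesize:       2048 kB
# leaving freepgs=64 and hpgsize_KB=2048.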

# Simple hugetlbfs tests have a hardcoded minimum requirement of
# huge pages totaling 256MB (262144KB) in size.  The userfaultfd
# hugetlb test requires a minimum of 2 * nr_cpus huge pages.  Take
# both of these requirements into account and attempt to increase
# the number of huge pages available.
nr_cpus=$(nproc)
uffd_min_KB=$((hpgsize_KB * nr_cpus * 2))
hugetlb_min_KB=$((256 * 1024))
if [[ $uffd_min_KB -gt $hugetlb_min_KB ]]; then
	needmem_KB=$uffd_min_KB
else
	needmem_KB=$hugetlb_min_KB
fi
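# Worked example: with 2048 kB huge pages and 8 CPUs, uffd_min_KB is
# 2048 * 8 * 2 = 32768 kB, which is below the 262144 kB hugetlb minimum,
# so needmem_KB ends up as 262144 kB (i.e. 128 huge pages).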

# set proper nr_hugepages
if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
	nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
	needpgs=$((needmem_KB / hpgsize_KB))
	tries=2
	while [ "$tries" -gt 0 ] && [ "$freepgs" -lt "$needpgs" ]; do
		lackpgs=$((needpgs - freepgs))
		echo 3 > /proc/sys/vm/drop_caches
		if ! echo $((lackpgs + nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
			echo "Please run this test as root"
			exit $ksft_skip
		fi
		while read -r name size unit; do
			if [ "$name" = "HugePages_Free:" ]; then
				freepgs=$size
			fi
		done < /proc/meminfo
		tries=$((tries - 1))
	done
	if [ "$freepgs" -lt "$needpgs" ]; then
		printf "Not enough huge pages available (%d < %d)\n" \
		       "$freepgs" "$needpgs"
	fi
	HAVE_HUGEPAGES=1
else
	echo "no hugetlbfs support in kernel?"
	HAVE_HUGEPAGES=0
fi
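# HAVE_HUGEPAGES is consulted by run_test below to skip "thp" and "hugetlb"
# category tests when no hugepage support is available, and it guards a few
# other hugepage-dependent blocks further down.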

# filter 64bit architectures
ARCH64STR="arm64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sparc64 x86_64"
if [ -z "$ARCH" ]; then
	ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
fi
VADDR64=0
echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1

tap_prefix() {
	sed -e "s/^/${TAP_PREFIX}/"
}

tap_output() {
	if [[ -n "$TAP_PREFIX" ]]; then
		read -r str
		echo "$str"
	fi
}
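# With TAP output enabled (the default), tap_prefix marks diagnostic lines
# with "# " and tap_output passes TAP result lines through unchanged; with -n,
# TAP_PREFIX is empty, so tap_prefix is a no-op and tap_output drops the TAP
# lines entirely.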

pretty_name() {
	echo "$*" | sed -e 's/^\(bash \)\?\.\///'
}
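# e.g. pretty_name "bash ./va_high_addr_switch.sh" -> "va_high_addr_switch.sh"
#      pretty_name "./gup_test -u"                 -> "gup_test -u"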

# Usage: run_test [test binary] [arbitrary test arguments...]
run_test() {
	if test_selected ${CATEGORY}; then
		local skip=0

		# On memory constrained systems some tests can fail to allocate hugepages.
		# Perform some cleanup before the test for a higher success rate.
		if [ "${CATEGORY}" = "thp" ] || [ "${CATEGORY}" = "hugetlb" ]; then
			if [ "${HAVE_HUGEPAGES}" = "1" ]; then
				echo 3 > /proc/sys/vm/drop_caches
				sleep 2
				echo 1 > /proc/sys/vm/compact_memory
				sleep 2
			else
				echo "hugepages not supported" | tap_prefix
				skip=1
			fi
		fi

		local test=$(pretty_name "$*")
		local title="running $*"
		local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
		printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix

		if [ "${skip}" != "1" ]; then
			("$@" 2>&1) | tap_prefix
			local ret=${PIPESTATUS[0]}
		else
			local ret=$ksft_skip
		fi
		count_total=$(( count_total + 1 ))
		if [ $ret -eq 0 ]; then
			count_pass=$(( count_pass + 1 ))
			echo "[PASS]" | tap_prefix
			echo "ok ${count_total} ${test}" | tap_output
		elif [ $ret -eq $ksft_skip ]; then
			count_skip=$(( count_skip + 1 ))
			echo "[SKIP]" | tap_prefix
			echo "ok ${count_total} ${test} # SKIP" | tap_output
			exitcode=$ksft_skip
		else
			count_fail=$(( count_fail + 1 ))
			echo "[FAIL]" | tap_prefix
			echo "not ok ${count_total} ${test} # exit=$ret" | tap_output
			exitcode=1
		fi
	fi # test_selected
}
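# Each invocation therefore produces TAP result lines along these lines
# (illustrative):
#   ok 1 hugepage-mmap
#   ok 2 compaction_test # SKIP
#   not ok 3 map_hugetlb # exit=1
# with all other output routed through tap_prefix as "# " diagnostics.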

echo "TAP version 13" | tap_output

CATEGORY="hugetlb" run_test ./hugepage-mmap

shmmax=$(cat /proc/sys/kernel/shmmax)
shmall=$(cat /proc/sys/kernel/shmall)
echo 268435456 > /proc/sys/kernel/shmmax
echo 4194304 > /proc/sys/kernel/shmall
CATEGORY="hugetlb" run_test ./hugepage-shm
echo "$shmmax" > /proc/sys/kernel/shmmax
echo "$shmall" > /proc/sys/kernel/shmall

CATEGORY="hugetlb" run_test ./map_hugetlb
CATEGORY="hugetlb" run_test ./hugepage-mremap
CATEGORY="hugetlb" run_test ./hugepage-vmemmap
CATEGORY="hugetlb" run_test ./hugetlb-madvise
CATEGORY="hugetlb" run_test ./hugetlb_dio

if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
	# For this test, we need one and only one huge page
	echo 1 > /proc/sys/vm/nr_hugepages
	CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
	CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
	# Restore the previous number of huge pages, since further tests rely on it
	echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
fi

if test_selected "hugetlb"; then
	echo "NOTE: These hugetlb tests provide minimal coverage.  Use"	  | tap_prefix
	echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix
	echo "      hugetlb regression testing."			  | tap_prefix
fi

CATEGORY="mmap" run_test ./map_fixed_noreplace

if $RUN_ALL; then
    run_gup_matrix
else
    # get_user_pages_fast() benchmark
    CATEGORY="gup_test" run_test ./gup_test -u
    # pin_user_pages_fast() benchmark
    CATEGORY="gup_test" run_test ./gup_test -a
fi
# Dump pages 0, 19, and 4096, using pin_user_pages:
CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000
CATEGORY="gup_test" run_test ./gup_longterm

CATEGORY="userfaultfd" run_test ./uffd-unit-tests
uffd_stress_bin=./uffd-stress
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
# Hugetlb tests require source and destination huge pages. Pass in half
# the size of the free pages we have, which is used for *each*.
# uffd-stress expects a region expressed in MiB, so we adjust
# half_ufd_size_MB accordingly.
half_ufd_size_MB=$(((freepgs * hpgsize_KB) / 1024 / 2))
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16
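# (The uffd-stress arguments appear to be <memory type> <size in MiB>
# <bounce count>, e.g. "anon 20 16" stresses 20 MiB of anonymous memory for
# 16 bounce rounds; the binary's own usage output is authoritative.)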
# uffd-wp-mremap requires at least one page of each size.
have_all_size_hugepgs=true
declare -A nr_size_hugepgs
for f in /sys/kernel/mm/hugepages/**/nr_hugepages; do
	old=$(cat $f)
	nr_size_hugepgs["$f"]="$old"
	if [ "$old" == 0 ]; then
		echo 1 > "$f"
	fi
	if [ $(cat "$f") == 0 ]; then
		have_all_size_hugepgs=false
		break
	fi
done
if $have_all_size_hugepgs; then
	CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
else
	echo "# SKIP ./uffd-wp-mremap"
fi

# cleanup
for f in "${!nr_size_hugepgs[@]}"; do
	echo "${nr_size_hugepgs["$f"]}" > "$f"
done
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
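# The loop above restores each per-size hugepage pool to the value saved in
# nr_size_hugepgs; the final echo restores the default-size pool recorded in
# nr_hugepgs during the setup near the top of the script.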

CATEGORY="compaction" run_test ./compaction_test

if command -v sudo &> /dev/null && sudo -u nobody ls ./on-fault-limit >/dev/null;
then
	CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
else
	echo "# SKIP ./on-fault-limit"
fi

CATEGORY="mmap" run_test ./map_populate

CATEGORY="mlock" run_test ./mlock-random-test

CATEGORY="mlock" run_test ./mlock2-tests

CATEGORY="process_mrelease" run_test ./mrelease_test

CATEGORY="mremap" run_test ./mremap_test

CATEGORY="hugetlb" run_test ./thuge-gen
CATEGORY="hugetlb" run_test ./charge_reserved_hugetlb.sh -cgroup-v2
CATEGORY="hugetlb" run_test ./hugetlb_reparenting_test.sh -cgroup-v2
if $RUN_DESTRUCTIVE; then
nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
enable_soft_offline=$(cat /proc/sys/vm/enable_soft_offline)
echo 8 > /proc/sys/vm/nr_hugepages
CATEGORY="hugetlb" run_test ./hugetlb-soft-offline
echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
echo "$enable_soft_offline" > /proc/sys/vm/enable_soft_offline
CATEGORY="hugetlb" run_test ./hugetlb-read-hwpoison
fi
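# (These are gated behind -d because soft-offline and hwpoison injection can
# take huge pages permanently offline, i.e. the machine may be left with less
# usable memory until reboot.)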

if [ $VADDR64 -ne 0 ]; then

	# set overcommit_policy to OVERCOMMIT_ALWAYS so that the kernel
	# allows high virtual address allocation requests independent
	# of the platform's physical memory.

	if [ -x ./virtual_address_range ]; then
		prev_policy=$(cat /proc/sys/vm/overcommit_memory)
		echo 1 > /proc/sys/vm/overcommit_memory
		CATEGORY="hugevm" run_test ./virtual_address_range
		echo $prev_policy > /proc/sys/vm/overcommit_memory
	fi

	# va high address boundary switch test
	ARCH_ARM64="arm64"
	prev_nr_hugepages=$(cat /proc/sys/vm/nr_hugepages)
	if [ "$ARCH" == "$ARCH_ARM64" ]; then
		echo 6 > /proc/sys/vm/nr_hugepages
	fi
	CATEGORY="hugevm" run_test bash ./va_high_addr_switch.sh
	if [ "$ARCH" == "$ARCH_ARM64" ]; then
		echo $prev_nr_hugepages > /proc/sys/vm/nr_hugepages
	fi
fi # VADDR64

# vmalloc stability smoke test
CATEGORY="vmalloc" run_test bash ./test_vmalloc.sh smoke

CATEGORY="mremap" run_test ./mremap_dontunmap

CATEGORY="hmm" run_test bash ./test_hmm.sh smoke

# MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests
CATEGORY="madv_guard" run_test ./guard-regions

# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate

if [ -x ./memfd_secret ]
then
(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
CATEGORY="memfd_secret" run_test ./memfd_secret
fi

# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100 MiB
if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	CATEGORY="ksm" run_test ./ksm_tests -H -s 100
fi
# KSM KSM_MERGE_TIME test with size of 100 MiB
CATEGORY="ksm" run_test ./ksm_tests -P -s 100
# KSM MADV_MERGEABLE test with 10 identical pages
CATEGORY="ksm" run_test ./ksm_tests -M -p 10
# KSM unmerge test
CATEGORY="ksm" run_test ./ksm_tests -U
# KSM test with 10 zero pages and use_zero_pages = 0
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 0
# KSM test with 10 zero pages and use_zero_pages = 1
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 1
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 0
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0

CATEGORY="ksm" run_test ./ksm_functional_tests

# protection_keys tests
nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
if [ -x ./protection_keys_32 ]
then
	CATEGORY="pkey" run_test ./protection_keys_32
fi

if [ -x ./protection_keys_64 ]
then
	CATEGORY="pkey" run_test ./protection_keys_64
fi
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages

if [ -x ./soft-dirty ]
then
	CATEGORY="soft_dirty" run_test ./soft-dirty
fi

CATEGORY="pagemap" run_test ./pagemap_ioctl

# COW tests
CATEGORY="cow" run_test ./cow

CATEGORY="thp" run_test ./khugepaged

CATEGORY="thp" run_test ./khugepaged -s 2

CATEGORY="thp" run_test ./transhuge-stress -d 20

# Try to create XFS if not provided
if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
    if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	if test_selected "thp"; then
	    if grep xfs /proc/filesystems &>/dev/null; then
		XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
		SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
		truncate -s 314572800 ${XFS_IMG}
		mkfs.xfs -q ${XFS_IMG}
		mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
		MOUNTED_XFS=1
	    fi
	fi
    fi
fi
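# (The image created above is a 300 MiB sparse file; split_huge_page_test
# presumably uses SPLIT_HUGE_PAGE_TEST_XFS_PATH, when set, to also exercise
# splitting of file-backed huge pages on XFS.)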

CATEGORY="thp" run_test ./split_huge_page_test ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}

if [ -n "${MOUNTED_XFS}" ]; then
    umount ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
    rmdir ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
    rm -f ${XFS_IMG}
fi

CATEGORY="migration" run_test ./migration

CATEGORY="mkdirty" run_test ./mkdirty

CATEGORY="mdwe" run_test ./mdwe_test

CATEGORY="page_frag" run_test ./test_page_frag.sh smoke

CATEGORY="page_frag" run_test ./test_page_frag.sh aligned

CATEGORY="page_frag" run_test ./test_page_frag.sh nonaligned

echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
echo "1..${count_total}" | tap_output

exit $exitcode