xref: /src/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib (revision 8a62a2a5659d1839d8799b4274c04469d7f17c78)
1# SPDX-License-Identifier: CDDL-1.0
2#
3# CDDL HEADER START
4#
5# The contents of this file are subject to the terms of the
6# Common Development and Distribution License (the "License").
7# You may not use this file except in compliance with the License.
8#
9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10# or https://opensource.org/licenses/CDDL-1.0.
11# See the License for the specific language governing permissions
12# and limitations under the License.
13#
14# When distributing Covered Code, include this CDDL HEADER in each
15# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16# If applicable, add the following below this CDDL HEADER, with the
17# fields enclosed by brackets "[]" replaced with your own identifying
18# information: Portions Copyright [yyyy] [name of copyright owner]
19#
20# CDDL HEADER END
21#
22
23#
24# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
25# Copyright (c) 2012, 2020, Delphix. All rights reserved.
26# Copyright (c) 2017, Tim Chase. All rights reserved.
27# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
28# Copyright (c) 2017, Lawrence Livermore National Security LLC.
29# Copyright (c) 2017, Datto Inc. All rights reserved.
30# Copyright (c) 2017, Open-E Inc. All rights reserved.
31# Copyright (c) 2021, The FreeBSD Foundation.
32# Copyright (c) 2025, Klara, Inc.
33# Copyright (c) 2026, TrueNAS.
34# Use is subject to license terms.
35#
36
37. ${STF_SUITE}/include/tunables.cfg
38
39. ${STF_TOOLS}/include/logapi.shlib
40. ${STF_SUITE}/include/math.shlib
41. ${STF_SUITE}/include/blkdev.shlib
42
43
# On AlmaLinux 9 we will see $PWD = '.' instead of the full path.  This causes
# some tests to fail.  Fix it up here.
if [ "$PWD" = "." ] ; then
	# readlink -f resolves '.' against the process's real working
	# directory, restoring an absolute path.
	PWD="$(readlink -f $PWD)"
fi

#
# Apply constrained path when available.  This is required since the
# PATH may have been modified by sudo's secure_path behavior.
#
if [ -n "$STF_PATH" ]; then
	export PATH="$STF_PATH"
fi
57
58#
59# Generic dot version comparison function
60#
61# Returns success when version $1 is greater than or equal to $2.
62#
function compare_version_gte
{
	# Success when $1 sorts last (version order), i.e. $1 >= $2.
	# Use '%s' format specifiers so version strings containing '%' or
	# '\' are compared literally instead of being interpreted by printf.
	[ "$(printf '%s\n%s' "$1" "$2" | sort -V | tail -n1)" = "$1" ]
}
67
68# Helper function used by linux_version() and freebsd_version()
69# $1, if provided, should be a MAJOR, MAJOR.MINOR or MAJOR.MINOR.PATCH
70# version number
# Helper function used by linux_version() and freebsd_version().
#
# $1, if provided, should be a MAJOR, MAJOR.MINOR or MAJOR.MINOR.PATCH
# version number; when blank, the running kernel's version is detected.
# Prints a single integer suitable for numeric comparison.
function kernel_version
{
	typeset v="$1"

	if [ -z "$v" ]; then
		case "$UNAME" in
		Linux)
			# Take the leading X.Y.Z of `uname -r`, dropping any
			# vendor/distro suffix, e.g.:
			#   RHEL7:       3.10.0-1160.108.1.el7.x86_64
			#   Fedora 37:   6.5.12-100.fc37.x86_64
			#   Debian 12.6: 6.1.0-22-amd64
			v=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
			;;
		FreeBSD)
			# FreeBSD reports X.Y-BRANCH-pZ; fold the patch level
			# into X.Y.Z, treating a missing -pZ as patch level 0.
			# eg:
			#   13.2-RELEASE-p4
			#   14.1-RELEASE
			#   15.0-CURRENT
			v=$(uname -r | \
			    grep -Eo "[0-9]+\.[0-9]+(-[A-Z0-9]+-p[0-9]+)?" | \
			    sed -E "s/-[^-]+-p/./")
			;;
		*)
			log_fail "Don't know how to get kernel version for '$UNAME'"
			;;
		esac
	fi

	# Missing components default to 0 (e.g. "4.10" -> 4.10.0).
	typeset maj min pat _
	IFS='.' read -r maj min pat _ <<<"$v"

	echo $(( ${maj:-0} * 100000 + ${min:-0} * 1000 + ${pat:-0} ))
}
112
113# Linux kernel version comparison function
114#
115# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
116#
117# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
# Resolve a Linux version string (or the running kernel when blank) into a
# comparable integer; see kernel_version() for the encoding.
function linux_version {
	kernel_version "$@"
}
121
122# FreeBSD version comparison function
123#
124# $1 FreeBSD version ("13.2", "14.0") or blank for installed FreeBSD version
125#
126# Used for comparison: if [ $(freebsd_version) -ge $(freebsd_version "13.2") ]
# Resolve a FreeBSD version string (or the running release when blank) into a
# comparable integer; see kernel_version() for the encoding.
function freebsd_version {
	kernel_version "$@"
}
130
131# Determine if this is a Linux test system
132#
133# Return 0 if platform Linux, 1 if otherwise
134
function is_linux
{
	case "$UNAME" in
	Linux)	true ;;
	*)	false ;;
	esac
}
139
140# Determine if this is an illumos test system
141#
142# Return 0 if platform illumos, 1 if otherwise
function is_illumos
{
	case "$UNAME" in
	illumos)	true ;;
	*)		false ;;
	esac
}
147
148# Determine if this is a FreeBSD test system
149#
150# Return 0 if platform FreeBSD, 1 if otherwise
151
function is_freebsd
{
	case "$UNAME" in
	FreeBSD)	true ;;
	*)		false ;;
	esac
}
156
157# Determine if this is a 32-bit system
158#
159# Return 0 if platform is 32-bit, 1 if otherwise
160
function is_32bit
{
	typeset bits
	bits=$(getconf LONG_BIT)
	[ "$bits" = "32" ]
}
165
166# Determine if kmemleak is enabled
167#
168# Return 0 if kmemleak is enabled, 1 if otherwise
169
function is_kmemleak
{
	# kmemleak is a Linux-only debug facility, detected via debugfs.
	if is_linux; then
		[ -e /sys/kernel/debug/kmemleak ]
	else
		false
	fi
}
174
175# Determine whether a dataset is mounted
176#
177# $1 dataset name
178# $2 filesystem type; optional - defaulted to zfs
179#
180# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
181
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name

	case $fstype in
		zfs)
			# 'zfs mount' prints "<dataset> <mountpoint>" lines.
			# Match on mountpoint when $1 looks like a path,
			# otherwise on dataset name.  awk exits 1 on a match,
			# so the leading '!' turns a hit into success (0).
			if [[ "$1" == "/"* ]] ; then
				! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
			else
				! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
			fi
		;;
		ufs|nfs)
			if is_freebsd; then
				# NOTE(review): 'return 0' here relies on
				# ksh93 running the final pipeline stage in
				# the current shell; under bash it would only
				# leave the subshell — confirm shell is ksh.
				mount -pt $fstype | while read dev dir _t _flags; do
					[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
				done
			else
				out=$(df -F $fstype $1 2>/dev/null) || return

				# Parse df's "dir (name): ..." output into the
				# mount point and the device name.
				dir=${out%%\(*}
				dir=${dir%% *}
				name=${out##*\(}
				name=${name%%\)*}
				name=${name%% *}

				[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
			fi
		;;
		ext*)
			# df exits non-zero when $1 isn't mounted as $fstype.
			df -t $fstype $1 > /dev/null 2>&1
		;;
		zvol)
			# Resolve the zvol symlink and look for the resolved
			# device at the start of a 'mount' output line.
			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
				link=$(readlink -f $ZVOL_DEVDIR/$1)
				[[ -n "$link" ]] && \
					mount | grep -q "^$link" && \
						return 0
			fi
		;;
		*)
			false
		;;
	esac
}
229
230# Return 0 if a dataset is mounted; 1 otherwise
231#
232# $1 dataset name
233# $2 filesystem type; optional - defaulted to zfs
234
235function mounted
236{
237	ismounted $1 $2
238}
239
240# Return 0 if a dataset is unmounted; 1 otherwise
241#
242# $1 dataset name
243# $2 filesystem type; optional - defaulted to zfs
244
245function unmounted
246{
247	! ismounted $1 $2
248}
249
250function default_setup
251{
252	default_setup_noexit "$@"
253
254	log_pass
255}
256
257function default_setup_no_mountpoint
258{
259	default_setup_noexit "$1" "$2" "$3" "yes"
260
261	log_pass
262}
263
#
# Given a list of disks, setup storage pools and datasets.
#
# $1 disk list used to create $TESTPOOL
# $2 if non-empty, also create the $TESTCTR container with $TESTFS1 inside
# $3 if non-empty, also create the $TESTVOL volume
# $4 if non-empty, do not set explicit mountpoints
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	typeset no_mountpoint=$4
	log_note begin default_setup_noexit

	if is_global_zone; then
		# Start from a clean slate: tear down any existing $TESTPOOL.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Non-global zones cannot create pools; reuse the zone pool.
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	if [[ -z $no_mountpoint ]]; then
		log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
	fi

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# canmount=off: the container only groups child datasets.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		if [[ -z $no_mountpoint ]]; then
			log_must zfs set mountpoint=$TESTDIR1 \
			    $TESTPOOL/$TESTCTR/$TESTFS1
		fi
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			# Real zvol: wait for its device node to appear.
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			# Zones cannot create volumes; use a plain dataset.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
317
318#
319# Given a list of disks, setup a storage pool, file system and
320# a container.
321#
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
	default_setup "$1" "true"
}
328
329#
330# Given a list of disks, setup a storage pool,file system
331# and a volume.
332#
#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	default_setup "$1" "" "true"
}
339
340#
341# Given a list of disks, setup a storage pool,file system,
342# a container and a volume.
343#
#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	default_setup "$1" "true" "true"
}
350
351#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
354#
355# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
356# $2 snapshot name. Default, $TESTSNAP
357#
function create_snapshot
{
	typeset ds=${1:-$TESTPOOL/$TESTFS}
	typeset snapname=${2:-$TESTSNAP}

	# Both names must be resolvable (either from arguments or defaults).
	if [[ -z $ds ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi

	snapexists $ds@$snapname && \
		log_fail "$ds@$snapname already exists."
	if ! datasetexists $ds; then
		log_fail "$ds must exist."
	fi

	log_must zfs snapshot $ds@$snapname
}
374
375#
376# Create a clone from a snapshot, default clone name is $TESTCLONE.
377#
378# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
379# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
380#
function create_clone   # snapshot clone
{
	typeset snapname=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clonename=${2:-$TESTPOOL/$TESTCLONE}

	if [[ -z $snapname ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $clonename ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $snapname $clonename
}
393
394#
# Create a bookmark of the given snapshot.  By default, create a bookmark on
# the filesystem.
397#
398# $1 Existing filesystem or volume name. Default, $TESTFS
399# $2 Existing snapshot name. Default, $TESTSNAP
400# $3 bookmark name. Default, $TESTBKMARK
401#
function create_bookmark
{
	typeset ds=${1:-$TESTFS}
	typeset snapname=${2:-$TESTSNAP}
	typeset mark=${3:-$TESTBKMARK}

	if [[ -z $ds ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi
	if [[ -z $mark ]]; then
		log_fail "Bookmark's name is undefined."
	fi

	# The bookmark must be new, and both the dataset and its source
	# snapshot must already exist.
	bkmarkexists $ds#$mark && \
		log_fail "$ds#$mark already exists."
	if ! datasetexists $ds; then
		log_fail "$ds must exist."
	fi
	if ! snapexists $ds@$snapname; then
		log_fail "$ds@$snapname must exist."
	fi

	log_must zfs bookmark $ds@$snapname $ds#$mark
}
422
423#
424# Create a temporary clone result of an interrupted resumable 'zfs receive'
425# $1 Destination filesystem name. Must not exist, will be created as the result
426#    of this function along with its %recv temporary clone
427# $2 Source filesystem name. Must not exist, will be created and destroyed
428#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Seed $recvfs with a full stream, then build an incremental stream
	# and truncate it to 10K so the receive can never complete.
	log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
	    iflag=fullblock > $sendfile"
	# Receiving the truncated stream with -s must fail, leaving the
	# resumable %recv temporary clone behind.
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	destroy_dataset "$sendfs" "-r"
	log_must rm -f "$sendfile"

	# Verify the interrupted receive really left an inconsistent
	# temporary clone in place.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
458
459function default_mirror_setup
460{
461	default_mirror_setup_noexit $1 $2 $3
462
463	log_pass
464}
465
466#
467# Given a pair of disks, set up a storage pool and dataset for the mirror
468# @parameters: $1 the primary side of the mirror
469#   $2 the secondary side of the mirror
470# @uses: ZPOOL ZFS TESTPOOL TESTFS
471function default_mirror_setup_noexit
472{
473	readonly func="default_mirror_setup_noexit"
474	typeset primary=$1
475	typeset secondary=$2
476
477	[[ -z $primary ]] && \
478		log_fail "$func: No parameters passed"
479	[[ -z $secondary ]] && \
480		log_fail "$func: No secondary partition passed"
481	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
482	log_must zpool create -f $TESTPOOL mirror $@
483	log_must zfs create $TESTPOOL/$TESTFS
484	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
485}
486
487#
488# Destroy the configured testpool mirrors.
489# the mirrors are of the form ${TESTPOOL}{number}
490# @uses: ZPOOL ZFS TESTPOOL
491function destroy_mirrors
492{
493	default_cleanup_noexit
494
495	log_pass
496}
497
498function default_raidz_setup
499{
500	default_raidz_setup_noexit "$*"
501
502	log_pass
503}
504
505#
506# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
507# $1 the list of disks
508#
function default_raidz_setup_noexit
{
	typeset disklist="$*"
	# Split the disk list into an array purely to count its elements.
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
523
524#
525# Common function used to cleanup storage pools and datasets.
526#
527# Invoked at the start of the test suite to ensure the system
528# is in a known state, and also at the end of each set of
529# sub-tests to ensure errors from one set of tests doesn't
530# impact the execution of the next set.
531
532function default_cleanup
533{
534	default_cleanup_noexit
535
536	log_pass
537}
538
539#
540# Utility function used to list all available pool names.
541#
542# NOTE: $KEEP is a variable containing pool names, separated by a newline
543# character, that must be excluded from the returned list.
544#
545function get_all_pools
546{
547	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
548}
549
function default_cleanup_noexit
{
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		ALL_POOLS=$(get_all_pools)
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.  The list is re-read after each pass.
		# NOTE(review): if a pool is never safe to destroy this
		# loop will not terminate — confirm that is intended.
		while [ -n "${ALL_POOLS}" ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
			done
			ALL_POOLS=$(get_all_pools)
		done

		zfs mount -a
	else
		typeset fs=""
		# Inside a zone: recursively destroy our datasets instead
		# of the shared pool itself.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			destroy_dataset "$fs" "-Rf"
		done

		# Need cleanup here to avoid garbage dir left.
		# NOTE(review): comparing a dataset name against /$ZONE_POOL
		# (leading slash) looks like it can never match — confirm.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=$(get_prop encryption $fs)
				# checksum cannot be toggled on encrypted
				# datasets; skip it there.
				if [ -z "$enc" ] || [ "$enc" = "off" ]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR

	# Multipath devices must also have their partitions removed.
	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi

	rm -f $TEST_BASE_DIR/{err,out}
}
625
626
627#
628# Common function used to cleanup storage pools, file systems
629# and containers.
630#
function default_container_cleanup
{
	# In a non-global zone the pool must be re-exported before its
	# datasets can be manipulated.
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	# Destroy the contained filesystem first, then the container.
	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1

	default_cleanup
}
648
649#
650# Common function used to cleanup snapshot of file system or volume. Default to
651# delete the file system's snapshot
652#
653# $1 snapshot name
654#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# 'get_prop mountpoint' is only reliable while the snapshot is
	# actually mounted, so capture the mountpoint first, and only if
	# the snapshot is currently mounted on this system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
	fi

	destroy_dataset "$snap"
	# Remove any leftover mountpoint directory.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
677
678#
679# Common function used to cleanup clone.
680#
681# $1 clone name
682#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Fixed log message grammar ("does not existed.").
		log_fail "'$clone' does not exist."
	fi

	# As in destroy_snapshot(): capture the mountpoint while the clone
	# is still mounted so the leftover directory can be removed.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
	fi

	destroy_dataset "$clone"
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
701
702#
703# Common function used to cleanup bookmark of file system or volume.  Default
704# to delete the file system's bookmark.
705#
706# $1 bookmark name
707#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: previously referenced the undefined variable
		# '$bkmarkp', yielding an empty name in the failure message.
		log_fail "'$bkmark' does not exist."
	fi

	destroy_dataset "$bkmark"
}
718
719# Return 0 if a snapshot exists; $? otherwise
720#
721# $1 - snapshot name
722
# Succeeds iff 'zfs list' can resolve $1 as a snapshot.
function snapexists
{
	zfs list -t snapshot -H "$1" > /dev/null 2>&1
}
727
728#
729# Return 0 if a bookmark exists; $? otherwise
730#
731# $1 - bookmark name
732#
# Succeeds iff 'zfs list' can resolve $1 as a bookmark.
function bkmarkexists
{
	zfs list -t bookmark -H "$1" > /dev/null 2>&1
}
737
738#
739# Return 0 if a hold exists; $? otherwise
740#
741# $1 - hold tag
742# $2 - snapshot name
743#
744function holdexists
745{
746	! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
747}
748
749#
750# Set a property to a certain value on a dataset.
751# Sets a property of the dataset to the value as passed in.
752# @param:
753#	$1 dataset who's property is being set
754#	$2 property to set
755#	$3 value to set property to
756# @return:
757#	0 if the property could be set.
758#	non-zero otherwise.
759# @use: ZFS
760#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	# Capture both the command output and its exit status so failures
	# can be reported in detail.
	typeset output rv
	output=$(zfs set $2=$3 $1 2>&1)
	rv=$?
	if ((rv == 0)); then
		return 0
	fi

	log_note "Setting property on $1 failed."
	log_note "property $2=$3"
	log_note "Return Code: $rv"
	log_note "Output: $output"
	return $rv
}
781
782#
783# Check a numeric assertion
784# @parameter: $@ the assertion to check
785# @output: big loud notice if assertion failed
786# @use: log_fail
787#
function assert
{
	# Evaluate the arguments as an arithmetic expression; fail loudly
	# (with the expression as the message) when it is false.
	if ! (($@)); then
		log_fail "$@"
	fi
}
792
793#
794# Function to format partition size of a disk
795# Given a disk cxtxdx reduces all partitions
796# to 0 size
797#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_freebsd; then
		# Dropping the partition table removes all partitions.
		gpart destroy -F $diskname
	elif is_linux; then
		# Writing a fresh GPT label implicitly removes everything.
		DSK=$DEV_DSKDIR/$diskname
		DSK=$(echo $DSK | sed -e "s|//|/|g")
		log_must parted $DSK -s -- mklabel gpt
		blockdev --rereadpt $DSK 2>/dev/null
		block_device_wait
	else
		# illumos: zero each slice individually; slice 2 is the
		# whole disk and is left untouched.
		for i in 0 1 3 4 5 6 7
		do
			log_must set_partition $i "" 0mb $diskname
		done
	fi

	return 0
}
820
821#
822# Given a slice, size and disk, this function
823# formats the slice to the specified size.
824# Size should be specified with units as per
825# the `format` command requirements eg. 100mb 3gb
826#
827# NOTE: This entire interface is problematic for the Linux parted utility
828# which requires the end of the partition to be specified.  It would be
829# best to retire this interface and replace it with something more flexible.
830# At the moment a best effort is made.
831#
832# arguments: <slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
function set_partition
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	# Accept bare names as well as $DEV_DSKDIR/$DEV_RDSKDIR paths.
	typeset disk=${4#$DEV_DSKDIR/}
	disk=${disk#$DEV_RDSKDIR/}

	case "$UNAME" in
	Linux)
		if [[ -z $size || -z $disk ]]; then
			log_fail "The size or disk name is unspecified."
		fi
		disk=$DEV_DSKDIR/$disk
		# Strip the unit suffix ("mb"/"gb"/...) and normalize to MiB.
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		# NOTE(review): only the second character is checked, so a
		# gigabyte size is assumed to be a single digit (e.g. "3gb").
		if [[ ${size:1:1} == 'g' ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		parted $disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			if ! parted $disk -s -- mklabel gpt; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$(parted -m $disk -s -- unit cyl print |
			awk -F '[:k.]' 'NR == 3 {print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		parted $disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl
		typeset ret_val=$?
		if [[ $ret_val -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		blockdev --rereadpt $disk 2>/dev/null
		block_device_wait $disk
		;;
	FreeBSD)
		if [[ -z $size || -z $disk ]]; then
			log_fail "The size or disk name is unspecified."
		fi
		disk=$DEV_DSKDIR/$disk

		# (Re)create a GPT table for slice 0 or unlabeled disks.
		if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
			gpart destroy -F $disk >/dev/null 2>&1
			if ! gpart create -s GPT $disk; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# gpart partition indexes are 1-based.
		typeset index=$((slicenum + 1))

		if [[ -n $start ]]; then
			start="-b $start"
		fi
		gpart add -t freebsd-zfs $start -s $size -i $index $disk
		# Fixed: capture gpart's exit status here; previously
		# $ret_val was never set on this path, so failures of
		# 'gpart add' went undetected.
		typeset ret_val=$?
		if [[ $ret_val -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		block_device_wait $disk
		;;
	*)
		if [[ -z $slicenum || -z $size || -z $disk ]]; then
			log_fail "The slice, size or disk name is unspecified."
		fi

		# Drive the interactive format(8) utility with a script.
		typeset format_file="$TEST_BASE_DIR"/format_in.$$

		echo "partition" >$format_file
		echo "$slicenum" >> $format_file
		echo "" >> $format_file
		echo "" >> $format_file
		echo "$start" >> $format_file
		echo "$size" >> $format_file
		echo "label" >> $format_file
		echo "" >> $format_file
		echo "q" >> $format_file
		echo "q" >> $format_file

		format -e -s -d $disk -f $format_file
		typeset ret_val=$?
		rm -f $format_file
		;;
	esac

	if [[ $ret_val -ne 0 ]]; then
		log_note "Unable to format $disk slice $slicenum to $size"
		return 1
	fi
	return 0
}
945
946#
947# Delete all partitions on all disks - this is specifically for the use of multipath
948# devices which currently can only be used in the test suite as raw/un-partitioned
949# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
950#
function delete_partitions
{
	typeset disk

	# Default to the full test-suite disk set.
	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		typeset -i part
		for disk in $DISKSARRAY; do
			for (( part = 1; part < MAX_PARTITIONS; part++ )); do
				typeset partition=${disk}${SLICE_PREFIX}${part}
				# Removal errors are ignored (the partition
				# may not exist); verify via lsblk that it is
				# really gone.
				parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
				if lsblk | grep -qF ${partition}; then
					log_fail "Partition ${partition} not deleted"
				else
					log_note "Partition ${partition} deleted"
				fi
			done
		done
	elif is_freebsd; then
		# One shot per disk: destroying the table drops everything.
		for disk in $DISKSARRAY; do
			if gpart destroy -F $disk; then
				log_note "Partitions for ${disk} deleted"
			else
				log_fail "Partitions for ${disk} not deleted"
			fi
		done
	fi
}
982
983#
984# Get the end cyl of the given slice
985#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	case "$UNAME" in
	Linux)
		# Pull the "end" column of partition "part<slice>" from
		# parted's cylinder-unit listing.
		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
			awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
		((endcyl = (endcyl + 1)))
		;;
	FreeBSD)
		# gpart indexes are 1-based; end = start sector + size.
		disk=${disk#/dev/zvol/}
		disk=${disk%p*}
		slice=$((slice + 1))
		endcyl=$(gpart show $disk | \
			awk -v slice=$slice '$3 == slice { print $1 + $2 }')
		;;
	*)
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# Convert the prtvtoc end sector into cylinders using the
		# sectors-per-cylinder ratio of the whole disk (slice 2).
		typeset -i ratio=0
		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		    awk '/sectors\/cylinder/ {print $2}')

		if ((ratio == 0)); then
			return
		fi

		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		    awk -v token="$slice" '$1 == token {print $6}')

		((endcyl = (endcyl + 1) / ratio))
		;;
	esac

	echo $endcyl
}
1029
1030
1031#
1032# Given a size,disk and total slice number,  this function formats the
1033# disk slices from 0 to the total slice number with the same specified
1034# size.
1035#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ! is_linux; then
			# Skip slice 2 on illumos: it represents the
			# whole disk.
			if ((i == 2)); then
				((i = i + 1))
				continue
			fi
		fi
		log_must set_partition $i "$cyl" $slice_size $disk_name
		# Start the next slice where this one ended.
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
1057
1058#
1059# This function continues to write to a filenum number of files into dirnum
1060# number of directories until either file_write returns an error or the
1061# maximum number of files per directory have been written.
1062#
1063# Usage:
1064# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1065#
1066# Return value: 0 on success
1067#		non 0 on error
1068#
1069# Where :
1070#	destdir:    is the directory where everything is to be created under
1071#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
1072#	filenum:    the maximum number of files per subdirectory
1073#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
1075#	data:	    the data that will be written
1076#
1077#	E.g.
1078#	fill_fs /testdir 20 25 1024 256 0
1079#
1080# Note: bytes * num_writes equals the size of the testfile
1081#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset data=${6:-"R"}

	# NOTE(review): brace expansion with a variable bound
	# ({1..$dirnum}) requires ksh93/zsh semantics; bash would not
	# expand it — confirm this library always runs under ksh.
	mkdir -p $destdir/{1..$dirnum}
	# The loop terms are generated names, not globs; stop as soon as
	# file_write fails (e.g. filesystem full), propagating its status.
	for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
		file_write -o create -f $f -b $bytes -c $num_writes -d $data \
		|| return
	done
}
1097
1098# Get the specified dataset property in parsable format or fail
# Get the specified property of a dataset in parsable (-Hp) format, or
# abort the test if the lookup fails.
function get_prop # property dataset
{
	zfs get -Hpo value "$1" "$2" || log_fail "zfs get $1 $2"
}
1106
1107# Get the specified pool property in parsable format or fail
# Get the specified property of a pool in parsable (-Hp) format, or abort
# the test if the lookup fails.
function get_pool_prop # property pool
{
	zpool get -Hpo value "$1" "$2" || log_fail "zpool get $1 $2"
}
1115
1116# Get the specified vdev property in parsable format or fail
# Get the specified property of a vdev in parsable (-Hp) format, or abort
# the test if the lookup fails.
function get_vdev_prop
{
	zpool get -Hpo value "$1" "$2" "$3" || log_fail "zpool get $1 $2 $3"
}
1125
1126# Return 0 if a pool exists; $? otherwise
1127#
1128# $1 - pool name
1129
function poolexists
{
	typeset name="$1"

	[[ -n $name ]] || {
		log_note "No pool name given."
		return 1
	}

	# 'zpool get' fails for nonexistent pools.
	zpool get name "$name" > /dev/null 2>&1
}
1141
1142# Return 0 if all the specified datasets exist; $? otherwise
1143#
1144# $1-n  dataset name
function datasetexists
{
	if [ $# -eq 0 ]; then
		log_note "No dataset name given."
		return 1
	fi

	# 'zfs get' fails if any of the named datasets is missing.
	zfs get name "$@" > /dev/null 2>&1
}
1154
1155# return 0 if none of the specified datasets exists, otherwise return 1.
1156#
1157# $1-n  dataset name
function datasetnonexists
{
	if [ $# -eq 0 ]; then
		log_note "No dataset name given."
		return 1
	fi

	# Fail as soon as any argument resolves to an existing dataset.
	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
1173
1174# FreeBSD breaks exports(5) at whitespace and doesn't process escapes
1175# Solaris just breaks
1176#
1177# cf. https://github.com/openzfs/zfs/pull/13165#issuecomment-1059845807
1178#
1179# Linux can have spaces (which are \OOO-escaped),
1180# but can't have backslashes because they're parsed recursively
1181function shares_can_have_whitespace
1182{
1183	is_linux
1184}
1185
1186function is_shared_freebsd
1187{
1188	typeset fs=$1
1189
1190	pgrep -q mountd && showmount -E | grep -qx "$fs"
1191}
1192
function is_shared_illumos
{
	typeset fs=$1
	typeset mp

	# The second column of share(8) output is the shared path.
	for mp in $(share | awk '{print $2}'); do
		[[ $mp == $fs ]] && return 0
	done

	# Not shared; log the NFS server state to help diagnose why.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1211
1212function is_shared_linux
1213{
1214	typeset fs=$1
1215	! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
1216}
1217
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading /) is resolved to its mountpoint.
	if [[ $fs != "/"* ]] ; then
		datasetnonexists "$fs" && return 1

		mtpt=$(get_prop mountpoint "$fs")
		case "$mtpt" in
		none|legacy|-)
			return 1 ;;
		*)
			fs=$mtpt ;;
		esac
	fi

	case "$UNAME" in
	FreeBSD)	is_shared_freebsd "$fs"	;;
	Linux)		is_shared_linux "$fs"	;;
	*)		is_shared_illumos "$fs"	;;
	esac
}
1248
# Return 0 if the given mountpoint appears in the illumos sharetab.
function is_exported_illumos
{
	typeset fs=$1
	typeset mtpt _

	# The first whitespace-separated field of each entry is the
	# exported mountpoint.
	while read -r mtpt _; do
		if [ "$mtpt" = "$fs" ]; then
			return 0
		fi
	done < /etc/dfs/sharetab

	return 1
}
1260
# Return 0 if the given mountpoint appears in the FreeBSD ZFS exports file.
function is_exported_freebsd
{
	typeset fs=$1
	typeset mtpt _

	# The first whitespace-separated field of each entry is the
	# exported mountpoint.
	while read -r mtpt _; do
		if [ "$mtpt" = "$fs" ]; then
			return 0
		fi
	done < /etc/zfs/exports

	return 1
}
1272
# Return 0 if the given mountpoint appears in the Linux ZFS exports file.
function is_exported_linux
{
	typeset fs=$1
	typeset mtpt _

	while read -r mtpt _; do
		# Mountpoints are stored \OOO-escaped; decode before
		# comparing.  Use the '%b' conversion rather than passing
		# the path as the printf format string, so a literal '%'
		# in the mountpoint cannot be misinterpreted as a format
		# directive.
		[ "$(printf '%b' "$mtpt")" = "$fs" ] && return
	done < /etc/exports.d/zfs.exports

	return 1
}
1284
#
# Given a mountpoint, or a dataset name, determine if it is exported via
# the os-specific NFS exports file.
#
# Returns 0 if exported, 1 otherwise.
#
function is_exported
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading /) is resolved to its mountpoint.
	if [[ $fs != "/"* ]] ; then
		datasetnonexists "$fs" && return 1

		mtpt=$(get_prop mountpoint "$fs")
		case $mtpt in
		none|legacy|-)
			return 1 ;;
		*)
			fs=$mtpt ;;
		esac
	fi

	case "$UNAME" in
	FreeBSD)	is_exported_freebsd "$fs"	;;
	Linux)		is_exported_linux "$fs"	;;
	*)		is_exported_illumos "$fs"	;;
	esac
}
1316
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
	typeset fs=$1

	# Propagate datasetexists' status for a nonexistent dataset.
	datasetexists "$fs" || return

	if ! is_linux; then
		log_note "SMB on $UNAME currently unsupported by the test framework"
		return 1
	fi

	# Usershare names are the dataset name with '-' and '/' mapped
	# to '_'.
	net usershare list | grep -xFq "${fs//[-\/]/_}"
}
1335
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	if is_shared $1; then
		return 1
	fi
	return 0
}
1345
#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
	if is_shared_smb $1; then
		return 1
	fi
	return 0
}
1355
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	# Only attempt the unshare when the dataset is actually shared
	# via NFS or SMB.
	if ! is_shared $fs && ! is_shared_smb $fs; then
		return
	fi

	log_must zfs unshare $fs
}
1367
#
# Helper function to share a NFS mountpoint.
#
function share_nfs #fs
{
	typeset fs=$1

	# Nothing to do if the filesystem is already shared.
	is_shared "$fs" && return

	case "$UNAME" in
	Linux)
		log_must exportfs "*:$fs"
		;;
	FreeBSD)
		typeset mountd
		read -r mountd < /var/run/mountd.pid
		# Append the path to the ZFS exports file (the trailing
		# tab keeps any whitespace in the path unambiguous), then
		# signal mountd to reload its configuration.
		log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
		log_must kill -s HUP "$mountd"
		;;
	*)
		log_must share -F nfs "$fs"
		;;
	esac

	return 0
}
1394
#
# Helper function to unshare a NFS mountpoint.
#
function unshare_nfs #fs
{
	typeset fs=$1

	# Nothing to do if the filesystem is not currently shared.
	! is_shared "$fs" && return

	case "$UNAME" in
	Linux)
		log_must exportfs -u "*:$fs"
		;;
	FreeBSD)
		typeset mountd
		read -r mountd < /var/run/mountd.pid
		# Drop the filesystem's line from the ZFS exports file.
		# Backslashes in the path are doubled because awk -v
		# interprets escape sequences in the value.  Then signal
		# mountd to reload its configuration.
		awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
		log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
		log_must kill -s HUP "$mountd"
		;;
	*)
		log_must unshare -F nfs $fs
		;;
	esac

	return 0
}
1422
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
	# Dispatch to the platform's NFS share listing tool.
	if [ "$UNAME" = "Linux" ]; then
		exportfs -v
	elif [ "$UNAME" = "FreeBSD" ]; then
		showmount
	else
		share -F nfs
	fi
}
1440
# Probe for working NFS utilities; mark the test unsupported otherwise.
function check_nfs
{
	# The "||" is attached to the whole case statement, so a failure
	# of whichever branch ran marks the suite unsupported.
	case "$UNAME" in
	Linux)
		exportfs -s
		;;
	FreeBSD)
		showmount -e
		;;
	*)
		log_unsupported "Unknown platform"
		;;
	esac || log_unsupported "The NFS utilities are not installed"
}
1455
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		#
		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
		# /etc/exports.d./* to provide a clean test environment.
		#
		log_must exportfs -r

		log_note "NFS server must be started prior to running ZTS."
		return
	elif is_freebsd; then
		# Ask mountd to reload its export list.
		log_must kill -s HUP $(</var/run/mountd.pid)

		log_note "NFS server must be started prior to running ZTS."
		return
	fi

	# illumos: performing a real share operation is what brings the
	# nfs/server SMF service online permanently.
	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1521
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
	# Linux and FreeBSD have no zones; always "global".
	if is_linux || is_freebsd; then
		return 0
	else
		typeset cur_zone=$(zonename 2>/dev/null)
		# Quote the expansion: if zonename failed and $cur_zone is
		# empty, the unquoted form was a test(1) syntax error
		# ("[ = global ]") rather than a clean mismatch.
		[ "$cur_zone" = "global" ]
	fi
}
1536
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given: any zone is acceptable.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# Local zones additionally need the delegated test pools
		# remounted before use.
		reexport_pool
	fi

	return 0
}
1579
# Return 0 if create successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list

function create_pool #pool devs_list
{
	# Strip any dataset component so "$pool/fs" names the pool.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Start from a clean slate if the pool already exists.
	poolexists $pool && destroy_pool $pool

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1608
# Return 0 if the pool is destroyed successfully; 1 otherwise (no pool
# name given, or the pool does not exist).
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.

function destroy_pool #pool
{
	# Strip any dataset component so "$pool/fs" names the pool.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Record the mountpoint before the pool goes away.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd/udev activity can cause attempts
			# to destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			log_must_busy zpool destroy -f $pool

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1645
# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options

function create_dataset #dataset dataset_options
{
	typeset dataset=$1

	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Start from a clean slate if the dataset already exists.
	datasetexists $dataset && destroy_dataset $dataset

	log_must zfs create $@ $dataset

	return 0
}
1670
# Return 0 if the dataset is destroyed successfully; 1 otherwise (no name
# given, or the dataset does not exist).
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.

function destroy_dataset # dataset [args]
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if is_global_zone ; then
		if datasetexists "$dataset" ; then
			# Record the mountpoint before it goes away.
			mtpt=$(get_prop mountpoint "$dataset")
			log_must_busy zfs destroy $args $dataset

			# Quote $mtpt: unquoted, a mountpoint containing
			# whitespace word-splits and breaks both the test
			# and the rm.
			[ -d "$mtpt" ] && log_must rm -rf "$mtpt"
		else
			log_note "Dataset does not exist. ($dataset)"
			return 1
		fi
	fi

	return 0
}
1703
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
# Points TESTPOOL and TESTPOOL1..TESTPOOL4 at the zone's delegated
# containers ($ZONE_POOL/$ZONE_CTR<i>) and mounts each if necessary.
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# eval is required to read/assign the dynamically
			# named TESTPOOL$i variables.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1727
#
# Verify a given disk or pool state
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	if [[ -z $pool || -z $state ]]; then
		log_fail "Arguments invalid or missing"
	fi

	if [[ -n $disk ]]; then
		# Look for the state on the disk's own status line.
		zpool status -v $pool | grep "$disk" | grep -qi "$state"
	else
		# No disk given: check only the overall pool health.
		zpool get -H -o value health $pool | grep -qi "$state"
	fi
}
1749
#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both the filesystem and snapshot components must be non-empty
	# and separated by '@'.
	if [[ $dataset != *@* || -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1772
#
# Given a device and 'ashift' value verify it's correctly set on every label
#
function verify_ashift # device ashift
{
	typeset device="$1"
	typeset ashift="$2"

	# zdb -lll dumps all four label copies; fail on the first
	# mismatching ashift, and require that exactly 4 matching values
	# were seen overall.
	zdb -e -lll $device | awk -v ashift=$ashift '
	    /ashift: / {
	        if (ashift != $2)
	            exit 1;
	        else
	            count++;
	    }
	    END {
	        exit (count != 4);
	    }'
}
1792
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	# Any remaining arguments are directories to search for pool
	# devices on import.
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export/import cycle so zdb operates on consistent on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	# Preserve zdb's output for the log if verification fails.
	if ! zdb -cudi $filesys > $zdbout 2>&1; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		rm -f $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1831
#
# Given a pool issue a scrub and verify that no checksum errors are reported.
#
function verify_pool
{
	typeset pool=${1:-$TESTPOOL}

	log_must zpool scrub $pool
	log_must wait_scrubbed $pool

	# Sum the CKSUM column over every vdev line of the status output.
	typeset -i cksum=$(zpool status $pool | awk '
	    !NF { isvdev = 0 }
	    isvdev { errors += $NF }
	    /CKSUM$/ { isvdev = 1 }
	    END { print errors }
	')
	if [[ $cksum != 0 ]]; then
		log_must zpool status -v
		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
	fi
}
1853
1854#
1855# Given a pool, and this function list all disks in the pool
1856#
1857function get_disklist # pool
1858{
1859	echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
1860	    grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
1861}
1862
#
# Given a pool, and this function list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
	# Relies on word-splitting of the unquoted argument inside
	# get_disklist: "-P $1" becomes the -P flag (full paths) plus
	# the pool name on the zpool iostat command line.
	get_disklist "-P $1"
}
1871
1872
1873
# /**
#  This function kills a given list of processes after a time period. We use
#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
#  would be listed as FAIL, which we don't want : we're happy with stress tests
#  running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	for pid in $cpids; do
		# Only signal processes that are still alive.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1901
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	# Quote both operands so an empty device state yields a clean
	# mismatch instead of a test(1) syntax error.
	[ "$state" = "$cur_state" ]
}
1917
#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when  pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i elapsed

	# Poll once per second up to the timeout.
	for ((elapsed = 0; elapsed < timeout; elapsed++)); do
		check_hotspare_state $pool $disk $state && return 0
		sleep 1
	done

	return 1
}
1942
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail,removed}
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	# Quote both operands so an empty device state yields a clean
	# mismatch instead of a test(1) syntax error.
	[ "$state" = "$cur_state" ]
}
1958
#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when  pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i elapsed

	# Poll once per second up to the timeout.
	for ((elapsed = 0; elapsed < timeout; elapsed++)); do
		check_vdev_state $pool $disk $state && return 0
		sleep 1
	done

	return 1
}
1983
#
# Wait for vdev 'sit_out' property to be cleared.
#
# $1 pool name
# $2 vdev name
# $3 timeout
#
function wait_sit_out #pool vdev timeout
{
	typeset pool=${1:-$TESTPOOL}
	typeset vdev="$2"
	typeset timeout=${3:-300}
	typeset -i t

	# Poll once per second until sit_out reads back "off".
	for ((t = 0; t < timeout; t++)); do
		if [ "$(get_vdev_prop sit_out "$pool" "$vdev")" = "off" ]; then
			return 0
		fi
		sleep 1
	done

	return 1
}
2005
#
# Check the output of 'zpool status -v <pool>',
# and to see if the content of <token> contain the <keyword> specified.
#
# Return 0 is contain, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Pull out the status line whose first field is "<token>:"
	# (e.g. "scan:", "remove:", "checkpoint:").
	scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Case-insensitive keyword match against that line.
	echo $scan | grep -qi "$keyword"
}
2025
2026#
2027# The following functions are instance of check_pool_status()
2028#	is_pool_resilvering - to check if the pool resilver is in progress
2029#	is_pool_resilvered - to check if the pool resilver is completed
2030#	is_pool_scrubbing - to check if the pool scrub is in progress
2031#	is_pool_scrubbed - to check if the pool scrub is completed
2032#	is_pool_scrub_stopped - to check if the pool scrub is stopped
2033#	is_pool_scrub_paused - to check if the pool scrub has paused
2034#	is_pool_removing - to check if the pool removing is a vdev
2035#	is_pool_removed - to check if the pool remove is completed
2036#	is_pool_discarding - to check if the pool checkpoint is being discarded
2037#	is_pool_replacing - to check if the pool is performing a replacement
2038#
2039function is_pool_resilvering #pool <verbose>
2040{
2041	check_pool_status "$1" "scan" \
2042	    "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
2043}
2044
2045function is_pool_resilvered #pool <verbose>
2046{
2047	check_pool_status "$1" "scan" "resilvered " $2
2048}
2049
2050function is_pool_scrubbing #pool <verbose>
2051{
2052	check_pool_status "$1" "scan" "scrub in progress since " $2
2053}
2054
function is_pool_error_scrubbing #pool <verbose>
{
	# An active error scrub is reported on the "scrub:" status line.
	# The redundant "return $?" was dropped: the status of
	# check_pool_status is already the function's status, matching
	# the sibling is_pool_* helpers.
	check_pool_status "$1" "scrub" "error scrub in progress since " $2
}
2060
2061function is_pool_scrubbed #pool <verbose>
2062{
2063	check_pool_status "$1" "scan" "scrub repaired" $2
2064}
2065
2066function is_pool_scrub_stopped #pool <verbose>
2067{
2068	check_pool_status "$1" "scan" "scrub canceled" $2
2069}
2070
function is_pool_error_scrub_stopped #pool <verbose>
{
	# A canceled error scrub is reported on the "scrub:" status line.
	# The redundant "return $?" was dropped: the status of
	# check_pool_status is already the function's status, matching
	# the sibling is_pool_* helpers.
	check_pool_status "$1" "scrub" "error scrub canceled on " $2
}
2076
2077function is_pool_scrub_paused #pool <verbose>
2078{
2079	check_pool_status "$1" "scan" "scrub paused since " $2
2080}
2081
function is_pool_error_scrub_paused #pool <verbose>
{
	# A paused error scrub is reported on the "scrub:" status line.
	# The redundant "return $?" was dropped: the status of
	# check_pool_status is already the function's status, matching
	# the sibling is_pool_* helpers.
	check_pool_status "$1" "scrub" "error scrub paused since " $2
}
2087
2088function is_pool_removing #pool
2089{
2090	check_pool_status "$1" "remove" "in progress since "
2091}
2092
2093function is_pool_removed #pool
2094{
2095	check_pool_status "$1" "remove" "completed on"
2096}
2097
2098function is_pool_discarding #pool
2099{
2100	check_pool_status "$1" "checkpoint" "discarding"
2101}
2102function is_pool_replacing #pool
2103{
2104	zpool status "$1" | grep -qE 'replacing-[0-9]+'
2105}
2106
# Wait (up to $2 seconds, default 30) for a pool to report DEGRADED health.
# Returns 0 once degraded, 1 on timeout.
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
2125
#
# Use create_pool()/destroy_pool() to clean up the information in
# in the given disk to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
	# Unique scratch pool name per process.
	typeset pool="foopool$$"

	for vdev in $@; do
		zero_partitions $vdev
	done

	poolexists $pool && destroy_pool $pool
	# Creating and destroying a throwaway pool relabels the devices.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
2144
#/**
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#*/
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux || is_freebsd; then
		echo "$@"
		return
	fi


	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	# Snapshot the current swap and dump configuration so those
	# devices can be excluded below.
	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

	# Default to every disk reported by format(8) when no list given.
	disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
BEGIN { FS="."; }

/^Specify disk/{
	searchdisks=0;
}

{
	if (searchdisks && $2 !~ "^$"){
		split($2,arr," ");
		print arr[1];
	}
}

/^AVAILABLE DISK SELECTIONS:/{
	searchdisks=1;
}
')}

	unused=""
	for disk in $disks; do
	# Check for mounted
		grep -q "${disk}[sp]" /etc/mnttab && continue
	# Check for swap
		grep -q "${disk}[sp]" $sfi && continue
	# check for dump device
		grep -q "${disk}[sp]" $dmpi && continue
	# check to see if this disk hasn't been explicitly excluded
	# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi $dmpi

# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if is_disk_device $DEV_DSKDIR/${disk}s0 && \
		    [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
	done

# finally, return our disk list
	echo $unused
}
2221
# Create a FreeBSD user in the given group with a home under $basedir,
# probing upward from uid 1000 until pw(8) accepts a unique uid.
function add_user_freebsd #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=$3

	# Check to see if the user exists.
	if id $user > /dev/null 2>&1; then
		return 0
	fi

	# Assign 1000 as the base uid
	typeset -i uid=1000
	while true; do
		pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
		case $? in
			0) break ;;
			# The uid is not unique
			65) ((uid += 1)) ;;
			*) return 1 ;;
		esac
		if [[ $uid == 65000 ]]; then
			log_fail "No user id available under 65000 for $user"
		fi
	done

	# Silence MOTD
	touch $basedir/$user/.hushlogin

	return 0
}
2253
#
# Delete the specified user.
#
# $1 login name
#
function del_user_freebsd #<logname>
{
	typeset user=$1

	# Only remove accounts that actually exist.
	id $user > /dev/null 2>&1 && log_must pw userdel $user

	return 0
}
2269
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group_freebsd #<group_name>
{
	typeset group=$1

	# See if the group already exists.
	if pw groupshow $group >/dev/null 2>&1; then
		return 0
	fi

	# Assign 1000 as the base gid
	typeset -i gid=1000
	while true; do
		# Probe upward until pw(8) accepts a unique gid.
		pw groupadd -g $gid -n $group > /dev/null 2>&1
		case $? in
			0) return 0 ;;
			# The gid is not  unique
			65) ((gid += 1)) ;;
			*) return 1 ;;
		esac
		if [[ $gid == 65000 ]]; then
			log_fail "No user id available under 65000 for $group"
		fi
	done
}
2299
#
# Delete the specified group.
#
# $1 group name
#
function del_group_freebsd #<group_name>
{
	typeset group=$1

	# Map pw(8) exit codes onto success/failure.
	pw groupdel -n $group > /dev/null 2>&1
	case $? in
		# Group does not exist, or was deleted successfully.
		0|6|65) return 0 ;;
		# Name already exists as a group name
		9) log_must pw groupdel $group ;;
		*) return 1 ;;
	esac

	return 0
}
2320
# Create an illumos user in the given group with a home under $basedir.
function add_user_illumos #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=$3

	log_must useradd -g $group -d $basedir/$user -m $user
	return 0
}
2331
# Delete an illumos user, retrying while the account is still in use.
function del_user_illumos #<user_name>
{
	typeset user=$1

	id $user > /dev/null 2>&1 &&
		log_must_retry "currently used" 6 userdel $user

	return 0
}
2342
# Create an illumos group, probing upward from gid 100 until groupadd
# accepts a unique gid.
function add_group_illumos #<group_name>
{
	typeset group=$1

	typeset -i gid=100
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		case $? in
			0) return 0 ;;
			# The gid is not  unique
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
2358
# Delete the specified illumos group.
#
# $1 group name
function del_group_illumos #<group_name>
{
	typeset group=$1

	# Fixed: this previously referenced the undefined variable $grp
	# instead of $group, so the existence probe and the deletion
	# always operated on an empty name.
	groupmod -n $group $group > /dev/null 2>&1
	case $? in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must groupdel $group ;;
		*) return 1 ;;
	esac
}
2372
2373function add_user_linux #<group_name> <user_name> <basedir>
2374{
2375	typeset group=$1
2376	typeset user=$2
2377	typeset basedir=$3
2378
2379	log_must useradd -g $group -d $basedir/$user -m $user
2380
2381	# Add new users to the same group and the command line utils.
2382	# This allows them to be run out of the original users home
2383	# directory as long as it permissioned to be group readable.
2384	cmd_group=$(stat --format="%G" $(command -v zfs))
2385	log_must usermod -a -G $cmd_group $user
2386
2387	return 0
2388}
2389
# Delete a Linux user, retrying while the account is still in use.
function del_user_linux #<user_name>
{
	typeset user=$1

	id $user > /dev/null 2>&1 &&
		log_must_retry "currently used" 6 userdel $user
}
2398
# Create the specified Linux group; groupadd selects the gid itself.
#
# $1 group name
function add_group_linux #<group_name>
{
	typeset group=$1

	# A single attempt suffices: the previous retry loop returned on
	# both the success and failure arms, so it could never iterate.
	# Preserve the original 0/1 status mapping.
	if groupadd $group > /dev/null 2>&1; then
		return 0
	fi
	return 1
}
2413
# Delete the specified Linux group if it exists.
#
# $1 group name
function del_group_linux #<group_name>
{
	typeset group=$1

	# getent exits 2 when the key is not found.
	getent group $group > /dev/null 2>&1
	case $? in
		# Group does not exist.
		2) return 0 ;;
		# Name already exists as a group name
		0) log_must groupdel $group ;;
		*) return 1 ;;
	esac

	return 0
}
2429
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=${3:-"$TEST_BASE_DIR"}

	if [[ -z $group || -z $user ]]; then
		log_fail "group name or user name are not defined."
	fi

	# Dispatch to the platform-specific implementation.
	case "$UNAME" in
	FreeBSD)	add_user_freebsd "$group" "$user" "$basedir" ;;
	Linux)		add_user_linux "$group" "$user" "$basedir" ;;
	*)		add_user_illumos "$group" "$user" "$basedir" ;;
	esac

	return 0
}
2461
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"$TEST_BASE_DIR"}

	if [[ -z $user ]]; then
		log_fail "login name is necessary."
	fi

	# Dispatch to the platform-specific implementation.
	case "$UNAME" in
	FreeBSD)	del_user_freebsd "$user" ;;
	Linux)		del_user_linux "$user" ;;
	*)		del_user_illumos "$user" ;;
	esac

	# Remove any leftover home directory.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
2493
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if [[ -z $group ]]; then
		log_fail "group name is necessary."
	fi

	# Dispatch to the platform-specific implementation.
	case "$UNAME" in
	FreeBSD)	add_group_freebsd "$group" ;;
	Linux)		add_group_linux "$group" ;;
	*)		add_group_illumos "$group" ;;
	esac

	return 0
}
2521
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset group=$1

	if [[ -z $group ]]; then
		log_fail "group name is necessary."
	fi

	# Dispatch to the platform-specific implementation.
	case "$UNAME" in
	FreeBSD)	del_group_freebsd "$group" ;;
	Linux)		del_group_linux "$group" ;;
	*)		del_group_illumos "$group" ;;
	esac

	return 0
}
2549
2550#
2551# This function will return true if it's safe to destroy the pool passed
2552# as argument 1. It checks for pools based on zvols and files, and also
2553# files contained in a pool that may have a different mountpoint.
2554#
2555function safe_to_destroy_pool { # $1 the pool name
2556
2557	typeset pool=""
2558	typeset DONT_DESTROY=""
2559
2560	# We check that by deleting the $1 pool, we're not
2561	# going to pull the rug out from other pools. Do this
2562	# by looking at all other pools, ensuring that they
2563	# aren't built from files or zvols contained in this pool.
2564
2565	for pool in $(zpool list -H -o name)
2566	do
2567		ALTMOUNTPOOL=""
2568
2569		# this is a list of the top-level directories in each of the
2570		# files that make up the path to the files the pool is based on
2571		FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')
2572
2573		# this is a list of the zvols that make up the pool
2574		ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')
2575
2576		# also want to determine if it's a file-based pool using an
2577		# alternate mountpoint...
2578		POOL_FILE_DIRS=$(zpool status -v $pool | \
2579					awk '/\// {print $1}' | \
2580					awk -F/ '!/dev/ {print $2}')
2581
2582		for pooldir in $POOL_FILE_DIRS
2583		do
2584			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2585					awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')
2586
2587			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2588		done
2589
2590
2591		if [ ! -z "$ZVOLPOOL" ]
2592		then
2593			DONT_DESTROY="true"
2594			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2595		fi
2596
2597		if [ ! -z "$FILEPOOL" ]
2598		then
2599			DONT_DESTROY="true"
2600			log_note "Pool $pool is built from $FILEPOOL on $1"
2601		fi
2602
2603		if [ ! -z "$ALTMOUNTPOOL" ]
2604		then
2605			DONT_DESTROY="true"
2606			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2607		fi
2608	done
2609
2610	if [ -z "${DONT_DESTROY}" ]
2611	then
2612		return 0
2613	else
2614		log_note "Warning: it is not safe to destroy $1!"
2615		return 1
2616	fi
2617}
2618
2619#
2620# Verify zfs operation with -p option work as expected
2621# $1 operation, value could be create, clone or rename
2622# $2 dataset type, value could be fs or vol
2623# $3 dataset name
2624# $4 new dataset name
2625#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes only the new name; volumes also
			# need a size argument.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	destroy_dataset "${newdataset%/*}" "-rRf"

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2688
2689#
2690# Get configuration of pool
2691# $1 pool name
2692# $2 config name
2693#
#
# Print the value of a named config entry from zdb's pool configuration,
# with any surrounding single quotes stripped.
#
# $1 pool name
# $2 config name
#
function get_config
{
	typeset pool=$1
	typeset config=$2

	if ! poolexists "$pool" ; then
		return 1
	fi
	# Pools with no cachefile must be probed on disk (-e); otherwise
	# read the cached configuration (-C).
	#
	# Bug fix: the original sub() calls passed $2 as the *replacement*
	# (and implicitly edited $0); quote stripping requires an empty
	# replacement with $2 as the target: sub(re, "", $2).
	if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
		zdb -e $pool
	else
		zdb -C $pool
	fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^'\''/, "", $2); sub(/'\''$/, "", $2); print $2}'
}
2708
2709#
# Private function. Randomly select one of the items from the arguments.
2711#
2712# $1 count
2713# $2-n string
2714#
function _random_get
{
	typeset -i count=$1
	shift

	# Pick a random 1-based index into the remaining arguments and
	# extract that space-separated field.
	typeset all="$@"
	typeset -i pick
	((pick = RANDOM % count + 1))

	echo "$all" | cut -f $pick -d ' '
}
2726
2727#
# Randomly select one item from the arguments, including a NONE (empty) choice
2729#
function random_get_with_non
{
	typeset -i cnt=$#

	# One extra slot beyond the argument count: when the random index
	# lands on it, cut(1) yields an empty string (the NONE case).
	#
	# Bug fix: the original used '((cnt =+ 1))', which *assigns* +1
	# (so cnt was always 1 and the first argument was always chosen);
	# the increment operator is '+='.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2737
2738#
# Randomly select one item from the arguments, excluding the NONE (empty) choice
2740#
function random_get
{
	# Pick uniformly from exactly the given arguments (no empty slot).
	typeset -i n=$#
	_random_get "$n" "$@"
}
2745
2746#
2747# The function will generate a dataset name with specific length
2748# $1, the length of the name
2749# $2, the base string to construct the name
2750#
function gen_dataset_name
{
	typeset -i target=$1
	typeset seed="$2"
	typeset -i chunk=${#seed}
	typeset -i reps
	typeset result=""

	# Ceiling division: enough repetitions of the base string to reach
	# at least the requested length (may overshoot by up to chunk-1).
	((reps = (target + chunk - 1) / chunk))

	while ((reps > 0)); do
		result="${result}${seed}"
		((reps -= 1))
	done

	echo $result
}
2772
2773#
2774# Get cksum tuple of dataset
2775# $1 dataset name
2776#
2777# sample zdb output:
2778# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2779# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2780# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2781# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2782function datasetcksum
2783{
2784	typeset cksum
2785	sync
2786	sync_all_pools
2787	zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
2788}
2789
2790#
2791# Get the given disk/slice state from the specific field of the pool
2792#
2793function get_device_state #pool disk field("", "spares","logs")
2794{
2795	typeset pool=$1
2796	typeset disk=${2#$DEV_DSKDIR/}
2797	typeset field=${3:-$pool}
2798
2799	zpool status -v "$pool" 2>/dev/null | \
2800		awk -v device=$disk -v pool=$pool -v field=$field \
2801		'BEGIN {startconfig=0; startfield=0; }
2802		/config:/ {startconfig=1}
2803		(startconfig==1) && ($1==field) {startfield=1; next;}
2804		(startfield==1) && ($1==device) {print $2; exit;}
2805		(startfield==1) &&
2806		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
2807}
2808
2809#
2810# get the root filesystem name if it's zfsroot system.
2811#
2812# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# Find the zfs dataset mounted at "/"; note no probe is attempted
	# on Linux, so this always log_fails there.
	if is_freebsd; then
		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
	elif ! is_linux; then
		rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
			/etc/mnttab)
	fi
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootfs"
	fi
	# Sanity check: the mount table entry must be a real dataset.
	if datasetexists $rootfs; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2832
2833#
2834# get the rootfs's pool name
2835# return:
2836#       rootpool name
2837#
function get_rootpool
{
	# The pool name is everything before the first '/' of the root
	# dataset name.
	typeset ds=$(get_rootfs)
	echo ${ds%%/*}
}
2843
2844#
2845# To verify if the require numbers of disks is given
2846#
function verify_disk_count
{
	# $1: whitespace-separated disk list, $2: minimum count (default 1)
	typeset -i needed=${2:-1}
	typeset -i have=$(echo "$1" | wc -w)

	if ((have < needed)); then
		log_untested "A minimum of $needed disks is required to run." \
			" You specified $have disk(s)"
	fi
}
2858
function ds_is_volume
{
	# True when the dataset's 'type' property reports a volume.
	typeset t
	t=$(get_prop type $1)
	[ $t = "volume" ]
}
2864
function ds_is_filesystem
{
	# True when the dataset's 'type' property reports a filesystem.
	typeset t
	t=$(get_prop type $1)
	[ $t = "filesystem" ]
}
2870
2871#
2872# Check if Trusted Extensions are installed and enabled
2873#
function is_te_enabled
{
	# labeld is the Trusted Extensions service; succeed only when its
	# reported state contains "enabled".
	typeset state
	state=$(svcs -H -o state labeld 2>/dev/null)
	[[ "$state" == *enabled* ]]
}
2878
2879# Return the number of CPUs (cross-platform)
# Return the number of CPUs, probing the per-platform source of truth.
function get_num_cpus
{
	if is_freebsd; then
		sysctl -n kern.smp.cpus
	elif is_linux; then
		grep -c '^processor' /proc/cpuinfo
	else
		psrinfo | wc -l
	fi
}
2890
2891# Utility function to determine if a system has multiple cpus.
function is_mp
{
	# True when more than one CPU is present.
	(( $(get_num_cpus) > 1 ))
}
2896
# Report the CPU frequency in MHz using the per-platform source.
function get_cpu_freq
{
	if is_freebsd; then
		sysctl -n hw.clockrate
	elif is_linux; then
		lscpu | awk '/CPU( max)? MHz/ { print $NF }'
	else
		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
	fi
}
2907
2908# Run the given command as the user provided.
# Run the given command as the user provided.
function user_run
{
	typeset user=$1
	shift

	log_note "user: $user"
	log_note "cmd: $*"

	# Sanity-check that the target user can execute through $PATH and
	# print a verbose hint otherwise; this is a common problem when
	# running ZTS locally from inside the source tree.
	if ! sudo -Eu $user test -x $PATH ; then
		log_note "-------------------------------------------------"
		log_note "Warning: $user doesn't have permissions on $PATH"
		log_note ""
		log_note "This usually happens when you're running ZTS locally"
		log_note "from inside the ZFS source dir, and are attempting to"
		log_note "run a test that calls user_run.  The ephemeral user"
		log_note "($user) that ZTS is creating does not have permission"
		log_note "to traverse to $PATH, or the binaries in $PATH are"
		log_note "not the right permissions."
		log_note ""
		log_note "To get around this, copy your ZFS source directory"
		log_note "to a world-accessible location (like /tmp), and "
		log_note "change the permissions on your ZFS source dir "
		log_note "to allow access."
		log_note ""
		log_note "Also, verify that /dev/zfs is RW for others:"
		log_note ""
		log_note "    sudo chmod o+rw /dev/zfs"
		log_note "-------------------------------------------------"
	fi

	typeset out=$TEST_BASE_DIR/out
	typeset err=$TEST_BASE_DIR/err

	# Execute under ksh as $user with PATH preserved; capture stdout
	# and stderr for logging and propagate the command's exit status.
	sudo -Eu $user \
	    env PATH="$PATH" ZTS_LOG_SUPPRESS_TIMESTAMP=1 \
	    ksh <<<"$*" >$out 2>$err
	typeset res=$?
	log_note "out: $(<$out)"
	log_note "err: $(<$err)"
	return $res
}
2950
2951#
2952# Check if the pool contains the specified vdevs
2953#
2954# $1 pool
2955# $2..n <vdev> ...
2956#
2957# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2958# vdevs is not in the pool, and 2 if pool name is missing.
2959#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset ret=0

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	# We could use 'zpool list' to only get the vdevs of the pool but we
	# can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
	# therefore we use the 'zpool status' output.
	typeset tmpfile=$(mktemp)
	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
	for vdev in "$@"; do
		if ! grep -wq ${vdev##*/} $tmpfile; then
			ret=1
			break
		fi
	done

	# Bug fix: always remove the temporary file; the previous early
	# 'return 1' inside the loop leaked it.
	rm -f $tmpfile
	return $ret
}
2984
function get_max
{
	# Print the largest of the integer arguments.
	typeset -l candidate best=$1
	shift

	for candidate in "$@"; do
		if ((candidate > best)); then
			best=$candidate
		fi
	done

	echo $best
}
2996
# Write data that can be compressed into a directory
function write_compressible
{
	typeset dir=$1
	typeset megs=$2
	typeset nfiles=${3:-1}
	typeset bs=${4:-1024k}
	typeset fname=${5:-file}

	[[ -d $dir ]] || log_fail "No directory: $dir"

	# Under Linux fio is not currently used since its behavior can
	# differ significantly across versions.  This includes missing
	# command line options and cases where the --buffer_compress_*
	# options fail to behave as expected.
	if is_linux; then
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			# Start from a fully sparse file of the target size.
			truncate -s $file_bytes $dir/$fname.$i

			# Write every third block to get 66% compression.
			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1
			done
		done
	else
		command -v fio > /dev/null || log_unsupported "fio missing"
		log_must eval fio \
		    --name=job \
		    --fallocate=0 \
		    --minimal \
		    --randrepeat=0 \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --directory="$dir" \
		    --numjobs="$nfiles" \
		    --nrfiles="$nfiles" \
		    --rw=write \
		    --bs="$bs" \
		    --filesize="$megs" \
		    "--filename_format='$fname.\$jobnum' >/dev/null"
	fi
}
3045
function get_objnum
{
	# Print the inode/object number of the given path.
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	# stat(1) format options differ between FreeBSD and elsewhere.
	if is_freebsd; then
		stat -f "%i" $pathname
	else
		stat -c %i $pathname
	fi
}
3059
3060#
3061# Sync data to the pool
3062#
3063# $1 pool name
3064# $2 boolean to force uberblock (and config including zpool cache file) update
3065#
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset force=${2:-false}

	# 'zpool sync -f' additionally forces an uberblock/config update.
	if [[ $force != true ]]; then
		log_must zpool sync $pool
	else
		log_must zpool sync -f $pool
	fi

	return 0
}
3079
3080#
3081# Sync all pools
3082#
3083# $1 boolean to force uberblock (and config including zpool cache file) update
3084#
function sync_all_pools #<force>
{
	typeset force=${1:-false}

	# With no pool argument 'zpool sync' syncs every imported pool;
	# -f also forces an uberblock/config update.
	if [[ $force != true ]]; then
		log_must zpool sync
	else
		log_must zpool sync -f
	fi

	return 0
}
3097
3098#
3099# Wait for zpool 'freeing' property drops to zero.
3100#
3101# $1 pool name
3102#
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once per second until the 'freeing' property reaches zero.
	until [[ "$(zpool list -Ho freeing $pool)" == "0" ]]; do
		log_must sleep 1
	done
}
3111
3112#
3113# Wait for every device replace operation to complete
3114#
3115# $1 pool name
3116# $2 timeout
3117#
function wait_replacing #pool timeout
{
	typeset timeout=${2:-300}
	typeset pool=${1:-$TESTPOOL}
	typeset -i elapsed=0

	# Poll until no replace operation is in progress or the timeout
	# (in seconds) expires.
	while ((elapsed < timeout)) && is_pool_replacing $pool; do
		sleep 1
		((elapsed += 1))
	done
}
3127
3128# Wait for a pool to be scrubbed
3129#
3130# $1 pool name
3131# $2 timeout
3132#
function wait_scrubbed #pool timeout
{
	typeset timeout=${2:-300}
	typeset pool=${1:-$TESTPOOL}
	typeset -i elapsed=0

	# Poll until the scrub completes or the timeout (seconds) expires.
	while ((elapsed < timeout)) && ! is_pool_scrubbed $pool; do
		sleep 1
		((elapsed += 1))
	done
}
3142
3143# Backup the zed.rc in our test directory so that we can edit it for our test.
3144#
3145# Returns: Backup file name.  You will need to pass this to zed_rc_restore().
3146function zed_rc_backup
3147{
3148	zedrc_backup="$(mktemp)"
3149	cp $ZEDLET_DIR/zed.rc $zedrc_backup
3150	echo $zedrc_backup
3151}
3152
3153function zed_rc_restore
3154{
3155	mv $1 $ZEDLET_DIR/zed.rc
3156}
3157
3158#
3159# Setup custom environment for the ZED.
3160#
3161# $@ Optional list of zedlets to run under zed.
function zed_setup
{
	if ! is_linux; then
		log_unsupported "No zed on $UNAME"
	fi

	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	if [[ ! -e $VDEVID_CONF ]]; then
		log_must touch $VDEVID_CONF
	fi

	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi
	# Remaining positional arguments are extra zedlets to install.
	EXTRA_ZEDLETS=$@

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration.  Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Scripts must only be user writable.
	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		saved_umask=$(umask)
		log_must umask 0022
		for i in $EXTRA_ZEDLETS ; do
			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
		done
		log_must umask $saved_umask
	fi

	# Customize the zed.rc file to enable the full debug log.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc

}
3204
3205#
3206# Cleanup custom ZED environment.
3207#
3208# $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
	if ! is_linux; then
		return
	fi

	for extra_zedlet; do
		log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
	done
	# Remove everything zed_setup/zed_start created, then the (now
	# empty) directory itself (-d).
	log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \
	                $ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
	                $ZEDLET_DIR
}
3222
3223#
3224# Check if ZED is currently running; if so, returns PIDs
3225#
function zed_check
{
	if ! is_linux; then
		return
	fi
	# Match both the installed binary name and the libtool wrapper
	# name used when running from the build tree.
	zedpids="$(pgrep -x zed)"
	zedpids2="$(pgrep -x lt-zed)"
	echo ${zedpids} ${zedpids2}
}
3235
3236#
3237# Check if ZED is currently running, if not start ZED.
3238#
function zed_start
{
	if ! is_linux; then
		return
	fi

	# ZEDLET_DIR=$TEST_BASE_DIR/zed
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	# Verify the ZED is not already running.
	zedpids=$(zed_check)
	if [ -n "$zedpids" ]; then
		# We never, ever, really want it to just keep going if zed
		# is already running - usually this implies our test cases
		# will break very strangely because whatever we wanted to
		# configure zed for won't be listening to our changes in the
		# tmpdir
		log_fail "ZED already running - ${zedpids}"
	else
		log_note "Starting ZED"
		# run ZED in the background and redirect foreground logging
		# output to $ZED_LOG.
		log_must truncate -s 0 $ZED_DEBUG_LOG
		log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
		    "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
	fi

	return 0
}
3270
3271#
3272# Kill ZED process
3273#
#
# Kill ZED process
#
function zed_stop
{
	if ! is_linux; then
		# Bug fix: this was 'return ""', which is not a valid
		# numeric argument and made the no-op path error out with
		# status 2 instead of returning success.
		return
	fi

	log_note "Stopping ZED"
	# Keep killing until zed_check reports no remaining zed processes.
	while true; do
		zedpids=$(zed_check)
		[ -z "$zedpids" ] && break

		log_must kill $zedpids
		sleep 1
	done
	return 0
}
3290
3291#
3292# Drain all zevents
3293#
function zed_events_drain
{
	# Keep clearing the queue until it reads back empty.
	until [ $(zpool events -H | wc -l) -eq 0 ]; do
		sleep 1
		zpool events -c >/dev/null
	done
}
3301
3302# Set a variable in zed.rc to something, un-commenting it in the process.
3303#
3304# $1 variable
3305# $2 value
function zed_rc_set
{
	var="$1"
	val="$2"

	# Delete any existing line mentioning the variable (commented or
	# not); sed is invoked directly instead of through eval.
	sed -i "/$var/d" $ZEDLET_DIR/zed.rc

	# Append the new assignment at the end.
	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
3317
3318
3319#
# Check if the provided device is actively being used as a swap device.
3321#
function is_swap_inuse
{
	typeset device=$1

	if [[ -z $device ]] ; then
		log_note "No device specified."
		return 1
	fi

	# Resolve symlinks on Linux because 'swapon -s' lists real paths.
	case "$UNAME" in
	Linux)
		swapon -s | grep -wq $(readlink -f $device)
		;;
	FreeBSD)
		swapctl -l | grep -wq $device
		;;
	*)
		swap -l | grep -wq $device
		;;
	esac
}
3343
3344#
3345# Setup a swap device using the provided device.
3346#
#
# Setup a swap device using the provided device.
#
# $1 device to initialize and enable as swap
#
function swap_setup
{
	typeset swapdev=$1

	case "$UNAME" in
	Linux)
		log_must eval "mkswap $swapdev > /dev/null 2>&1"
		log_must swapon $swapdev
		;;
	FreeBSD)
		log_must swapctl -a $swapdev
		;;
	*)
		# Fix: this branch was mis-indented with spaces at the
		# wrong level; behavior is unchanged.
		log_must swap -a $swapdev
		;;
	esac

	return 0
}
3366
3367#
3368# Cleanup a swap device on the provided device.
3369#
#
# Cleanup a swap device on the provided device.
#
# $1 swap device to disable (no-op if not in use)
#
function swap_cleanup
{
	typeset swapdev=$1

	if is_swap_inuse $swapdev; then
		# Linux and FreeBSD both use swapoff (the original had two
		# identical branches); illumos uses 'swap -d'.
		if is_linux || is_freebsd; then
			log_must swapoff $swapdev
		else
			log_must swap -d $swapdev
		fi
	fi

	return 0
}
3386
3387#
3388# Set a global system tunable (64-bit value)
3389#
3390# $1 tunable name (use a NAME defined in tunables.cfg)
3391# $2 tunable values
3392#
3393function set_tunable64
3394{
3395	set_tunable_impl "$1" "$2" Z
3396}
3397
3398#
3399# Set a global system tunable (32-bit value)
3400#
3401# $1 tunable name (use a NAME defined in tunables.cfg)
3402# $2 tunable values
3403#
3404function set_tunable32
3405{
3406	set_tunable_impl "$1" "$2" W
3407}
3408
# Shared implementation for set_tunable32/64.
#
# $1 logical tunable NAME (per tunables.cfg), $2 value, $3 mdb format char
function set_tunable_impl
{
	typeset name="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"

	# Map the logical NAME to the platform-specific tunable name that
	# tunables.cfg defined as a variable of the same name.
	eval "typeset tunable=\$$name"
	case "$tunable" in
	UNSUPPORTED)
		log_unsupported "Tunable '$name' is unsupported on $UNAME"
		;;
	"")
		log_fail "Tunable '$name' must be added to tunables.cfg"
		;;
	*)
		;;
	esac

	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$UNAME" in
	Linux)
		typeset zfs_tunables="/sys/module/zfs/parameters"
		echo "$value" >"$zfs_tunables/$tunable"
		;;
	FreeBSD)
		sysctl vfs.zfs.$tunable=$value
		;;
	SunOS)
		# mdb write syntax; '0t' marks the value as decimal.
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		;;
	esac
}
3443
# Save the current value of tunable $1 under $TEST_BASE_DIR for a later
# restore_tunable.  Returns 1 if the base dir is missing and 2 if a saved
# value already exists (to avoid clobbering an earlier save).
function save_tunable
{
	if tunable_exists $1 ; then
		[[ ! -d $TEST_BASE_DIR ]] && return 1
		[[ -e $TEST_BASE_DIR/tunable-$1 ]] && return 2
		echo "$(get_tunable """$1""")" > "$TEST_BASE_DIR"/tunable-"$1"
	fi
}
3452
# Restore tunable $1 from the value stashed by save_tunable, then remove
# the stash file.  Returns 1 when no saved value exists.
function restore_tunable
{
	if tunable_exists $1 ; then
		[[ ! -e $TEST_BASE_DIR/tunable-$1 ]] && return 1
		val="$(cat $TEST_BASE_DIR/tunable-"""$1""")"
		# Written back as a 64-bit value regardless of original width.
		set_tunable64 "$1" "$val"
		rm $TEST_BASE_DIR/tunable-$1
	fi
}
3462
3463#
3464# Get a global system tunable
3465#
3466# $1 tunable name (use a NAME defined in tunables.cfg)
3467#
3468function get_tunable
3469{
3470	get_tunable_impl "$1"
3471}
3472
# Shared implementation for get_tunable/tunable_exists.
#
# $1 logical tunable NAME, $2 kernel module (default "zfs"),
# $3 non-empty for check-only mode (return status instead of log_fail)
function get_tunable_impl
{
	typeset name="$1"
	typeset module="${2:-zfs}"
	typeset check_only="$3"

	# Map the logical NAME to the platform tunable via tunables.cfg.
	eval "typeset tunable=\$$name"
	case "$tunable" in
	UNSUPPORTED)
		if [ -z "$check_only" ] ; then
			log_unsupported "Tunable '$name' is unsupported on $UNAME"
		else
			return 1
		fi
		;;
	"")
		if [ -z "$check_only" ] ; then
			log_fail "Tunable '$name' must be added to tunables.cfg"
		else
			return 1
		fi
		;;
	*)
		;;
	esac

	case "$UNAME" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		cat $zfs_tunables/$tunable
		;;
	FreeBSD)
		sysctl -n vfs.zfs.$tunable
		;;
	SunOS)
		# NOTE(review): '-eq' performs arithmetic (not string)
		# comparison here - presumably '==' was intended; confirm
		# before relying on non-"zfs" modules being rejected.
		[[ "$module" -eq "zfs" ]] || return 1
		;;
	esac
}
3512
3513# Does a tunable exist?
3514#
3515# $1: Tunable name
3516function tunable_exists
3517{
3518	get_tunable_impl $1 "zfs" 1
3519}
3520
3521#
3522# Compute xxh128sum for given file or stdin if no file given.
3523# Note: file path must not contain spaces
3524#
function xxh128digest
{
	# Emit only the digest column of xxh128sum's "<digest> <name>" output.
	xxh128sum $1 | cut -d' ' -f1
}
3529
3530#
3531# Compare the xxhash128 digest of two files.
3532#
function cmp_xxh128 {
	typeset f1=$1
	typeset f2=$2

	# Files compare equal iff their xxh128 digests match.
	[ "$(xxh128digest $f1)" = "$(xxh128digest $f2)" ]
}
3541
function new_fs #<args>
{
	# FreeBSD's newfs needs no confirmation; elsewhere feed it a 'y'.
	if [ "$UNAME" = "FreeBSD" ]; then
		newfs "$@"
	else
		echo y | newfs -v "$@"
	fi
}
3553
function stat_size #<path>
{
	# Print the file size in bytes; BSD stat uses -f %z, GNU/illumos -c %s.
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		stat -f %z "$target"
	else
		stat -c %s "$target"
	fi
}
3567
function stat_mtime #<path>
{
	# Print the modification time as seconds since the epoch.
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		stat -f %m "$target"
	else
		stat -c %Y "$target"
	fi
}
3581
function stat_ctime #<path>
{
	# Print the inode change time as seconds since the epoch.
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		stat -f %c "$target"
	else
		stat -c %Z "$target"
	fi
}
3595
function stat_crtime #<path>
{
	# Print the creation (birth) time as seconds since the epoch.
	# GNU stat reports 0 when the filesystem does not record it.
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		stat -f %B "$target"
	else
		stat -c %W "$target"
	fi
}
3609
function stat_generation #<path>
{
	# Print the inode generation number.  Linux stat has no format for
	# it, so the test suite's getversion helper is used there instead.
	typeset target=$1

	if [ "$UNAME" = "Linux" ]; then
		getversion "${target}"
	else
		stat -f %v "${target}"
	fi
}
3623
3624# Run a command as if it was being run in a TTY.
3625#
3626# Usage:
3627#
3628#    faketty command
3629#
function faketty
{
	# script(1) allocates a pty for the command; its option syntax
	# differs between FreeBSD and util-linux.
	if is_freebsd; then
		script -q /dev/null env "$@"
	else
		script --return --quiet -c "$*" /dev/null
	fi
}
3638
3639#
3640# Produce a random permutation of the integers in a given range (inclusive).
3641#
function range_shuffle # begin end
{
	typeset -i lo=$1
	typeset -i hi=$2

	# 'sort -R' emits the sequence in random order, one per line.
	seq ${lo} ${hi} | sort -R
}
3649
3650#
3651# Cross-platform xattr helpers
3652#
3653
function get_xattr # name path
{
	# Print the value of the named user xattr on the given path.
	typeset name=$1
	typeset path=$2

	if [ "$UNAME" = "FreeBSD" ]; then
		getextattr -qq user "${name}" "${path}"
	else
		attr -qg "${name}" "${path}"
	fi
}
3668
function set_xattr # name value path
{
	# Set the named user xattr on the given path to the given value.
	typeset name=$1
	typeset value=$2
	typeset path=$3

	if [ "$UNAME" = "FreeBSD" ]; then
		setextattr user "${name}" "${value}" "${path}"
	else
		attr -qs "${name}" -V "${value}" "${path}"
	fi
}
3684
function set_xattr_stdin # name path
{
	# Set the named user xattr on the path, reading the value from stdin.
	# (Signature comment fixed: the second argument is a path, not a value.)
	typeset name=$1
	typeset path=$2

	if [ "$UNAME" = "FreeBSD" ]; then
		setextattr -i user "${name}" "${path}"
	else
		attr -qs "${name}" "${path}"
	fi
}
3699
function rm_xattr # name path
{
	# Remove the named user xattr from the given path.
	typeset name=$1
	typeset path=$2

	if [ "$UNAME" = "FreeBSD" ]; then
		rmextattr -q user "${name}" "${path}"
	else
		attr -qr "${name}" "${path}"
	fi
}
3714
function ls_xattr # path
{
	# List the user xattr names present on the given path.
	typeset path=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		lsextattr -qq user "${path}"
	else
		attr -ql "${path}"
	fi
}
3728
function punch_hole # offset length file
{
	typeset offset=$1
	typeset length=$2
	typeset file=$3

	case "$UNAME" in
	Linux)
		fallocate --punch-hole --offset $offset --length $length "$file"
		;;
	FreeBSD)
		truncate -d -o $offset -l $length "$file"
		;;
	*)
		# No hole-punching support on other platforms.
		false
		;;
	esac
}
3747
function zero_range # offset length file
{
	typeset offset=$1
	typeset length=$2
	typeset file=$3

	# Only Linux fallocate supports zeroing a range in place.
	if [ "$UNAME" = "Linux" ]; then
		fallocate --zero-range --offset $offset --length $length "$file"
	else
		false
	fi
}
3763
3764#
3765# Wait for the specified arcstat to reach non-zero quiescence.
3766# If echo is 1 echo the value after reaching quiescence, otherwise
3767# if echo is 0 print the arcstat we are waiting on.
3768#
function arcstat_quiescence # stat echo
{
	typeset stat=$1
	typeset echo=$2
	typeset do_once=true

	if [[ $echo -eq 0 ]]; then
		echo "Waiting for arcstat $1 quiescence."
	fi

	# Sample the stat twice, 0.5s apart, until two consecutive samples
	# are equal and non-zero.  do_once guarantees at least one pass
	# before stat1/stat2 are referenced.
	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
		typeset stat1=$(kstat arcstats.$stat)
		sleep 0.5
		typeset stat2=$(kstat arcstats.$stat)
		do_once=false
	done

	if [[ $echo -eq 1 ]]; then
		echo $stat2
	fi
}
3790
function arcstat_quiescence_noecho # stat
{
	# Wait for quiescence without echoing the settled value.
	arcstat_quiescence "$1" 0
}
3796
function arcstat_quiescence_echo # stat
{
	# Wait for quiescence and echo the settled value.
	arcstat_quiescence "$1" 1
}
3802
3803#
3804# Given an array of pids, wait until all processes
3805# have completed and check their return status.
3806#
function wait_for_children #children
{
	# Wait on each child PID; report any non-zero exit and return 1
	# if any child failed, 0 otherwise.
	rv=0
	children=("$@")
	for child in "${children[@]}"; do
		child_exit=0
		wait ${child} || child_exit=$?
		[ $child_exit -eq 0 ] && continue
		echo "child ${child} failed with ${child_exit}"
		rv=1
	done
	return $rv
}
3822
3823#
3824# Compare two directory trees recursively in a manner similar to diff(1), but
3825# using rsync. If there are any discrepancies, a summary of the differences are
3826# output and a non-zero error is returned.
3827#
3828# If you're comparing a directory after a ZIL replay, you should set
3829# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
3830# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
3831# information).
3832#
function directory_diff # dir_a dir_b
{
	dir_a="$1"
	dir_b="$2"
	zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"

	# If one of the directories doesn't exist, return 2. This is to match the
	# semantics of diff.
	if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
		return 2
	fi

	# Run rsync with --dry-run --itemize-changes to get something akin to diff
	# output, but rsync is far more thorough in detecting differences (diff
	# doesn't compare file metadata, and cannot handle special files).
	#
	# Also make sure to filter out non-user.* xattrs when comparing. On
	# SELinux-enabled systems the copied tree will probably have different
	# SELinux labels.
	args=("-nicaAHX" '--filter=-x! user.*' "--delete")

	# NOTE: Quite a few rsync builds do not support --crtimes which would be
	# necessary to verify that creation times are being maintained properly.
	# Unfortunately because of this we cannot use it unconditionally but we can
	# check if this rsync build supports it and use it then. This check is
	# based on the same check in the rsync test suite (testsuite/crtimes.test).
	#
	# We check ctimes even with zil_replay=1 because the ZIL does store
	# creation times and we should make sure they match (if the creation times
	# do not match there is a "c" entry in one of the columns).
	if rsync --version | grep -q "[, ] crtimes"; then
		args+=("--crtimes")
	fi

	# If we are testing a ZIL replay, we need to ignore timestamp changes.
	# Unfortunately --no-times doesn't do what we want -- it will still tell
	# you if the timestamps don't match but rsync will set the timestamps to
	# the current time (leading to an itemised change entry). It's simpler to
	# just filter out those lines.
	if [ "$zil_replay" -eq 0 ]; then
		filter=("cat")
	else
		# Different rsync versions have different numbers of columns. So just
		# require that aside from the first two, all other columns must be
		# blank (literal ".") or a timestamp field ("[tT]").
		filter=("grep" "-v" '^\..[.Tt]\+ ')
	fi

	# Any surviving itemized output means the trees differ.
	diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
	rv=0
	if [ -n "$diff" ]; then
		echo "$diff"
		rv=1
	fi
	return $rv
}
3889
3890#
3891# Compare two directory trees recursively, without checking whether the mtimes
3892# match (creation times will be checked if the available rsync binary supports
3893# it). This is necessary for ZIL replay checks (because the ZIL does not
3894# contain mtimes and thus after a ZIL replay, mtimes won't match).
3895#
3896# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
3897#
3898function replay_directory_diff # dir_a dir_b
3899{
3900	LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
3901}
3902
3903#
3904# Put coredumps into $1/core.{basename}
3905#
3906# Output must be saved and passed to pop_coredump_pattern on cleanup
3907#
function push_coredump_pattern # dir
{
	# Allow cores to be written at all.
	ulimit -c unlimited
	case "$UNAME" in
	Linux)
		# Print the previous settings (captured by the caller for
		# pop_coredump_pattern), then install the new pattern.
		cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
		echo "$1/core.%e" >/proc/sys/kernel/core_pattern &&
		    echo 0 >/proc/sys/kernel/core_uses_pid
		;;
	FreeBSD)
		sysctl -n kern.corefile
		sysctl kern.corefile="$1/core.%N" >/dev/null
		;;
	*)
		# Nothing to output - set only for this shell
		coreadm -p "$1/core.%f"
		;;
	esac
}
3927
3928#
3929# Put coredumps back into the default location
3930#
function pop_coredump_pattern
{
	# If nothing was saved (empty or missing file), there is nothing
	# to restore.
	[ -s "$1" ] || return 0

	case "$UNAME" in
	Linux)
		typeset saved_pattern saved_uses_pid
		{
			read -r saved_pattern
			read -r saved_uses_pid
		} < "$1"
		echo "$saved_pattern" >/proc/sys/kernel/core_pattern &&
		    echo "$saved_uses_pid" >/proc/sys/kernel/core_uses_pid
		;;
	FreeBSD)
		sysctl kern.corefile="$(<"$1")" >/dev/null
		;;
	esac
}
3946
3947#
3948# get_same_blocks dataset1 path/to/file1 dataset2 path/to/file2 [key]
3949#
3950# Returns a space-separated list of the indexes (starting at 0) of the L0
3951# blocks that are shared between both files (by first DVA and checksum).
3952#
function get_same_blocks # dataset1 file1 dataset2 file2 [key]
{
	typeset ds1=$1
	typeset file1=$2
	typeset ds2=$3
	typeset file2=$4

	typeset key=$5
	typeset keyarg=
	if [ ${#key} -gt 0 ]; then
		keyarg="--key=$key"
	fi

	# this is usually called as $(get_same_blocks ...), and so expected
	# to put its result on stdout, and usually the caller is not watching
	# for failure. this makes things a little tricky to fail properly if
	# zdb fails or crashes, as we end up returning an empty string, which
	# is a valid return (no blocks the same)
	#
	# to get around this, we check zdb's return and echo a dummy value
	# before returning failure. this will not match whatever the caller
	# is checking for. if they do call it with log_must, then they get
	# a failure as expected.

	typeset zdbout1=$(mktemp)
	typeset zdbout2=$(mktemp)
	typeset awkout1=$(mktemp)
	typeset awkout2=$(mktemp)

	# ${keyarg:+"$keyarg"} expands to the quoted argument when a key was
	# given and to nothing at all otherwise, so zdb never sees a spurious
	# empty argument.  Temp files must be removed on every exit path.
	if ! zdb ${keyarg:+"$keyarg"} -vvvvv "$ds1" -O "$file1" > "$zdbout1"; then
		rm -f "$zdbout1" "$zdbout2" "$awkout1" "$awkout2"
		echo "zdb $ds1 failed"
		return 1
	fi

	if ! zdb ${keyarg:+"$keyarg"} -vvvvv "$ds2" -O "$file2" > "$zdbout2"; then
		rm -f "$zdbout1" "$zdbout2" "$awkout1" "$awkout2"
		echo "zdb $ds2 failed"
		return 1
	fi

	# For each file, list its L0 blocks as "<index> <dva> <cksum>".
	awk '/ L0 / { print l++ " " $3 " " $7 }' < "$zdbout1" > "$awkout1"
	awk '/ L0 / { print l++ " " $3 " " $7 }' < "$zdbout2" > "$awkout2"

	# Lines duplicated across the two lists are blocks with matching
	# index, DVA and checksum; emit their indexes space-separated on
	# a single line (the unquoted substitution collapses newlines).
	echo $(sort -n "$awkout1" "$awkout2" | uniq -d | cut -f1 -d' ')

	rm -f "$zdbout1" "$zdbout2" "$awkout1" "$awkout2"
}
3995
3996. ${STF_SUITE}/include/kstat.shlib
3997