diff --git a/dracut/90zfs/Makefile.am b/dracut/90zfs/Makefile.am
index 759aae81dcbf..a7fc5cdd552d 100644
--- a/dracut/90zfs/Makefile.am
+++ b/dracut/90zfs/Makefile.am
@@ -2,12 +2,14 @@ pkgdracutdir = $(dracutdir)/modules.d/90zfs
 pkgdracut_SCRIPTS = \
 	$(top_srcdir)/dracut/90zfs/module-setup.sh \
 	$(top_srcdir)/dracut/90zfs/mount-zfs.sh \
-	$(top_srcdir)/dracut/90zfs/parse-zfs.sh
+	$(top_srcdir)/dracut/90zfs/parse-zfs.sh \
+	$(top_srcdir)/scripts/zfs-initramfs/scripts/zfs
 
 EXTRA_DIST = \
 	$(top_srcdir)/dracut/90zfs/module-setup.sh.in \
 	$(top_srcdir)/dracut/90zfs/mount-zfs.sh.in \
-	$(top_srcdir)/dracut/90zfs/parse-zfs.sh.in
+	$(top_srcdir)/dracut/90zfs/parse-zfs.sh.in \
+	$(top_srcdir)/scripts/zfs-initramfs/scripts/zfs
 
 $(pkgdracut_SCRIPTS):
 	-$(SED) -e 's,@bindir\@,$(bindir),g' \
diff --git a/dracut/90zfs/module-setup.sh.in b/dracut/90zfs/module-setup.sh.in
index 6eb6843819aa..86d8b8542674 100755
--- a/dracut/90zfs/module-setup.sh.in
+++ b/dracut/90zfs/module-setup.sh.in
@@ -37,6 +37,7 @@ install() {
 	dracut_install @udevdir@/zvol_id
 	dracut_install mount.zfs
 	dracut_install hostid
+	dracut_install /usr/lib/dracut/modules.d/90zfs/zfs
 
 	inst_hook cmdline 95 "$moddir/parse-zfs.sh"
 	inst_hook mount 98 "$moddir/mount-zfs.sh"
diff --git a/dracut/90zfs/mount-zfs.sh.in b/dracut/90zfs/mount-zfs.sh.in
index 2d2afd879f99..a4074e6ca9f5 100755
--- a/dracut/90zfs/mount-zfs.sh.in
+++ b/dracut/90zfs/mount-zfs.sh.in
@@ -2,70 +2,30 @@
 
 . /lib/dracut-lib.sh
 
-ZPOOL_FORCE=""
-
-if getargbool 0 zfs_force -y zfs.force -y zfsforce ; then
-	warn "ZFS: Will force-import pools if necessary."
-	ZPOOL_FORCE="-f"
+# Of course the functions we need are called differently
+# on different distributions - it would be way too easy
+# otherwise!
+if type log_failure_msg > /dev/null 2>&1 ; then
+	# LSB functions
+	log_begin_msg=log_begin_msg
+	log_end_msg=log_end_msg
+	log_failure_msg=log_failure_msg
+	log_progress_msg=log_progress_msg
+elif type success > /dev/null 2>&1 ; then
+	# Fedora/RedHat functions
+	log_begin_msg=success
+	log_end_msg=echo
+	log_failure_msg=failure
+	log_progress_msg="echo -n"
+elif type einfo > /dev/null 2>&1 ; then
+	# Gentoo functions
+	log_begin_msg=einfo
+	log_end_msg=eend
+	log_failure_msg=eerror
+	log_progress_msg="echo -n"
+else
+	log_begin_msg="echo -n"
+	log_end_msg=echo
+	log_failure_msg=echo
+	log_progress_msg="echo -n"
 fi
 
-case "$root" in
-	zfs:*)
-		# We have ZFS modules loaded, so we're able to import pools now.
-		if [ "$root" = "zfs:AUTO" ] ; then
-			# Need to parse bootfs attribute
-			info "ZFS: Attempting to detect root from imported ZFS pools."
-
-			# Might be imported by the kernel module, so try searching before
-			# we import anything.
-			zfsbootfs=`zpool list -H -o bootfs | sed -n '/-/ !p' | sed 'q'`
-			if [ "$?" != "0" ] || [ "$zfsbootfs" = "" ] || \
-				[ "$zfsbootfs" = "no pools available" ] ; then
-				# Not there, so we need to import everything.
-				info "ZFS: Attempting to import additional pools."
-				zpool import -N -a ${ZPOOL_FORCE}
-				zfsbootfs=`zpool list -H -o bootfs | sed -n '/-/ !p' | sed 'q'`
-				if [ "$?" != "0" ] || [ "$zfsbootfs" = "" ] || \
-					[ "$zfsbootfs" = "no pools available" ] ; then
-					rootok=0
-					pool=""
-
-					warn "ZFS: No bootfs attribute found in importable pools."
-
-					# Re-export everything since we're not prepared to take
-					# responsibility for them.
-					zpool list -H | while read fs rest ; do
-						zpool export "$fs"
-					done
-
-					return 1
-				fi
-			fi
-			info "ZFS: Using ${zfsbootfs} as root."
-		else
-			# Should have an explicit pool set, so just import it and we're done.
-			zfsbootfs="${root#zfs:}"
-			pool="${zfsbootfs%%/*}"
-			if ! zpool list -H $pool > /dev/null ; then
-				# pool wasn't imported automatically by the kernel module, so
-				# try it manually.
-				info "ZFS: Importing pool ${pool}..."
-				if ! zpool import -N ${ZPOOL_FORCE} $pool ; then
-					warn "ZFS: Unable to import pool ${pool}."
-					rootok=0
-
-					return 1
-				fi
-			fi
-		fi
+. /usr/lib/dracut/modules.d/90zfs/zfs
 
-		# Above should have left our rpool imported and pool/dataset in $root.
-		# We need zfsutil for non-legacy mounts and not for legacy mounts.
-		mountpoint=`zfs get -H -o value mountpoint $zfsbootfs`
-		if [ "$mountpoint" = "legacy" ] ; then
-			mount -t zfs "$zfsbootfs" "$NEWROOT" && ROOTFS_MOUNTED=yes
-		else
-			mount -o zfsutil -t zfs "$zfsbootfs" "$NEWROOT" && ROOTFS_MOUNTED=yes
-		fi
-		;;
-esac
+mountroot
diff --git a/scripts/zfs-initramfs/conf-hooks.d/zfs b/scripts/zfs-initramfs/conf-hooks.d/zfs
new file mode 100644
index 000000000000..29950cac04bb
--- /dev/null
+++ b/scripts/zfs-initramfs/conf-hooks.d/zfs
@@ -0,0 +1,2 @@
+# Force the inclusion of Busybox in the initramfs.
+BUSYBOX=y
diff --git a/scripts/zfs-initramfs/default b/scripts/zfs-initramfs/default
new file mode 100644
index 000000000000..cb63692f9739
--- /dev/null
+++ b/scripts/zfs-initramfs/default
@@ -0,0 +1,66 @@
+# ZoL userland configuration.
+
+# Run `zfs mount -a` during system start?
+# This should be 'no' if zfs-mountall or a systemd generator
+# is available.
+ZFS_MOUNT='yes'
+
+# Run `zfs unmount -a` during system stop?
+# This should be 'no' on most systems.
+ZFS_UNMOUNT='yes'
+
+# Run `zfs share -a` during system start?
+# NB: This acts on the shareiscsi, sharenfs, and sharesmb dataset properties.
+ZFS_SHARE='yes'
+
+# Run `zfs unshare -a` during system stop?
+ZFS_UNSHARE='yes'
+
+# Should we use '-d /dev/disk/by-*' when importing pools?
+# This is recommended, but the default 'no' uses the cache
+# file.
+# The variable name is somewhat misleading. Previously the code
+# tried _only_ '/dev/disk/by-id', but it will now try any
+# '/dev/disk/by-*' directory.
+USE_DISK_BY_ID='yes'
+
+# Should the datasets be mounted verbosely (a mount counter
+# will be used when mounting if set to 'yes')?
+VERBOSE_MOUNT='no'
+
+# Should we allow overlay mounts (this is standard in Linux, but not
+# in ZFS, which comes from Solaris, where it is not allowed)?
+DO_OVERLAY_MOUNTS='no'
+
+# Any additional options for the 'zfs mount' command line.
+# Include '-o' for each option wanted.
+MOUNT_EXTRA_OPTIONS=""
+
+# Build kernel modules with the --enable-debug switch?
+ZFS_DKMS_ENABLE_DEBUG='no'
+
+# Build kernel modules with the --enable-debug-dmu-tx switch?
+ZFS_DKMS_ENABLE_DEBUG_DMU_TX='no'
+
+# Keep debugging symbols in kernel modules?
+ZFS_DKMS_DISABLE_STRIP='no'
+
+# Wait for this many seconds in the initrd pre_mountroot?
+# This delays startup and should be '0' on most systems.
+ZFS_INITRD_PRE_MOUNTROOT_SLEEP='0'
+
+# Wait for this many seconds in the initrd mountroot?
+# This delays startup and should be '0' on most systems.
+# It might help on systems which have their ZFS root on
+# a USB disk that takes just a little longer to be available.
+ZFS_INITRD_POST_MODPROBE_SLEEP='0'
+
+# List of additional datasets to mount after the root
+# dataset is mounted.
+# The init script will use the mountpoint specified in
+# the 'mountpoint' property value of each dataset to
+# determine where it should be mounted.
+#ZFS_INITRD_ADDITIONAL_DATASETS="rpool/ROOT/usr_local"
+
+# Location of the lockfile.
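+# On systems where /run does not exist, the traditional location
+# /var/lock may be a suitable alternative (illustrative override,
+# not a tested default):
+#LOCKDIR=/var/lock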
+LOCKDIR=/run/lock
diff --git a/scripts/zfs-initramfs/hooks/zfs b/scripts/zfs-initramfs/hooks/zfs
new file mode 100755
index 000000000000..b65c16ecde0d
--- /dev/null
+++ b/scripts/zfs-initramfs/hooks/zfs
@@ -0,0 +1,100 @@
+#!/bin/sh
+#
+# Add ZoL filesystem capabilities to an initrd, usually for a native ZFS root.
+#
+
+# The zdev hook, which installs the udev rules for ZoL, must run first.
+PREREQ="zdev"
+
+# These prerequisites are provided by the zfsutils package. The zdb utility is
+# not strictly required, but it can be useful at the initramfs recovery prompt.
+COPY_EXEC_LIST="/sbin/zdb /sbin/zpool /sbin/zfs /sbin/mount.zfs"
+
+# These prerequisites are provided by the base system.
+COPY_EXEC_LIST="$COPY_EXEC_LIST /bin/hostname /sbin/blkid"
+
+# Explicitly specify all kernel modules because automatic dependency resolution
+# is unreliable on many systems.
+BASE_MODULES="zlib_deflate spl zavl zcommon znvpair zunicode zfs"
+CRPT_MODULES="sun-ccm sun-gcm sun-ctr"
+MANUAL_ADD_MODULES_LIST="$BASE_MODULES"
+
+# Generic result code.
+RC=0
+
+case $1 in
+prereqs)
+	echo "$PREREQ"
+	exit 0
+	;;
+esac
+
+for ii in $COPY_EXEC_LIST
+do
+	if [ ! -x "$ii" ]
+	then
+		echo "Error: $ii is not executable."
+		RC=2
+	fi
+done
+
+if [ "$RC" -ne 0 ]
+then
+	exit "$RC"
+fi
+
+. /usr/share/initramfs-tools/hook-functions
+
+mkdir -p "$DESTDIR/etc/"
+
+# ZDB uses pthreads for some functions, but the library dependency is not
+# automatically detected. The `find` utility and extended `cp` options are
+# used here because libgcc_s.so could be in a subdirectory of /lib for
+# multi-arch installations.
+cp --target-directory="$DESTDIR" --parents $(find /lib -type f -name libgcc_s.so.1)
+
+for ii in $COPY_EXEC_LIST
+do
+	copy_exec "$ii"
+done
+
+for ii in $MANUAL_ADD_MODULES_LIST
+do
+	manual_add_modules "$ii"
+done
+
+if [ -f "/etc/hostname" ]
+then
+	cp -p "/etc/hostname" "$DESTDIR/etc/"
+else
+	hostname >"$DESTDIR/etc/hostname"
+fi
+
+for ii in zfs spl
+do
+	if [ -f "/etc/modprobe.d/$ii" ]; then
+		if [ ! -d "$DESTDIR/etc/modprobe.d" ]; then
+			mkdir -p "$DESTDIR/etc/modprobe.d"
+		fi
+		cp -p "/etc/modprobe.d/$ii" "$DESTDIR/etc/modprobe.d/"
+	fi
+done
+
+# The spl-dkms package ensures that the /etc/hostid file exists.
+# NB: Commentary in the spl-dkms.postinst script.
+[ -f "/etc/hostid" ] && cp -p "/etc/hostid" "$DESTDIR/etc/hostid"
+
+# Install the zpool.cache file.
+[ ! -d "$DESTDIR/boot/zfs" ] && mkdir -p "$DESTDIR/boot/zfs"
+[ -d /boot/zfs ] && cp -r /boot/zfs "$DESTDIR/boot"
+
+# With pull request #1476 (not yet merged) comes a verbose warning
+# if /usr/bin/net doesn't exist or isn't executable. Just create
+# a dummy...
+[ ! -d "$DESTDIR/usr/bin" ] && mkdir -p "$DESTDIR/usr/bin"
+if [ ! -x "$DESTDIR/usr/bin/net" ]; then
+	touch "$DESTDIR/usr/bin/net"
+	chmod +x "$DESTDIR/usr/bin/net"
+fi
+
+exit 0
diff --git a/scripts/zfs-initramfs/scripts/zfs b/scripts/zfs-initramfs/scripts/zfs
new file mode 100644
index 000000000000..1fef53be9748
--- /dev/null
+++ b/scripts/zfs-initramfs/scripts/zfs
@@ -0,0 +1,507 @@
+# ZFS boot stub for initramfs-tools.
+#
+# In the initramfs environment, the /init script sources this stub to
+# override the default functions in the /scripts/local script.
+#
+# Enable this by passing boot=zfs on the kernel command line.
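+#
+# For example (hypothetical pool and dataset names), a GRUB entry for a
+# root dataset 'rpool/ROOT/debian' could use:
+#
+#   linux /vmlinuz root=ZFS=rpool/ROOT/debian boot=zfs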
+#
+
+
+pre_mountroot()
+{
+	if type run_scripts > /dev/null 2>&1 && [ -f "/scripts/local-top" -o -d "/scripts/local-top" ]
+	then
+		[ "$quiet" != "y" ] && $log_begin_msg "Running /scripts/local-top"
+		run_scripts /scripts/local-top
+		[ "$quiet" != "y" ] && $log_end_msg
+	fi
+
+	if [ -r '/etc/default/zfs' ]
+	then
+		. /etc/default/zfs
+		if [ "${ZFS_INITRD_PRE_MOUNTROOT_SLEEP:-0}" -gt '0' ]
+		then
+			[ "$quiet" != "y" ] && $log_begin_msg "Sleeping for $ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
+			sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
+			[ "$quiet" != "y" ] && $log_end_msg
+		fi
+	fi
+}
+
+# Duplicates the functionality found under try_failure_hooks in the
+# initramfs 'functions' file, but invoking that would be inappropriate here.
+disable_plymouth()
+{
+	if [ -x /bin/plymouth ] && /bin/plymouth --ping
+	then
+		/bin/plymouth hide-splash >/dev/null 2>&1
+	fi
+}
+
+mountroot()
+{
+	pre_mountroot
+
+	if type run_scripts > /dev/null 2>&1 && [ -f "/scripts/local-premount" -o -d "/scripts/local-premount" ]
+	then
+		[ "$quiet" != "y" ] && $log_begin_msg "Running /scripts/local-premount"
+		run_scripts /scripts/local-premount
+		[ "$quiet" != "y" ] && $log_end_msg
+	fi
+
+	# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
+	if type wait_for_udev > /dev/null 2>&1 ; then
+		wait_for_udev 10
+	elif type wait_for_dev > /dev/null 2>&1 ; then
+		wait_for_dev
+	fi
+
+	# zpool import refuses to import without a valid mtab.
+	[ ! -f /proc/mounts ] && mount proc /proc
+	[ ! -f /etc/mtab ] && cat /proc/mounts > /etc/mtab
+
+	# Load the module, without importing any pools - we want manual
+	# control over that part!
+	modprobe zfs zfs_autoimport_disable=1
+
+	if [ "${ZFS_INITRD_POST_MODPROBE_SLEEP:-0}" -gt '0' ]
+	then
+		[ "$quiet" != "y" ] && $log_begin_msg "Sleeping for $ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
+		sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
+		[ "$quiet" != "y" ] && $log_end_msg
+	fi
+
+	# Look for the cache file.
+	if [ -f /etc/zfs/zpool.cache ]; then
+		ZPOOL_CACHE=/etc/zfs/zpool.cache
+	elif [ -f /boot/zfs/zpool.cache ]; then
+		ZPOOL_CACHE=/boot/zfs/zpool.cache
+	fi
+
+	# 'ROOT' is for Debian GNU/Linux (etc), 'root' is for RedHat/Fedora (etc)
+	if [ -n "$root" -a -z "$ROOT" ]
+	then
+		ROOT=${root}
+	fi
+
+	# 'rootmnt' is for Debian GNU/Linux (etc), 'NEWROOT' is for RedHat/Fedora (etc)
+	if [ -n "$NEWROOT" -a -z "$rootmnt" ]
+	then
+		rootmnt=${NEWROOT}
+	fi
+
+	# ----------------------------------------------------------------
+	# G E T   P O O L   A N D   R O O T   F I L E S Y S T E M
+
+	# Supports the following kernel command line argument combinations
+	# (in this order - first match wins):
+	#
+	#   rpool=<pool>
+	#   bootfs=<pool>/<dataset>
+	#   rpool=<pool> bootfs=<pool>/<dataset>
+	#   -B zfs-bootfs=<pool>/<dataset>
+	#   rpool=rpool		(default if none of the above is used)
+	#   root=<pool>/<dataset>
+	#   root=ZFS=<pool>/<dataset>
+	#   root=zfs:AUTO
+	#   root=zfs:<pool>/<dataset>
+	#
+	# <dataset> could also be <snapshot>.
+	#
+	# In addition, setting one of zfs_force, zfs.force or zfsforce to
+	# yes, on or 1 will force-import the pool.
+
+	# Support the force option.
+	ZPOOL_FORCE=""
+	if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
+	then
+		ZPOOL_FORCE="-f"
+	fi
+
+	# ------------
+	# Look for the 'rpool' and 'bootfs' parameters.
+	ZFS_RPOOL="${rpool#rpool=}"
+	ZFS_BOOTFS="${bootfs#bootfs=}"
+
+	# ------------
+	# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
+	# NOTE: Only use the pool name. The rest is not supported by ZoL.
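+	# (Illustrative value: 'zfs-bootfs=rpool/52' would use only
+	# 'rpool'; the trailing object number is ignored.)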
+	if [ -z "$ZFS_RPOOL" ]
+	then
+		# The ${zfs-bootfs} variable is set at the kernel command
+		# line, usually by GRUB, but it cannot be referenced here
+		# directly because bourne variable names cannot contain a
+		# hyphen.
+		#
+		# Reassign the variable by dumping the environment and
+		# stripping the zfs-bootfs= prefix. Let the shell handle
+		# quoting through the eval command.
+		eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
+
+		# Only the pool name is relevant: the userland on Linux
+		# cannot resolve the trailing ZFS object number.
+		#
+		# Strip everything after the first slash character.
+		ZFS_RPOOL=${ZFS_RPOOL%%/*}
+	fi
+
+	# ------------
+	# rpool=<pool> given, but no bootfs=, root=ZFS= or root=zfs: option.
+	if [ -n "$ZFS_RPOOL" -a -z "$ZFS_BOOTFS" ] && ! echo "$ROOT" | egrep -q "ZFS=|zfs:"
+	then
+		# Do auto-detection. Do this by 'cheating' - set 'root=zfs:AUTO',
+		# which will be caught later.
+		ZFS_BOOTFS=
+		ROOT=zfs:AUTO
+	fi
+
+	# ------------
+	# Support the 'root=<pool>/<dataset>' option.
+	if [ -z "$ZFS_RPOOL" -a -z "$ZFS_BOOTFS" -a -n "$ROOT" ] && ! echo "$ROOT" | egrep -iq "^/|^zfs"
+	then
+		# Strip everything after the first slash character.
+		ZFS_RPOOL=${ROOT%%/*}
+		ZFS_BOOTFS=$ROOT
+	fi
+
+	# ------------
+	# Support the 'root=ZFS=<pool>/<dataset>' parameter (Debian GNU/Linux default).
+	if [ -z "$ZFS_BOOTFS" ] && echo "$ROOT" | grep -q "ZFS="
+	then
+		ZFS_BOOTFS="${ROOT##*=}"
+	fi
+
+	# ------------
+	# Support the Fedora/RedHat type argument 'root=zfs:<pool>/<dataset>'.
+	if [ -z "$ZFS_BOOTFS" ] && echo "$ROOT" | grep -q "^zfs:"
+	then
+		if [ "$ROOT" = "zfs:AUTO" ]
+		then
+			# Might be imported by the kernel module, so try searching before
+			# we import anything.
+			ZFS_BOOTFS=`zpool list -H -o bootfs | sed -n '/^-$/ !p' | sed 'q'`
+			if [ "$?" != "0" -o -z "$ZFS_BOOTFS" -o \
+			    "$ZFS_BOOTFS" = "no pools available" ]
+			then
+				# Not there, so we need to import everything.
+
+				[ "$quiet" != "y" ] && $log_begin_msg "Attempting to import additional pools."
+
+				if [ -n "$ZPOOL_CACHE" ]
+				then
+					ZFS_CMD="zpool import -c ${ZPOOL_CACHE} -f -N"
+				else
+					ZFS_CMD="zpool import -d /dev/disk/by-id -f -N"
+				fi
+
+				# Get a list of pools available for import.
+				if [ -n "$ZFS_RPOOL" ]
+				then
+					# We've specified a pool - check only that one.
+					POOLS=$ZFS_RPOOL
+				else
+					POOLS=$(zpool import 2>&1 | grep 'pool: ' | sed 's,.*: ,,')
+				fi
+
+				for pool in $POOLS
+				do
+					ZFS_STDERR=$($ZFS_CMD $pool 2>&1)
+					ZFS_ERROR=$?
+
+					ZFS_BOOTFS=$(zpool list -H -o bootfs $pool)
+					if [ -n "$ZFS_BOOTFS" -a "$ZFS_BOOTFS" != "-" ]
+					then
+						# Keep it mounted.
+						POOL_MOUNTED=1
+						break
+					else
+						zpool export $pool
+					fi
+				done
+
+				[ "$quiet" != "y" ] && $log_end_msg $ZFS_ERROR
+			fi
+		else
+			# No auto - use the overrides.
+			ZFS_BOOTFS="${ROOT#zfs:}"
+			ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
+		fi
+	fi
+
+	if [ -z "$ZFS_RPOOL" -a -n "$ZFS_BOOTFS" ]
+	then
+		# We (still) don't have the POOL, but we have a BOOTFS. The
+		# pool should be the first part of the bootfs dataset path.
+		ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
+	fi
+
+	# ----------------------------------------------------------------
+	# I M P O R T   P O O L
+
+	if [ -z "$POOL_MOUNTED" ]
+	then
+		[ "$quiet" != "y" ] && $log_begin_msg "Importing ZFS root pool $ZFS_RPOOL"
+		# Attempt 1: Try the correct/proper way.
+		if [ "$ZFS_ERROR" != 0 -a -n "$ZPOOL_CACHE" ]
+		then
+			ZFS_CMD="zpool import -c ${ZPOOL_CACHE} -N ${ZFS_RPOOL} ${ZPOOL_FORCE}"
+
+			ZFS_STDERR=$($ZFS_CMD 2>&1)
+			ZFS_ERROR=$?
+
+			[ "$ZFS_ERROR" != 0 ] && echo "FAIL: $ZFS_CMD. Retrying..."
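+			# NOTE: $ZFS_ERROR is left non-zero here so that the
+			# fallback attempts below will run.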
fi
+
+		# Attempt 2: Try using /dev/disk/by-id if it exists.
+		# NOTE: This should really be attempt 1!
+		[ -d /dev/disk/by-id ] && DISK_BY_ID="-d /dev/disk/by-id"
+		if [ "$ZFS_ERROR" != 0 -o -z "$ZPOOL_CACHE" ]
+		then
+			ZFS_CMD="zpool import -N ${DISK_BY_ID} ${ZFS_RPOOL} ${ZPOOL_FORCE}"
+
+			ZFS_STDERR=$($ZFS_CMD 2>&1)
+			ZFS_ERROR=$?
+
+			[ "$ZFS_ERROR" != 0 ] && echo "FAIL: $ZFS_CMD. Retrying..."
+		fi
+
+		# Attempt 3: Last-ditch attempt. If DISK_BY_ID is set, we've already tried...
+		if [ "$ZFS_ERROR" != 0 -a -z "$DISK_BY_ID" ]
+		then
+			ZFS_CMD="zpool import -N ${ZFS_RPOOL} ${ZPOOL_FORCE}"
+
+			ZFS_STDERR=$($ZFS_CMD 2>&1)
+			ZFS_ERROR=$?
+
+			[ "$ZFS_ERROR" != 0 ] && echo "FAIL: $ZFS_CMD."
+		fi
+		[ "$quiet" != "y" ] && $log_end_msg
+
+		if [ "$ZFS_ERROR" != 0 ]
+		then
+			# Unable to import the pool -- let the user sort this out.
+			disable_plymouth
+			echo ""
+			echo "Command: $ZFS_CMD"
+			echo "Message: $ZFS_STDERR"
+			echo "Error: $ZFS_ERROR"
+			echo ""
+			echo "Manually import the root pool at the command prompt and then exit."
+			echo "Hint: Try: zpool import -R / -N ${ZFS_RPOOL}"
+			/bin/sh
+		fi
+	fi
+
+	# ----------------------------------------------------------------
+
+	# Booting from a snapshot?
+	if [ -n "$ZFS_BOOTFS" ] && echo "$ZFS_BOOTFS" | grep -q '@'
+	then
+		# Make sure that the specified snapshot exists.
+		if ! zfs get -H type $ZFS_BOOTFS 2> /dev/null | grep -q "^$ZFS_BOOTFS"
+		then
+			# Use the original dataset (the part before '@').
+			[ "$quiet" != "y" ] && $log_begin_msg "Snapshot does not exist. Using base dataset for root."
+			ZFS_BOOTFS=`echo "$ZFS_BOOTFS" | sed 's,@.*,,'`
+			[ "$quiet" != "y" ] && $log_end_msg
+		else
+			# Replace the '@' separating the dataset and snapshot name with an underscore.
+			# NOTE: This might not be the prettiest, but at least we'll know where the
+			# dataset came from.
+			dset=`echo "$ZFS_BOOTFS" | sed 's,@,_,'`
+
+			# If the destination dataset for the clone already exists, destroy it.
+			if zfs get -H type $dset 2> /dev/null | grep -q "^$dset"
+			then
+				[ "$quiet" != "y" ] && $log_begin_msg "Destroying clone destination dataset"
+				ZFS_CMD="zfs destroy $dset"
+				ZFS_STDERR=$($ZFS_CMD 2>&1)
+				ZFS_ERROR=$?
+
+				# Destroying the clone target was not successful -- let the user sort this out.
+				if [ "$ZFS_ERROR" != 0 ]
+				then
+					disable_plymouth
+					echo ""
+					echo "Command: $ZFS_CMD"
+					echo "Message: $ZFS_STDERR"
+					echo "Error: $ZFS_ERROR"
+					echo ""
+					echo "Failed to destroy the already existing dataset that the clone would create."
+					echo "Please make sure that '$dset' no longer exists."
+					/bin/sh
+
+					ZFS_ERROR=0
+				else
+					[ "$quiet" != "y" ] && $log_end_msg
+				fi
+			fi
+
+			# Clone the snapshot into a dataset we can boot from.
+			[ "$quiet" != "y" ] && $log_begin_msg "Cloning boot snapshot to dataset"
+			ZFS_CMD="zfs clone $ZFS_BOOTFS $dset"
+			ZFS_STDERR=$($ZFS_CMD 2>&1)
+			ZFS_ERROR=$?
+
+			# The clone was not successful -- let the user sort this out.
+			if [ "$ZFS_ERROR" != 0 ]
+			then
+				disable_plymouth
+				echo ""
+				echo "Command: $ZFS_CMD"
+				echo "Message: $ZFS_STDERR"
+				echo "Error: $ZFS_ERROR"
+				echo ""
+				echo "Failed to clone snapshot."
+				echo "Make sure that any problems are corrected and then make sure"
+				echo "that the dataset '$dset' exists and is bootable."
+				/bin/sh
+
+				ZFS_ERROR=0
+			else
+				[ "$quiet" != "y" ] && $log_end_msg
+			fi
+
+			# Success - unmount the filesystem.
+			umount /$dset
+
+			# Use the clone as bootfs.
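+			# (For example, booting 'rpool/ROOT/debian@rollback' leaves
+			# the clone 'rpool/ROOT/debian_rollback' as root; the dataset
+			# names here are purely illustrative.)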
+			ZFS_BOOTFS="$dset"
+		fi
+	fi
+
+	# Last-ditch attempt - try to find the bootfs automatically.
+	if [ -z "$ZFS_BOOTFS" ]
+	then
+		[ "$quiet" != "y" ] && $log_begin_msg "Getting ZFS bootfs property"
+		ZFS_BOOTFS=$(zpool list -H -o bootfs "$ZFS_RPOOL")
+		ZFS_ERROR=$?
+		[ "$quiet" != "y" ] && $log_end_msg
+	fi
+
+	if [ -z "$ZFS_BOOTFS" ]
+	then
+		# Unable to figure out the bootfs -- let the user sort this out.
+		disable_plymouth
+		echo ""
+		echo "Command: zpool list -H -o bootfs $ZFS_RPOOL"
+		echo "Error: $ZFS_ERROR, unable to get the bootfs property."
+		echo ""
+		echo "Manually mount the root filesystem on $rootmnt and then exit."
+		echo "Hint: Try: mount -t zfs -o zfsutil $ZFS_RPOOL/ROOT/system $rootmnt"
+		/bin/sh
+	fi
+
+	# ----------------------------------------------------------------
+	# M O U N T   R O O T   F I L E S Y S T E M
+
+	if zfs 2>&1 | grep -q 'key -l '
+	then
+		# 'zfs key' is available (hence we have crypto), so check whether
+		# the filesystem is encrypted.
+		set -- `zfs get encryption $ZFS_BOOTFS | grep ^$ZFS_RPOOL`
+		crypt_type=$3
+		if [ "$crypt_type" != "off" ]
+		then
+			[ "$quiet" != "y" ] && $log_begin_msg "Loading crypto wrapper key for $ZFS_BOOTFS"
+
+			# Just make sure that ALL crypto modules are loaded.
+			# Simplest to just load them all...
+			for mod in sun-ccm sun-gcm sun-ctr
+			do
+				modprobe $mod
+			done
+
+			# If the key isn't available, then this will fail!
+			ZFS_CMD="zfs key -l -r $ZFS_BOOTFS"
+			ZFS_STDERR=$($ZFS_CMD 2>&1)
+			ZFS_ERROR=$?
+
+			if [ "$ZFS_ERROR" != 0 ]
+			then
+				disable_plymouth
+				echo ""
+				echo "Command: $ZFS_CMD"
+				echo "Message: $ZFS_STDERR"
+				echo "Error: $ZFS_ERROR"
+				echo ""
+				echo "Failed to load the zfs encryption wrapper key(s)."
+				echo "Please verify the dataset property 'keysource' for the datasets"
+				echo "and rerun: $ZFS_CMD"
+				/bin/sh
+
+				ZFS_ERROR=0
+			else
+				[ "$quiet" != "y" ] && $log_end_msg
+			fi
+		fi
+	fi
+
+	# Ideally, the root filesystem would be mounted like this:
+	#
+	#   zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
+	#   zfs mount -o mountpoint=/ "$ZFS_BOOTFS"
+	#
+	# but the MOUNTPOINT prefix is preserved on descendant filesystems after
+	# the pivot into the regular root, which later breaks things like
+	# `zfs mount -a` and the /etc/mtab refresh.
+
+	[ "$quiet" != "y" ] && $log_begin_msg "Mounting root filesystem"
+	mountpoint=`zfs get -H -o value mountpoint $ZFS_BOOTFS`
+	if [ "$mountpoint" = "legacy" ] ; then
+		ZFS_CMD="mount -t zfs"
+	else
+		ZFS_CMD="mount -o zfsutil -t zfs"
+	fi
+	ZFS_STDERR=$($ZFS_CMD ${ZFS_BOOTFS} ${rootmnt} 2>&1)
+	ZFS_ERROR=$?
+	[ "$quiet" != "y" ] && $log_end_msg
+
+	if [ "$ZFS_ERROR" != 0 ]
+	then
+		disable_plymouth
+		echo ""
+		echo "Command: ${ZFS_CMD} ${ZFS_BOOTFS} ${rootmnt}"
+		echo "Message: $ZFS_STDERR"
+		echo "Error: $ZFS_ERROR"
+		echo ""
+		echo "Manually mount the root filesystem on $rootmnt and then exit."
+		/bin/sh
+	fi
+
+	# ----------------------------------------------------------------
+	# M O U N T   A D D I T I O N A L   F I L E S Y S T E M S   R E Q U I R E D
+	if [ -n "$ZFS_INITRD_ADDITIONAL_DATASETS" ]
+	then
+		ZFS_CMD="mount -o zfsutil -t zfs"
+
+		for fs in $ZFS_INITRD_ADDITIONAL_DATASETS
+		do
+			mountpoint=`zfs get -H -o value mountpoint $fs`
+			ZFS_STDERR=$($ZFS_CMD ${fs} ${rootmnt}/${mountpoint} 2>&1)
+			ZFS_ERROR=$?
+			[ "$quiet" != "y" ] && $log_end_msg
+
+			if [ "$ZFS_ERROR" != 0 ]
+			then
+				disable_plymouth
+				echo ""
+				echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
+				echo "Message: $ZFS_STDERR"
+				echo "Error: $ZFS_ERROR"
+				echo ""
+				echo "Manually mount the filesystem on $rootmnt/$mountpoint and then exit."
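+				# Drop to a recovery shell; execution continues
+				# once the shell exits.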
+ /bin/sh + fi + done + fi + + # ---------------------------------------------------------------- + if type run_scripts > /dev/null 2>&1 && [ -f "/scripts/local-bottom" -o -d "/scripts/local-bottom" ] + then + [ "$quiet" != "y" ] && $log_begin_msg "Running /scripts/local-bottom" + run_scripts /scripts/local-bottom + [ "$quiet" != "y" ] && $log_end_msg + fi +}
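
Usage sketch (standard distribution tooling, not part of the patch itself):
once these files are installed, the initramfs has to be regenerated before
boot=zfs takes effect, e.g.

    update-initramfs -u -k all    # Debian/Ubuntu (initramfs-tools)
    dracut --force                # Fedora/RedHat (dracut)

The pool and root dataset are then selected with the kernel command line
options documented at the top of mountroot() in scripts/zfs-initramfs/scripts/zfs.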