diff --git a/config/zfs-build.m4 b/config/zfs-build.m4 index 8d3a373047ec..eff92c2f8e4a 100644 --- a/config/zfs-build.m4 +++ b/config/zfs-build.m4 @@ -139,7 +139,7 @@ AC_DEFUN([ZFS_AC_RPM], [ ]) RPM_DEFINE_COMMON='--define "$(DEBUG_ZFS) 1" --define "$(DEBUG_DMU_TX) 1"' - RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)" --define "_udevdir $(udevdir)" --define "_udevruledir $(udevruledir)"' + RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)" --define "_udevdir $(udevdir)" --define "_udevruledir $(udevruledir)" --define "_initconfdir $(DEFAULT_INITCONF_DIR)"' RPM_DEFINE_KMOD='--define "kernels $(LINUX_VERSION)" --define "require_spldir $(SPL)" --define "require_splobj $(SPL_OBJ)" --define "ksrc $(LINUX)" --define "kobj $(LINUX_OBJ)"' RPM_DEFINE_DKMS= @@ -311,6 +311,20 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [ AC_MSG_RESULT([$DEFAULT_INIT_SCRIPT]) AC_SUBST(DEFAULT_INIT_SCRIPT) + + AC_MSG_CHECKING([default init config direectory]) + case "$VENDOR" in + toss) DEFAULT_INITCONF_DIR=/etc/sysconfig ;; + redhat) DEFAULT_INITCONF_DIR=/etc/sysconfig ;; + fedora) DEFAULT_INITCONF_DIR=/etc/sysconfig ;; + sles) DEFAULT_INITCONF_DIR=/etc/sysconfig ;; + ubuntu) DEFAULT_INITCONF_DIR=/etc/default ;; + debian) DEFAULT_INITCONF_DIR=/etc/default ;; + *) DEFAULT_INITCONF_DIR=/etc/default ;; + esac + + AC_MSG_RESULT([$DEFAULT_INITCONF_DIR]) + AC_SUBST(DEFAULT_INITCONF_DIR) ]) dnl # diff --git a/etc/init.d/.gitignore b/etc/init.d/.gitignore index 73304bc2cd4a..0979b90af62d 100644 --- a/etc/init.d/.gitignore +++ b/etc/init.d/.gitignore @@ -1 +1,6 @@ +common.init +zfs-import +zfs-mount +zfs-share +zfs-zed zfs diff --git a/etc/init.d/Makefile.am b/etc/init.d/Makefile.am index 5a049dfe14fd..cee81ab0a8ce 100644 --- a/etc/init.d/Makefile.am +++ b/etc/init.d/Makefile.am @@ -1,22 +1,38 @@ initdir = $(DEFAULT_INIT_DIR) -init_SCRIPTS = zfs +init_SCRIPTS = zfs-import zfs-mount zfs-share zfs-zed + +initcommondir = $(sysconfdir)/zfs +initcommon_SCRIPTS = common.init + +initconfdir = 
$(DEFAULT_INITCONF_DIR) +initconf_SCRIPTS = zfs EXTRA_DIST = \ - $(top_srcdir)/etc/init.d/zfs.fedora.in \ - $(top_srcdir)/etc/init.d/zfs.gentoo.in \ - $(top_srcdir)/etc/init.d/zfs.lsb.in \ - $(top_srcdir)/etc/init.d/zfs.lunar.in \ - $(top_srcdir)/etc/init.d/zfs.redhat.in + $(top_srcdir)/etc/init.d/common.init.in \ + $(top_srcdir)/etc/init.d/zfs-share.in \ + $(top_srcdir)/etc/init.d/zfs-import.in \ + $(top_srcdir)/etc/init.d/zfs-mount.in \ + $(top_srcdir)/etc/init.d/zfs-zed.in \ + $(top_srcdir)/etc/init.d/zfs.in -$(init_SCRIPTS): $(init_SCRIPTS).$(DEFAULT_INIT_SCRIPT).in - -$(SED) -e 's,@bindir\@,$(bindir),g' \ - -e 's,@sbindir\@,$(sbindir),g' \ - -e 's,@udevdir\@,$(udevdir),g' \ - -e 's,@udevruledir\@,$(udevruledir),g' \ - -e 's,@sysconfdir\@,$(sysconfdir),g' \ - -e 's,@initdir\@,$(initdir),g' \ - -e 's,@runstatedir\@,$(runstatedir),g' \ - '$@.$(DEFAULT_INIT_SCRIPT).in' >'$@' +$(init_SCRIPTS) $(initconf_SCRIPTS) $(initcommon_SCRIPTS): + -(if [ -e /etc/debian_version ]; then \ + NFS_SRV=nfs-kernel-server; \ + else \ + NFS_SRV=nfs; \ + fi; \ + $(SED) -e 's,@bindir\@,$(bindir),g' \ + -e 's,@sbindir\@,$(sbindir),g' \ + -e 's,@udevdir\@,$(udevdir),g' \ + -e 's,@udevruledir\@,$(udevruledir),g' \ + -e 's,@sysconfdir\@,$(sysconfdir),g' \ + -e 's,@initconfdir\@,$(initconfdir),g' \ + -e 's,@initdir\@,$(initdir),g' \ + -e 's,@runstatedir\@,$(runstatedir),g' \ + -e "s,@NFS_SRV\@,$$NFS_SRV,g" \ + '$@.in' >'$@'; \ + [ '$@' = 'common.init' -o '$@' = 'zfs' ] || \ + chmod +x '$@') distclean-local:: - -$(RM) $(init_SCRIPTS) + -$(RM) $(init_SCRIPTS) $(initcommon_SCRIPTS) $(initconf_SCRIPTS) diff --git a/etc/init.d/README.md b/etc/init.d/README.md new file mode 100644 index 000000000000..9f8b8a5e4297 --- /dev/null +++ b/etc/init.d/README.md @@ -0,0 +1,69 @@ +DESCRIPTION + These script where written with the primary intention of being portable and + usable on as many systems as possible. + + This is, in practice, usually not possible. But the intention is there. + And it is a good one. 
+ + It have been tested successfully on: + + * Debian GNU/Linux Wheezy + * Debian GNU/Linux Jessie + * Ubuntu Trusty + * CentOS 6.0 + * CentOS 6.6 + * Gentoo + +SUPPORT + If you find that they don't work for your platform, please report this + at the ZFS On Linux issue tracker at https://github.com/zfsonlinux/zfs/issues. + + Please include: + + * Distribution name + * Distribution version + * Where to find a install CD image + * Architecture + + If you have code to share that fixes the problem, that is much better. + But please remember to try your best to think 'portability'. If you + suspect that what you're writing/modifying won't work on anything else + than your distribution, please make sure to put that code in appropriate + if/else/fi code. + + It currently MUST be bash (or fully compatible) for this to work. + + If you're making your own distribution and you want the scripts to + work on that, the biggest problem you'll (proably) have is the part + at the beginning of the "common.init.in" file which sets up the + logging output. + +INSTALLING INIT SCRIPT LINKS + To setup the init script links in /etc/rc?.d manually on a Debian GNU/Linux + (or derived) system, run the following commands (the order is important!): + + update-rc.d zfs-zed start 07 S . stop 08 0 1 6 . + update-rc.d zfs-import start 07 S . stop 07 0 1 6 . + update-rc.d zfs-mount start 02 2 3 4 5 . stop 06 0 1 6 . + update-rc.d zfs-share start 27 2 3 4 5 . stop 05 0 1 6 . + + To do the same on RedHat, Fedora and/or CentOS: + + chkconfig zfs-zed + chkconfig zfs-import + chkconfig zfs-mount + chkconfig zfs-share + + The idea here is to make sure ZED is started before the imports (so that + we can start consuming pool events before pools are imported). + + Then import any/all pools (except the root pool which is mounted in the + initrd before the system even boots - basically before the S (single-user) + mode). 
+ + Then we mount all filesystems before we start any network service (such as + NFSd, AFSd, Samba, iSCSI targets and what not). Even if the share* in ZFS + isn't used, the filesystem must be mounted for the service to start properly. + + Then, at almost the very end, we share filesystems configured with the + share* property in ZFS. diff --git a/etc/init.d/common.init.in b/etc/init.d/common.init.in new file mode 100644 index 000000000000..ae9cb6922085 --- /dev/null +++ b/etc/init.d/common.init.in @@ -0,0 +1,384 @@ +# This is a script with common functions etc used by zfs-import, zfs-mount, +# zfs-share and zfs-zed. +# +# It is _NOT_ to be called independently + +PATH=/sbin:/bin:/usr/bin:/usr/sbin + +# Source function library +if [ -f /lib/lsb/init-functions ]; then + . /lib/lsb/init-functions +elif [ -f /etc/rc.d/init.d/functions ]; then + . /etc/rc.d/init.d/functions +fi + +# Of course the functions we need is called differently +# on different distributions - it would be way to easy +# otherwise!! +if type log_failure_msg > /dev/null 2>&1 ; then + # LSB functions - fall through + zfs_log_begin_msg() { log_begin_msg "$1"; } + zfs_log_end_msg() { log_end_msg "$1"; } + zfs_log_failure_msg() { log_failure_msg "$1"; } + zfs_log_progress_msg() { log_progress_msg "$1"; } +elif type success > /dev/null 2>&1 ; then + # Fedora/RedHat functions - $success and $failure defined in the lib. + zfs_log_begin_msg() { echo -n "$1"; } + zfs_log_end_msg() { success $"success"; echo; } + zfs_log_failure_msg() { failure $"failure"; echo; } + zfs_log_progress_msg() { echo -n "$1"; } +elif type einfo > /dev/null 2>&1 ; then + # Gentoo functions + zfs_log_begin_msg() { echo -n "$1"; } + zfs_log_end_msg() { einfo "$1"; } + zfs_log_failure_msg() { eerror "$1"; } + zfs_log_progress_msg() { echo -n "$1"; } +else + # Unknown - simple substitues. + zfs_log_begin_msg() { echo -n "$1"; } + zfs_log_end_msg() { + ret=$1 + if [ "$ret" -ge 1 ]; then + echo " failed!" 
+ else + echo " success" + fi + return "$ret" + } + zfs_log_failure_msg() { echo "$1"; } + zfs_log_progress_msg() { echo -n "$1"; } +fi + +# Paths to what we need +ZFS="@sbindir@/zfs" +ZED="@sbindir@/zed" +ZPOOL="@sbindir@/zpool" +ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" + +# Sensible defaults +ZFS_MOUNT='yes' +ZFS_UNMOUNT='yes' + +export ZFS ZED ZPOOL ZPOOL_CACHE ZFS_MOUNT ZFS_UNMOUNT + +# Source zfs configuration, overriding the defaults +if [ -f @initconfdir@/zfs ]; then + . @initconfdir@/zfs +fi + +# ---------------------------------------------------- + +zfs_action() +{ + local MSG=$1; shift + local CMD=$* + local ret + + zfs_log_begin_msg "$MSG " + $CMD + ret=$? + zfs_log_end_msg $ret + + return $ret +} + +# Returns +# 0 if daemon has been started +# 1 if daemon was already running +# 2 if daemon could not be started +# 3 if unsupported +# +zfs_daemon_start() +{ + local PIDFILE="$1"; shift + local DAEMON_BIN="$1"; shift + local DAEMON_ARGS="$*" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --start --quiet --pidfile "$PIDFILE" \ + --exec "$DAEMON_BIN" --test > /dev/null || return 1 + + start-stop-daemon --start --quiet --exec "$DAEMON_BIN" -- \ + $DAEMON_ARGS || return 2 + elif type daemon > /dev/null 2>&1 ; then + # Fedora/RedHat functions + daemon --pidfile "$ZED_PIDFILE" "$ZED" $DAEMON_ARGS + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +# Returns +# 0 if daemon has been stopped +# 1 if daemon was already stopped +# 2 if daemon could not be stopped +# 3 if unsupported +# +zfs_daemon_stop() +{ + local PIDFILE="$1" + local DAEMON_BIN="$2" + local DAEMON_NAME="$3" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \ + --pidfile "$PIDFILE" --name "$DAEMON_NAME" + return $? + elif type killproc > /dev/null 2>&1 ; then + # Fedora/RedHat functions + killproc "$DAEMON_NAME" + return $? 
+ else + # Unsupported + return 3 + fi + + return 0 +} + +# Returns status +zfs_daemon_status() +{ + local DAEMON_BIN="$1" + local DAEMON_NAME="$2" + + if type status_of_proc > /dev/null 2>&1 ; then + # LSB functions + status_of_proc "$DAEMON_NAME" "$DAEMON_BIN" + return $? + elif type status > /dev/null 2>&1 ; then + # Fedora/RedHat functions + status "$DAEMON_NAME" + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +zfs_daemon_reload() +{ + local PIDFILE="$1" + local DAEMON_NAME="$2" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --stop -signal 1 --quiet \ + --pidfile "$PIDFILE" --name "$DAEMON_NAME" + return $? + elif type killproc > /dev/null 2>&1 ; then + # Fedora/RedHat functions + killproc "$DAEMON_NAME" "-SIGHUP" > /dev/null + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +zfs_installed() +{ + if [ ! -x "$ZPOOL" ]; then + return 1 + else + # Test if it works (will catch missing/broken libs etc) + "$ZPOOL" -? > /dev/null 2>&1 + return $? + fi + + if [ ! -x "$ZFS" ]; then + return 2 + else + # Test if it works (will catch missing/broken libs etc) + "$ZFS" -? > /dev/null 2>&1 + return $? + fi + + return 0 +} + +# Trigger udev and wait for it to settle. +udev_trigger() +{ + if [ -x /sbin/udevadm ]; then + /sbin/udevadm trigger --action=change --subsystem-match=block + /sbin/udevadm settle + elif [ -x /sbin/udevsettle ]; then + /sbin/udevtrigger + /sbin/udevsettle + fi +} + +# From scripts/common.sh +wait_udev() +{ + local DEVICE=$1 + local DELAY=$2 + local COUNT=0 + + udev_trigger + while [ ! -e "${DEVICE}" ]; do + if [ ${COUNT} -gt "${DELAY}" ]; then + return 1 + fi + + let COUNT=${COUNT}+1 + sleep 1 + done + + return 0 +} + +# Do a lot of checks to make sure it's 'safe' to continue with the import. 
+checksystem() +{ + if grep -qiE '(^|[^\\](\\\\)* )zfs=(off|no|0)( |$)' /proc/cmdline; + then + # Called with zfs=(off|no|0) - bail because we don't + # want anything import, mounted or shared. + # HOWEVER, only do this if we're called at the boot up + # (from init), not if we're running interactivly (as in + # from the shell - we know what we're doing). + [ -n "$init" ] && exit 3 + fi + + # Check if ZFS is installed. + zfs_installed || return 5 + + # Delay until all required block devices are present. + udev_trigger + + # Just make sure that /dev/zfs is created. + wait_udev /dev/zfs 15 + + if ! [ "$(uname -m)" == "x86_64" ]; then + echo "Warning: You're not running 64bit. Currently native zfs in"; + echo " Linux is only supported and tested on 64bit."; + # should we break here? People doing this should know what they + # do, thus i'm not breaking here. + fi + + return 0 +} + +reregister_mounts() +{ + local fs mntpnt fstype opts rest tmpdir + tmpdir=removethismountpointhoweverpossible + + while read -r fs mntpnt fstype opts rest ; do + fs=$(printf '%b\n' "$fs") + mntpnt=$(printf '%b\n' "$mntpnt") + if [ "$fstype" == "zfs" ] ; then + if [ "$mntpnt" == "/" ] ; then + mount -f -o zfsutil -t zfs --move / /$tmpdir + umount --fake /$tmpdir + else + umount --fake "$mntpnt" + fi + elif echo "$fs" | grep -qE "^/dev/(zd|zvol)" ; then + if [ "$mntpnt" == "/" ] ; then + mount -f -t "$fstype" --move / /$tmpdir + umount --fake /$tmpdir + else + umount --fake "$mntpnt" + fi + fi + done < <(cat /etc/mtab) + + while read -r fs mntpnt fstype opts rest ; do + fs=$(printf '%b\n' "$fs") + mntpnt=$(printf '%b\n' "$mntpnt") + if [ "$fstype" == "zfs" ] ; then + mount -f -t zfs -o zfsutil "$fs" "$mntpnt" + elif echo "$fs" | grep -q "^/dev/zd" ; then + mount -f -t "$fstype" -o "$opts" "$fs" "$mntpnt" + fi + done < <(cat /proc/mounts) +} + +get_root_pool() +{ + set -- $(mount | grep ' on / ') + [ "$5" == "zfs" ] && echo "${1%%/*}" +} + +check_module_loaded() +{ + [ -r /sys/module/zfs/version 
] && return 0 || return 1 +} + +load_module() +{ + # Load the zfs module stack + if ! check_module_loaded; then + if ! modprobe zfs; then + return 5 + fi + fi + return 0 +} + +declare -A MTAB +declare -A FSTAB + +# first parameter is a regular expression that filters mtab +read_mtab() +{ + local fs mntpnt fstype opts rest + + for fs in "${!MTAB[@]}" ; do unset MTAB["$fs"] ; done + + while read -r fs mntpnt fstype opts rest ; do + fs=$(printf '%b\n' "$fs") + MTAB["$fs"]=$mntpnt + done < <(grep -E "$1" /etc/mtab) +} + +in_mtab() +{ + [ "${MTAB[$1]}" != "" ] + return $? +} + +# first parameter is a regular expression that filters fstab +read_fstab() +{ + local i=0 + + for fs in "${!FSTAB[@]}" ; do unset FSTAB["$fs"] ; done + while read -r fs mntpnt fstype opts blah ; do + echo "$fs" | grep -q '^#' && continue + fs=$(printf '%b\n' "$fs") + FSTAB["$i"]=$mntpnt + i=$((i + 1)) + done < <(grep -E "$1" /etc/fstab) +} + +in_fstab() +{ + [ "${FSTAB[$1]}" != "" ] + return $? +} + +is_mounted() +{ + local fs=$1 + local line + + while read line; do + echo "$line" | grep -q " on $fs " && return 0 + done < <(mount) + + return 1 +} diff --git a/etc/init.d/zfs-import.in b/etc/init.d/zfs-import.in new file mode 100755 index 000000000000..3413e78d90d2 --- /dev/null +++ b/etc/init.d/zfs-import.in @@ -0,0 +1,250 @@ +#!/bin/bash +# +# zfs-mount This script will import/export zfs pools. +# +# chkconfig: 2345 01 99 +# description: This script will import/export zfs pools during system +# boot/shutdown. +# It is also responsible for all userspace zfs services. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-import +# Required-Start: zfs-zed +# Required-Stop: zfs-zed +# Default-Start: S +# Default-Stop: 0 1 6 +# X-Start-Before: checkfs +# X-Stop-After: zfs-mount +# Short-Description: Import ZFS pools +# Description: Run the `zpool import` or `zpool export` commands. +### END INIT INFO + +# Source the common init script +. 
@sysconfdir@/zfs/common.init + +# ---------------------------------------------------- + +# Import all pools +do_import() +{ + local already_imported available_pools pool apools exception dir RET r + RET=0 + + already_imported=$($ZPOOL list -H -oname) + available_pools=$($ZPOOL import 2> /dev/null | grep pool: | \ + sed 's@.*: @@') + + # Just in case - seen it happen + if [ -z "$available_pools" -a -n "$USE_DISK_BY_ID" -a \ + "$USE_DISK_BY_ID" == 'yes' ] + then + available_pools=$($ZPOOL import -d /dev/disk/by-id 2>/dev/null \ + | grep pool: | sed 's@.*: @@') + fi + + # Filter out any exceptions... + if [ -n "$ZFS_POOL_EXCEPTIONS" ]; then + apools="" + for pool in $available_pools; do + for exception in $ZFS_POOL_EXCEPTIONS; do + [ "$pool" != "$exception" ] && apools="$apools $pool" + done + done + + available_pools=$apools + fi + + # Mount all availible pools (except those set in ZFS_POOL_EXCEPTIONS. + # + # If not interactive (run from init - variable init='/sbin/init') + # we get ONE line for all pools being imported, with just a dot + # as status for each pool. + # Example: Importing ZFS pool(s)... [OK] + # + # If it IS interactive (started from the shell manually), then we + # get one line per pool importing. + # Example: Importing ZFS pool $pool1 [OK] + # Importing ZFS pool $pool2 [OK] + # [etc] + # + # In addition to that (being interactive), if VERBOSE_MOUNT='yes', + # then we also get information about where it finds the devices for + # the pool. + # Example: Importing ZFS pool $pool1 using /dev/disk/by-vdev [OK] + # Importing ZFS pool $pool2 using /dev/disk/by-vdev [OK] + # [etc] + [ -n "$init" ] && zfs_log_begin_msg "Importing ZFS pool(s)" + for pool in $available_pools; do + # We have pools that haven't been imported - import them + if [ -z "$init" ]; then + # Interactive - one 'Importing ...' line per pool + zfs_log_begin_msg "Importing ZFS pool $pool" + else + # Not interactive - a dot for each pool. + zfs_log_progress_msg "." 
+ fi + + if [ "$USE_DISK_BY_ID" == 'yes' ]; then + # Really the default/prefered way. + if [ -z "$init" -a "$VERBOSE_MOUNT" == 'yes' ]; then + # Interactive + Verbose = more information + zfs_log_progress_msg " using " + fi + + for dir in /dev/disk/by-vdev /dev/disk/by-* /dev; do + [ ! -d "$dir" ] && continue + + if [ -z "$init" -a "$VERBOSE_MOUNT" == 'yes' ]; + then + # Interactive + Verbose = show dir. + zfs_log_progress_msg " $dir" + fi + + "$ZPOOL" import -d "$dir" -N "$pool" 2>/dev/null + r=$? ; RET=$((RET + r)) + [ "$r" -eq 0 ] && break + done + [ -z "$init" ] && zfs_log_end_msg $RET + elif [ -f "$ZPOOL_CACHE" ]; then + # Fallback - use a cache file + if [ -z "$init" -a "$VERBOSE_MOUNT" == 'yes' ]; then + # Interactive + Verbose = more information + zfs_log_progress_msg " using cache file" + fi + + "$ZPOOL" import -c "$ZPOOL_CACHE" -N "$pool" 2>/dev/null + r=$? ; RET=$((RET + r)) + if [ "$r" -eq 0 ]; then + if [ -z "$init" -a "$VERBOSE_MOUNT" == 'yes' ]; + then + zfs_log_end_msg 0 + fi + + break + fi + zfs_log_end_msg $RET + else + # Last ditch attempt, try /dev! + if [ -z "$init" -a "$VERBOSE_MOUNT" == 'yes' ]; then + # Interactive + Verbose = more information + zfs_log_begin_msg " using defaults" + fi + + "$ZPOOL" import -N "$pool" 2>/dev/null + r=$? ; RET=$((RET + r)) + if [ "$r" -eq 0 ]; then + if [ -z "$init" -a "$VERBOSE_MOUNT" == 'yes' ]; + then + zfs_log_end_msg 0 + fi + + break + fi + zfs_log_end_msg $RET + fi + done + [ -n "$init" ] && zfs_log_end_msg $RET + + if [ -n "$already_imported" -a -z "$available_pools" ]; then + # All pools imported + return 0 + fi + + return $RET +} + +# Export all pools +do_export() +{ + local pool root_pool RET r + RET=0 + + root_pool=$(get_root_pool) + + [ -n "$init" ] && zfs_log_begin_msg "Exporting ZFS pool(s)" + while read pool; do + [ "$pool" == "$root_pool" ] && continue + + if [ -z "$init" ]; then + # Interactive - one 'Importing ...' 
line per pool + zfs_log_begin_msg "Exporting ZFS pool $pool" + else + # Not interactive - a dot for each pool. + zfs_log_progress_msg "." + fi + + "$ZPOOL" export "$pool" + r=$? ; RET=$((RET + r)) + [ -z "$init" ] && zfs_log_end_msg $r + done < <("$ZPOOL" list -H -o name) + [ -n "$init" ] && zfs_log_end_msg $RET + + if [ "$RET" == "0" -a -z "$root_pool" ]; then + zfs_action "Unloading modules" rmmod zfs zunicode zavl \ + zcommon znvpair spl + fi +} + +# Output the status and list of pools +do_status() +{ + check_module_loaded || exit 0 + + "$ZPOOL" status && echo "" && "$ZPOOL" list +} + +do_start() +{ + if [ "$VERBOSE_MOUNT" == 'yes' ]; then + zfs_log_begin_msg "Checking if zfs userspace tools present" + fi + + if checksystem; then + [ "$VERBOSE_MOUNT" == 'yes' ] && zfs_log_end_msg 0 + + if [ "$VERBOSE_MOUNT" == 'yes' ]; then + zfs_log_begin_msg "Loading kernel ZFS infrastructure" + fi + + if ! load_module; then + [ "$VERBOSE_MOUNT" == 'yes' ] && zfs_log_end_msg 1 + return 5 + fi + [ "$VERBOSE_MOUNT" == 'yes' ] && zfs_log_end_msg 0 + + do_import && udev_trigger # just to make sure we get zvols. + fi +} + +do_stop() +{ + # Check to see if the module is even loaded. + check_module_loaded || exit 0 + + do_export +} + +# ---------------------------------------------------- + +case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + status) + do_status + ;; + force-reload|condrestart|reload|restart) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop|status}" + exit 3 + ;; +esac + +exit $? diff --git a/etc/init.d/zfs-mount.in b/etc/init.d/zfs-mount.in new file mode 100755 index 000000000000..3a6a2e4e94fb --- /dev/null +++ b/etc/init.d/zfs-mount.in @@ -0,0 +1,173 @@ +#!/bin/bash +# +# zfs-mount This script will mount/umount the zfs filesystems. +# +# chkconfig: 2345 06 99 +# description: This script will mount/umount the zfs filesystems during +# system boot/shutdown. 
Configuration of which filesystems +# should be mounted is handled by the zfs 'mountpoint' and +# 'canmount' properties. See the zfs(8) man page for details. +# It is also responsible for all userspace zfs services. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-mount +# Required-Start: $local_fs zfs-import +# Required-Stop: $local_fs zfs-import +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# X-Stop-After: zfs-share +# Short-Description: Mount ZFS filesystems and volumes +# Description: Run the `zfs mount -a` or `zfs umount -a` commands. +### END INIT INFO + +# Source the common init script +. @sysconfdir@/zfs/common.init + +# ---------------------------------------------------- + +# Mount all datasets/filesystems +do_mount() +{ + local verbose overlay i + + [ "$VERBOSE_MOUNT" == 'yes' ] && verbose=v + [ "$DO_OVERLAY_MOUNTS" == 'yes' ] && overlay=O + + zfs_action "Mounting ZFS filesystem(s)" \ + "$ZFS" mount -a$verbose$overlay $MOUNT_EXTRA_OPTIONS + + # Require each volume/filesytem to have 'noauto' and no fsck + # option. This shouldn't really be necessary, as long as one + # can get zfs-import to run sufficiently early on in the boot + # process - before local mounts. This is just here in case/if + # this isn't possible. 
+ [ "$VERBOSE_MOUNT" == 'yes' ] && \ + zfs_log_begin_msg "Mounting volumes and filesystems registered in fstab" + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + for i in "${!FSTAB[@]}" ; do + in_mtab "${FSTAB[$i]}" && continue + is_mounted "${FSTAB[$i]}" && continue + + [ "$VERBOSE_MOUNT" == 'yes' ] && \ + zfs_log_progress_msg "${FSTAB[$i]} " + fsck "${FSTAB[$i]}" && mount "${FSTAB[$i]}" + done + + read_mtab "zfs" + read_fstab "zfs" + for i in "${!FSTAB[@]}" ; do + in_mtab "${FSTAB[$i]}" && continue + is_mounted "${FSTAB[$i]}" && continue + + [ "$VERBOSE_MOUNT" == 'yes' ] && \ + zfs_log_progress_msg "${FSTAB[$i]} " + mount "${FSTAB[$i]}" + done + [ "$VERBOSE_MOUNT" == 'yes' ] && zfs_log_end_msg 0 + + return 0 +} + +# Unmount all filesystems +do_unmount() +{ + local i + + # This shouldn't really be necessary, as long as one can get + # zfs-import to run sufficiently late in the shutdown/reboot process + # - after unmounting local filesystems. This is just here in case/if + # this isn't possible. 
+ zfs_action "Unmounting ZFS filesystems" "$ZFS" unmount -a + [ "$VERBOSE_MOUNT" == 'yes' ] && \ + zfs_log_begin_msg "Unmounting volumes and filesystems registered in fstab" + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + for i in "${!FSTAB[@]}" ; do + dev=/dev/$(find "${FSTAB[$i]}" | sed 's@.*/@@') + in_mtab "$dev" && continue + is_mounted "${FSTAB[$i]}" || continue + + [ "$VERBOSE_MOUNT" == 'yes' ] && \ + zfs_log_progress_msg "${FSTAB[$i]} " + umount "${FSTAB[$i]}" + done + + read_mtab "zfs" + read_fstab "zfs" + for i in "${!FSTAB[@]}" ; do + in_mtab "${FSTAB[$i]}" && continue + is_mounted "${FSTAB[$i]}" || continue + + [ "$VERBOSE_MOUNT" == 'yes' ] && \ + zfs_log_progress_msg "${FSTAB[$i]} " + umount "${FSTAB[$i]}" + done + [ "$VERBOSE_MOUNT" == 'yes' ] && zfs_log_end_msg 0 +} + +do_start() +{ + check_module_loaded || exit 0 + + # fix mtab to include already-mounted fs filesystems, in case there are any + # we ONLY do this if mtab does not point to /proc/mounts + # which is the case in some systems (systemd may bring that soon) + if ! readlink /etc/mtab | grep -q /proc ; then + if grep -qE "(^/dev/zd|^/dev/zvol| zfs )" /proc/mounts ; then + zfs_action "Registering already-mounted ZFS filesystems and volumes" \ + reregister_mounts + fi + fi + + # Ensure / exists in /etc/mtab, if not update mtab accordingly. + # This should be handled by rc.sysinit but lets be paranoid. + awk '$2 == "/" { exit 1 }' /etc/mtab + RETVAL=$? + if [ "$RETVAL" -eq 0 ]; then + mount -f / + fi + + case "$ZFS_MOUNT" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 3 + ;; + esac + + do_mount +} + +do_stop() +{ + case "$ZFS_UNMOUNT" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + check_module_loaded || exit 0 + + do_unmount +} + +# ---------------------------------------------------- + +case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + force-reload|condrestart|reload|restart|status) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." 
+ echo "Usage: $0 {start|stop}" + exit 3 + ;; +esac + +exit $? diff --git a/etc/init.d/zfs-share.in b/etc/init.d/zfs-share.in new file mode 100755 index 000000000000..a5fc1bb89a58 --- /dev/null +++ b/etc/init.d/zfs-share.in @@ -0,0 +1,73 @@ +#!/bin/bash +# +# zfs-share This script will network share zfs filesystems and volumes. +# +# chkconfig: 2345 30 99 +# description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-share +# Required-Start: $local_fs $network $remote_fs zfs-mount +# Required-Stop: $local_fs $network $remote_fs zfs-mount +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Should-Start: iscsi iscsitarget istgt scst @NFS_SRV@ samba samba4 zfs-mount +# Should-Stop: iscsi iscsitarget istgt scst @NFS_SRV@ samba samba4 zfs-mount +# Short-Description: Network share ZFS datasets and volumes. +# Description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. +### END INIT INFO + +# Source the common init script +. @sysconfdir@/zfs/common.init + +# ---------------------------------------------------- + +do_start() +{ + case "$ZFS_SHARE" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + check_module_loaded || exit 0 + + zfs_action "Sharing ZFS filesystems" "$ZFS" share -a +} + +do_stop() +{ + case "$ZFS_UNSHARE" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + check_module_loaded || exit 0 + + zfs_action "Unsharing ZFS filesystems" "$ZFS" unshare -a +} + +# ---------------------------------------------------- + +case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + force-reload|reload|restart|status) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop}" + exit 3 + ;; +esac + +exit $? 
diff --git a/etc/init.d/zfs-zed.in b/etc/init.d/zfs-zed.in new file mode 100755 index 000000000000..f62dd0eb64ee --- /dev/null +++ b/etc/init.d/zfs-zed.in @@ -0,0 +1,106 @@ +#!/bin/bash +# +# zfs-zed +# +# chkconfig: 2345 01 99 +# description: This script will start and stop the ZFS Event Daemon. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-zed +# Required-Start: mtab +# Required-Stop: $local_fs mtab +# Default-Start: S +# Default-Stop: 0 1 6 +# X-Start-Before: checkfs +# X-Stop-After: zfs-import +# Short-Description: ZFS Event Daemon +# Description: zed monitors ZFS events. When a zevent is posted, zed +# will run any scripts that have been enabled for the +# corresponding zevent class. +### END INIT INFO +# +# NOTE: Not having '$local_fs' on Required-Start but only on Required-Stop +# is on purpose. If we have '$local_fs' in both (and X-Start-Before=checkfs) +# we get conflicts - zed and import needs to be started extremly early, +# but not stopped TO late. + +# Source the common init script +. @sysconfdir@/zfs/common.init + +ZED_NAME="zed" +ZED_PIDFILE="@runstatedir@/$ZED_NAME.pid" + +# Exit if the package is not installed +[ -x "$ZED" ] || exit 0 + +do_start() +{ + check_module_loaded || exit 0 + + zfs_action "Starting ZFS Event Daemon" zfs_daemon_start \ + "$ZED_PIDFILE" "$ZED" "$ZED_ARGS" + return $? +} + +do_stop() +{ + check_module_loaded || exit 0 + + zfs_action "Stopping ZFS Event Daemon" zfs_daemon_stop \ + "$ZED_PIDFILE" "$ZED" "$ZED_NAME" + RETVAL=$? + + [ "$RETVAL" == 0 ] && rm -f "$ZED_PIDFILE" + + return $RETVAL +} + +do_status() +{ + check_module_loaded || exit 0 + + zfs_daemon_status "$ZED" "$ZED_NAME" + return $? +} + +do_reload() +{ + check_module_loaded || exit 0 + + zfs_action "Reloading ZFS Event Daemon" zfs_daemon_reload \ + "$PIDFILE" "$ZED_NAME" + return $? 
+} + +# ---------------------------------------------------- + +case "$1" in + start) + do_start + ;; + + stop) + do_stop + ;; + + status) + do_status + ;; + + reload) + do_reload + ;; + + restart) + do_stop + do_start + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop|status|reload|restart}" + exit 1 + ;; +esac + +exit $? diff --git a/etc/init.d/zfs.fedora.in b/etc/init.d/zfs.fedora.in deleted file mode 100644 index 86f430dce925..000000000000 --- a/etc/init.d/zfs.fedora.in +++ /dev/null @@ -1,243 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: -# Required-Stop: -# Should-Start: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 1 -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -export PATH=/usr/local/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -# Source function library & LSB routines -. /etc/rc.d/init.d/functions - -# script variables -RETVAL=0 -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -servicename=zfs -LOCKFILE=/var/lock/subsys/$servicename - -# functions -zfs_installed() { - modinfo zfs > /dev/null 2>&1 || return 5 - $ZPOOL > /dev/null 2>&1 - [ $? == 127 ] && return 5 - $ZFS > /dev/null 2>&1 - [ $? 
== 127 ] && return 5 - return 0 -} - -reregister_mounts() { - cat /etc/mtab | while read -r fs mntpnt fstype opts rest ; do - fs=`printf '%b\n' "$fs"` - mntpnt=`printf '%b\n' "$mntpnt"` - if [ "$fstype" == "zfs" ] ; then - if [ "$mntpnt" == "/" ] ; then - mount -f -o zfsutil -t zfs --move / /removethismountpointhoweverpossible - umount --fake /removethismountpointhoweverpossible - else - umount --fake "$mntpnt" - fi - elif echo "$fs" | grep -q "^/dev/zd" ; then - if [ "$mntpnt" == "/" ] ; then - mount -f -t "$fstype" --move / /removethismountpointhoweverpossible - umount --fake /removethismountpointhoweverpossible - else - umount --fake "$mntpnt" - fi - fi - done - cat /proc/mounts | while read -r fs mntpnt fstype opts rest ; do - fs=`printf '%b\n' "$fs"` - mntpnt=`printf '%b\n' "$mntpnt"` - if [ "$fstype" == "zfs" ] ; then - mount -f -t zfs -o zfsutil "$fs" "$mntpnt" - elif echo "$fs" | grep -q "^/dev/zd" ; then - mount -f -t "$fstype" -o "$opts" "$fs" "$mntpnt" - fi - done -} - -# i need a bash guru to simplify this, since this is copy and paste, but donno how -# to correctly dereference variable names in bash, or how to do this right - -declare -A MTAB -declare -A FSTAB - -# first parameter is a regular expression that filters mtab -read_mtab() { - for fs in "${!MTAB[@]}" ; do unset MTAB["$fs"] ; done - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - MTAB["$fs"]=$mntpnt - done < <(grep "$1" /etc/mtab) -} - -in_mtab() { - [ "${MTAB[$1]}" != "" ] - return $? -} - -# first parameter is a regular expression that filters fstab -read_fstab() { - for fs in "${!FSTAB[@]}" ; do unset FSTAB["$fs"] ; done - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - FSTAB["$fs"]=$mntpnt - done < <(grep "$1" /etc/fstab) -} - -in_fstab() { - [ "${FSTAB[$1]}" != "" ] - return $? -} - -start() -{ - if [ -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. 
If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # Delay until all required block devices are present. - udevadm settle - - # load kernel module infrastructure - if ! grep -q zfs /proc/modules ; then - action $"Loading kernel ZFS infrastructure: " modprobe zfs || return 5 - fi - - # fix mtab to include already-mounted fs filesystems, in case there are any - # we ONLY do this if mtab does not point to /proc/mounts - # which is the case in some systems (systemd may bring that soon) - if ! readlink /etc/mtab | grep -q /proc ; then - if grep -qE "(^/dev/zd| zfs )" /proc/mounts ; then - action $"Registering already-mounted ZFS filesystems and volumes: " reregister_mounts || return 150 - fi - fi - - if [ -f $ZPOOL_CACHE ] ; then - - echo -n $"Importing ZFS pools not yet imported: " - $ZPOOL import -c $ZPOOL_CACHE -aN || true # stupid zpool will fail if all pools are already imported - RETVAL=$? - if [ $RETVAL -ne 0 ]; then - failure "Importing ZFS pools not yet imported: " - return 151 - fi - success "Importing ZFS pools not yet imported: " - - fi - - action $"Mounting ZFS filesystems not yet mounted: " $ZFS mount -a || return 152 - - action $"Exporting ZFS filesystems: " $ZFS share -a || return 153 - - read_mtab "^/dev/zd" - read_fstab "^/dev/zd" - - template=$"Mounting volume %s registered in fstab: " - for volume in "${!FSTAB[@]}" ; do - if in_mtab "$volume" ; then continue ; fi - string=`printf "$template" "$volume"` - action "$string" mount "$volume" - done - - touch "$LOCKFILE" -} - -stop() -{ - if [ ! -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. 
If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # the poweroff of the system takes care of this - # but it never unmounts the root filesystem itself - # shit - - action $"Syncing ZFS filesystems: " sync - # about the only thing we can do, and then we - # hope that the umount process will succeed - # unfortunately the umount process does not dismount - # the root file system, there ought to be some way - # we can tell zfs to just flush anything in memory - # when a request to remount,ro comes in - - #echo -n $"Unmounting ZFS filesystems: " - #$ZFS umount -a - #RETVAL=$? - #if [ $RETVAL -ne 0 ]; then - # failure - - # return 8 - #fi - #success - - rm -f "$LOCKFILE" -} - -# See how we are called -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - lsmod | grep -q zfs || RETVAL=3 - $ZPOOL status && echo && $ZFS list || { - [ -f "$LOCKFILE" ] && RETVAL=2 || RETVAL=4 - } - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ] ; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - RETVAL=3 - ;; -esac - -exit $RETVAL diff --git a/etc/init.d/zfs.gentoo.in b/etc/init.d/zfs.gentoo.in deleted file mode 100644 index 07fce01ba04b..000000000000 --- a/etc/init.d/zfs.gentoo.in +++ /dev/null @@ -1,124 +0,0 @@ -#!/sbin/runscript -# Copyright 1999-2011 Gentoo Foundation -# Released under the 2-clause BSD license. -# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/files/zfs,v 0.9 2011/04/30 10:13:43 devsk Exp $ - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -depend() -{ - # Try to allow people to mix and match fstab with ZFS in a way that makes sense. - if [ "$(mountinfo -s /)" = 'zfs' ] - then - before localmount - else - after localmount - fi - - # bootmisc will log to /var which may be a different zfs than root. 
- before bootmisc logger - use mtab - keyword -lxc -openvz -prefix -vserver -} - -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -ZFS_MODULE=zfs - -checksystem() { - if [ ! -c /dev/zfs ]; then - einfo "Checking if ZFS modules present" - if ! modinfo zfs > /dev/null 2>&1 ; then - eerror "$ZFS_MODULE not found. Is the ZFS package installed?" - return 1 - fi - fi - einfo "Checking if zfs userspace tools present" - if [ ! -x $ZPOOL ]; then - eerror "$ZPOOL binary not found." - return 1 - fi - if [ ! -x $ZFS ]; then - eerror "$ZFS binary not found." - return 1 - fi - return 0 -} - -start() { - ebegin "Starting ZFS" - checksystem || return 1 - - # Delay until all required block devices are present. - udevadm settle - - if [ ! -c /dev/zfs ]; then - modprobe $ZFS_MODULE - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to load the $ZFS_MODULE module, check 'dmesg|tail'." - eend $rv - return $rv - fi - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ -f $ZPOOL_CACHE ]; then - einfo "Importing ZFS pools" - # as per fedora script, import can fail if all pools are already imported - # The check for $rv makes no sense...but someday, it will work right. - $ZPOOL import -c $ZPOOL_CACHE -aN 2>/dev/null || true - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to import not-yet imported pools." - eend $rv - return $rv - fi - fi - - einfo "Mounting ZFS filesystems" - $ZFS mount -a - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to mount ZFS filesystems." - eend $rv - return $rv - fi - - einfo "Exporting ZFS filesystems" - $ZFS share -a - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to export ZFS filesystems." - eend $rv - return $rv - fi - - eend 0 - return 0 -} - -stop() -{ - ebegin "Unmounting ZFS filesystems" - $ZFS umount -a - rv=$? - if [ $rv -ne 0 ]; then - einfo "Some ZFS filesystems not unmounted" - fi - - # Don't fail if we couldn't umount everything. 
/usr might be in use. - eend 0 - return 0 -} - -status() -{ - # show pool status and list - $ZPOOL status && echo && $ZPOOL list -} diff --git a/etc/init.d/zfs.in b/etc/init.d/zfs.in new file mode 100644 index 000000000000..8515f2b1ee80 --- /dev/null +++ b/etc/init.d/zfs.in @@ -0,0 +1,75 @@ +# ZoL userland configuration. + +# Run `zfs mount -a` during system start? +ZFS_MOUNT='yes' + +# Run `zfs unmount -a` during system stop? +ZFS_UNMOUNT='yes' + +# Run `zfs share -a` during system start? +# nb: The shareiscsi, sharenfs, and sharesmb dataset properties. +ZFS_SHARE='yes' + +# Run `zfs unshare -a` during system stop? +ZFS_UNSHARE='yes' + +# Should we use '-d /dev/disk/by-*' when importing pool? +# Variable is somewhat misleading. Previously the code tried _only_ +# '/dev/disk/by-id', but will now try any '/dev/disk/by-*' directory. +USE_DISK_BY_ID='yes' + +# Should the datasets be mounted verbosely? +# A mount counter will be used when mounting if set to 'yes'. +VERBOSE_MOUNT='no' + +# Should we allow overlay mounts? +# This is standard in Linux, but not ZFS which comes from Solaris where this +# is not allowed. +DO_OVERLAY_MOUNTS='no' + +# Any additional option to the 'zfs mount' command line? +# Include '-o' for each option wanted. +MOUNT_EXTRA_OPTIONS="" + +# Build kernel modules with the --enable-debug switch? +ZFS_DKMS_ENABLE_DEBUG='no' + +# Build kernel modules with the --enable-debug-dmu-tx switch? +ZFS_DKMS_ENABLE_DEBUG_DMU_TX='no' + +# Keep debugging symbols in kernel modules? +ZFS_DKMS_DISABLE_STRIP='no' + +# Wait for this many seconds in the initrd pre_mountroot? +# This delays startup and should be '0' on most systems. +ZFS_INITRD_PRE_MOUNTROOT_SLEEP='0' + +# Wait for this many seconds in the initrd mountroot? +# This delays startup and should be '0' on most systems. 
This might help on +# systems which have their ZFS root on a USB disk that takes just a little +# longer to be available +ZFS_INITRD_POST_MODPROBE_SLEEP='0' + +# List of additional datasets to mount after the root dataset is mounted? +# +# The init script will use the mountpoint specified in the 'mountpoint' +# property value in the dataset to determine where it should be mounted. +# +# This is a space separated list, and will be mounted in the order specified, +# so if one filesystem depends on a previous mountpoint, make sure to put +# them in the right order. +# +# It is not necessary to add filesystems below the root fs here. It is +# taken care of by the initrd script automatically. These are only for +# additional filesystems needed. Such as /opt, /usr/local which is not +# located under the root fs. +# Example: If root FS is 'rpool/ROOT/rootfs', this would make sense. +#ZFS_INITRD_ADDITIONAL_DATASETS="rpool/ROOT/usr rpool/ROOT/var" + +# List of pools that should NOT be imported at boot? +# This is a space separated list. +#ZFS_POOL_EXCEPTIONS="test2" + +# Optional arguments for the ZFS Event Daemon (ZED). +# See zed(8) for more information on available options. +#ZED_ARGS="-M" diff --git a/etc/init.d/zfs.lsb.in b/etc/init.d/zfs.lsb.in deleted file mode 100644 index 05e815ede8fe..000000000000 --- a/etc/init.d/zfs.lsb.in +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. 
-# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: $local_fs -# Required-Stop: $local_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Should-Stop: -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -# Source function library. -. /lib/lsb/init-functions - -LOCKFILE=/var/lock/zfs -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -USE_DISK_BY_ID=0 -VERBOSE_MOUNT=0 -DO_OVERLAY_MOUNTS=0 -MOUNT_EXTRA_OPTIONS="" - -# Source zfs configuration. -[ -r '/etc/default/zfs' ] && . /etc/default/zfs - -[ -x "$ZPOOL" ] || exit 1 -[ -x "$ZFS" ] || exit 2 - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -start() -{ - [ -f "$LOCKFILE" ] && return 3 - - # Delay until all required block devices are present. - udevadm settle - - # Load the zfs module stack - /sbin/modprobe zfs - - # Ensure / exists in /etc/mtab, if not update mtab accordingly. - # This should be handled by rc.sysinit but lets be paranoid. - awk '$2 == "/" { exit 1 }' /etc/mtab - RETVAL=$? - if [ "$RETVAL" -eq 0 ]; then - /bin/mount -f / - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ "$USE_DISK_BY_ID" -eq 1 ]; then - log_begin_msg "Importing ZFS pools" - "$ZPOOL" import -d /dev/disk/by-id -aN 2>/dev/null - ret=$? - log_end_msg $ret - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - elif [ -f "$ZPOOL_CACHE" ] ; then - log_begin_msg "Importing ZFS pools" - "$ZPOOL" import -c "$ZPOOL_CACHE" -aN 2>/dev/null - ret=$? 
- log_end_msg $ret - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - fi - - if [ -n "$POOL_IMPORTED" ]; then - if [ "$VERBOSE_MOUNT" -eq 1 ]; then - verbose=v - fi - - if [ "$DO_OVERLAY_MOUNTS" -eq 1 ]; then - overlay=O - fi - - log_begin_msg "Mounting ZFS filesystems" - "$ZFS" mount -a$verbose$overlay$MOUNT_EXTRA_OPTIONS - log_end_msg $? - - log_begin_msg "Exporting ZFS filesystems" - "$ZFS" share -a - log_end_msg $? - fi - - touch "$LOCKFILE" -} - -stop() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - log_begin_msg "Unsharing ZFS filesystems" - "$ZFS" unshare -a - log_end_msg $? - - log_begin_msg "Unmounting ZFS filesystems" - "$ZFS" umount -a - log_end_msg $? - - rm -f "$LOCKFILE" -} - -status() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - "$ZPOOL" status && echo "" && "$ZPOOL" list -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - status - RETVAL=$? - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ]; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - ;; -esac - -exit $RETVAL diff --git a/etc/init.d/zfs.lunar.in b/etc/init.d/zfs.lunar.in deleted file mode 100644 index 7a51104c2647..000000000000 --- a/etc/init.d/zfs.lunar.in +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash -# -# zfs This shell script takes care of starting (mount) and -# stopping (umount) zfs shares. -# -# chkconfig: 35 60 40 -# description: ZFS is a filesystem developed by Sun, ZFS is a -# combined file system and logical volume manager -# designed by Sun Microsystems. Made available to Linux -# using SPL (Solaris Porting Layer) by zfsonlinux.org. -# probe: true - -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -case $1 in - start) echo "$1ing ZFS filesystems" - - # Delay until all required block devices are present. 
- udevadm settle - - if ! grep "zfs" /proc/modules > /dev/null; then - echo "ZFS kernel module not loaded yet; loading..."; - if ! modprobe zfs; then - echo "Failed to load ZFS kernel module..."; - exit 0; - fi - fi - - if ! [ `uname -m` == "x86_64" ]; then - echo "Warning: You're not running 64bit. Currently native zfs in"; - echo " linux is only supported and tested on 64bit."; - # should we break here? People doing this should know what they - # do, thus i'm not breaking here. - fi - - # mount the filesystems - while IFS= read -r -d $'\n' dev; do - mdev=$(echo "$dev" | awk '{ print $1; }') - echo -n "mounting $mdev..." - if $ZFS mount $mdev; then - echo -e "done"; - else - echo -e "failed"; - fi - done < <($ZFS list -H); - - # export the filesystems - echo -n "exporting ZFS filesystems..." - if $ZFS share -a; then - echo -e "done"; - else - echo -e "failed"; - fi - - - ;; - - stop) echo "$1ping ZFS filesystems" - - if grep "zfs" /proc/modules > /dev/null; then - # module is loaded, so we can try to umount filesystems - while IFS= read -r -d $'\n' dev; do - mdev=$(echo "$dev" | awk '{ print $1 }'); - echo -n "umounting $mdev..."; - if $ZFS umount $mdev; then - echo -e "done"; - else - echo -e "failed"; - fi - # the next line is, because i have to reverse the - # output, otherwise it wouldn't work as it should - done < <($ZFS list -H | tac); - - # and finally let's rmmod the module - rmmod zfs - - - else - # module not loaded, no need to umount anything - exit 0 - fi - - ;; - - restart) echo "$1ing ZFS filesystems" - $0 stop - $0 start - ;; - - *) echo "Usage: $0 {start|stop|restart}" - ;; - -esac diff --git a/etc/init.d/zfs.redhat.in b/etc/init.d/zfs.redhat.in deleted file mode 100644 index 30b9f0bf62b9..000000000000 --- a/etc/init.d/zfs.redhat.in +++ /dev/null @@ -1,150 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. 
-# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: $local_fs -# Required-Stop: $local_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Should-Stop: -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -# Source function library. -. /etc/rc.d/init.d/functions - -LOCKFILE=/var/lock/zfs -ZED="@sbindir@/zed" -ZED_PIDFILE="@runstatedir@/zed.pid" -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="/etc/zfs/zpool.cache" -USE_DISK_BY_ID=0 -VERBOSE_MOUNT=0 -DO_OVERLAY_MOUNTS=0 -MOUNT_EXTRA_OPTIONS="" - -# Source zfs configuration. -[ -r '/etc/sysconfig/zfs' ] && . /etc/sysconfig/zfs - -[ -x "$ZPOOL" ] || exit 1 -[ -x "$ZFS" ] || exit 2 - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -start() -{ - [ -f "$LOCKFILE" ] && return 3 - - # Delay until all required block devices are present. - udevadm settle - - # Load the zfs module stack - /sbin/modprobe zfs - - # Start the ZED for event handling - action $"Starting ZFS Event Daemon" daemon --pidfile="$ZED_PIDFILE" "$ZED" - - # Ensure / exists in /etc/mtab, if not update mtab accordingly. - # This should be handled by rc.sysinit but lets be paranoid. - awk '$2 == "/" { exit 1 }' /etc/mtab - RETVAL=$? - if [ "$RETVAL" -eq 0 ]; then - /bin/mount -f / - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. 
- if [ "$USE_DISK_BY_ID" -eq 1 ]; then - action $"Importing ZFS pools" \ - "$ZPOOL" import -d /dev/disk/by-id -aN 2>/dev/null - ret=$? - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - elif [ -f "$ZPOOL_CACHE" ] ; then - action $"Importing ZFS pools" \ - "$ZPOOL" import -c "$ZPOOL_CACHE" -aN 2>/dev/null - ret=$? - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - fi - - if [ -n "$POOL_IMPORTED" ]; then - if [ "$VERBOSE_MOUNT" -eq 1 ]; then - verbose=v - fi - - if [ "$DO_OVERLAY_MOUNTS" -eq 1 ]; then - overlay=O - fi - - action $"Mounting ZFS filesystems" \ - "$ZFS" mount -a$verbose$overlay$MOUNT_EXTRA_OPTIONS - - action $"Sharing ZFS filesystems" \ - "$ZFS" share -a - fi - - touch "$LOCKFILE" -} - -stop() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - action $"Unsharing ZFS filesystems" "$ZFS" unshare -a - action $"Unmounting ZFS filesystems" "$ZFS" umount -a - action $"Shutting down ZFS Event Daemon" killproc -p "$ZED_PIDFILE" "$ZED" - - rm -f "$LOCKFILE" -} - -status() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - "$ZPOOL" status && echo "" && "$ZPOOL" list -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - status - RETVAL=$? 
- ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ]; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - ;; -esac - -exit $RETVAL diff --git a/rpm/generic/zfs.spec.in b/rpm/generic/zfs.spec.in index c9bf36e6239d..7156ddf4ea47 100644 --- a/rpm/generic/zfs.spec.in +++ b/rpm/generic/zfs.spec.in @@ -229,7 +229,12 @@ find %{?buildroot}%{_libdir} -name '*.la' -exec rm -f {} \; %if 0%{?_systemd} %systemd_post zfs.target %else -[ -x /sbin/chkconfig ] && /sbin/chkconfig --add zfs +if [ -x /sbin/chkconfig ]; then + /sbin/chkconfig --add zfs-import + /sbin/chkconfig --add zfs-mount + /sbin/chkconfig --add zfs-share + /sbin/chkconfig --add zfs-zed +fi %endif exit 0 @@ -237,8 +242,11 @@ exit 0 %if 0%{?_systemd} %systemd_preun zfs.target %else -if [ $1 -eq 0 ] ; then - [ -x /sbin/chkconfig ] && /sbin/chkconfig --del zfs +if [ $1 -eq 0 ] && [ -x /sbin/chkconfig ]; then + /sbin/chkconfig --del zfs-import + /sbin/chkconfig --del zfs-mount + /sbin/chkconfig --del zfs-share + /sbin/chkconfig --del zfs-zed fi %endif exit 0 @@ -265,6 +273,7 @@ exit 0 %{_presetdir}/* %else %{_sysconfdir}/init.d/* +%{_initconfdir}/zfs %endif %files -n libzpool2