From 2b24a7144bdbd2c29db89f22adf966f74bc0b436 Mon Sep 17 00:00:00 2001 From: Turbo Fredriksson Date: Thu, 23 Apr 2015 20:35:45 +0200 Subject: [PATCH] Base init scripts for SYSV systems * Based on the init scripts included with Debian GNU/Linux, then take code from the already existing ones, trying to merge them into one for better maintainability. + Merge https://github.com/zfsonlinux/zfs/pull/2148 - Inform OpenRC that ZFS uses mtab. + Add a configurable ZFS_INITRD_POST_MODPROBE_SLEEP used in the initrd to sleep after the modprobe. + The import function, do_import(), imports pools by name instead of '-a' [all]. + Test all '/dev/disk/by-*' dirs for import. Include /dev as a last ditch attempt. + Fallback on importing the pool using the cache file (if it exists) if the 'by-id' didn't work. + Add exceptions to pool imports. + ZED script from the Debian GNU/Linux packages added. Signed-off-by: Turbo Fredriksson turbo@bayour.com Closes: #2974, #2107. --- config/zfs-build.m4 | 9 ++ etc/init.d/.gitignore | 5 + etc/init.d/Makefile.am | 48 +++++--- etc/init.d/common.init.in | 250 ++++++++++++++++++++++++++++++++++++++ etc/init.d/zfs-import.in | 183 ++++++++++++++++++++++++++++ etc/init.d/zfs-mount.in | 157 ++++++++++++++++++++++++ etc/init.d/zfs-share.in | 115 ++++++++++++++++++ etc/init.d/zfs-zed.in | 136 +++++++++++++++++++++ etc/init.d/zfs.fedora.in | 243 ------------------------------------ etc/init.d/zfs.gentoo.in | 124 ------------------- etc/init.d/zfs.in | 74 +++++++++++ etc/init.d/zfs.lsb.in | 153 ----------------------- etc/init.d/zfs.lunar.in | 100 --------------- etc/init.d/zfs.redhat.in | 150 ----------------------- 14 files changed, 961 insertions(+), 786 deletions(-) create mode 100644 etc/init.d/common.init.in create mode 100755 etc/init.d/zfs-import.in create mode 100755 etc/init.d/zfs-mount.in create mode 100755 etc/init.d/zfs-share.in create mode 100755 etc/init.d/zfs-zed.in delete mode 100644 etc/init.d/zfs.fedora.in delete mode 100644 etc/init.d/zfs.gentoo.in create mode 100644 etc/init.d/zfs.in delete mode 100644 etc/init.d/zfs.lsb.in delete mode 100644 etc/init.d/zfs.lunar.in delete mode 100644 etc/init.d/zfs.redhat.in diff --git a/config/zfs-build.m4 b/config/zfs-build.m4 index 8d3a373047ec..f2f944fe720c 100644 --- a/config/zfs-build.m4 +++ b/config/zfs-build.m4 @@ -311,6 +311,15 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [ AC_MSG_RESULT([$DEFAULT_INIT_SCRIPT]) AC_SUBST(DEFAULT_INIT_SCRIPT) + + AC_MSG_CHECKING([default init config direectory]) + AS_IF([test -d "/etc/default"], [ + DEFAULT_INITCONF_DIR="/etc/default" + ], [test -d "/etc/sysconfig"], [ + DEFAULT_INITCONF_DIR="/etc/sysconfig" + ]) + AC_MSG_RESULT([$DEFAULT_INITCONF_DIR]) + AC_SUBST(DEFAULT_INITCONF_DIR) ]) dnl # diff --git a/etc/init.d/.gitignore b/etc/init.d/.gitignore index 73304bc2cd4a..0979b90af62d 100644 --- a/etc/init.d/.gitignore +++ b/etc/init.d/.gitignore @@ -1 +1,6 @@ +common.init +zfs-import +zfs-mount +zfs-share +zfs-zed zfs diff --git a/etc/init.d/Makefile.am b/etc/init.d/Makefile.am index 5a049dfe14fd..cee81ab0a8ce 100644 --- a/etc/init.d/Makefile.am +++ b/etc/init.d/Makefile.am @@ -1,22 +1,38 @@ initdir = $(DEFAULT_INIT_DIR) -init_SCRIPTS = zfs +init_SCRIPTS = zfs-import zfs-mount zfs-share zfs-zed + +initcommondir = $(sysconfdir)/zfs +initcommon_SCRIPTS = common.init + +initconfdir = $(DEFAULT_INITCONF_DIR) +initconf_SCRIPTS = zfs EXTRA_DIST = \ - $(top_srcdir)/etc/init.d/zfs.fedora.in \ - $(top_srcdir)/etc/init.d/zfs.gentoo.in \ - $(top_srcdir)/etc/init.d/zfs.lsb.in \ - 
$(top_srcdir)/etc/init.d/zfs.lunar.in \ - $(top_srcdir)/etc/init.d/zfs.redhat.in + $(top_srcdir)/etc/init.d/common.init.in \ + $(top_srcdir)/etc/init.d/zfs-share.in \ + $(top_srcdir)/etc/init.d/zfs-import.in \ + $(top_srcdir)/etc/init.d/zfs-mount.in \ + $(top_srcdir)/etc/init.d/zfs-zed.in \ + $(top_srcdir)/etc/init.d/zfs.in -$(init_SCRIPTS): $(init_SCRIPTS).$(DEFAULT_INIT_SCRIPT).in - -$(SED) -e 's,@bindir\@,$(bindir),g' \ - -e 's,@sbindir\@,$(sbindir),g' \ - -e 's,@udevdir\@,$(udevdir),g' \ - -e 's,@udevruledir\@,$(udevruledir),g' \ - -e 's,@sysconfdir\@,$(sysconfdir),g' \ - -e 's,@initdir\@,$(initdir),g' \ - -e 's,@runstatedir\@,$(runstatedir),g' \ - '$@.$(DEFAULT_INIT_SCRIPT).in' >'$@' +$(init_SCRIPTS) $(initconf_SCRIPTS) $(initcommon_SCRIPTS): + -(if [ -e /etc/debian_version ]; then \ + NFS_SRV=nfs-kernel-server; \ + else \ + NFS_SRV=nfs; \ + fi; \ + $(SED) -e 's,@bindir\@,$(bindir),g' \ + -e 's,@sbindir\@,$(sbindir),g' \ + -e 's,@udevdir\@,$(udevdir),g' \ + -e 's,@udevruledir\@,$(udevruledir),g' \ + -e 's,@sysconfdir\@,$(sysconfdir),g' \ + -e 's,@initconfdir\@,$(initconfdir),g' \ + -e 's,@initdir\@,$(initdir),g' \ + -e 's,@runstatedir\@,$(runstatedir),g' \ + -e "s,@NFS_SRV\@,$$NFS_SRV,g" \ + '$@.in' >'$@'; \ + [ '$@' = 'common.init' -o '$@' = 'zfs' ] || \ + chmod +x '$@') distclean-local:: - -$(RM) $(init_SCRIPTS) + -$(RM) $(init_SCRIPTS) $(initcommon_SCRIPTS) $(initconf_SCRIPTS) diff --git a/etc/init.d/common.init.in b/etc/init.d/common.init.in new file mode 100644 index 000000000000..1b1920d592de --- /dev/null +++ b/etc/init.d/common.init.in @@ -0,0 +1,250 @@ +# This is a script with common functions etc used by zfs-mount and zfs-share. +# +# It is _NOT_ to be called independently + +PATH=/sbin:/bin:/usr/bin:/usr/sbin + +# Source function library +if [ -f /lib/lsb/init-functions ]; then + . /lib/lsb/init-functions +elif [ -f /etc/rc.d/init.d/functions ]; then + . /etc/rc.d/init.d/functions +fi + +# Of course the functions we need is called differently +# on different distributions - it would be way to easy +# otherwise!! +if type log_failure_msg > /dev/null ; then + # LSB functions + log_begin_msg=log_begin_msg + log_failure_msg=log_failure_msg + log_progress_msg=log_progress_msg +elif type success > /dev/null ; then + # Fedora/RedHat functions + log_begin_msg=success + log_failure_msg=failure + log_progress_msg=echo -n +elif type einfo > /dev/null ; then + # Gentoo functions + log_begin_msg=einfo + log_failure_msg=eerror + log_progress_msg=echo -n +else + log_begin_msg=echo -n + log_failure_msg=echo + log_progress_msg=echo -n +fi + +# The log_end_msg is a little different - it's both an +# echo of a failed message and a return of a code number. +# So if it doesn't exist, we define a very simple one +# that would do the work. +if ! type log_end_msg > /dev/null ; then + log_end_msg() { + ret=$1 + if [ "$ret" -ge 1 ]; then + echo " failed!" + else + echo " success" + fi + return $ret + } +fi +log_end_msg=log_end_msg + +# Paths to what we need +ZFS="@sbindir@/zfs" +ZED="@sbindir@/zed" +ZPOOL="@sbindir@/zpool" +ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" + +# Sensible defaults +ZFS_MOUNT='yes' +ZFS_UNMOUNT='yes' +LOCKDIR=/run/lock + +# Source zfs configuration, overriding the defaults +if [ -f /etc/@initconfdir@/zfs ]; then + . /etc/@initconfdir@/zfs +fi + +[ ! -d "$LOCKDIR" ] && mkdir $LOCKDIR + +# ---------------------------------------------------- + +zfs_installed() { + $log_begin_msg "Checking if zfs userspace tools present" + if [ ! 
-x $ZPOOL ]; then + $log_failure_msg "$ZPOOL binary not found." + $log_end_msg 1 + fi + if [ ! -x $ZFS ]; then + $log_failure_msg "$ZFS binary not found." + $log_end_msg 1 + fi + $log_end_msg 0 +} + +# Trigger udev and wait for it to settle. +udev_trigger() { + if [ -x /sbin/udevadm ]; then + /sbin/udevadm trigger --action=change --subsystem-match=block + /sbin/udevadm settle + elif [ -x /sbin/udevsettle ]; then + /sbin/udevtrigger + /sbin/udevsettle + fi +} + +# From scripts/common.sh +wait_udev() { + local DEVICE=$1 + local DELAY=$2 + local COUNT=0 + + udev_trigger + while [ ! -e ${DEVICE} ]; do + if [ ${COUNT} -gt ${DELAY} ]; then + return 1 + fi + + let COUNT=${COUNT}+1 + sleep 1 + done + + return 0 +} + +# Do a lot of checks to make sure it's 'safe' to continue with import/mount etc +checksystem() +{ + if [ -z "$init" ]; then + # Not interactive and we don't want to import pool or mount filesystems. + # Won't of course work if you're booting from ZFS... + grep -qiE '(^|[^\\](\\\\)* )zfs=(off|no|0)( |$)' /proc/cmdline && exit 3 + fi + + [ -f "$LOCKDIR/$servicename" ] && return 3 + + # Check if ZFS is installed. If not, comply to FC standards and bail + zfs_installed || { + $log_failure_msg "not installed" + return 5 + } + + # Delay until all required block devices are present. + if [ -x /sbin/udevadm ]; then + /sbin/udevadm settle + elif [ -x /sbin/udevsettle ]; then + /sbin/udevsettle + fi + + # Load the zfs module stack + if ! grep -q zfs /proc/modules ; then + $log_begin_msg "Loading kernel ZFS infrastructure: " + modprobe zfs || { + $log_failure_msg "Could not load zfs module" + $log_end_msg 1 + return 5 + } + $log_end_msg 0 + fi + + # Just make sure that /dev/zfs is created. + wait_udev /dev/zfs 15 + + # fix mtab to include already-mounted fs filesystems, in case there are any + # we ONLY do this if mtab does not point to /proc/mounts + # which is the case in some systems (systemd may bring that soon) + if ! readlink /etc/mtab | grep -q /proc ; then + if grep -qE "(^/dev/zd|^/dev/zvol| zfs )" /proc/mounts ; then + $log_begin_msg "Registering already-mounted ZFS filesystems and volumes: " + reregister_mounts || { + $log_end_msg 1 + return 150 + } + fi + fi + + # Ensure / exists in /etc/mtab, if not update mtab accordingly. + # This should be handled by rc.sysinit but lets be paranoid. + awk '$2 == "/" { exit 1 }' /etc/mtab + RETVAL=$? + if [ "$RETVAL" -eq 0 ]; then + /bin/mount -f / + fi + + if ! [ `uname -m` == "x86_64" ]; then + echo "Warning: You're not running 64bit. Currently native zfs in"; + echo " linux is only supported and tested on 64bit."; + # should we break here? People doing this should know what they + # do, thus i'm not breaking here. 
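        # A possible refinement (sketch only, not in the original script):
        # the 32bit warning above could be turned into a hard failure behind
        # a hypothetical ZFS_REQUIRE_64BIT option in the init configuration
        # file, e.g.:
        #
        #   if [ "$ZFS_REQUIRE_64BIT" = 'yes' ]; then
        #       $log_failure_msg "ZFS on Linux is only supported on 64bit"
        #       return 5
        #   fi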
+ fi +} + +reregister_mounts() { + cat /etc/mtab | while read -r fs mntpnt fstype opts rest ; do + fs=`printf '%b\n' "$fs"` + mntpnt=`printf '%b\n' "$mntpnt"` + if [ "$fstype" == "zfs" ] ; then + if [ "$mntpnt" == "/" ] ; then + mount -f -o zfsutil -t zfs --move / /removethismountpointhoweverpossible + umount --fake /removethismountpointhoweverpossible + else + umount --fake "$mntpnt" + fi + elif echo "$fs" | grep -qE "^/dev/(zd|zvol)" ; then + if [ "$mntpnt" == "/" ] ; then + mount -f -t "$fstype" --move / /removethismountpointhoweverpossible + umount --fake /removethismountpointhoweverpossible + else + umount --fake "$mntpnt" + fi + fi + done + cat /proc/mounts | while read -r fs mntpnt fstype opts rest ; do + fs=`printf '%b\n' "$fs"` + mntpnt=`printf '%b\n' "$mntpnt"` + if [ "$fstype" == "zfs" ] ; then + mount -f -t zfs -o zfsutil "$fs" "$mntpnt" + elif echo "$fs" | grep -q "^/dev/zd" ; then + mount -f -t "$fstype" -o "$opts" "$fs" "$mntpnt" + fi + done +} + +# i need a bash guru to simplify this, since this is copy and paste, but donno how +# to correctly dereference variable names in bash, or how to do this right + +declare -A MTAB +declare -A FSTAB + +# first parameter is a regular expression that filters mtab +read_mtab() { + for fs in "${!MTAB[@]}" ; do unset MTAB["$fs"] ; done + while read -r fs mntpnt fstype opts blah ; do + fs=`printf '%b\n' "$fs"` + MTAB["$fs"]=$mntpnt + done < <(grep -E "$1" /etc/mtab) +} + +in_mtab() { + [ "${MTAB[$1]}" != "" ] + return $? +} + +# first parameter is a regular expression that filters fstab +read_fstab() { + local i=0 + for fs in "${!FSTAB[@]}" ; do unset FSTAB["$fs"] ; done + while read -r fs mntpnt fstype opts blah ; do + fs=`printf '%b\n' "$fs"` + FSTAB["$i"]=$mntpnt + i=$((i + 1)) + done < <(grep -E "$1" /etc/fstab) +} + +in_fstab() { + [ "${FSTAB[$1]}" != "" ] + return $? +} diff --git a/etc/init.d/zfs-import.in b/etc/init.d/zfs-import.in new file mode 100755 index 000000000000..6e46223b59bd --- /dev/null +++ b/etc/init.d/zfs-import.in @@ -0,0 +1,183 @@ +#!/bin/bash +# +# zfs-mount This script will import/export zfs pools. +# +# chkconfig: 2345 01 99 +# description: This script will import/export zfs pools during system +# boot/shutdown. +# It is also responsible for all userspace zfs services. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-import zpool +# Required-Start: checkroot +# Required-Stop: $local_fs +# Default-Start: S +# X-Start-Before: mountall +# X-Stop-After: zfs-mount +# Default-Stop: 0 1 6 +# Short-Description: Import ZFS pools +# Description: Run the `zpool import` or `zpool export` commands. +### END INIT INFO + +# Source the common init script +. @sysconfdir@/zfs/common.init +servicename=zfs-import + +# ---------------------------------------------------- + +# Import all pools +do_import() +{ + POOL_IMPORTED= + already_imported=$(zpool list -H -oname) + available_pools=$(zpool import 2> /dev/null | grep pool: | sed 's@.*: @@') + + # Just in case - seen it happen + if [ -z "$available_pools" -a -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" == 'yes' ]; then + available_pools=$(zpool import -d /dev/disk/by-id 2> /dev/null | \ + grep pool: | sed 's@.*: @@') + fi + + # TODO: + # If there's only ever been one pool on the system and it have been recreated + # WITHOUT using 'labelclear' first, it might show up in 'available_pools'. But + # _most likely_ can't be imported any more. + # POOL_IMPORTED will therefor be unset! + # => Unknown what to do about this... Somehow use the 'state: UNAVAIL' for this? 
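        # One possible answer to the TODO above (a sketch only, assuming the
        # plain-text layout of 'zpool import' output): inside the loop below,
        # skip pools whose reported state is UNAVAIL before trying to import
        # them, e.g.:
        #
        #   state=$("$ZPOOL" import 2>/dev/null | \
        #       awk -v p="$pool" '$1 == "pool:"  { cur = $2 }
        #                         $1 == "state:" && cur == p { print $2 }')
        #   [ "$state" = "UNAVAIL" ] && continue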
+ + for pool in $available_pools; do + # We have pools that haven't been imported - import them + if [ -n "$ZFS_POOL_EXCEPTIONS" ]; then + for exception in $ZFS_POOL_EXCEPTIONS; do + # ... unless we've specified them as NOT + # to import. + [ "$pool" == "$exception" ] && continue 2 + done + fi + + if [ "$USE_DISK_BY_ID" == 'yes' ]; then + # Really the default/prefered way. + $log_begin_msg "Importing ZFS pool $pool using:" + for dir in /dev/disk/by-vdev /dev/disk/by-* /dev; do + $log_progress_msg "$dir " + "$ZPOOL" import -d $dir -N $pool 2>/dev/null + RET=$? + if [ "$RET" -eq 0 ]; then + POOL_IMPORTED=1 + $log_end_msg $RET + continue 2 + fi + done + elif [ -f "$ZPOOL_CACHE" ] ; then + # Fallback - use a cache file + $log_begin_msg "Importing ZFS pools (using cache file)" + "$ZPOOL" import -c "$ZPOOL_CACHE" -N $pool 2>/dev/null + RET=$? + if [ "$RET" -eq 0 ]; then + POOL_IMPORTED=1 + $log_end_msg $RET + continue 2 + fi + else + # Last ditch attempt, try /dev! + $log_begin_msg "Importing ZFS pools" + "$ZPOOL" import -N $pool 2>/dev/null + RET=$? + if [ "$RET" -eq 0 ]; then + POOL_IMPORTED=1 + $log_end_msg $RET + continue 2 + fi + + $log_end_msg $ret + fi + done + + if [ -n "$already_imported" -a -z "$available_pools" ]; then + # All pools imported + POOL_IMPORTED=1 + $log_end_msg $RET + fi +} + +# Export all pools +do_export() +{ + $log_begin_msg "Exporting ZFS pools" + "$ZPOOL" list -H -o name | \ + while read pool; do + "$ZPOOL" export $pool + done + rmmod zfs + $log_end_msg 0 # return code not that important. +} + +# Output the status and list of pools +do_status() +{ + [ ! -f "$LOCKDIR/$servicename" ] && return 3 + + if ! grep -q zfs /proc/modules ; then + # module not loaded, no point in running zpool. + exit 0 + fi + + "$ZPOOL" status && echo "" && "$ZPOOL" list +} + +do_start() +{ + checksystem && { + do_import + + [ -n "$POOL_IMPORTED" ] && \ + touch "$LOCKDIR/$servicename" + } +} + +do_stop() +{ + # Check if ZFS is installed. If not, comply to FC standards and bail + zfs_installed || { + $log_failure_msg "not installed" + $log_end_msg 5 + } + + if ! grep -q zfs /proc/modules ; then + # module not loaded, no need to umount anything + exit 0 + fi + + set -- `mount | grep ' on / '` + if [ "$5" != "zfs" ]; then + # Only export the pool if we're not running a zfs root. + # Won't work, because the filesystem (and therefor pool) + # is busy (being mounted :). + do_export + fi + + rm -f "$LOCKDIR/$servicename" +} + +# ---------------------------------------------------- + +case "$1" in + (start) + do_start + ;; + (stop) + do_stop + ;; + (status) + do_status + ;; + (force-reload|condrestart|reload|restart) + # no-op + ;; + (*) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop|status}" + exit 3 + ;; +esac diff --git a/etc/init.d/zfs-mount.in b/etc/init.d/zfs-mount.in new file mode 100755 index 000000000000..1450c6d69e62 --- /dev/null +++ b/etc/init.d/zfs-mount.in @@ -0,0 +1,157 @@ +#!/bin/bash +# +# zfs-mount This script will mount/umount the zfs filesystems. +# +# chkconfig: 2345 01 99 +# description: This script will mount/umount the zfs filesystems during +# system boot/shutdown. Configuration of which filesystems +# should be mounted is handled by the zfs 'mountpoint' and +# 'canmount' properties. See the zfs(8) man page for details. +# It is also responsible for all userspace zfs services. 
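# Configuration example for the import behaviour implemented above
# (illustrative values only; the pool name "scratch" is hypothetical).
# Placed in the distribution's init configuration file, these lines make
# every /dev/disk/by-* directory be tried in turn and keep the pool
# "scratch" from ever being imported at boot:
#
#   USE_DISK_BY_ID='yes'
#   ZFS_POOL_EXCEPTIONS="scratch"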
+# probe: true +# +### BEGIN INIT INFO +# Provides: zvol zfs zfs-mount +# Required-Start: $local_fs +# Required-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Mount ZFS filesystems and volumes +# Description: Run the `zfs mount -a` or `zfs umount -a` commands. +### END INIT INFO + +# Source the common init script +. @sysconfdir@/zfs/common.init +servicename=zfs-mount + +# ---------------------------------------------------- + +# Mount all datasets/filesystems +do_mount() +{ + if [ -f "$LOCKDIR/zfs-import" ]; then + [ "$VERBOSE_MOUNT" == 'yes' ] && verbose=v + [ "$DO_OVERLAY_MOUNTS" == 'yes' ] && overlay=O + + $log_begin_msg "Mounting ZFS filesystems not yet mounted" + $ZFS mount -a$verbose$overlay $MOUNT_EXTRA_OPTIONS + RET=$? + if [ $RET != 0 ] ; then + $log_end_msg $RET + exit $RET + fi + $log_end_msg 0 + + FS_MOUNTED=1 + + $log_begin_msg "Mounting volumes registered in fstab: " + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + for volume in "${!FSTAB[@]}" ; do + in_mtab "$volume" && continue + + $log_progress_msg "$volume " + mount "$volume" + done + + read_mtab "zfs" + read_fstab "zfs" + for fs in "${!FSTAB[@]}" ; do + in_mtab "${FSTAB[$fs]}" && continue + + $log_progress_msg "${FSTAB[$fs]} " + mount "${FSTAB[$fs]}" + done + + $log_end_msg 0 + fi +} + +# Unmount all filesystems +do_unmount() +{ + $log_begin_msg "Unmounting ZFS filesystems" + $ZFS unmount -a + RET=$? + + # Ignore a non-zero `zfs` result so that a busy ZFS instance + # does not hang the system during shutdown. + if [ $RET != 0 ] ; then + $log_end_msg $RET + fi + + $log_end_msg 0 + + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + + $log_begin_msg "Unmounting volumes registered in fstab: " + for volume in "${!FSTAB[@]}" ; do + dev=/dev/$(ls -l "$volume" | sed 's@.*/@@') + if ! in_mtab "$dev" ; then continue ; fi + + $log_progress_msg "$volume " + umount "$volume" + done + + $log_end_msg 0 +} + +start() +{ + checksystem && { + case "$ZFS_MOUNT" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 3 + ;; + esac + + do_mount + + [ -f "$LOCKDIR/zfs-import" -a -n "$FS_MOUNTED" ] && \ + touch "$LOCKDIR/$servicename" + } +} + +stop() +{ + case "$ZFS_UNMOUNT" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + # Check if ZFS is installed. If not, comply to FC standards and bail + zfs_installed || { + $log_failure_msg "not installed" + $log_end_msg 5 + } + + if ! grep -q zfs /proc/modules ; then + # module not loaded, no need to umount anything + exit 0 + fi + + do_unmount + + rm -f "$LOCKDIR/$servicename" +} + +# ---------------------------------------------------- + +case "$1" in + (start) + start + ;; + (stop) + stop + ;; + (force-reload|condrestart|reload|restart) + # no-op + ;; + (*) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop}" + exit 3 + ;; +esac diff --git a/etc/init.d/zfs-share.in b/etc/init.d/zfs-share.in new file mode 100755 index 000000000000..521834bc31b7 --- /dev/null +++ b/etc/init.d/zfs-share.in @@ -0,0 +1,115 @@ +#!/bin/bash +# +# zfs-share This script will network share zfs filesystems and volumes. +# +# chkconfig: 2345 30 99 +# description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. 
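# Example /etc/fstab entries matched by the read_fstab/read_mtab patterns
# used in do_mount() above (pool, volume and dataset names are
# hypothetical):
#
#   /dev/zvol/rpool/vol0   /srv/images   ext4   defaults,noatime  0  2
#   rpool/legacy           /srv/legacy   zfs    defaults          0  0
#
# The first entry matches "^/dev/(zd|zvol)" and is mounted as an ordinary
# block device; the second is a dataset with a legacy mountpoint and is
# matched by the "zfs" pattern.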
+# probe: true +# +### BEGIN INIT INFO +# Provides: shareiscsi sharenfs sharesmb zfs-share +# Required-Start: $local_fs $network $remote_fs zvol +# Required-Stop: $local_fs $network $remote_fs zvol +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# X-Stop-Before: iscsi iscsitarget istgt scst @NFS_SRV@ samba samba4 +# Short-Description: Network share ZFS datasets and volumes. +# Description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. +### END INIT INFO + +# Source the common init script +. @sysconfdir@/zfs/common.init +servicename=zfs-share + +# ---------------------------------------------------- + +do_share() +{ + $log_begin_msg "Sharing ZFS filesystems" + $ZFS share -a + RET=$? + + if [ $RET != 0 ] ; then + $log_failure_msg "Failed to share filesystems" + $log_end_msg $RET + fi + + $log_end_msg 0 +} + +do_unshare() +{ + $log_begin_msg "Unsharing ZFS filesystems" + $ZFS unshare -a + RET=$? + + # Ignore a non-zero `zfs` result so that a busy ZFS instance + # does not hang the system during shutdown. + if [ $RET != 0 ] ; then + $log_end_msg $RET + fi + + $log_end_msg 0 +} + +start() +{ + checksystem && { + case "$ZFS_SHARE" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + do_share + + touch "$LOCKDIR/$servicename" + } +} + +stop() +{ + case "$ZFS_UNSHARE" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + # Do a more simplified version of checksystem() + + [ ! -f "$LOCKDIR/$servicename" ] && return 3 + + # Check if ZFS is installed. If not, comply to FC standards and bail + zfs_installed || { + $log_failure_msg "not installed" + $log_end_msg 5 + } + + if ! grep -q zfs /proc/modules ; then + # module not loaded, no need to unshare anything + exit 0 + fi + + do_unshare + + rm -f "$LOCKDIR/$servicename" +} + +case "$1" in + (start) + start + ;; + (stop) + stop + ;; + (force-reload|reload|restart|status) + # no-op + ;; + (*) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop}" + exit 3 + ;; +esac diff --git a/etc/init.d/zfs-zed.in b/etc/init.d/zfs-zed.in new file mode 100755 index 000000000000..5c19137f0c14 --- /dev/null +++ b/etc/init.d/zfs-zed.in @@ -0,0 +1,136 @@ +#!/bin/bash + +### BEGIN INIT INFO +# Provides: zed zeventd zfs-zed +# Required-Start: $local_fs zpool +# Required-Stop: $local_fs zpool +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: ZFS Event Daemon +# Description: zed monitors ZFS events. When a zevent is posted, zed +# will run any scripts that have been enabled for the +# corresponding zevent class. +### END INIT INFO + +# Author: Turbo Fredriksson + +# Source the common init script +. @sysconfdir@/zfs/common.init +servicename=zfs-zed + +# Exit if the package is not installed +[ -x "$ZED" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$servicename ] && . /etc/default/$servicename + +# Function that starts the daemon/service +do_start() +{ + # Return + # 0 if daemon has been started + # 1 if daemon was already running + # 2 if daemon could not be started + + start-stop-daemon --start --quiet --pidfile /var/run/$servicename.pid --exec $ZED \ + --test > /dev/null || return 1 + + start-stop-daemon --start --quiet --pidfile /var/run/$servicename.pid --exec $ZED \ + -- $DAEMON_ARGS || return 2 + + # Add code here, if necessary, that waits for the process to be ready + # to handle requests from services started subsequently which depend + # on this one. As a last resort, sleep for some time. 
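        # A possible way to do that here (sketch only; assumes zed was asked,
        # e.g. via DAEMON_ARGS, to write the PID file used above):
        #
        #   i=0
        #   while [ $i -lt 5 ] && [ ! -s /var/run/$servicename.pid ]; do
        #       sleep 1
        #       i=$((i + 1))
        #   done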
+} + +# Function that stops the daemon/service +do_stop() +{ + # Return + # 0 if daemon has been stopped + # 1 if daemon was already stopped + # 2 if daemon could not be stopped + # other if a failure occurred + + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \ + --pidfile /var/run/$servicename.pid --name $servicename + RETVAL="$?" + [ "$RETVAL" = 2 ] && return 2 + + # Wait for children to finish too if this is a daemon that forks + # and if the daemon is only ever run from this initscript. + # If the above conditions are not satisfied then add some other code + # that waits for the process to drop all resources that could be + # needed by services started subsequently. A last resort is to + # sleep for some time. + start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $ZED + [ "$?" = 2 ] && return 2 + + # Many daemons don't delete their pidfiles when they exit. + rm -f /var/run/$servicename.pid + return "$RETVAL" +} + +# +# Function that sends a SIGHUP to the daemon/service +# +do_reload() { + # If the daemon can reload its configuration without + # restarting (for example, when it is sent a SIGHUP), + # then implement that here. + + start-stop-daemon --stop --signal 1 --quiet \ + --pidfile /var/run/$servicename.pid --name $servicename + return 0 +} + +case "$1" in + start) + [ "$VERBOSE" != no ] && $log_begin_msg "Starting ZFS Event Daemon" "$servicename" + do_start + case "$?" in + 0|1) [ "$VERBOSE" != no ] && $log_end_msg 0 ;; + 2) [ "$VERBOSE" != no ] && $log_end_msg 1 ;; + esac + ;; + stop) + [ "$VERBOSE" != no ] && $log_begin_msg "Stopping ZFS Event Daemon" "$servicename" + do_stop + case "$?" in + 0|1) [ "$VERBOSE" != no ] && $log_end_msg 0 ;; + 2) [ "$VERBOSE" != no ] && $log_end_msg 1 ;; + esac + ;; + status) + status_of_proc "$ZED" "$servicename" && exit 0 || exit $? + ;; + reload|force-reload) + $log_begin_msg "Reloading ZFS Event Daemon" "$servicename" + do_reload + $log_end_msg $? + ;; + restart) + $log_begin_msg "Restarting ZFS Event Daemon" "$servicename" + do_stop + case "$?" in + 0|1) + do_start + case "$?" in + 0) $log_end_msg 0 ;; + 1) $log_end_msg 1 ;; # Old process is still running + *) $log_end_msg 1 ;; # Failed to start + esac + ;; + *) + # Failed to stop + $log_end_msg 1 + ;; + esac + ;; + *) + echo "Usage: $SCRIPTNAME {start|stop|status|restart|reload|force-reload}" >&2 + exit 3 + ;; +esac + +exit 0 diff --git a/etc/init.d/zfs.fedora.in b/etc/init.d/zfs.fedora.in deleted file mode 100644 index 86f430dce925..000000000000 --- a/etc/init.d/zfs.fedora.in +++ /dev/null @@ -1,243 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: -# Required-Stop: -# Should-Start: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 1 -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. 
-### END INIT INFO - -export PATH=/usr/local/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -# Source function library & LSB routines -. /etc/rc.d/init.d/functions - -# script variables -RETVAL=0 -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -servicename=zfs -LOCKFILE=/var/lock/subsys/$servicename - -# functions -zfs_installed() { - modinfo zfs > /dev/null 2>&1 || return 5 - $ZPOOL > /dev/null 2>&1 - [ $? == 127 ] && return 5 - $ZFS > /dev/null 2>&1 - [ $? == 127 ] && return 5 - return 0 -} - -reregister_mounts() { - cat /etc/mtab | while read -r fs mntpnt fstype opts rest ; do - fs=`printf '%b\n' "$fs"` - mntpnt=`printf '%b\n' "$mntpnt"` - if [ "$fstype" == "zfs" ] ; then - if [ "$mntpnt" == "/" ] ; then - mount -f -o zfsutil -t zfs --move / /removethismountpointhoweverpossible - umount --fake /removethismountpointhoweverpossible - else - umount --fake "$mntpnt" - fi - elif echo "$fs" | grep -q "^/dev/zd" ; then - if [ "$mntpnt" == "/" ] ; then - mount -f -t "$fstype" --move / /removethismountpointhoweverpossible - umount --fake /removethismountpointhoweverpossible - else - umount --fake "$mntpnt" - fi - fi - done - cat /proc/mounts | while read -r fs mntpnt fstype opts rest ; do - fs=`printf '%b\n' "$fs"` - mntpnt=`printf '%b\n' "$mntpnt"` - if [ "$fstype" == "zfs" ] ; then - mount -f -t zfs -o zfsutil "$fs" "$mntpnt" - elif echo "$fs" | grep -q "^/dev/zd" ; then - mount -f -t "$fstype" -o "$opts" "$fs" "$mntpnt" - fi - done -} - -# i need a bash guru to simplify this, since this is copy and paste, but donno how -# to correctly dereference variable names in bash, or how to do this right - -declare -A MTAB -declare -A FSTAB - -# first parameter is a regular expression that filters mtab -read_mtab() { - for fs in "${!MTAB[@]}" ; do unset MTAB["$fs"] ; done - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - MTAB["$fs"]=$mntpnt - done < <(grep "$1" /etc/mtab) -} - -in_mtab() { - [ "${MTAB[$1]}" != "" ] - return $? -} - -# first parameter is a regular expression that filters fstab -read_fstab() { - for fs in "${!FSTAB[@]}" ; do unset FSTAB["$fs"] ; done - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - FSTAB["$fs"]=$mntpnt - done < <(grep "$1" /etc/fstab) -} - -in_fstab() { - [ "${FSTAB[$1]}" != "" ] - return $? -} - -start() -{ - if [ -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # Delay until all required block devices are present. - udevadm settle - - # load kernel module infrastructure - if ! grep -q zfs /proc/modules ; then - action $"Loading kernel ZFS infrastructure: " modprobe zfs || return 5 - fi - - # fix mtab to include already-mounted fs filesystems, in case there are any - # we ONLY do this if mtab does not point to /proc/mounts - # which is the case in some systems (systemd may bring that soon) - if ! 
readlink /etc/mtab | grep -q /proc ; then - if grep -qE "(^/dev/zd| zfs )" /proc/mounts ; then - action $"Registering already-mounted ZFS filesystems and volumes: " reregister_mounts || return 150 - fi - fi - - if [ -f $ZPOOL_CACHE ] ; then - - echo -n $"Importing ZFS pools not yet imported: " - $ZPOOL import -c $ZPOOL_CACHE -aN || true # stupid zpool will fail if all pools are already imported - RETVAL=$? - if [ $RETVAL -ne 0 ]; then - failure "Importing ZFS pools not yet imported: " - return 151 - fi - success "Importing ZFS pools not yet imported: " - - fi - - action $"Mounting ZFS filesystems not yet mounted: " $ZFS mount -a || return 152 - - action $"Exporting ZFS filesystems: " $ZFS share -a || return 153 - - read_mtab "^/dev/zd" - read_fstab "^/dev/zd" - - template=$"Mounting volume %s registered in fstab: " - for volume in "${!FSTAB[@]}" ; do - if in_mtab "$volume" ; then continue ; fi - string=`printf "$template" "$volume"` - action "$string" mount "$volume" - done - - touch "$LOCKFILE" -} - -stop() -{ - if [ ! -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # the poweroff of the system takes care of this - # but it never unmounts the root filesystem itself - # shit - - action $"Syncing ZFS filesystems: " sync - # about the only thing we can do, and then we - # hope that the umount process will succeed - # unfortunately the umount process does not dismount - # the root file system, there ought to be some way - # we can tell zfs to just flush anything in memory - # when a request to remount,ro comes in - - #echo -n $"Unmounting ZFS filesystems: " - #$ZFS umount -a - #RETVAL=$? - #if [ $RETVAL -ne 0 ]; then - # failure - - # return 8 - #fi - #success - - rm -f "$LOCKFILE" -} - -# See how we are called -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - lsmod | grep -q zfs || RETVAL=3 - $ZPOOL status && echo && $ZFS list || { - [ -f "$LOCKFILE" ] && RETVAL=2 || RETVAL=4 - } - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ] ; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - RETVAL=3 - ;; -esac - -exit $RETVAL diff --git a/etc/init.d/zfs.gentoo.in b/etc/init.d/zfs.gentoo.in deleted file mode 100644 index 07fce01ba04b..000000000000 --- a/etc/init.d/zfs.gentoo.in +++ /dev/null @@ -1,124 +0,0 @@ -#!/sbin/runscript -# Copyright 1999-2011 Gentoo Foundation -# Released under the 2-clause BSD license. -# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/files/zfs,v 0.9 2011/04/30 10:13:43 devsk Exp $ - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -depend() -{ - # Try to allow people to mix and match fstab with ZFS in a way that makes sense. - if [ "$(mountinfo -s /)" = 'zfs' ] - then - before localmount - else - after localmount - fi - - # bootmisc will log to /var which may be a different zfs than root. - before bootmisc logger - use mtab - keyword -lxc -openvz -prefix -vserver -} - -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -ZFS_MODULE=zfs - -checksystem() { - if [ ! -c /dev/zfs ]; then - einfo "Checking if ZFS modules present" - if ! modinfo zfs > /dev/null 2>&1 ; then - eerror "$ZFS_MODULE not found. Is the ZFS package installed?" 
- return 1 - fi - fi - einfo "Checking if zfs userspace tools present" - if [ ! -x $ZPOOL ]; then - eerror "$ZPOOL binary not found." - return 1 - fi - if [ ! -x $ZFS ]; then - eerror "$ZFS binary not found." - return 1 - fi - return 0 -} - -start() { - ebegin "Starting ZFS" - checksystem || return 1 - - # Delay until all required block devices are present. - udevadm settle - - if [ ! -c /dev/zfs ]; then - modprobe $ZFS_MODULE - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to load the $ZFS_MODULE module, check 'dmesg|tail'." - eend $rv - return $rv - fi - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ -f $ZPOOL_CACHE ]; then - einfo "Importing ZFS pools" - # as per fedora script, import can fail if all pools are already imported - # The check for $rv makes no sense...but someday, it will work right. - $ZPOOL import -c $ZPOOL_CACHE -aN 2>/dev/null || true - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to import not-yet imported pools." - eend $rv - return $rv - fi - fi - - einfo "Mounting ZFS filesystems" - $ZFS mount -a - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to mount ZFS filesystems." - eend $rv - return $rv - fi - - einfo "Exporting ZFS filesystems" - $ZFS share -a - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to export ZFS filesystems." - eend $rv - return $rv - fi - - eend 0 - return 0 -} - -stop() -{ - ebegin "Unmounting ZFS filesystems" - $ZFS umount -a - rv=$? - if [ $rv -ne 0 ]; then - einfo "Some ZFS filesystems not unmounted" - fi - - # Don't fail if we couldn't umount everything. /usr might be in use. - eend 0 - return 0 -} - -status() -{ - # show pool status and list - $ZPOOL status && echo && $ZPOOL list -} diff --git a/etc/init.d/zfs.in b/etc/init.d/zfs.in new file mode 100644 index 000000000000..3cff6d642c57 --- /dev/null +++ b/etc/init.d/zfs.in @@ -0,0 +1,74 @@ +# ZoL userland configuration. + +# Run `zfs mount -a` during system start? +ZFS_MOUNT='yes' + +# Run `zfs unmount -a` during system stop? +ZFS_UNMOUNT='yes' + +# Run `zfs share -a` during system start? +# nb: The shareiscsi, sharenfs, and sharesmb dataset properties. +ZFS_SHARE='yes' + +# Run `zfs unshare -a` during system stop? +ZFS_UNSHARE='yes' + +# Sould we use '-d /dev/disk/by-*' when importing pool? +# Variable is somewhat missleading. Previously the code tried _only_ +# '/dev/disk/by-id', but will now try any '/dev/disk/by-*' directory. +USE_DISK_BY_ID='yes' + +# Should the datasets be mounted verbosly? +# A mount counter will be used when mounting if set to 'yes'. +VERBOSE_MOUNT='no' + +# Should we allow overlay mounts? +# This is standard in Linux, but not ZFS which comes from Solaris where this +# is not allowed). +DO_OVERLAY_MOUNTS='no' + +# Any additional option to the 'zfs mount' command line? +# Include '-o' for each option wanted. +MOUNT_EXTRA_OPTIONS="" + +# Build kernel modules with the --enable-debug switch? +ZFS_DKMS_ENABLE_DEBUG='no' + +# Build kernel modules with the --enable-debug-dmu-tx switch? +ZFS_DKMS_ENABLE_DEBUG_DMU_TX='no' + +# Keep debugging symbols in kernel modules? +ZFS_DKMS_DISABLE_STRIP='no' + +# Wait for this many seconds in the initrd pre_mountroot? +# This delays startup and should be '0' on most systems. +ZFS_INITRD_PRE_MOUNTROOT_SLEEP='0' + +# Wait for this many seconds in the initrd mountroot? +# This delays startup and should be '0' on most systems. 
This might help on +# systems which have their ZFS root on a USB disk that takes just a little +# longer to be available +ZFS_INITRD_POST_MODPROBE_SLEEP='0' + +# List of additional datasets to mount after the root dataset is mounted? +# +# The init script will use the mountpoint specified in the 'mountpoint' +# property value in the dataset to determine where it should be mounted. +# +# This is a space separated list, and will be mounted in the order specified, +# so if one filesystem depends on a previous mountpoint, make sure to put +# them in the right order. +# +# It is not necessary to add filesystems below the root fs here. It is +# taken care of by the initrd script automatically. These are only for +# additional filesystems needed. Such as /opt, /usr/local which is not +# located under the root fs. +# Example: If root FS is 'rpool/ROOT/rootfs', this would make sence. +#ZFS_INITRD_ADDITIONAL_DATASETS="rpool/ROOT/usr rpool/ROOT/var" + +# List of pools that should NOT be imported at boot? +# This is a space separated list. +#ZFS_POOL_EXCEPTIONS="test2" + +# Location of the lockfile. +LOCKDIR=/run/lock diff --git a/etc/init.d/zfs.lsb.in b/etc/init.d/zfs.lsb.in deleted file mode 100644 index 05e815ede8fe..000000000000 --- a/etc/init.d/zfs.lsb.in +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: $local_fs -# Required-Stop: $local_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Should-Stop: -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -# Source function library. -. /lib/lsb/init-functions - -LOCKFILE=/var/lock/zfs -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -USE_DISK_BY_ID=0 -VERBOSE_MOUNT=0 -DO_OVERLAY_MOUNTS=0 -MOUNT_EXTRA_OPTIONS="" - -# Source zfs configuration. -[ -r '/etc/default/zfs' ] && . /etc/default/zfs - -[ -x "$ZPOOL" ] || exit 1 -[ -x "$ZFS" ] || exit 2 - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -start() -{ - [ -f "$LOCKFILE" ] && return 3 - - # Delay until all required block devices are present. - udevadm settle - - # Load the zfs module stack - /sbin/modprobe zfs - - # Ensure / exists in /etc/mtab, if not update mtab accordingly. - # This should be handled by rc.sysinit but lets be paranoid. - awk '$2 == "/" { exit 1 }' /etc/mtab - RETVAL=$? - if [ "$RETVAL" -eq 0 ]; then - /bin/mount -f / - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ "$USE_DISK_BY_ID" -eq 1 ]; then - log_begin_msg "Importing ZFS pools" - "$ZPOOL" import -d /dev/disk/by-id -aN 2>/dev/null - ret=$? - log_end_msg $ret - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - elif [ -f "$ZPOOL_CACHE" ] ; then - log_begin_msg "Importing ZFS pools" - "$ZPOOL" import -c "$ZPOOL_CACHE" -aN 2>/dev/null - ret=$? 
- log_end_msg $ret - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - fi - - if [ -n "$POOL_IMPORTED" ]; then - if [ "$VERBOSE_MOUNT" -eq 1 ]; then - verbose=v - fi - - if [ "$DO_OVERLAY_MOUNTS" -eq 1 ]; then - overlay=O - fi - - log_begin_msg "Mounting ZFS filesystems" - "$ZFS" mount -a$verbose$overlay$MOUNT_EXTRA_OPTIONS - log_end_msg $? - - log_begin_msg "Exporting ZFS filesystems" - "$ZFS" share -a - log_end_msg $? - fi - - touch "$LOCKFILE" -} - -stop() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - log_begin_msg "Unsharing ZFS filesystems" - "$ZFS" unshare -a - log_end_msg $? - - log_begin_msg "Unmounting ZFS filesystems" - "$ZFS" umount -a - log_end_msg $? - - rm -f "$LOCKFILE" -} - -status() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - "$ZPOOL" status && echo "" && "$ZPOOL" list -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - status - RETVAL=$? - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ]; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - ;; -esac - -exit $RETVAL diff --git a/etc/init.d/zfs.lunar.in b/etc/init.d/zfs.lunar.in deleted file mode 100644 index 7a51104c2647..000000000000 --- a/etc/init.d/zfs.lunar.in +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash -# -# zfs This shell script takes care of starting (mount) and -# stopping (umount) zfs shares. -# -# chkconfig: 35 60 40 -# description: ZFS is a filesystem developed by Sun, ZFS is a -# combined file system and logical volume manager -# designed by Sun Microsystems. Made available to Linux -# using SPL (Solaris Porting Layer) by zfsonlinux.org. -# probe: true - -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -case $1 in - start) echo "$1ing ZFS filesystems" - - # Delay until all required block devices are present. - udevadm settle - - if ! grep "zfs" /proc/modules > /dev/null; then - echo "ZFS kernel module not loaded yet; loading..."; - if ! modprobe zfs; then - echo "Failed to load ZFS kernel module..."; - exit 0; - fi - fi - - if ! [ `uname -m` == "x86_64" ]; then - echo "Warning: You're not running 64bit. Currently native zfs in"; - echo " linux is only supported and tested on 64bit."; - # should we break here? People doing this should know what they - # do, thus i'm not breaking here. - fi - - # mount the filesystems - while IFS= read -r -d $'\n' dev; do - mdev=$(echo "$dev" | awk '{ print $1; }') - echo -n "mounting $mdev..." - if $ZFS mount $mdev; then - echo -e "done"; - else - echo -e "failed"; - fi - done < <($ZFS list -H); - - # export the filesystems - echo -n "exporting ZFS filesystems..." 
- if $ZFS share -a; then - echo -e "done"; - else - echo -e "failed"; - fi - - - ;; - - stop) echo "$1ping ZFS filesystems" - - if grep "zfs" /proc/modules > /dev/null; then - # module is loaded, so we can try to umount filesystems - while IFS= read -r -d $'\n' dev; do - mdev=$(echo "$dev" | awk '{ print $1 }'); - echo -n "umounting $mdev..."; - if $ZFS umount $mdev; then - echo -e "done"; - else - echo -e "failed"; - fi - # the next line is, because i have to reverse the - # output, otherwise it wouldn't work as it should - done < <($ZFS list -H | tac); - - # and finally let's rmmod the module - rmmod zfs - - - else - # module not loaded, no need to umount anything - exit 0 - fi - - ;; - - restart) echo "$1ing ZFS filesystems" - $0 stop - $0 start - ;; - - *) echo "Usage: $0 {start|stop|restart}" - ;; - -esac diff --git a/etc/init.d/zfs.redhat.in b/etc/init.d/zfs.redhat.in deleted file mode 100644 index 30b9f0bf62b9..000000000000 --- a/etc/init.d/zfs.redhat.in +++ /dev/null @@ -1,150 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: $local_fs -# Required-Stop: $local_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Should-Stop: -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -# Source function library. -. /etc/rc.d/init.d/functions - -LOCKFILE=/var/lock/zfs -ZED="@sbindir@/zed" -ZED_PIDFILE="@runstatedir@/zed.pid" -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="/etc/zfs/zpool.cache" -USE_DISK_BY_ID=0 -VERBOSE_MOUNT=0 -DO_OVERLAY_MOUNTS=0 -MOUNT_EXTRA_OPTIONS="" - -# Source zfs configuration. -[ -r '/etc/sysconfig/zfs' ] && . /etc/sysconfig/zfs - -[ -x "$ZPOOL" ] || exit 1 -[ -x "$ZFS" ] || exit 2 - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -start() -{ - [ -f "$LOCKFILE" ] && return 3 - - # Delay until all required block devices are present. - udevadm settle - - # Load the zfs module stack - /sbin/modprobe zfs - - # Start the ZED for event handling - action $"Starting ZFS Event Daemon" daemon --pidfile="$ZED_PIDFILE" "$ZED" - - # Ensure / exists in /etc/mtab, if not update mtab accordingly. - # This should be handled by rc.sysinit but lets be paranoid. - awk '$2 == "/" { exit 1 }' /etc/mtab - RETVAL=$? - if [ "$RETVAL" -eq 0 ]; then - /bin/mount -f / - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ "$USE_DISK_BY_ID" -eq 1 ]; then - action $"Importing ZFS pools" \ - "$ZPOOL" import -d /dev/disk/by-id -aN 2>/dev/null - ret=$? - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - elif [ -f "$ZPOOL_CACHE" ] ; then - action $"Importing ZFS pools" \ - "$ZPOOL" import -c "$ZPOOL_CACHE" -aN 2>/dev/null - ret=$? 
- [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - fi - - if [ -n "$POOL_IMPORTED" ]; then - if [ "$VERBOSE_MOUNT" -eq 1 ]; then - verbose=v - fi - - if [ "$DO_OVERLAY_MOUNTS" -eq 1 ]; then - overlay=O - fi - - action $"Mounting ZFS filesystems" \ - "$ZFS" mount -a$verbose$overlay$MOUNT_EXTRA_OPTIONS - - action $"Sharing ZFS filesystems" \ - "$ZFS" share -a - fi - - touch "$LOCKFILE" -} - -stop() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - action $"Unsharing ZFS filesystems" "$ZFS" unshare -a - action $"Unmounting ZFS filesystems" "$ZFS" umount -a - action $"Shutting down ZFS Event Daemon" killproc -p "$ZED_PIDFILE" "$ZED" - - rm -f "$LOCKFILE" -} - -status() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - "$ZPOOL" status && echo "" && "$ZPOOL" list -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - status - RETVAL=$? - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ]; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - ;; -esac - -exit $RETVAL