diff --git a/etc/init.d/.gitignore b/etc/init.d/.gitignore index 73304bc2cd4a..9d97d75ec537 100644 --- a/etc/init.d/.gitignore +++ b/etc/init.d/.gitignore @@ -1 +1,4 @@ zfs +zfs-common +zfs-mount +zfs-share diff --git a/etc/init.d/Makefile.am b/etc/init.d/Makefile.am index 72093484c477..da5581e3d043 100644 --- a/etc/init.d/Makefile.am +++ b/etc/init.d/Makefile.am @@ -1,21 +1,21 @@ initdir = $(DEFAULT_INIT_DIR) -init_SCRIPTS = zfs - -EXTRA_DIST = \ - $(top_srcdir)/etc/init.d/zfs.fedora.in \ - $(top_srcdir)/etc/init.d/zfs.gentoo.in \ - $(top_srcdir)/etc/init.d/zfs.lsb.in \ - $(top_srcdir)/etc/init.d/zfs.lunar.in \ - $(top_srcdir)/etc/init.d/zfs.redhat.in +init_SCRIPTS = zfs-common zfs-mount zfs-share $(init_SCRIPTS): - -$(SED) -e 's,@bindir\@,$(bindir),g' \ + -(if [ -e /etc/debian_version ]; then \ + NFS_SRV=nfs-kernel-server; \ + else \ + NFS_SRV=nfs; \ + fi; \ + $(SED) -e 's,@bindir\@,$(bindir),g' \ -e 's,@sbindir\@,$(sbindir),g' \ -e 's,@udevdir\@,$(udevdir),g' \ -e 's,@udevruledir\@,$(udevruledir),g' \ -e 's,@sysconfdir\@,$(sysconfdir),g' \ -e 's,@initdir\@,$(initdir),g' \ - '$@.$(DEFAULT_INIT_SCRIPT).in' >'$@' + -e "s,@NFS_SRV\@,$$NFS_SRV,g" \ + '$@.in' >'$@'; \ + chmod +x '$@') distclean-local:: -$(RM) $(init_SCRIPTS) diff --git a/etc/init.d/default b/etc/init.d/default new file mode 100644 index 000000000000..9aca737aaabf --- /dev/null +++ b/etc/init.d/default @@ -0,0 +1,69 @@ +# ZoL userland configuration. + +# Run `zfs mount -a` during system start? +# This should be 'no' if zfs-mountall or a systemd generator +# is available. +ZFS_MOUNT='yes' + +# Run `zfs unmount -a` during system stop? +# This should be 'no' on most systems. +ZFS_UNMOUNT='yes' + +# Run `zfs share -a` during system start? +# nb: The shareiscsi, sharenfs, and sharesmb dataset properties. +ZFS_SHARE='yes' + +# Run `zfs unshare -a` during system stop? +ZFS_UNSHARE='yes' + +# Sould we use '-d /dev/disk/by-*' when importing pool. 
+# This is recommended, but the default 'no' uses the cache +# file. +# Variable is somewhat misleading. Previously the code +# tried _only_ '/dev/disk/by-id', but will now try any +# '/dev/disk/by-*' directory. +USE_DISK_BY_ID='yes' + +# Should the datasets be mounted verbosely (a mount counter +# will be used when mounting if set to 'yes'). +VERBOSE_MOUNT='no' + +# Should we allow overlay mounts (this is standard in Linux, +# but not ZFS which comes from Solaris where this is not allowed). +DO_OVERLAY_MOUNTS='no' + +# Any additional option to the 'zfs mount' command line. +# Include '-o' for each option wanted. +MOUNT_EXTRA_OPTIONS="" + +# Build kernel modules with the --enable-debug switch? +ZFS_DKMS_ENABLE_DEBUG='no' + +# Build kernel modules with the --enable-debug-dmu-tx switch? +ZFS_DKMS_ENABLE_DEBUG_DMU_TX='no' + +# Keep debugging symbols in kernel modules? +ZFS_DKMS_DISABLE_STRIP='no' + +# Wait for this many seconds in the initrd pre_mountroot? +# This delays startup and should be '0' on most systems. +ZFS_INITRD_PRE_MOUNTROOT_SLEEP='0' + +# Wait for this many seconds in the initrd mountroot? +# This delays startup and should be '0' on most systems. +# This might help on systems which have their ZFS root on +# a USB disk that takes just a little longer to be available +ZFS_INITRD_POST_MODPROBE_SLEEP='0' + +# List of additional datasets to mount after the root +# dataset is mounted. +# The init script will use the mountpoint specified in +# the 'mountpoint' property value in the dataset to +# determine where it should be mounted. +#ZFS_INITRD_ADDITIONAL_DATASETS="rpool/ROOT/usr_local" + +# List of pools that should NOT be imported at boot +#ZFS_POOL_EXCEPTIONS="test2" + +# Location of the lockfile. 
+LOCKDIR=/run/lock diff --git a/etc/init.d/zfs-common.in b/etc/init.d/zfs-common.in new file mode 100644 index 000000000000..a0306d629b2b --- /dev/null +++ b/etc/init.d/zfs-common.in @@ -0,0 +1,244 @@ +# This is a script with common functions etc used by zfs-mount and zfs-share. +# +# It is _NOT_ to be called independently + +PATH=/sbin:/bin:/usr/bin:/usr/sbin + +# Source function library +if [ -f /lib/lsb/init-functions ]; then + . /lib/lsb/init-functions +elif [ -f /etc/rc.d/init.d/functions ]; then + . /etc/rc.d/init.d/functions +elif [ -f /etc/init.d/functions.sh ]; then + . /etc/init.d/functions.sh +fi + +# Of course the functions we need is called differently +# on different distributions - it would be way to easy +# otherwise!! +if type log_failure_msg > /dev/null 2>&1 ; then + # LSB functions + log_begin_msg=log_begin_msg + log_failure_msg=log_failure_msg + log_progress_msg=log_progress_msg +elif type success > /dev/null 2>&1 ; then + # Fedora/RedHat functions + log_begin_msg=success + log_failure_msg=failure + log_progress_msg=echo -n +elif type einfo > /dev/null ; then + # Gentoo functions + log_begin_msg=einfo + log_failure_msg=eerror + log_progress_msg="echo -n" +else + log_begin_msg=echo -n + log_failure_msg=echo + log_progress_msg=echo -n +fi + +# The log_end_msg is a little different - it's both an +# echo of a failed message and a return of a code number. +# So if it doesn't exist, we define a very simple one +# that would do the work. +if ! type log_end_msg > /dev/null 2>&1 ; then + log_end_msg() { + ret=$1 + if [ "$ret" -ge 1 ]; then + echo " failed!" + else + echo " success" + fi + return $ret + } +fi +log_end_msg=log_end_msg + +# Paths to what we need +ZFS="@sbindir@/zfs" +ZPOOL="@sbindir@/zpool" +ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" + +# Sencible defaults +ZFS_MOUNT='yes' +ZFS_UNMOUNT='yes' +LOCKDIR=/var/lock/zfs + +# Source zfs configuration, overriding the defaults +if [ -f /etc/default/zfs ]; then + . 
/etc/default/zfs +elif [ -f /etc/sysconfig/zfs ]; then + . /etc/sysconfig/zfs +elif [ -f /etc/conf.d/zfs ]; then + . /etc/conf.d/zfs +fi + +[ ! -d "$LOCKDIR" ] && mkdir $LOCKDIR + +# ---------------------------------------------------- + +zfs_installed() { + $log_begin_msg "Checking if zfs userspace tools present" + if [ ! -x $ZPOOL ]; then + $log_failure_msg "$ZPOOL binary not found." + $log_end_msg 1 + fi + if [ ! -x $ZFS ]; then + $log_failure_msg "$ZFS binary not found." + $log_end_msg 1 + fi + $log_end_msg 0 +} + +# Do a lot of checks to make sure it's 'safe' to continue with import/mount etc +checksystem() +{ + if [ -z "$init" ]; then + # Not interactive + grep -qiE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 + fi + + [ -f "$LOCKDIR/$servicename" ] && return 3 + + # Check if ZFS is installed. If not, comply to FC standards and bail + zfs_installed || { + $log_failure_msg "not installed" + return 5 + } + + # Requires selinux policy which has not been written. + if [ -r "/selinux/enforce" ] && + [ "$(cat /selinux/enforce)" == "1" ]; then + + $log_failure_msg "SELinux ZFS policy required" + return 4 + fi + + # Delay until all required block devices are present. + if [ -x /sbin/udevadm ]; then + /sbin/udevadm settle + elif [ -x /sbin/udevsettle ]; then + /sbin/udevsettle + fi + + # Load the zfs module stack + if ! grep -q zfs /proc/modules ; then + $log_begin_msg "Loading kernel ZFS infrastructure: " + modprobe zfs || { + $log_failure_msg "Could not load zfs module" + $log_end_msg 1 + return 5 + } + $log_end_msg 0 + fi + sleep 1 + + # fix mtab to include already-mounted fs filesystems, in case there are any + # we ONLY do this if mtab does not point to /proc/mounts + # which is the case in some systems (systemd may bring that soon) + if ! 
readlink /etc/mtab | grep -q /proc ; then + if grep -qE "(^/dev/zd|^/dev/zvol| zfs )" /proc/mounts ; then + $log_begin_msg "Registering already-mounted ZFS filesystems and volumes: " + reregister_mounts || { + $log_end_msg 1 + return 150 + } + fi + fi + + # Ensure / exists in /etc/mtab, if not update mtab accordingly. + # This should be handled by rc.sysinit but lets be paranoid. + awk '$2 == "/" { exit 1 }' /etc/mtab + RETVAL=$? + if [ "$RETVAL" -eq 0 ]; then + /bin/mount -f / + fi + + if ! [ `uname -m` == "x86_64" ]; then + echo "Warning: You're not running 64bit. Currently native zfs in"; + echo " linux is only supported and tested on 64bit."; + # should we break here? People doing this should know what they + # do, thus i'm not breaking here. + fi +} + +depend() +{ + # Try to allow people to mix and match fstab with ZFS in a way that makes sense. + if [ "$(mountinfo -s /)" == 'zfs' ]; then + before localmount + else + after localmount + fi + + # bootmisc will log to /var which may be a different zfs than root. 
+ before bootmisc logger + use mtab + keyword -lxc -openvz -prefix -vserver +} + +reregister_mounts() { + cat /etc/mtab | while read -r fs mntpnt fstype opts rest ; do + fs=`printf '%b\n' "$fs"` + mntpnt=`printf '%b\n' "$mntpnt"` + if [ "$fstype" == "zfs" ] ; then + if [ "$mntpnt" == "/" ] ; then + mount -f -o zfsutil -t zfs --move / /removethismountpointhoweverpossible + umount --fake /removethismountpointhoweverpossible + else + umount --fake "$mntpnt" + fi + elif echo "$fs" | grep -qE "^/dev/(zd|zvol)" ; then + if [ "$mntpnt" == "/" ] ; then + mount -f -t "$fstype" --move / /removethismountpointhoweverpossible + umount --fake /removethismountpointhoweverpossible + else + umount --fake "$mntpnt" + fi + fi + done + cat /proc/mounts | while read -r fs mntpnt fstype opts rest ; do + fs=`printf '%b\n' "$fs"` + mntpnt=`printf '%b\n' "$mntpnt"` + if [ "$fstype" == "zfs" ] ; then + mount -f -t zfs -o zfsutil "$fs" "$mntpnt" + elif echo "$fs" | grep -q "^/dev/zd" ; then + mount -f -t "$fstype" -o "$opts" "$fs" "$mntpnt" + fi + done +} + +# i need a bash guru to simplify this, since this is copy and paste, but donno how +# to correctly dereference variable names in bash, or how to do this right + +declare -A MTAB +declare -A FSTAB + +# first parameter is a regular expression that filters mtab +read_mtab() { + for fs in "${!MTAB[@]}" ; do unset MTAB["$fs"] ; done + while read -r fs mntpnt fstype opts blah ; do + fs=`printf '%b\n' "$fs"` + MTAB["$fs"]=$mntpnt + done < <(grep -E "$1" /etc/mtab) +} + +in_mtab() { + [ "${MTAB[$1]}" != "" ] + return $? +} + +# first parameter is a regular expression that filters fstab +read_fstab() { + for fs in "${!FSTAB[@]}" ; do unset FSTAB["$fs"] ; done + while read -r fs mntpnt fstype opts blah ; do + fs=`printf '%b\n' "$fs"` + FSTAB["$fs"]=$mntpnt + done < <(grep -E "$1" /etc/fstab) +} + +in_fstab() { + [ "${FSTAB[$1]}" != "" ] + return $? 
+} + diff --git a/etc/init.d/zfs-mount.in b/etc/init.d/zfs-mount.in new file mode 100755 index 000000000000..008e8686c02e --- /dev/null +++ b/etc/init.d/zfs-mount.in @@ -0,0 +1,221 @@ +#!/bin/bash +# +# zfs-mount This script will import/mount/umount/export the zfs filesystems. +# +# chkconfig: 2345 01 99 +# description: This script will import/mount/umount/export the zfs +# filesystems during system boot/shutdown. Configuration of +# which filesystems should be mounted is handled by the zfs +# 'mountpoint' and 'canmount' properties. See the zfs(8) man +# page for details. +# It is also responsible for all userspace zfs services. +# probe: true +# +### BEGIN INIT INFO +# Provides: zvol zfs zfs-mount +# Required-Start: $local_fs +# Required-Stop: +# X-Stop-After: umountfs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Import and mount ZFS pools, filesystems and volumes +# Description: Run the `zfs import`, `zfs mount -a`, `zfs umount -a` or +# `zfs export` command. +### END INIT INFO + +# Source the common init script +. /etc/zfs/common.init +servicename=zfs-mount + +# ---------------------------------------------------- + +# Import all pools +do_import() +{ + if [ "$USE_DISK_BY_ID" == 'yes' ]; then + already_imported=$(zpool list -H -oname) + available_pools=$(zpool import | grep pool: | sed 's@.*: @@') + + for pool in $available_pools; do + if [ -n "$ZFS_POOL_EXCEPTIONS" ]; then + for exception in $ZFS_POOL_EXCEPTIONS; do + [ "$pool" == "$exception" ] && continue 2 + done + fi + + # Last ditch attempt, try /dev! + for dir in /dev/disk/by-* /dev; do + $log_begin_msg "Importing ZFS pool $pool (using $dir)" + "$ZPOOL" import -d $dir -N $pool 2>/dev/null + RET=$? 
+ if [ "$RET" -eq 0 ]; then + POOL_IMPORTED=1 + $log_end_msg $RET + continue 2 + fi + done + done + fi + + if [ -f "$ZPOOL_CACHE" -a "$POOL_IMPORTED" == '0' ] ; then + $log_begin_msg "Importing ZFS pools (using cache file)" + "$ZPOOL" import -c "$ZPOOL_CACHE" -aN 2>/dev/null || true # stupid zpool will fail if all pools are already imported + RET=$? + + if [ "$RET" -eq 0 ]; then + POOL_IMPORTED=1 + fi + + $log_end_msg $ret + fi +} + +# Export all pools +do_export() +{ + $log_begin_msg "Exporting ZFS pools" + "$ZPOOL" list -H -o name | \ + while read pool; do + "$ZPOOL" export $pool + done + rmmod zfs + $log_end_msg 0 # return code not that important. +} + +# Mount all datasets/filesystems +do_mount() +{ + if [ -n "$POOL_IMPORTED" ]; then + [ "$VERBOSE_MOUNT" == 'yes' ] && verbose=v + [ "$DO_OVERLAY_MOUNTS" == 'yes' ] && overlay=O + + $log_begin_msg "Mounting ZFS filesystems not yet mounted" + $ZFS mount -a$verbose$overlay $MOUNT_EXTRA_OPTIONS + RET=$? + + if [ $RET != 0 ] ; then + $log_end_msg $RET + exit $RET + fi + $log_end_msg 0 + + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + + $log_begin_msg "Mounting volumes registered in fstab: " + for volume in "${!FSTAB[@]}" ; do + if in_mtab "$volume" ; then continue ; fi + + $log_progress_msg "$volume " + mount "$volume" + done + + $log_end_msg 0 + fi +} + +# Unmount all filesystems +do_unmount() +{ + $log_begin_msg "Unmounting ZFS filesystems" + $ZFS unmount -a + RET=$? + + # Ignore a non-zero `zfs` result so that a busy ZFS instance + # does not hang the system during shutdown. + if [ $RET != 0 ] ; then + $log_end_msg $RET + fi + + $log_end_msg 0 + + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + + $log_begin_msg "Unmounting volumes registered in fstab: " + for volume in "${!FSTAB[@]}" ; do + dev=/dev/$(ls -l "$volume" | sed 's@.*/@@') + if ! 
in_mtab "$dev" ; then continue ; fi + + $log_progress_msg "$volume " + umount "$volume" + done + + $log_end_msg 0 +} + +# Output the status and list of pools +status() +{ + [ ! -f "$LOCKDIR/$servicename" ] && return 3 + + if ! grep -q zfs /proc/modules ; then + # module not loaded, no point in running zpool. + exit 0 + fi + + "$ZPOOL" status && echo "" && "$ZPOOL" list +} + +start() +{ + checksystem && { + case "$ZFS_MOUNT" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 3 + ;; + esac + + do_import + do_mount + + touch "$LOCKDIR/$servicename" + } +} + +stop() +{ + case "$ZFS_UNMOUNT" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + # Check if ZFS is installed. If not, comply to FC standards and bail + zfs_installed || { + $log_failure_msg "not installed" + $log_end_msg 5 + } + + if ! grep -q zfs /proc/modules ; then + # module not loaded, no need to umount anything + exit 0 + fi + + do_unmount + do_export + + rm -f "$LOCKDIR/$servicename" +} + +# ---------------------------------------------------- + +case "$1" in + (start) + start + ;; + (stop) + stop + ;; + (status) + status + ;; + (force-reload|condrestart|reload|restart) + # no-op + ;; + (*) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop|status}" + exit 3 + ;; +esac diff --git a/etc/init.d/zfs-share.in b/etc/init.d/zfs-share.in new file mode 100755 index 000000000000..c3851ff03615 --- /dev/null +++ b/etc/init.d/zfs-share.in @@ -0,0 +1,116 @@ +#!/bin/bash +# +# zfs-share This script will network share zfs filesystems and volumes. +# +# chkconfig: 2345 30 99 +# description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. 
+# probe: true +# +### BEGIN INIT INFO +# Provides: shareiscsi sharenfs sharesmb zfs-share +# Required-Start: $local_fs $network $remote_fs +# Required-Stop: $local_fs $network $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Should-Start: iscsi iscsitarget istgt scst @NFS_SRV@ samba samba4 +# Should-Stop: iscsi iscsitarget istgt scst @NFS_SRV@ samba samba4 +# Short-Description: Network share ZFS datasets and volumes. +# Description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. +### END INIT INFO + +# Source the common init script +. /etc/zfs/common.init +servicename=zfs-share + +# ---------------------------------------------------- + +do_share() +{ + $log_begin_msg "Sharing ZFS filesystems" + $ZFS share -a + RET=$? + + if [ $RET != 0 ] ; then + $log_failure_msg "Failed to share filesystems" + $log_end_msg $RET + fi + + $log_end_msg 0 +} + +do_unshare() +{ + $log_begin_msg "Unsharing ZFS filesystems" + $ZFS unshare -a + RET=$? + + # Ignore a non-zero `zfs` result so that a busy ZFS instance + # does not hang the system during shutdown. + if [ $RET != 0 ] ; then + $log_end_msg $RET + fi + + $log_end_msg 0 +} + +start() +{ + checksystem && { + case "$ZFS_SHARE" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + do_share + + touch "$LOCKDIR/$servicename" + } +} + +stop() +{ + case "$ZFS_UNSHARE" in + ([Oo][Ff][Ff]|[Nn][Oo]|'') + exit 0 + ;; + esac + + # Do a more simplified version of checksystem() + + [ ! -f "$LOCKDIR/$servicename" ] && return 3 + + # Check if ZFS is installed. If not, comply to FC standards and bail + zfs_installed || { + $log_failure_msg "not installed" + $log_end_msg 5 + } + + if ! 
grep -q zfs /proc/modules ; then + # module not loaded, no need to unshare anything + exit 0 + fi + + do_unshare + + rm -f "$LOCKDIR/$servicename" +} + +case "$1" in + (start) + start + ;; + (stop) + stop + ;; + (force-reload|reload|restart|status) + # no-op + ;; + (*) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop}" + exit 3 + ;; +esac diff --git a/etc/init.d/zfs.fedora.in b/etc/init.d/zfs.fedora.in deleted file mode 100644 index 86f430dce925..000000000000 --- a/etc/init.d/zfs.fedora.in +++ /dev/null @@ -1,243 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: -# Required-Stop: -# Should-Start: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 1 -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -export PATH=/usr/local/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -# Source function library & LSB routines -. /etc/rc.d/init.d/functions - -# script variables -RETVAL=0 -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -servicename=zfs -LOCKFILE=/var/lock/subsys/$servicename - -# functions -zfs_installed() { - modinfo zfs > /dev/null 2>&1 || return 5 - $ZPOOL > /dev/null 2>&1 - [ $? 
== 127 ] && return 5 - $ZFS > /dev/null 2>&1 - [ $? == 127 ] && return 5 - return 0 -} - -reregister_mounts() { - cat /etc/mtab | while read -r fs mntpnt fstype opts rest ; do - fs=`printf '%b\n' "$fs"` - mntpnt=`printf '%b\n' "$mntpnt"` - if [ "$fstype" == "zfs" ] ; then - if [ "$mntpnt" == "/" ] ; then - mount -f -o zfsutil -t zfs --move / /removethismountpointhoweverpossible - umount --fake /removethismountpointhoweverpossible - else - umount --fake "$mntpnt" - fi - elif echo "$fs" | grep -q "^/dev/zd" ; then - if [ "$mntpnt" == "/" ] ; then - mount -f -t "$fstype" --move / /removethismountpointhoweverpossible - umount --fake /removethismountpointhoweverpossible - else - umount --fake "$mntpnt" - fi - fi - done - cat /proc/mounts | while read -r fs mntpnt fstype opts rest ; do - fs=`printf '%b\n' "$fs"` - mntpnt=`printf '%b\n' "$mntpnt"` - if [ "$fstype" == "zfs" ] ; then - mount -f -t zfs -o zfsutil "$fs" "$mntpnt" - elif echo "$fs" | grep -q "^/dev/zd" ; then - mount -f -t "$fstype" -o "$opts" "$fs" "$mntpnt" - fi - done -} - -# i need a bash guru to simplify this, since this is copy and paste, but donno how -# to correctly dereference variable names in bash, or how to do this right - -declare -A MTAB -declare -A FSTAB - -# first parameter is a regular expression that filters mtab -read_mtab() { - for fs in "${!MTAB[@]}" ; do unset MTAB["$fs"] ; done - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - MTAB["$fs"]=$mntpnt - done < <(grep "$1" /etc/mtab) -} - -in_mtab() { - [ "${MTAB[$1]}" != "" ] - return $? -} - -# first parameter is a regular expression that filters fstab -read_fstab() { - for fs in "${!FSTAB[@]}" ; do unset FSTAB["$fs"] ; done - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - FSTAB["$fs"]=$mntpnt - done < <(grep "$1" /etc/fstab) -} - -in_fstab() { - [ "${FSTAB[$1]}" != "" ] - return $? -} - -start() -{ - if [ -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. 
If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # Delay until all required block devices are present. - udevadm settle - - # load kernel module infrastructure - if ! grep -q zfs /proc/modules ; then - action $"Loading kernel ZFS infrastructure: " modprobe zfs || return 5 - fi - - # fix mtab to include already-mounted fs filesystems, in case there are any - # we ONLY do this if mtab does not point to /proc/mounts - # which is the case in some systems (systemd may bring that soon) - if ! readlink /etc/mtab | grep -q /proc ; then - if grep -qE "(^/dev/zd| zfs )" /proc/mounts ; then - action $"Registering already-mounted ZFS filesystems and volumes: " reregister_mounts || return 150 - fi - fi - - if [ -f $ZPOOL_CACHE ] ; then - - echo -n $"Importing ZFS pools not yet imported: " - $ZPOOL import -c $ZPOOL_CACHE -aN || true # stupid zpool will fail if all pools are already imported - RETVAL=$? - if [ $RETVAL -ne 0 ]; then - failure "Importing ZFS pools not yet imported: " - return 151 - fi - success "Importing ZFS pools not yet imported: " - - fi - - action $"Mounting ZFS filesystems not yet mounted: " $ZFS mount -a || return 152 - - action $"Exporting ZFS filesystems: " $ZFS share -a || return 153 - - read_mtab "^/dev/zd" - read_fstab "^/dev/zd" - - template=$"Mounting volume %s registered in fstab: " - for volume in "${!FSTAB[@]}" ; do - if in_mtab "$volume" ; then continue ; fi - string=`printf "$template" "$volume"` - action "$string" mount "$volume" - done - - touch "$LOCKFILE" -} - -stop() -{ - if [ ! -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. 
If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # the poweroff of the system takes care of this - # but it never unmounts the root filesystem itself - # shit - - action $"Syncing ZFS filesystems: " sync - # about the only thing we can do, and then we - # hope that the umount process will succeed - # unfortunately the umount process does not dismount - # the root file system, there ought to be some way - # we can tell zfs to just flush anything in memory - # when a request to remount,ro comes in - - #echo -n $"Unmounting ZFS filesystems: " - #$ZFS umount -a - #RETVAL=$? - #if [ $RETVAL -ne 0 ]; then - # failure - - # return 8 - #fi - #success - - rm -f "$LOCKFILE" -} - -# See how we are called -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - lsmod | grep -q zfs || RETVAL=3 - $ZPOOL status && echo && $ZFS list || { - [ -f "$LOCKFILE" ] && RETVAL=2 || RETVAL=4 - } - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ] ; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - RETVAL=3 - ;; -esac - -exit $RETVAL diff --git a/etc/init.d/zfs.gentoo.in b/etc/init.d/zfs.gentoo.in deleted file mode 100644 index 07fce01ba04b..000000000000 --- a/etc/init.d/zfs.gentoo.in +++ /dev/null @@ -1,124 +0,0 @@ -#!/sbin/runscript -# Copyright 1999-2011 Gentoo Foundation -# Released under the 2-clause BSD license. -# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/files/zfs,v 0.9 2011/04/30 10:13:43 devsk Exp $ - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -depend() -{ - # Try to allow people to mix and match fstab with ZFS in a way that makes sense. - if [ "$(mountinfo -s /)" = 'zfs' ] - then - before localmount - else - after localmount - fi - - # bootmisc will log to /var which may be a different zfs than root. 
- before bootmisc logger - use mtab - keyword -lxc -openvz -prefix -vserver -} - -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -ZFS_MODULE=zfs - -checksystem() { - if [ ! -c /dev/zfs ]; then - einfo "Checking if ZFS modules present" - if ! modinfo zfs > /dev/null 2>&1 ; then - eerror "$ZFS_MODULE not found. Is the ZFS package installed?" - return 1 - fi - fi - einfo "Checking if zfs userspace tools present" - if [ ! -x $ZPOOL ]; then - eerror "$ZPOOL binary not found." - return 1 - fi - if [ ! -x $ZFS ]; then - eerror "$ZFS binary not found." - return 1 - fi - return 0 -} - -start() { - ebegin "Starting ZFS" - checksystem || return 1 - - # Delay until all required block devices are present. - udevadm settle - - if [ ! -c /dev/zfs ]; then - modprobe $ZFS_MODULE - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to load the $ZFS_MODULE module, check 'dmesg|tail'." - eend $rv - return $rv - fi - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ -f $ZPOOL_CACHE ]; then - einfo "Importing ZFS pools" - # as per fedora script, import can fail if all pools are already imported - # The check for $rv makes no sense...but someday, it will work right. - $ZPOOL import -c $ZPOOL_CACHE -aN 2>/dev/null || true - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to import not-yet imported pools." - eend $rv - return $rv - fi - fi - - einfo "Mounting ZFS filesystems" - $ZFS mount -a - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to mount ZFS filesystems." - eend $rv - return $rv - fi - - einfo "Exporting ZFS filesystems" - $ZFS share -a - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to export ZFS filesystems." - eend $rv - return $rv - fi - - eend 0 - return 0 -} - -stop() -{ - ebegin "Unmounting ZFS filesystems" - $ZFS umount -a - rv=$? - if [ $rv -ne 0 ]; then - einfo "Some ZFS filesystems not unmounted" - fi - - # Don't fail if we couldn't umount everything. 
/usr might be in use. - eend 0 - return 0 -} - -status() -{ - # show pool status and list - $ZPOOL status && echo && $ZPOOL list -} diff --git a/etc/init.d/zfs.lsb.in b/etc/init.d/zfs.lsb.in deleted file mode 100644 index 05e815ede8fe..000000000000 --- a/etc/init.d/zfs.lsb.in +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: $local_fs -# Required-Stop: $local_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Should-Stop: -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -# Source function library. -. /lib/lsb/init-functions - -LOCKFILE=/var/lock/zfs -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -USE_DISK_BY_ID=0 -VERBOSE_MOUNT=0 -DO_OVERLAY_MOUNTS=0 -MOUNT_EXTRA_OPTIONS="" - -# Source zfs configuration. -[ -r '/etc/default/zfs' ] && . /etc/default/zfs - -[ -x "$ZPOOL" ] || exit 1 -[ -x "$ZFS" ] || exit 2 - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -start() -{ - [ -f "$LOCKFILE" ] && return 3 - - # Delay until all required block devices are present. - udevadm settle - - # Load the zfs module stack - /sbin/modprobe zfs - - # Ensure / exists in /etc/mtab, if not update mtab accordingly. - # This should be handled by rc.sysinit but lets be paranoid. 
- awk '$2 == "/" { exit 1 }' /etc/mtab - RETVAL=$? - if [ "$RETVAL" -eq 0 ]; then - /bin/mount -f / - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ "$USE_DISK_BY_ID" -eq 1 ]; then - log_begin_msg "Importing ZFS pools" - "$ZPOOL" import -d /dev/disk/by-id -aN 2>/dev/null - ret=$? - log_end_msg $ret - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - elif [ -f "$ZPOOL_CACHE" ] ; then - log_begin_msg "Importing ZFS pools" - "$ZPOOL" import -c "$ZPOOL_CACHE" -aN 2>/dev/null - ret=$? - log_end_msg $ret - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - fi - - if [ -n "$POOL_IMPORTED" ]; then - if [ "$VERBOSE_MOUNT" -eq 1 ]; then - verbose=v - fi - - if [ "$DO_OVERLAY_MOUNTS" -eq 1 ]; then - overlay=O - fi - - log_begin_msg "Mounting ZFS filesystems" - "$ZFS" mount -a$verbose$overlay$MOUNT_EXTRA_OPTIONS - log_end_msg $? - - log_begin_msg "Exporting ZFS filesystems" - "$ZFS" share -a - log_end_msg $? - fi - - touch "$LOCKFILE" -} - -stop() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - log_begin_msg "Unsharing ZFS filesystems" - "$ZFS" unshare -a - log_end_msg $? - - log_begin_msg "Unmounting ZFS filesystems" - "$ZFS" umount -a - log_end_msg $? - - rm -f "$LOCKFILE" -} - -status() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - "$ZPOOL" status && echo "" && "$ZPOOL" list -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - status - RETVAL=$? - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ]; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - ;; -esac - -exit $RETVAL diff --git a/etc/init.d/zfs.lunar.in b/etc/init.d/zfs.lunar.in deleted file mode 100644 index 7a51104c2647..000000000000 --- a/etc/init.d/zfs.lunar.in +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash -# -# zfs This shell script takes care of starting (mount) and -# stopping (umount) zfs shares. 
-# -# chkconfig: 35 60 40 -# description: ZFS is a filesystem developed by Sun, ZFS is a -# combined file system and logical volume manager -# designed by Sun Microsystems. Made available to Linux -# using SPL (Solaris Porting Layer) by zfsonlinux.org. -# probe: true - -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -case $1 in - start) echo "$1ing ZFS filesystems" - - # Delay until all required block devices are present. - udevadm settle - - if ! grep "zfs" /proc/modules > /dev/null; then - echo "ZFS kernel module not loaded yet; loading..."; - if ! modprobe zfs; then - echo "Failed to load ZFS kernel module..."; - exit 0; - fi - fi - - if ! [ `uname -m` == "x86_64" ]; then - echo "Warning: You're not running 64bit. Currently native zfs in"; - echo " linux is only supported and tested on 64bit."; - # should we break here? People doing this should know what they - # do, thus i'm not breaking here. - fi - - # mount the filesystems - while IFS= read -r -d $'\n' dev; do - mdev=$(echo "$dev" | awk '{ print $1; }') - echo -n "mounting $mdev..." - if $ZFS mount $mdev; then - echo -e "done"; - else - echo -e "failed"; - fi - done < <($ZFS list -H); - - # export the filesystems - echo -n "exporting ZFS filesystems..." 
- if $ZFS share -a; then - echo -e "done"; - else - echo -e "failed"; - fi - - - ;; - - stop) echo "$1ping ZFS filesystems" - - if grep "zfs" /proc/modules > /dev/null; then - # module is loaded, so we can try to umount filesystems - while IFS= read -r -d $'\n' dev; do - mdev=$(echo "$dev" | awk '{ print $1 }'); - echo -n "umounting $mdev..."; - if $ZFS umount $mdev; then - echo -e "done"; - else - echo -e "failed"; - fi - # the next line is, because i have to reverse the - # output, otherwise it wouldn't work as it should - done < <($ZFS list -H | tac); - - # and finally let's rmmod the module - rmmod zfs - - - else - # module not loaded, no need to umount anything - exit 0 - fi - - ;; - - restart) echo "$1ing ZFS filesystems" - $0 stop - $0 start - ;; - - *) echo "Usage: $0 {start|stop|restart}" - ;; - -esac diff --git a/etc/init.d/zfs.redhat.in b/etc/init.d/zfs.redhat.in deleted file mode 100644 index 5fa3275b9720..000000000000 --- a/etc/init.d/zfs.redhat.in +++ /dev/null @@ -1,175 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: -# Required-Stop: -# Should-Start: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 1 -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. 
-### END INIT INFO - -export PATH=/usr/local/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -# Source function library & LSB routines -. /etc/rc.d/init.d/functions - -# script variables -RETVAL=0 -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -servicename=zfs -LOCKFILE=/var/lock/subsys/$servicename - -# functions -zfs_installed() { - modinfo zfs > /dev/null 2>&1 || return 5 - $ZPOOL > /dev/null 2>&1 - [ $? == 127 ] && return 5 - $ZFS > /dev/null 2>&1 - [ $? == 127 ] && return 5 - return 0 -} - -# i need a bash guru to simplify this, since this is copy and paste, but donno how -# to correctly dereference variable names in bash, or how to do this right - -# first parameter is a regular expression that filters fstab -read_fstab() { - unset FSTAB - n=0 - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - FSTAB[$n]=$fs - let n++ - done < <(egrep "$1" /etc/fstab) -} - -start() -{ - # Disable lockfile check - # if [ -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # Delay until all required block devices are present. - if [ -x /sbin/udevadm ]; then - /sbin/udevadm settle - elif [ -x /sbin/udevsettle ]; then - /sbin/udevsettle - fi - - # load kernel module infrastructure - if ! 
grep -q zfs /proc/modules ; then - action $"Loading kernel ZFS infrastructure: " modprobe zfs || return 5 - fi - sleep 1 - - action $"Mounting automounted ZFS filesystems: " $ZFS mount -a || return 152 - - action $"Exporting ZFS filesystems: " $ZFS share -a || return 153 - - # Read fstab, try to mount zvols ignoring error - read_fstab "^/dev/(zd|zvol)" - template=$"Mounting volume %s registered in fstab: " - for volume in "${FSTAB[@]}" ; do - string=`printf "$template" "$volume"` - action "$string" mount "$volume" 2>/dev/null || /bin/true - done - - # touch "$LOCKFILE" -} - -stop() -{ - # Disable lockfile check - # if [ ! -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # the poweroff of the system takes care of this - # but it never unmounts the root filesystem itself - # shit - - action $"Syncing ZFS filesystems: " sync - # about the only thing we can do, and then we - # hope that the umount process will succeed - # unfortunately the umount process does not dismount - # the root file system, there ought to be some way - # we can tell zfs to just flush anything in memory - # when a request to remount,ro comes in - - #echo -n $"Unmounting ZFS filesystems: " - #$ZFS umount -a - #RETVAL=$? - #if [ $RETVAL -ne 0 ]; then - # failure - - # return 8 - #fi - #success - - rm -f "$LOCKFILE" -} - -# See how we are called -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - lsmod | grep -q zfs || RETVAL=3 - $ZPOOL status && echo && $ZFS list || { - [ -f "$LOCKFILE" ] && RETVAL=2 || RETVAL=4 - } - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ] ; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - RETVAL=3 - ;; -esac - -exit $RETVAL