From a16bc6bdd9613c38cc9ceb6041e733b77435d476 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Tue, 4 Feb 2014 16:10:38 -0800
Subject: [PATCH 1/5] Add zimport.sh compatibility test script

Verify that an assortment of known good reference pools can be imported
using different versions of the ZoL code.

By default, reference pools for the major ZFS implementations will be
checked against the most recent ZoL tags and the master development
branch.  Alternate tags or branches may be verified with the '-s' option.
Passing the keyword "installed" will instruct the script to test
whatever version is installed.

A reference pool is used preferentially for all tests.  However, if one
does not exist and the pool-tag matches one of the src-tags, then a new
reference pool will be created using binaries from that source build.
This is particularly useful when you need to test your changes before
opening a pull request.

New reference pools may be added by placing a bzip2 compressed tarball
of the pool in the scripts/zfs-images directory and then passing the
-p option.  To increase the test coverage, reference pools should be
collected for all the major ZFS implementations.  Having these pools
easily available is also helpful to the developers.

Care should be taken to run these tests with a kernel supported by all
the listed tags.  Otherwise, build failures will cause false positives.

EXAMPLES:

The following example will verify that the zfs-0.6.1 and zfs-0.6.2 tags,
the master branch, and the installed zfs version can correctly import
the listed pools.  Note that there are no reference pools available for
master and installed, but because binaries are available they are
automatically constructed.  The working directory is also preserved
between runs (-k), avoiding the need to rebuild from source for
multiple runs.

 zimport.sh -k -f /var/tmp/zimport \
     -s "zfs-0.6.1 zfs-0.6.2 master installed" \
     -p "all master installed"

 --------------------- ZFS on Linux Source Versions --------------
                 zfs-0.6.1       zfs-0.6.2       master          0.6.2-180
 -----------------------------------------------------------------
 Clone SPL       Skip            Skip            Skip            Skip
 Clone ZFS       Skip            Skip            Skip            Skip
 Build SPL       Skip            Skip            Skip            Skip
 Build ZFS       Skip            Skip            Skip            Skip
 -----------------------------------------------------------------
 zevo-1.1.1      Pass            Pass            Pass            Pass
 zol-0.6.1       Pass            Pass            Pass            Pass
 zol-0.6.2-173   Fail            Fail            Pass            Pass
 zol-0.6.2       Pass            Pass            Pass            Pass
 master          Fail            Fail            Pass            Pass
 installed       Pass            Pass            Pass            Pass

Signed-off-by: Brian Behlendorf
Signed-off-by: Tim Chase
Signed-off-by: Richard Yao
Issue #2094
---
 .gitmodules         |   3 +
 scripts/Makefile.am |   2 +
 scripts/zfs-images  |   1 +
 scripts/zimport.sh  | 495 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 501 insertions(+)
 create mode 100644 .gitmodules
 create mode 160000 scripts/zfs-images
 create mode 100755 scripts/zimport.sh

diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000000..d400f10a7e63
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "scripts/zfs-images"]
+	path = scripts/zfs-images
+	url = https://github.com/zfsonlinux/zfs-images
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index bc8fe99cf552..7894db49e148 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -7,6 +7,7 @@ dist_pkgdata_SCRIPTS = \
 	$(top_builddir)/scripts/common.sh \
 	$(top_srcdir)/scripts/zconfig.sh \
 	$(top_srcdir)/scripts/zfault.sh \
+	$(top_srcdir)/scripts/zimport.sh \
 	$(top_srcdir)/scripts/zfs.sh \
 	$(top_srcdir)/scripts/zpool-create.sh \
 	$(top_srcdir)/scripts/zpios.sh \
@@ -17,6 +18,7 @@ dist_pkgdata_SCRIPTS = \
 ZFS=$(top_builddir)/scripts/zfs.sh
 ZCONFIG=$(top_builddir)/scripts/zconfig.sh
 ZFAULT=$(top_builddir)/scripts/zfault.sh
+ZIMPORT=$(top_builddir)/scripts/zimport.sh
 ZTEST=$(top_builddir)/cmd/ztest/ztest
 ZPIOS_SANITY=$(top_builddir)/scripts/zpios-sanity.sh
diff --git a/scripts/zfs-images b/scripts/zfs-images
new file mode 160000
index 000000000000..3331601f6dc5
--- /dev/null
+++ b/scripts/zfs-images
@@ -0,0 +1 @@
+Subproject commit 3331601f6dc50ef2c9779c1656218701b48b276c
diff --git a/scripts/zimport.sh b/scripts/zimport.sh
new file mode 100755
index 000000000000..eeaec11cf077
--- /dev/null
+++ b/scripts/zimport.sh
@@ -0,0 +1,495 @@
+#!/bin/sh
+#
+# Verify that an assortment of known good reference pools can be imported
+# using different versions of the ZoL code.
+#
+# By default, reference pools for the major ZFS implementations will be
+# checked against the most recent ZoL tags and the master development
+# branch.  Alternate tags or branches may be verified with the '-s' option.
+# Passing the keyword "installed" will instruct the script to test
+# whatever version is installed.
+#
+# A reference pool is used preferentially for all tests.  However, if one
+# does not exist and the pool-tag matches one of the src-tags, then a new
+# reference pool will be created using binaries from that source build.
+# This is particularly useful when you need to test your changes before
+# opening a pull request.  The keyword 'all' can be used as shorthand to
+# refer to all available reference pools.
+#
+# New reference pools may be added by placing a bzip2 compressed tarball
+# of the pool in the scripts/zfs-images directory and then passing
+# the -p option.  To increase the test coverage, reference pools
+# should be collected for all the major ZFS implementations.
+# Having these pools easily available is also helpful to the developers.
+#
+# Care should be taken to run these tests with a kernel supported by all
+# the listed tags.  Otherwise, build failures will cause false positives.
+#
+#
+# EXAMPLES:
+#
+# The following example will verify that the zfs-0.6.2 tag, the master
+# branch, and the installed zfs version can correctly import the listed
+# pools.  Note that there are no reference pools available for master and
+# installed, but because binaries are available they are automatically
+# constructed.  The working directory is also preserved between runs (-k),
+# avoiding the need to rebuild from source for multiple runs.
+#
+# zimport.sh -k -f /var/tmp/zimport \
+#     -s "zfs-0.6.2 master installed" \
+#     -p "zevo-1.1.1 zol-0.6.2 zol-0.6.2-173 master installed"
+#
+# --------------------- ZFS on Linux Source Versions --------------
+#                 zfs-0.6.2       master          0.6.2-175_g36eb554
+# -----------------------------------------------------------------
+# Clone SPL       Local           Local           Skip
+# Clone ZFS       Local           Local           Skip
+# Build SPL       Pass            Pass            Skip
+# Build ZFS       Pass            Pass            Skip
+# -----------------------------------------------------------------
+# zevo-1.1.1      Pass            Pass            Pass
+# zol-0.6.2       Pass            Pass            Pass
+# zol-0.6.2-173   Fail            Pass            Pass
+# master          Pass            Pass            Pass
+# installed       Pass            Pass            Pass
+#
+basedir="$(dirname $0)"
+
+SCRIPT_COMMON=common.sh
+if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
+. "${basedir}/${SCRIPT_COMMON}"
+else
+echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
+fi
+
+PROG=zimport.sh
+
+SRC_TAGS="zfs-0.6.1 zfs-0.6.2 master"
+POOL_TAGS="all master"
+TEST_DIR=`mktemp -u -d -p /var/tmp zimport.XXXXXXXX`
+KEEP=0
+VERBOSE=0
+COLOR=1
+REPO="https://github.com/zfsonlinux"
+IMAGES_DIR="$SCRIPTDIR/zfs-images/"
+IMAGES_TAR="https://github.com/zfsonlinux/zfs-images/tarball/master"
+CPUS=`grep -c ^processor /proc/cpuinfo`
+ERROR=0
+
+usage() {
+cat << EOF
+USAGE:
+zimport.sh [-hvck] [-r repo] [-s src-tag] [-i pool-dir] [-p pool-tag] [-f path]
+
+DESCRIPTION:
+	ZPOOL import verification tests
+
+OPTIONS:
+	-h		Show this message
+	-v		Verbose
+	-c		No color
+	-k		Keep temporary directory
+	-r <repo>	Source repository ($REPO)
+	-s <src-tag>...	Verify ZoL versions with the listed tags
+	-i <pool-dir>	Pool image directory
+	-p <pool-tag>...	Verify pools created with the listed tags
+	-f <path>	Temporary directory to use
+
+EOF
+}
+
+while getopts 'hvckr:s:i:p:f:?' OPTION; do
+	case $OPTION in
+	h)
+		usage
+		exit 1
+		;;
+	v)
+		VERBOSE=1
+		;;
+	c)
+		COLOR=0
+		;;
+	k)
+		KEEP=1
+		;;
+	r)
+		REPO="$OPTARG"
+		;;
+	s)
+		SRC_TAGS="$OPTARG"
+		;;
+	i)
+		IMAGES_DIR="$OPTARG"
+		;;
+	p)
+		POOL_TAGS="$OPTARG"
+		;;
+	f)
+		TEST_DIR="$OPTARG"
+		;;
+	?)
+		usage
+		exit
+		;;
+	esac
+done
+
+# Initialize the test suite
+init
+check_modules || die "ZFS modules must be unloaded"
+
+SRC_DIR="$TEST_DIR/src"
+SRC_DIR_SPL="$SRC_DIR/spl"
+SRC_DIR_ZFS="$SRC_DIR/zfs"
+
+if [ $COLOR -eq 0 ]; then
+	COLOR_GREEN=""
+	COLOR_BROWN=""
+	COLOR_RED=""
+	COLOR_RESET=""
+fi
+
+pass_nonewline() {
+	echo -n -e "${COLOR_GREEN}Pass${COLOR_RESET}\t\t"
+}
+
+skip_nonewline() {
+	echo -n -e "${COLOR_BROWN}Skip${COLOR_RESET}\t\t"
+}
+
+fail_nonewline() {
+	echo -n -e "${COLOR_RED}Fail${COLOR_RESET}\t\t"
+}
+
+#
+# Set several helper variables which are derived from a source tag.
+#
+# SPL_TAG - The tag zfs-x.y.z is translated to spl-x.y.z.
+# SPL_DIR - The spl directory name.
+# SPL_URL - The spl github URL to fetch the tarball.
+# ZFS_TAG - The passed zfs-x.y.z tag.
+# ZFS_DIR - The zfs directory name.
+# ZFS_URL - The zfs github URL to fetch the tarball.
+#
+src_set_vars() {
+	local TAG=$1
+
+	SPL_TAG=`echo $TAG | sed -e 's/zfs/spl/'`
+	SPL_DIR=$SRC_DIR_SPL/$SPL_TAG
+	SPL_URL=$REPO/spl/tarball/$SPL_TAG
+
+	ZFS_TAG=$TAG
+	ZFS_DIR=$SRC_DIR_ZFS/$ZFS_TAG
+	ZFS_URL=$REPO/zfs/tarball/$ZFS_TAG
+
+	if [ "$TAG" = "installed" ]; then
+		ZPOOL_CMD=`which zpool`
+		ZFS_CMD=`which zfs`
+		ZFS_SH="/usr/share/zfs/zfs.sh"
+		ZPOOL_CREATE="/usr/share/zfs/zpool-create.sh"
+	else
+		ZPOOL_CMD="./cmd/zpool/zpool"
+		ZFS_CMD="./cmd/zfs/zfs"
+		ZFS_SH="./scripts/zfs.sh"
+		ZPOOL_CREATE="./scripts/zpool-create.sh"
+	fi
+}
+
+#
+# Set several helper variables which are derived from a pool name such
+# as zol-0.6.x, zevo-1.1.1, etc.  These refer to example pools from various
+# ZFS implementations which are used to verify compatibility.
+#
+# POOL_TAG          - The example pool's name in scripts/zfs-images/.
+# POOL_BZIP         - The full path to the example bzip2 compressed pool.
+# POOL_DIR          - The top level test path for this pool.
+# POOL_DIR_PRISTINE - The directory containing a pristine version of the pool.
+# POOL_DIR_COPY     - The directory containing a working copy of the pool.
+# POOL_DIR_SRC      - Location of a source build if it exists for this pool.
+#
+pool_set_vars() {
+	local TAG=$1
+
+	POOL_TAG=$TAG
+	POOL_BZIP=$IMAGES_DIR/$POOL_TAG.tar.bz2
+	POOL_DIR=$TEST_DIR/pools/$POOL_TAG
+	POOL_DIR_PRISTINE=$POOL_DIR/pristine
+	POOL_DIR_COPY=$POOL_DIR/copy
+	POOL_DIR_SRC=`echo -n "$SRC_DIR_ZFS/"; \
+	    echo "$POOL_TAG" | sed -e 's/zol/zfs/'`
+}
+
+#
+# Construct a non-trivial pool given a specific version of the source.  More
+# interesting pools provide better test coverage, so this function should be
+# extended as needed to create more realistic pools.
+#
+pool_create() {
+	pool_set_vars $1
+	src_set_vars $1
+
+	if [ "$POOL_TAG" != "installed" ]; then
+		cd $POOL_DIR_SRC
+	fi
+
+	$ZFS_SH zfs="spa_config_path=$POOL_DIR_PRISTINE" || fail
+
+	# Create a file vdev RAIDZ pool.
+	FILEDIR="$POOL_DIR_PRISTINE" $ZPOOL_CREATE \
+	    -c file-raidz -p $POOL_TAG -v >/dev/null || fail
+
+	# Create a pool/fs filesystem with some random contents.
+	$ZFS_CMD create $POOL_TAG/fs || fail
+	populate /$POOL_TAG/fs/ 10 100
+
+	# Snapshot that filesystem, clone it, remove the files/dirs,
+	# replace them with new files/dirs.
+	$ZFS_CMD snap $POOL_TAG/fs@snap || fail
+	$ZFS_CMD clone $POOL_TAG/fs@snap $POOL_TAG/clone || fail
+	rm -Rf /$POOL_TAG/clone/* || fail
+	populate /$POOL_TAG/clone/ 10 100
+
+	# Scrub the pool, delay slightly, then export it.  It is now
+	# somewhat interesting for testing purposes.
+	$ZPOOL_CMD scrub $POOL_TAG || fail
+	sleep 10
+	$ZPOOL_CMD export $POOL_TAG || fail
+
+	$ZFS_SH -u || fail
+}
+
+# If the zfs-images directory doesn't exist, fetch a copy from Github, then
+# cache it in the $TEST_DIR and update $IMAGES_DIR.
+if [ ! -d $IMAGES_DIR ]; then
+	IMAGES_DIR="$TEST_DIR/zfs-images"
+	mkdir -p $IMAGES_DIR
+	curl -sL $IMAGES_TAR | \
+	    tar -xz -C $IMAGES_DIR --strip-components=1 || fail
+fi
+
+# Given the available images in the zfs-images directory, substitute the
+# list of available images for the reserved keyword 'all'.
+for TAG in $POOL_TAGS; do
+
+	if [ "$TAG" = "all" ]; then
+		ALL_TAGS=`ls $IMAGES_DIR | grep "tar.bz2" | \
+		    sed 's/.tar.bz2//' | tr '\n' ' '`
+		NEW_TAGS="$NEW_TAGS $ALL_TAGS"
+	else
+		NEW_TAGS="$NEW_TAGS $TAG"
+	fi
+done
+POOL_TAGS="$NEW_TAGS"
+
+if [ $VERBOSE -ne 0 ]; then
+	echo "---------------------------- Options ----------------------------"
+	echo "VERBOSE=$VERBOSE"
+	echo "KEEP=$KEEP"
+	echo "REPO=$REPO"
+	echo "SRC_TAGS=\"$SRC_TAGS\""
+	echo "POOL_TAGS=\"$POOL_TAGS\""
+	echo "PATH=$TEST_DIR"
+	echo
+fi
+
+if [ ! -d $TEST_DIR ]; then
+	mkdir -p $TEST_DIR
+fi
+
+# Print a header for all tags which are being tested.
+echo "--------------------- ZFS on Linux Source Versions --------------"
+printf "%-16s" " "
+for TAG in $SRC_TAGS; do
+	src_set_vars $TAG
+
+	if [ "$TAG" = "installed" ]; then
+		ZFS_VERSION=`modinfo zfs | awk '/version:/ { print $2; exit }'`
+		if [ -n "$ZFS_VERSION" ]; then
+			printf "%-16s" $ZFS_VERSION
+		else
+			echo "ZFS is not installed"
+			fail
+		fi
+	else
+		printf "%-16s" $TAG
+	fi
+done
+echo -e "\n-----------------------------------------------------------------"
+
+#
+# Attempt to generate the tarball from your local git repository; if that
+# fails, then attempt to download the tarball from Github.
+#
+printf "%-16s" "Clone SPL"
+for TAG in $SRC_TAGS; do
+	src_set_vars $TAG
+
+	if [ -d $SPL_DIR ]; then
+		skip_nonewline
+	elif [ "$SPL_TAG" = "installed" ]; then
+		skip_nonewline
+	else
+		cd $SPLSRC
+
+		if [ ! -d $SRC_DIR_SPL ]; then
+			mkdir -p $SRC_DIR_SPL
+		fi
+
+		git archive --format=tar --prefix=$SPL_TAG/ $SPL_TAG \
+		    -o $SRC_DIR_SPL/$SPL_TAG.tar &>/dev/null || \
+		    rm $SRC_DIR_SPL/$SPL_TAG.tar
+		if [ -s $SRC_DIR_SPL/$SPL_TAG.tar ]; then
+			tar -xf $SRC_DIR_SPL/$SPL_TAG.tar -C $SRC_DIR_SPL
+			rm $SRC_DIR_SPL/$SPL_TAG.tar
+			echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t"
+		else
+			mkdir -p $SPL_DIR || fail
+			curl -sL $SPL_URL | tar -xz -C $SPL_DIR \
+			    --strip-components=1 || fail
+			echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t"
+		fi
+	fi
+done
+printf "\n"
+
+#
+# Attempt to generate the tarball from your local git repository; if that
+# fails, then attempt to download the tarball from Github.
+#
+printf "%-16s" "Clone ZFS"
+for TAG in $SRC_TAGS; do
+	src_set_vars $TAG
+
+	if [ -d $ZFS_DIR ]; then
+		skip_nonewline
+	elif [ "$ZFS_TAG" = "installed" ]; then
+		skip_nonewline
+	else
+		cd $SRCDIR
+
+		if [ ! -d $SRC_DIR_ZFS ]; then
+			mkdir -p $SRC_DIR_ZFS
+		fi
+
+		git archive --format=tar --prefix=$ZFS_TAG/ $ZFS_TAG \
+		    -o $SRC_DIR_ZFS/$ZFS_TAG.tar &>/dev/null || \
+		    rm $SRC_DIR_ZFS/$ZFS_TAG.tar
+		if [ -s $SRC_DIR_ZFS/$ZFS_TAG.tar ]; then
+			tar -xf $SRC_DIR_ZFS/$ZFS_TAG.tar -C $SRC_DIR_ZFS
+			rm $SRC_DIR_ZFS/$ZFS_TAG.tar
+			echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t"
+		else
+			mkdir -p $ZFS_DIR || fail
+			curl -sL $ZFS_URL | tar -xz -C $ZFS_DIR \
+			    --strip-components=1 || fail
+			echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t"
+		fi
+	fi
+done
+printf "\n"
+
+# Build the listed tags
+printf "%-16s" "Build SPL"
+for TAG in $SRC_TAGS; do
+	src_set_vars $TAG
+
+	if [ -f $SPL_DIR/module/spl/spl.ko ]; then
+		skip_nonewline
+	elif [ "$SPL_TAG" = "installed" ]; then
+		skip_nonewline
+	else
+		cd $SPL_DIR
+		make distclean &>/dev/null
+		sh ./autogen.sh &>/dev/null || fail
+		./configure &>/dev/null || fail
+		make -s -j$CPUS &>/dev/null || fail
+		pass_nonewline
+	fi
+done
+printf "\n"
+
+# Build the listed tags
+printf "%-16s" "Build ZFS"
+for TAG in $SRC_TAGS; do
+	src_set_vars $TAG
+
+	if [ -f $ZFS_DIR/module/zfs/zfs.ko ]; then
+		skip_nonewline
+	elif [ "$ZFS_TAG" = "installed" ]; then
+		skip_nonewline
+	else
+		cd $ZFS_DIR
+		make distclean &>/dev/null
+		sh ./autogen.sh &>/dev/null || fail
+		./configure --with-spl=$SPL_DIR &>/dev/null || fail
+		make -s -j$CPUS &>/dev/null || fail
+		pass_nonewline
+	fi
+done
+printf "\n"
+echo "-----------------------------------------------------------------"
+
+# Either create a new pool using 'zpool create', or alternately restore an
+# existing pool from another ZFS implementation for compatibility testing.
+for TAG in $POOL_TAGS; do
+	pool_set_vars $TAG
+	SKIP=0
+
+	printf "%-16s" $POOL_TAG
+	rm -Rf $POOL_DIR
+	mkdir -p $POOL_DIR_PRISTINE
+
+	# Use the existing compressed image if available.
+	if [ -f $POOL_BZIP ]; then
+		tar -xjf $POOL_BZIP -C $POOL_DIR_PRISTINE \
+		    --strip-components=1 || fail
+	# Use the installed version to create the pool.
+	elif [ "$TAG" = "installed" ]; then
+		pool_create $TAG
+	# A source build is available to create the pool.
+	elif [ -d $POOL_DIR_SRC ]; then
+		pool_create $TAG
+	else
+		SKIP=1
+	fi
+
+	# Verify 'zpool import' works for all listed source versions.
+	for TAG in $SRC_TAGS; do
+
+		if [ $SKIP -eq 1 ]; then
+			skip_nonewline
+			continue
+		fi
+
+		src_set_vars $TAG
+		if [ "$TAG" != "installed" ]; then
+			cd $ZFS_DIR
+		fi
+		$ZFS_SH zfs="spa_config_path=$POOL_DIR_COPY"
+
+		cp -a --sparse=always $POOL_DIR_PRISTINE $POOL_DIR_COPY || fail
+		POOL_NAME=`$ZPOOL_CMD import -d $POOL_DIR_COPY | \
+		    awk '/pool:/ { print $2; exit 0 }'`
+
+		$ZPOOL_CMD import -N -d $POOL_DIR_COPY $POOL_NAME &>/dev/null
+		if [ $? -ne 0 ]; then
+			fail_nonewline
+			ERROR=1
+		else
+			$ZPOOL_CMD export $POOL_NAME || fail
+			pass_nonewline
+		fi
+
+		rm -Rf $POOL_DIR_COPY
+
+		$ZFS_SH -u || fail
+	done
+	printf "\n"
+done
+
+if [ $KEEP -eq 0 ]; then
+	rm -Rf $TEST_DIR
+fi
+
+exit $ERROR
From ed9e8368d3e6ed565174270bd7f5fb7caeac9727 Mon Sep 17 00:00:00 2001
From: Richard Yao
Date: Wed, 5 Feb 2014 17:15:35 -0500
Subject: [PATCH 2/5] Revert changes to zbookmark_t

Commit 1421c89142376bfd41e4de22ed7c7846b9e41f95 added a field to
zbookmark_t that unintentionally caused a disk format change.  This
negatively affected backward compatibility and platform portability.
Therefore, this field is being removed.

The functionality that the field enabled is left unimplemented until a
later patch, which will reimplement it in a way that does not affect
the disk format.
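To make the format change concrete, the following stand-alone C sketch
(not part of the patch; the type names are simplified stand-ins for the
real zbookmark_t) shows how the extra pointer field grows the structure
from four to five 64-bit words on 64-bit systems, which is what altered
the on-disk layout of structures embedding it:

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-ins for the original and modified layouts. */
    typedef struct {
    	uint64_t zb_objset;
    	uint64_t zb_object;
    	int64_t  zb_level;
    	uint64_t zb_blkid;
    } zbookmark_orig_t;		/* 32 bytes: 4 on-disk words */

    typedef struct {
    	uint64_t zb_objset;
    	uint64_t zb_object;
    	int64_t  zb_level;
    	uint64_t zb_blkid;
    	char	*zb_func;	/* in-memory pointer written to disk */
    } zbookmark_bad_t;		/* 40 bytes on 64-bit: 5 words */

    int
    main(void)
    {
    	/* dsl_scan_phys_t embeds a bookmark, so its word count grew too. */
    	printf("orig: %zu words, bad: %zu words\n",
    	    sizeof (zbookmark_orig_t) / sizeof (uint64_t),
    	    sizeof (zbookmark_bad_t) / sizeof (uint64_t));
    	return (0);
    }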
Signed-off-by: Richard Yao
Signed-off-by: Tim Chase
Signed-off-by: Brian Behlendorf
Issue #2094
---
 include/sys/zio.h      | 2 --
 module/zfs/spa_stats.c | 1 -
 2 files changed, 3 deletions(-)

diff --git a/include/sys/zio.h b/include/sys/zio.h
index d4350badc100..129e2bcb9b33 100644
--- a/include/sys/zio.h
+++ b/include/sys/zio.h
@@ -260,7 +260,6 @@ struct zbookmark {
 	uint64_t	zb_object;
 	int64_t		zb_level;
 	uint64_t	zb_blkid;
-	char		*zb_func;
 };
 
 #define	SET_BOOKMARK(zb, objset, object, level, blkid)	\
@@ -269,7 +268,6 @@ struct zbookmark {
 	(zb)->zb_object = object;	\
 	(zb)->zb_level = level;		\
 	(zb)->zb_blkid = blkid;		\
-	(zb)->zb_func = FTAG;		\
 }
 
 #define	ZB_DESTROYED_OBJSET	(-1ULL)
diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c
index a94fecfe87f8..a35f5df65609 100644
--- a/module/zfs/spa_stats.c
+++ b/module/zfs/spa_stats.c
@@ -207,7 +207,6 @@ spa_read_history_add(spa_t *spa, const zbookmark_t *zb, uint32_t aflags)
 		return;
 
 	srh = kmem_zalloc(sizeof (spa_read_history_t), KM_PUSHPAGE);
-	strlcpy(srh->origin, zb->zb_func, sizeof (srh->origin));
 	strlcpy(srh->comm, getcomm(), sizeof (srh->comm));
 	srh->start = gethrtime();
 	srh->objset = zb->zb_objset;
From 731782ec316656c0d1b78d6c9f0194d35ebb7eff Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Thu, 20 Feb 2014 19:42:15 -0800
Subject: [PATCH 3/5] Use expected zpool_status_t type

Both the zpool_import_status() and zpool_get_status() functions return
the zpool_status_t enum.  This explicit type should be used rather than
the more generic int type.

This patch makes no functional change and should only be considered
code cleanup.  It happens to have been done in the context of #2094
because that's when I noticed this issue.
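A minimal stand-alone illustration of the cleanup described above, using
simplified stand-in types rather than the actual libzfs declarations:

    #include <stdio.h>

    /* Simplified stand-in for the libzfs status enum. */
    typedef enum {
    	ZPOOL_STATUS_CORRUPT_POOL,
    	ZPOOL_STATUS_OK
    } zpool_status_t;

    static zpool_status_t
    example_import_status(void)
    {
    	return (ZPOOL_STATUS_OK);
    }

    int
    main(void)
    {
    	/* Before: the enum return value silently decayed to an int. */
    	int reason_old = example_import_status();

    	/* After: the explicit enum type documents the contract. */
    	zpool_status_t reason_new = example_import_status();

    	return (reason_old == (int)reason_new ? 0 : 1);
    }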
Signed-off-by: Brian Behlendorf
Signed-off-by: Tim Chase
Signed-off-by: Richard Yao

From: Brian Behlendorf
Date: Thu, 20 Feb 2014 19:57:17 -0800
Subject: [PATCH 4/5] Add generic errata infrastructure

From time to time it may be necessary to inform the pool administrator
about an errata which impacts their pool.  These errata will be shown
to the administrator through the 'zpool status' and 'zpool import'
output as appropriate.  The errata must clearly describe the issue
detected, how the pool is impacted, and what action should be taken to
resolve the situation.  Additional information for each errata will be
provided at http://zfsonlinux.org/msg/ZFS-8000-ER.

To accomplish the above, this patch adds the required infrastructure to
allow the kernel modules to notify the utilities that an errata has
been detected.  This is done through the ZPOOL_CONFIG_ERRATA uint64_t
which has been added to the pool configuration nvlist.

To add a new errata the following changes must be made (a minimal
sketch follows the list):

* A new errata identifier must be assigned by adding a new enum value
  to the zpool_errata_t type.  New enums must be added to the end to
  preserve the existing ordering.

* Code must be added to detect the issue.  This does not strictly need
  to be done at pool import time, but doing so will make the errata
  visible in 'zpool import' as well as 'zpool status'.  Once detected,
  the spa->spa_errata member should be set to the new enum.

* If possible, code should be added to clear the spa->spa_errata member
  once the errata has been resolved.

* The show_import() and status_callback() functions must be updated to
  include an informational message describing the errata.  This should
  include an action message describing what an administrator should do
  to address the errata.

* The documentation at http://zfsonlinux.org/msg/ZFS-8000-ER must be
  updated to describe the errata.  This space can be used to provide as
  much additional information as needed to fully describe the errata.
  A link to this documentation will be automatically generated in the
  output of 'zpool import' and 'zpool status'.
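As a rough sketch of the first three steps (hypothetical names and
stand-in types, not the real kernel structures; ZPOOL_ERRATA_EXAMPLE is
invented for illustration):

    #include <stdint.h>

    /* Stand-ins for the kernel types used by the errata mechanism. */
    typedef enum zpool_errata {
    	ZPOOL_ERRATA_NONE,
    	ZPOOL_ERRATA_EXAMPLE,	/* step 1: new identifiers go at the end */
    } zpool_errata_t;

    typedef struct spa {
    	uint64_t spa_errata;
    } spa_t;

    /*
     * Step 2: detect the issue, ideally at import time so that both
     * 'zpool import' and 'zpool status' can report it.
     */
    static void
    example_errata_detect(spa_t *spa, int issue_found)
    {
    	if (issue_found)
    		spa->spa_errata = ZPOOL_ERRATA_EXAMPLE;
    }

    /* Step 3: clear the errata once the condition has been resolved. */
    static void
    example_errata_resolve(spa_t *spa)
    {
    	spa->spa_errata = ZPOOL_ERRATA_NONE;
    }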
Original-idea-by: Tim Chase
Signed-off-by: Brian Behlendorf
Signed-off-by: Tim Chase
Signed-off-by: Richard Yao

diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c
--- a/cmd/zpool/zpool_main.c
+++ b/cmd/zpool/zpool_main.c
 	health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
 
-	reason = zpool_import_status(config, &msgid);
+	reason = zpool_import_status(config, &msgid, &errata);
 
 	(void) printf(gettext("   pool: %s\n"), name);
 	(void) printf(gettext("     id: %llu\n"), (u_longlong_t)guid);
@@ -1715,6 +1716,11 @@ show_import(nvlist_t *config)
 		    "resilvered.\n"));
 		break;
 
+	case ZPOOL_STATUS_ERRATA:
+		(void) printf(gettext(" status: Errata #%d detected.\n"),
+		    errata);
+		break;
+
 	default:
 		/*
 		 * No other status can be seen when importing pools.
@@ -1736,6 +1742,17 @@ show_import(nvlist_t *config)
 			(void) printf(gettext(" action: The pool can be "
 			    "imported using its name or numeric "
 			    "identifier and\n\tthe '-f' flag.\n"));
+		} else if (reason == ZPOOL_STATUS_ERRATA) {
+			switch (errata) {
+			case ZPOOL_ERRATA_NONE:
+				break;
+
+			default:
+				/*
+				 * All errata must contain an action message.
+				 */
+				assert(0);
+			}
 		} else {
 			(void) printf(gettext(" action: The pool can be "
 			    "imported using its name or numeric "
@@ -4126,12 +4143,13 @@ status_callback(zpool_handle_t *zhp, void *data)
 	nvlist_t *config, *nvroot;
 	char *msgid;
 	zpool_status_t reason;
+	zpool_errata_t errata;
 	const char *health;
 	uint_t c;
 	vdev_stat_t *vs;
 
 	config = zpool_get_config(zhp, NULL);
-	reason = zpool_get_status(zhp, &msgid);
+	reason = zpool_get_status(zhp, &msgid, &errata);
 
 	cbp->cb_count++;
 
@@ -4349,6 +4367,23 @@ status_callback(zpool_handle_t *zhp, void *data)
 		    "'zpool clear'.\n"));
 		break;
 
+	case ZPOOL_STATUS_ERRATA:
+		(void) printf(gettext("status: Errata #%d detected.\n"),
+		    errata);
+
+		switch (errata) {
+		case ZPOOL_ERRATA_NONE:
+			break;
+
+		default:
+			/*
+			 * All errata which allow the pool to be imported
+			 * must contain an action message.
+			 */
+			assert(0);
+		}
+		break;
+
 	default:
 		/*
 		 * The remaining errors can't actually be generated, yet.
diff --git a/include/libzfs.h b/include/libzfs.h
index 742f39f944e9..55dd34c99de1 100644
--- a/include/libzfs.h
+++ b/include/libzfs.h
@@ -312,6 +312,7 @@ typedef enum {
 	ZPOOL_STATUS_IO_FAILURE_WAIT,	/* failed I/O, failmode 'wait' */
 	ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
 	ZPOOL_STATUS_BAD_LOG,		/* cannot read log chain(s) */
+	ZPOOL_STATUS_ERRATA,		/* informational errata available */
 
 	/*
 	 * If the pool has unsupported features but can still be opened in
@@ -347,8 +348,10 @@ typedef enum {
 	ZPOOL_STATUS_OK
 } zpool_status_t;
 
-extern zpool_status_t zpool_get_status(zpool_handle_t *, char **);
-extern zpool_status_t zpool_import_status(nvlist_t *, char **);
+extern zpool_status_t zpool_get_status(zpool_handle_t *, char **,
+    zpool_errata_t *);
+extern zpool_status_t zpool_import_status(nvlist_t *, char **,
+    zpool_errata_t *);
 extern void zpool_dump_ddt(const ddt_stat_t *dds, const ddt_histogram_t *ddh);
 
 /*
diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h
index c54721155a85..50d099fc990c 100644
--- a/include/sys/fs/zfs.h
+++ b/include/sys/fs/zfs.h
@@ -548,6 +548,7 @@ typedef struct zpool_rewind_policy {
 #define	ZPOOL_CONFIG_CAN_RDONLY	"can_rdonly"	/* not stored on disk */
 #define	ZPOOL_CONFIG_FEATURES_FOR_READ	"features_for_read"
 #define	ZPOOL_CONFIG_FEATURE_STATS	"feature_stats"	/* not stored on disk */
+#define	ZPOOL_CONFIG_ERRATA	"errata"	/* not stored on disk */
 
 /*
  * The persistent vdev state is stored as separate values rather than a single
  * 'vdev_state' entry. This is because a device can be in multiple states, such
@@ -704,6 +705,15 @@ typedef enum dsl_scan_state {
 	DSS_NUM_STATES
 } dsl_scan_state_t;
 
+/*
+ * Errata described by http://zfsonlinux.org/msg/ZFS-8000-ER.  The ordering
+ * of this enum must be maintained to ensure the errata identifiers map to
+ * the correct documentation.  New errata may only be appended to the list
+ * and must contain corresponding documentation at the above link.
+ */
+typedef enum zpool_errata {
+	ZPOOL_ERRATA_NONE,
+} zpool_errata_t;
+
 /*
  * Vdev statistics. Note: all fields should be 64-bit because this
diff --git a/include/sys/spa_impl.h b/include/sys/spa_impl.h
index 55515c1fc369..90a32d3f0694 100644
--- a/include/sys/spa_impl.h
+++ b/include/sys/spa_impl.h
@@ -236,6 +236,7 @@ struct spa {
 	uint64_t	spa_deadman_calls;	/* number of deadman calls */
 	hrtime_t	spa_sync_starttime;	/* starting time of spa_sync */
 	uint64_t	spa_deadman_synctime;	/* deadman expiration timer */
+	uint64_t	spa_errata;		/* errata issues detected */
 	spa_stats_t	spa_stats;		/* assorted spa statistics */
 
 	/*
diff --git a/lib/libzfs/libzfs_status.c b/lib/libzfs/libzfs_status.c
index 0ef5f36d69f3..534ff853a5bc 100644
--- a/lib/libzfs/libzfs_status.c
+++ b/lib/libzfs/libzfs_status.c
@@ -67,6 +67,7 @@ static char *zfs_msgid_table[] = {
 	"ZFS-8000-HC",
 	"ZFS-8000-JQ",
 	"ZFS-8000-K4",
+	"ZFS-8000-ER",
 };
 
 #define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
@@ -182,7 +183,7 @@ find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
  * only picks the most damaging of all the current errors to report.
  */
 static zpool_status_t
-check_status(nvlist_t *config, boolean_t isimport)
+check_status(nvlist_t *config, boolean_t isimport, zpool_errata_t *erratap)
 {
 	nvlist_t *nvroot;
 	vdev_stat_t *vs;
@@ -193,6 +194,7 @@ check_status(nvlist_t *config, boolean_t isimport)
 	uint64_t stateval;
 	uint64_t suspended;
 	uint64_t hostid = 0;
+	uint64_t errata = 0;
 	unsigned long system_hostid = gethostid() & 0xffffffff;
 
 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
@@ -356,13 +358,22 @@ check_status(nvlist_t *config, boolean_t isimport)
 		}
 	}
 
+	/*
+	 * Informational errata available.
+	 */
+	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRATA, &errata);
+	if (errata) {
+		*erratap = errata;
+		return (ZPOOL_STATUS_ERRATA);
+	}
+
 	return (ZPOOL_STATUS_OK);
 }
 
 zpool_status_t
-zpool_get_status(zpool_handle_t *zhp, char **msgid)
+zpool_get_status(zpool_handle_t *zhp, char **msgid, zpool_errata_t *errata)
 {
-	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
+	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE, errata);
 
 	if (ret >= NMSGID)
 		*msgid = NULL;
@@ -373,9 +384,9 @@ zpool_get_status(zpool_handle_t *zhp, char **msgid)
 }
 
 zpool_status_t
-zpool_import_status(nvlist_t *config, char **msgid)
+zpool_import_status(nvlist_t *config, char **msgid, zpool_errata_t *errata)
 {
-	zpool_status_t ret = check_status(config, B_TRUE);
+	zpool_status_t ret = check_status(config, B_TRUE, errata);
 
 	if (ret >= NMSGID)
 		*msgid = NULL;
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 7052eec4abab..9e7a7b785a32 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -4083,6 +4083,8 @@ spa_tryimport(nvlist_t *tryconfig)
 		    spa->spa_uberblock.ub_timestamp) == 0);
 		VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
 		    spa->spa_load_info) == 0);
+		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
+		    spa->spa_errata) == 0);
 
 		/*
 		 * If the bootfs property exists on this pool then we
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index 9efa053616bc..5b95a8e811e5 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -365,6 +365,8 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
 	    txg) == 0);
 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
 	    spa_guid(spa)) == 0);
+	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
+	    spa->spa_errata) == 0);
 	VERIFY(spa->spa_comment == NULL || nvlist_add_string(config,
 	    ZPOOL_CONFIG_COMMENT, spa->spa_comment) == 0);
From 4f2dcb3eee0e6c922f1f54955c3e15fd20b3b4f6 Mon Sep 17 00:00:00 2001
From: Richard Yao
Date: Thu, 20 Feb 2014 20:28:33 -0800
Subject: [PATCH 5/5] Add erratum for issue #2094

ZoL commit 1421c89 unintentionally changed the disk format in a
forward-compatible, but not backward compatible way.  This was
accomplished by adding an entry to zbookmark_t, which is included in a
couple of on-disk structures.  That led to the creation of pools with
incorrect dsl_scan_phys_t objects that could only be imported by
versions of ZoL containing that commit.  Such pools cannot be imported
by other versions of ZFS or past versions of ZoL.

The additional field has been removed by the previous commit.  However,
affected pools must be imported and scrubbed using a version of ZoL
with this commit applied.  This will return the pools to a state in
which they may be imported by other implementations.

The 'zpool import' or 'zpool status' command can be used to determine
if a pool is impacted.  A message similar to one of the following means
your pool must be scrubbed to restore compatibility.

    $ zpool import
       pool: zol-0.6.2-173
         id: 1165955789558693437
      state: ONLINE
     status: Errata #1 detected.
     action: The pool can be imported using its name or numeric identifier,
             however there is a compatibility issue which should be corrected
             by running 'zpool scrub'
        see: http://zfsonlinux.org/msg/ZFS-8000-ER
     config:
             ...

    $ zpool status
      pool: zol-0.6.2-173
     state: ONLINE
      scan: pool compatibility issue detected.
       see: https://github.com/zfsonlinux/zfs/issues/2094
    action: To correct the issue run 'zpool scrub'.
    config:
            ...

If there is an async destroy in progress, 'zpool import' will prevent
the pool from being imported.  Further advice on how to proceed will be
provided by the error message as follows.

    $ zpool import
       pool: zol-0.6.2-173
         id: 1165955789558693437
      state: ONLINE
     status: Errata #2 detected.
     action: The pool can not be imported with this version of ZFS due to an
             active asynchronous destroy.  Revert to an earlier version and
             allow the destroy to complete before updating.
        see: http://zfsonlinux.org/msg/ZFS-8000-ER
     config:
             ...

Pools affected by the damaged dsl_scan_phys_t can be detected prior to
an upgrade by running the following command as root:

    zdb -dddd poolname 1 | grep -P '^\t\tscan = ' | sed -e 's;scan = ;;' | wc -w

Note that `poolname` must be replaced with the name of the pool you wish
to check.  A value of 25 indicates the dsl_scan_phys_t has been damaged.
A value of 24 indicates that the dsl_scan_phys_t is normal.  A value of
0 indicates that there has never been a scrub run on the pool.

The regression caused by the change to zbookmark_t never made it into a
tagged release, Gentoo backports, Ubuntu, Debian, Fedora, or EPEL stable
repositories.  Only those using the HEAD version directly from Github
after the 0.6.2 tag but before the 0.6.3 tag are affected.

This patch does have one limitation that should be mentioned.  It will
not detect errata #2 on a pool unless errata #1 is also present.  It is
expected that this will not be a significant problem because pools
impacted by errata #2 have a high probability of also being impacted by
errata #1.  End users can ensure they do not hit this unlikely case by
waiting for all asynchronous destroy operations to complete before
updating ZoL.  The presence of any background destroys on any imported
pools can be checked by running `zpool get freeing` as root.  This will
display a non-zero value for any pool with an active asynchronous
destroy.

Lastly, it is expected that no user data has been lost as a result of
this erratum.

Original-patch-by: Tim Chase
Reworked-by: Brian Behlendorf
Signed-off-by: Tim Chase
Signed-off-by: Richard Yao
Signed-off-by: Brian Behlendorf
Issue #2094
---
 cmd/zpool/zpool_main.c | 22 ++++++++++++++++++++++
 include/sys/dsl_scan.h |  2 ++
 include/sys/fs/zfs.h   |  2 ++
 module/zfs/dsl_scan.c  | 39 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 65 insertions(+)

diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c
index d496d0c72e26..41e13af9a32b 100644
--- a/cmd/zpool/zpool_main.c
+++ b/cmd/zpool/zpool_main.c
@@ -1747,6 +1747,23 @@ show_import(nvlist_t *config)
 			case ZPOOL_ERRATA_NONE:
 				break;
 
+			case ZPOOL_ERRATA_ZOL_2094_SCRUB:
+				(void) printf(gettext(" action: The pool can "
+				    "be imported using its name or numeric "
+				    "identifier,\n\thowever there is a compat"
+				    "ibility issue which should be corrected"
+				    "\n\tby running 'zpool scrub'\n"));
+				break;
+
+			case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
+				(void) printf(gettext(" action: The pool can"
+				    "not be imported with this version of ZFS "
+				    "due to\n\tan active asynchronous destroy. "
+				    "Revert to an earlier version\n\tand "
+				    "allow the destroy to complete before "
+				    "updating.\n"));
+				break;
+
 			default:
 				/*
 				 * All errata must contain an action message.
@@ -4375,6 +4392,11 @@ status_callback(zpool_handle_t *zhp, void *data)
 		case ZPOOL_ERRATA_NONE:
 			break;
 
+		case ZPOOL_ERRATA_ZOL_2094_SCRUB:
+			(void) printf(gettext("action: To correct the issue "
+			    "run 'zpool scrub'.\n"));
+			break;
+
 		default:
 			/*
 			 * All errata which allow the pool to be imported
diff --git a/include/sys/dsl_scan.h b/include/sys/dsl_scan.h
index bf8c5ac824a1..bcb85d67d38e 100644
--- a/include/sys/dsl_scan.h
+++ b/include/sys/dsl_scan.h
@@ -72,6 +72,8 @@ typedef enum dsl_scan_flags {
 	DSF_VISIT_DS_AGAIN = 1<<0,
 } dsl_scan_flags_t;
 
+#define	DSL_SCAN_FLAGS_MASK	(DSF_VISIT_DS_AGAIN)
+
 /*
  * Every pool will have one dsl_scan_t and this structure will contain
  * in-memory information about the scan and a pointer to the on-disk
diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h
index 50d099fc990c..df5692d14b1c 100644
--- a/include/sys/fs/zfs.h
+++ b/include/sys/fs/zfs.h
@@ -713,6 +713,8 @@ typedef enum dsl_scan_state {
  */
 typedef enum zpool_errata {
 	ZPOOL_ERRATA_NONE,
+	ZPOOL_ERRATA_ZOL_2094_SCRUB,
+	ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY,
 } zpool_errata_t;
 
 /*
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index ea04507813f7..7807f8485a86 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -123,6 +123,42 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
 	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
 	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
 	    &scn->scn_phys);
+
+	/*
+	 * Detect if the pool contains the signature of #2094.  If it
+	 * does, properly update the scn->scn_phys structure and notify
+	 * the administrator by setting an errata for the pool.
+	 */
+	if (err == EOVERFLOW) {
+		uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
+		VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
+		VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
+		    (23 * sizeof (uint64_t)));
+
+		err = zap_lookup(dp->dp_meta_objset,
+		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
+		    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
+		if (err == 0) {
+			uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
+
+			if (overflow & ~DSL_SCAN_FLAGS_MASK ||
+			    scn->scn_async_destroying) {
+				spa->spa_errata =
+				    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
+				return (EOVERFLOW);
+			}
+
+			bcopy(zaptmp, &scn->scn_phys,
+			    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
+			scn->scn_phys.scn_flags = overflow;
+
+			/* Required scrub already in progress. */
+			if (scn->scn_phys.scn_state == DSS_FINISHED ||
+			    scn->scn_phys.scn_state == DSS_CANCELED)
+				spa->spa_errata =
+				    ZPOOL_ERRATA_ZOL_2094_SCRUB;
+		}
+	}
+
 	if (err == ENOENT)
 		return (0);
 	else if (err)
@@ -319,6 +355,9 @@ dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
 	}
 
 	scn->scn_phys.scn_end_time = gethrestime_sec();
+
+	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
+		spa->spa_errata = 0;
 }
 
 /* ARGSUSED */