diff --git a/cmd/zed/agents/zfs_mod.c b/cmd/zed/agents/zfs_mod.c
index a8d084bb4bd3..50d1932f67a9 100644
--- a/cmd/zed/agents/zfs_mod.c
+++ b/cmd/zed/agents/zfs_mod.c
@@ -382,6 +382,15 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
if (is_mpath_wholedisk) {
/* Don't label device mapper or multipath disks. */
+ zed_log_msg(LOG_INFO,
+ " it's a multipath wholedisk, don't label");
+ if (zpool_prepare_disk(zhp, vdev, "autoreplace") != 0) {
+ zed_log_msg(LOG_INFO,
+ " zpool_prepare_disk: could not "
+ "prepare '%s' (%s)", fullpath,
+ libzfs_error_description(g_zfshdl));
+ return;
+ }
} else if (!labeled) {
/*
* we're auto-replacing a raw disk, so label it first
@@ -404,8 +413,10 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
* If this is a request to label a whole disk, then attempt to
* write out the label.
*/
- if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
- zed_log_msg(LOG_INFO, " zpool_label_disk: could not "
+ if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
+ vdev, "autoreplace") != 0) {
+ zed_log_msg(LOG_INFO,
+ " zpool_prepare_and_label_disk: could not "
"label '%s' (%s)", leafname,
libzfs_error_description(g_zfshdl));
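
Note: with the hunks above, ZED now runs the prepare hook even for multipath
wholedisks (which are never labeled), passing VDEV_PREPARE=autoreplace. A
minimal sketch of a site-local zfs_prepare_disk that gates on that case (the
firmware-update command is a placeholder, not part of this change):

    #!/bin/sh
    # Hypothetical prepare script: act only when ZED is autoreplacing.
    # VDEV_PREPARE and VDEV_UPATH are exported by libzfs before the call;
    # vendor_fw_update stands in for a real vendor tool.
    if [ "$VDEV_PREPARE" = "autoreplace" ] && [ -n "$VDEV_UPATH" ]; then
        /usr/sbin/vendor_fw_update "$VDEV_UPATH" || exit 1
    fi
    exit 0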
diff --git a/cmd/zpool/zpool_iter.c b/cmd/zpool/zpool_iter.c
index 7c6549b0ae54..506b529dce48 100644
--- a/cmd/zpool/zpool_iter.c
+++ b/cmd/zpool/zpool_iter.c
@@ -443,37 +443,21 @@ vdev_run_cmd(vdev_cmd_data_t *data, char *cmd)
{
int rc;
char *argv[2] = {cmd};
- char *env[5] = {(char *)"PATH=/bin:/sbin:/usr/bin:/usr/sbin"};
+ char **env;
char **lines = NULL;
int lines_cnt = 0;
-	int i;
- /* Setup our custom environment variables */
- rc = asprintf(&env[1], "VDEV_PATH=%s",
- data->path ? data->path : "");
- if (rc == -1) {
- env[1] = NULL;
+ env = zpool_vdev_script_alloc_env(data->pool, data->path, data->upath,
+ data->vdev_enc_sysfs_path, NULL, NULL);
+ if (env == NULL)
goto out;
- }
-
- rc = asprintf(&env[2], "VDEV_UPATH=%s",
- data->upath ? data->upath : "");
- if (rc == -1) {
- env[2] = NULL;
- goto out;
- }
-
- rc = asprintf(&env[3], "VDEV_ENC_SYSFS_PATH=%s",
- data->vdev_enc_sysfs_path ?
- data->vdev_enc_sysfs_path : "");
- if (rc == -1) {
- env[3] = NULL;
- goto out;
- }
/* Run the command */
rc = libzfs_run_process_get_stdout_nopath(cmd, argv, env, &lines,
&lines_cnt);
+
+ zpool_vdev_script_free_env(env);
+
if (rc != 0)
goto out;
@@ -485,10 +470,6 @@ vdev_run_cmd(vdev_cmd_data_t *data, char *cmd)
out:
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
-
- /* Start with i = 1 since env[0] was statically allocated */
- for (i = 1; i < ARRAY_SIZE(env); i++)
- free(env[i]);
}
/*
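
Note: routing vdev_run_cmd() through zpool_vdev_script_alloc_env() also
exports POOL_NAME to zpool.d scripts, which the old hand-rolled environment
did not. A sketch of a zpool.d-style column script using it (hypothetical
script, not part of this change):

    #!/bin/sh
    # Print a "pool" column; zpool.d scripts emit name=value pairs.
    echo "pool=${POOL_NAME:-unknown}"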
diff --git a/cmd/zpool/zpool_util.h b/cmd/zpool/zpool_util.h
index b35dea0cd449..db8e631dc6be 100644
--- a/cmd/zpool/zpool_util.h
+++ b/cmd/zpool/zpool_util.h
@@ -126,6 +126,10 @@ vdev_cmd_data_list_t *all_pools_for_each_vdev_run(int argc, char **argv,
void free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl);
+void free_vdev_cmd_data(vdev_cmd_data_t *data);
+
+int vdev_run_cmd_simple(char *path, char *cmd);
+
int check_device(const char *path, boolean_t force,
boolean_t isspare, boolean_t iswholedisk);
boolean_t check_sector_size_database(char *path, int *sector_size);
diff --git a/cmd/zpool/zpool_vdev.c b/cmd/zpool/zpool_vdev.c
index 99a521aa2a28..ae793ca743f9 100644
--- a/cmd/zpool/zpool_vdev.c
+++ b/cmd/zpool/zpool_vdev.c
@@ -947,7 +947,7 @@ zero_label(const char *path)
* need to get the devid after we label the disk.
*/
static int
-make_disks(zpool_handle_t *zhp, nvlist_t *nv)
+make_disks(zpool_handle_t *zhp, nvlist_t *nv, boolean_t replacing)
{
nvlist_t **child;
uint_t c, children;
@@ -1043,8 +1043,13 @@ make_disks(zpool_handle_t *zhp, nvlist_t *nv)
/*
* When labeling a pool the raw device node name
* is provided as it appears under /dev/.
+ *
+ * Note that 'zhp' will be NULL when we're creating a
+ * pool.
*/
- if (zpool_label_disk(g_zfs, zhp, devnode) == -1)
+ if (zpool_prepare_and_label_disk(g_zfs, zhp, devnode,
+ nv, zhp == NULL ? "create" :
+ replacing ? "replace" : "add") == -1)
return (-1);
/*
@@ -1082,19 +1087,19 @@ make_disks(zpool_handle_t *zhp, nvlist_t *nv)
}
for (c = 0; c < children; c++)
- if ((ret = make_disks(zhp, child[c])) != 0)
+ if ((ret = make_disks(zhp, child[c], replacing)) != 0)
return (ret);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0)
for (c = 0; c < children; c++)
- if ((ret = make_disks(zhp, child[c])) != 0)
+ if ((ret = make_disks(zhp, child[c], replacing)) != 0)
return (ret);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0)
for (c = 0; c < children; c++)
- if ((ret = make_disks(zhp, child[c])) != 0)
+ if ((ret = make_disks(zhp, child[c], replacing)) != 0)
return (ret);
return (0);
@@ -1752,7 +1757,7 @@ split_mirror_vdev(zpool_handle_t *zhp, char *newname, nvlist_t *props,
return (NULL);
}
- if (!flags.dryrun && make_disks(zhp, newroot) != 0) {
+ if (!flags.dryrun && make_disks(zhp, newroot, B_FALSE) != 0) {
nvlist_free(newroot);
return (NULL);
}
@@ -1873,7 +1878,7 @@ make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force, int check_rep,
/*
* Run through the vdev specification and label any whole disks found.
*/
- if (!dryrun && make_disks(zhp, newroot) != 0) {
+ if (!dryrun && make_disks(zhp, newroot, replacing) != 0) {
nvlist_free(newroot);
return (NULL);
}
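
Note: threading `replacing` through make_disks() means the hook can now tell
why it was invoked. A sketch of dispatching on VDEV_PREPARE (hypothetical
script, not part of this change):

    #!/bin/sh
    # VDEV_PREPARE is "create", "add", "replace", or "autoreplace".
    case "$VDEV_PREPARE" in
    replace|autoreplace)
        logger -t zfs_prepare_disk "replacing $VDEV_PATH in ${POOL_NAME:-?}"
        ;;
    create|add)
        : # nothing special for new vdevs
        ;;
    esac
    exit 0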
diff --git a/include/libzfs.h b/include/libzfs.h
index a7037e3e6266..5aab9dbddc38 100644
--- a/include/libzfs.h
+++ b/include/libzfs.h
@@ -325,6 +325,14 @@ _LIBZFS_H nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
boolean_t *, boolean_t *, boolean_t *);
_LIBZFS_H int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *,
const char *);
+_LIBZFS_H int zpool_prepare_disk(zpool_handle_t *zhp, nvlist_t *vdev_nv,
+ const char *prepare_str);
+_LIBZFS_H int zpool_prepare_and_label_disk(libzfs_handle_t *hdl,
+ zpool_handle_t *, const char *, nvlist_t *vdev_nv, const char *prepare_str);
+_LIBZFS_H char **zpool_vdev_script_alloc_env(const char *pool_name,
+ const char *vdev_path, const char *vdev_upath,
+ const char *vdev_enc_sysfs_path, const char *opt_key, const char *opt_val);
+_LIBZFS_H void zpool_vdev_script_free_env(char **env);
_LIBZFS_H uint64_t zpool_vdev_path_to_guid(zpool_handle_t *zhp,
const char *path);
diff --git a/lib/libzfs/libzfs.abi b/lib/libzfs/libzfs.abi
index 0a8e9bcbd74d..907b0191f75b 100644
--- a/lib/libzfs/libzfs.abi
+++ b/lib/libzfs/libzfs.abi
@@ -514,6 +514,8 @@
+    <elf-symbol name='zpool_prepare_and_label_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+    <elf-symbol name='zpool_prepare_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@@ -561,6 +563,8 @@
+    <elf-symbol name='zpool_vdev_script_alloc_env' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+    <elf-symbol name='zpool_vdev_script_free_env' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
diff --git a/lib/libzfs/os/linux/libzfs_pool_os.c b/lib/libzfs/os/linux/libzfs_pool_os.c
index 401151b1afb5..4975072254ec 100644
--- a/lib/libzfs/os/linux/libzfs_pool_os.c
+++ b/lib/libzfs/os/linux/libzfs_pool_os.c
@@ -338,3 +338,189 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name)
}
return (0);
}
+
+/* PATH + POOL_NAME + 3 VDEV_* vars + 1 optional var + NULL terminator = 7 */
+#define ZPOOL_VDEV_SCRIPT_ENV_COUNT 7
+
+/*
+ * There are a few places where ZFS will call external scripts (like the
+ * scripts in zpool.d/ and `zfs_prepare_disk`). These scripts are called with
+ * a reduced $PATH and some vdev-specific environment variables set. This
+ * function allocates and populates the environment variable array that is
+ * passed to these scripts. The caller must free the array with
+ * zpool_vdev_script_free_env() when done.
+ *
+ * The following env vars will be set (but value could be blank):
+ *
+ * POOL_NAME
+ * VDEV_PATH
+ * VDEV_UPATH
+ * VDEV_ENC_SYSFS_PATH
+ *
+ * In addition, if 'opt_key' is non-NULL, an extra environment variable named
+ * 'opt_key' will be set to 'opt_val'.
+ *
+ * Returns allocated env[] array on success, NULL otherwise.
+ */
+char **
+zpool_vdev_script_alloc_env(const char *pool_name,
+ const char *vdev_path, const char *vdev_upath,
+ const char *vdev_enc_sysfs_path, const char *opt_key, const char *opt_val)
+{
+ char **env = NULL;
+ int rc;
+
+ env = calloc(ZPOOL_VDEV_SCRIPT_ENV_COUNT, sizeof (*env));
+ if (!env)
+ return (NULL);
+
+ env[0] = strdup("PATH=/bin:/sbin:/usr/bin:/usr/sbin");
+ if (!env[0])
+ goto error;
+
+ /* Setup our custom environment variables */
+ rc = asprintf(&env[1], "POOL_NAME=%s", pool_name ? pool_name : "");
+ if (rc == -1) {
+ env[1] = NULL;
+ goto error;
+ }
+
+ rc = asprintf(&env[2], "VDEV_PATH=%s", vdev_path ? vdev_path : "");
+ if (rc == -1) {
+ env[2] = NULL;
+ goto error;
+ }
+
+ rc = asprintf(&env[3], "VDEV_UPATH=%s", vdev_upath ? vdev_upath : "");
+ if (rc == -1) {
+ env[3] = NULL;
+ goto error;
+ }
+
+ rc = asprintf(&env[4], "VDEV_ENC_SYSFS_PATH=%s",
+ vdev_enc_sysfs_path ? vdev_enc_sysfs_path : "");
+ if (rc == -1) {
+ env[4] = NULL;
+ goto error;
+ }
+
+ if (opt_key != NULL) {
+ rc = asprintf(&env[5], "%s=%s", opt_key,
+ opt_val ? opt_val : "");
+ if (rc == -1) {
+ env[5] = NULL;
+ goto error;
+ }
+ }
+
+ return (env);
+
+error:
+ for (int i = 0; i < ZPOOL_VDEV_SCRIPT_ENV_COUNT; i++)
+ free(env[i]);
+
+ free(env);
+
+ return (NULL);
+}
+
+/*
+ * Free the env[] array that was allocated by zpool_vdev_script_alloc_env().
+ */
+void
+zpool_vdev_script_free_env(char **env)
+{
+ for (int i = 0; i < ZPOOL_VDEV_SCRIPT_ENV_COUNT; i++)
+ free(env[i]);
+
+ free(env);
+}
+
+/*
+ * Prepare a disk by (optionally) running a program before labeling the disk.
+ * This can be useful for installing disk firmware or doing some pre-flight
+ * checks on the disk before it becomes part of the pool. The program run is
+ * located at LIBEXECDIR/zfs_prepare_disk (defaults to
+ * /usr/local/libexec/zfs/zfs_prepare_disk).
+ *
+ * Return 0 on success, non-zero on failure.
+ */
+int
+zpool_prepare_disk(zpool_handle_t *zhp, nvlist_t *vdev_nv,
+ const char *prepare_str)
+{
+ const char *script_path = LIBEXECDIR "/zfs_prepare_disk";
+ const char *pool_name;
+ int rc = 0;
+
+ /* Path to script and a NULL entry */
+ char *argv[2] = {(char *)script_path};
+ char **env = NULL;
+ const char *path = NULL, *enc_sysfs_path = NULL;
+ char *upath;
+
+ if (access(script_path, X_OK) != 0) {
+ /* No script, nothing to do */
+ return (0);
+ }
+
+ (void) nvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH, &path);
+ (void) nvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
+ &enc_sysfs_path);
+
+ upath = zfs_get_underlying_path(path);
+ pool_name = zhp ? zpool_get_name(zhp) : NULL;
+
+ env = zpool_vdev_script_alloc_env(pool_name, path, upath,
+ enc_sysfs_path, "VDEV_PREPARE", prepare_str);
+
+ free(upath);
+
+ if (env == NULL) {
+ return (ENOMEM);
+ }
+
+ rc = libzfs_run_process_get_stdout(script_path, argv, env, NULL, 0);
+
+ zpool_vdev_script_free_env(env);
+
+ return (rc);
+}
+
+/*
+ * Optionally run a script and then label a disk. The script can be used to
+ * prepare a disk for inclusion into the pool. For example, it might update
+ * the disk's firmware or check its health.
+ *
+ * The 'name' provided is the short name, stripped of any leading
+ * /dev path, and is passed to zpool_label_disk. vdev_nv is the nvlist for
+ * the vdev. prepare_str is a string that gets passed as the VDEV_PREPARE
+ * env variable to the script.
+ *
+ * The following env vars are passed to the script:
+ *
+ * POOL_NAME: The pool name (blank during zpool create)
+ * VDEV_PREPARE: Reason why the disk is being prepared for inclusion:
+ * "create", "add", "replace", or "autoreplace"
+ * VDEV_PATH: Path to the disk
+ * VDEV_UPATH: One of the 'underlying paths' to the disk. This is
+ * useful for DM devices.
+ * VDEV_ENC_SYSFS_PATH: Path to the disk's enclosure sysfs path, if available.
+ *
+ * Note, some of these values can be blank.
+ *
+ * Return 0 on success, non-zero otherwise.
+ */
+int
+zpool_prepare_and_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp,
+ const char *name, nvlist_t *vdev_nv, const char *prepare_str)
+{
+ int rc;
+
+ /* zhp will be NULL when creating a pool */
+ rc = zpool_prepare_disk(zhp, vdev_nv, prepare_str);
+ if (rc != 0)
+ return (rc);
+
+	return (zpool_label_disk(hdl, zhp, name));
+}
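
Note: a prepare script can be exercised outside of libzfs by approximating
the environment zpool_vdev_script_alloc_env() builds (values below are
examples only):

    # Manual smoke test of an installed prepare script.
    env -i PATH=/bin:/sbin:/usr/bin:/usr/sbin \
        POOL_NAME=tank VDEV_PREPARE=add \
        VDEV_PATH=/dev/sdb VDEV_UPATH=/dev/sdb VDEV_ENC_SYSFS_PATH= \
        /usr/local/libexec/zfs/zfs_prepare_disk
    echo "exit=$?"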
diff --git a/man/Makefile.am b/man/Makefile.am
index 36c1aede106e..45156571eec3 100644
--- a/man/Makefile.am
+++ b/man/Makefile.am
@@ -62,6 +62,7 @@ dist_man_MANS = \
%D%/man8/zfs-userspace.8 \
%D%/man8/zfs-wait.8 \
%D%/man8/zfs_ids_to_path.8 \
+ %D%/man8/zfs_prepare_disk.8 \
%D%/man8/zgenhostid.8 \
%D%/man8/zinject.8 \
%D%/man8/zpool.8 \
diff --git a/man/man8/zfs_prepare_disk.8 b/man/man8/zfs_prepare_disk.8
new file mode 100644
index 000000000000..921223393173
--- /dev/null
+++ b/man/man8/zfs_prepare_disk.8
@@ -0,0 +1,72 @@
+.\"
+.\" Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
+.\" Copyright (C) 2023 Lawrence Livermore National Security, LLC.
+.\" Refer to the OpenZFS git commit log for authoritative copyright attribution.
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License Version 1.0 (CDDL-1.0).
+.\" You can obtain a copy of the license from the top-level file
+.\" "OPENSOLARIS.LICENSE" or at <https://opensource.org/licenses/CDDL-1.0>.
+.\" You may not use this file except in compliance with the license.
+.\"
+.\" Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049)
+.\"
+.Dd August 30, 2023
+.Dt ZFS_PREPARE_DISK 8
+.Os
+.
+.Sh NAME
+.Nm zfs_prepare_disk
+.Nd special script that gets run before bringing a disk into a pool
+.Sh DESCRIPTION
+.Nm
+is an optional script that gets called by libzfs before bringing a disk into a
+pool.
+It can be modified by the user to run whatever commands are necessary to prepare
+a disk for inclusion into the pool.
+For example, users can add lines to
+.Nm zfs_prepare_disk
+to do things like update the drive's firmware or check the drive's health.
+.Nm zfs_prepare_disk
+is optional and can be removed if not needed.
+libzfs will look for
+.Nm zfs_prepare_disk
+in LIBEXECDIR (default /usr/local/libexec/zfs).
+.
+.Ss Properties
+.Nm zfs_prepare_disk
+will be passed the following environment variables:
+.sp
+.Bl -tag -compact -width "org.openzfs.systemd:required-by=unit[ unit]…"
+.
+.It Nm POOL_NAME
+.No Name of the pool
+.It Nm VDEV_PATH
+.No Path to the disk (like /dev/sda)
+.It Nm VDEV_PREPARE
+.No Reason why the disk is being prepared for inclusion
+('create', 'add', 'replace', or 'autoreplace').
+This can be useful if you only want the script to be run under certain actions.
+.It Nm VDEV_UPATH
+.No Path to one of the underlying devices for the
+disk.
+For multipath devices this will be one of the /dev/sd* paths to the disk.
+If the device is not a device mapper device, then
+.Nm VDEV_UPATH
+holds the same value as
+.Nm VDEV_PATH .
+.It Nm VDEV_ENC_SYSFS_PATH
+.No Path to the disk's enclosure sysfs path, if available
+.El
+.Pp
+Note that some of these variables may have a blank value.
+.Nm POOL_NAME
+is blank at pool creation time, for example.
+.Sh ENVIRONMENT
+.Nm zfs_prepare_disk
+runs with a limited $PATH.
+.Sh EXIT STATUS
+.Nm zfs_prepare_disk
+should return 0 on success, non-zero otherwise.
+If non-zero is returned, the disk will not be included in the pool.
+.
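
Note: per the EXIT STATUS section above, any non-zero exit keeps the disk out
of the pool. One possible health-gating script (assumes smartmontools is
installed; adjust for your environment):

    #!/bin/sh
    # Refuse disks that fail their SMART health check.
    if [ -n "$VDEV_UPATH" ] && command -v smartctl >/dev/null 2>&1; then
        smartctl -H "$VDEV_UPATH" >/dev/null 2>&1 || exit 1
    fi
    exit 0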
diff --git a/man/man8/zfs_prepare_disk.8.in b/man/man8/zfs_prepare_disk.8.in
new file mode 100644
index 000000000000..bf8d87e124af
--- /dev/null
+++ b/man/man8/zfs_prepare_disk.8.in
@@ -0,0 +1,72 @@
+.\"
+.\" Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
+.\" Copyright (C) 2023 Lawrence Livermore National Security, LLC.
+.\" Refer to the OpenZFS git commit log for authoritative copyright attribution.
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License Version 1.0 (CDDL-1.0).
+.\" You can obtain a copy of the license from the top-level file
+.\" "OPENSOLARIS.LICENSE" or at <https://opensource.org/licenses/CDDL-1.0>.
+.\" You may not use this file except in compliance with the license.
+.\"
+.\" Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049)
+.\"
+.Dd August 30, 2023
+.Dt ZFS_PREPARE_DISK 8
+.Os
+.
+.Sh NAME
+.Nm zfs_prepare_disk
+.Nd special script that gets run before bringing a disk into a pool
+.Sh DESCRIPTION
+.Nm
+is an optional script that gets called by libzfs before bringing a disk into a
+pool.
+It can be modified by the user to run whatever commands are necessary to prepare
+a disk for inclusion into the pool.
+For example, users can add lines to
+.Nm zfs_prepare_disk
+to do things like update the drive's firmware or check the drive's health.
+.Nm zfs_prepare_disk
+is optional and can be removed if not needed.
+libzfs will look for
+.Nm zfs_prepare_disk
+in LIBEXECDIR (default @zfsexecdir@).
+.
+.Ss Properties
+.Nm zfs_prepare_disk
+will be passed the following environment variables:
+.sp
+.Bl -tag -compact -width "org.openzfs.systemd:required-by=unit[ unit]…"
+.
+.It Nm POOL_NAME
+.No Name of the pool
+.It Nm VDEV_PATH
+.No Path to the disk (like /dev/sda)
+.It Nm VDEV_PREPARE
+.No Reason why the disk is being prepared for inclusion
+('create', 'add', 'replace', or 'autoreplace').
+This can be useful if you only want the script to be run under certain actions.
+.It Nm VDEV_UPATH
+.No Path to one of the underlying devices for the
+disk.
+For multipath devices this will be one of the /dev/sd* paths to the disk.
+If the device is not a device mapper device, then
+.Nm VDEV_UPATH
+holds the same value as
+.Nm VDEV_PATH .
+.It Nm VDEV_ENC_SYSFS_PATH
+.No Path to the disk's enclosure sysfs path, if available
+.El
+.Pp
+Note that some of these variables may have a blank value.
+.Nm POOL_NAME
+is blank at pool creation time, for example.
+.Sh ENVIRONMENT
+.Nm zfs_prepare_disk
+runs with a limited $PATH.
+.Sh EXIT STATUS
+.Nm zfs_prepare_disk
+should return 0 on success, non-zero otherwise.
+If non-zero is returned, the disk will not be included in the pool.
+.
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 4175d27ea32a..d69f7521e5cc 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -18,6 +18,10 @@ scripts_scripts = \
%D%/zimport.sh \
%D%/zloop.sh
+libexecdir = $(zfsexecdir)
+dist_libexec_SCRIPTS = \
+ %D%/zfs_prepare_disk
+
if CONFIG_USER
dist_scripts_SCRIPTS = $(scripts_scripts)
else
diff --git a/scripts/zfs_prepare_disk b/scripts/zfs_prepare_disk
new file mode 100755
index 000000000000..bb85a7a650dc
--- /dev/null
+++ b/scripts/zfs_prepare_disk
@@ -0,0 +1,17 @@
+#!/bin/sh
+#
+# This is an optional helper script that is automatically called by libzfs
+# before a disk is added to the pool. It can be modified by
+# the user to run whatever commands are necessary to prepare a disk for
+# inclusion into the pool. For example, users can add lines to this
+# script to do things like update the drive's firmware or check the drive's
+# health. The script is optional and can be removed if it is not needed.
+#
+# See the zfs_prepare_disk man page for details.
+#
+# Example:
+#
+# echo "Prepare disk $VDEV_PATH ($VDEV_UPATH) for $VDEV_PREPARE in $POOL_NAME"
+#
+
+exit 0
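
Note: once installed, the hook fires on ordinary pool operations; for
example (device names are placeholders):

    zpool create tank /dev/sdb     # script runs with VDEV_PREPARE=create
    zpool replace tank sdb sdc     # script runs with VDEV_PREPARE=replace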