diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c index c28560a15f7e..7a95add94a20 100644 --- a/cmd/zpool/zpool_main.c +++ b/cmd/zpool/zpool_main.c @@ -4643,7 +4643,7 @@ get_interval_count(int *argcp, char **argv, float *iv, /* * Determine if the last argument is an integer or a pool name */ - if (argc > 0 && isnumber(argv[argc - 1])) { + if (argc > 0 && zfs_isnumber(argv[argc - 1])) { char *end; errno = 0; @@ -4673,7 +4673,7 @@ get_interval_count(int *argcp, char **argv, float *iv, * If the last argument is also an integer, then we have both a count * and an interval. */ - if (argc > 0 && isnumber(argv[argc - 1])) { + if (argc > 0 && zfs_isnumber(argv[argc - 1])) { char *end; errno = 0; diff --git a/cmd/zpool/zpool_util.c b/cmd/zpool/zpool_util.c index c26c0eb3969b..1c1eb024f365 100644 --- a/cmd/zpool/zpool_util.c +++ b/cmd/zpool/zpool_util.c @@ -98,20 +98,6 @@ array64_max(uint64_t array[], unsigned int len) return (max); } -/* - * Return 1 if "str" is a number string, 0 otherwise. Works for integer and - * floating point numbers. - */ -int -isnumber(char *str) -{ - for (; *str; str++) - if (!(isdigit(*str) || (*str == '.'))) - return (0); - - return (1); -} - /* * Find highest one bit set. * Returns bit number + 1 of highest bit that is set, otherwise returns 0. 
diff --git a/cmd/zpool/zpool_util.h b/cmd/zpool/zpool_util.h index 3afc82d54b52..b853042ebc96 100644 --- a/cmd/zpool/zpool_util.h +++ b/cmd/zpool/zpool_util.h @@ -42,7 +42,6 @@ void *safe_malloc(size_t); void zpool_no_memory(void); uint_t num_logs(nvlist_t *nv); uint64_t array64_max(uint64_t array[], unsigned int len); -int isnumber(char *str); int highbit64(uint64_t i); int lowbit64(uint64_t i); diff --git a/cmd/zpool/zpool_vdev.c b/cmd/zpool/zpool_vdev.c index 527fca08b887..f149bf699942 100644 --- a/cmd/zpool/zpool_vdev.c +++ b/cmd/zpool/zpool_vdev.c @@ -213,14 +213,6 @@ static vdev_disk_db_entry_t vdev_disk_database[] = { {"ATA WDC WD2500BEVT-0", 4096}, {"ATA WDC WD3200BEVT-0", 4096}, {"ATA WDC WD5000BEVT-0", 4096}, - /* Virtual disks: Assume zvols with default volblocksize */ -#if 0 - {"ATA QEMU HARDDISK ", 8192}, - {"IET VIRTUAL-DISK ", 8192}, - {"OI COMSTAR ", 8192}, - {"SUN COMSTAR ", 8192}, - {"NETAPP LUN ", 8192}, -#endif }; static const int vdev_disk_database_size = diff --git a/include/libzutil.h b/include/libzutil.h index 69d1e6bbd638..3e6432f5c50a 100644 --- a/include/libzutil.h +++ b/include/libzutil.h @@ -117,6 +117,8 @@ extern boolean_t is_mpath_whole_disk(const char *); #define is_mpath_whole_disk(path) (B_FALSE) #endif +extern int zfs_isnumber(const char *); + /* * Formats for iostat numbers. Examples: "12K", "30ms", "4B", "2321234", "-". 
* @@ -149,6 +151,22 @@ extern void zpool_dump_ddt(const ddt_stat_t *, const ddt_histogram_t *); extern int zpool_history_unpack(char *, uint64_t, uint64_t *, nvlist_t ***, uint_t *); +struct libpc_handle; +typedef struct libpc_handle libpc_handle_t; + +extern int zutil_error(libpc_handle_t *hdl, const char *error, const char *msg); +extern void zutil_error_aux(libpc_handle_t *hdl, const char *fmt, ...); +extern int zutil_no_memory(libpc_handle_t *hdl); +extern void * zutil_alloc(libpc_handle_t *hdl, size_t size); +extern char *zutil_strdup(libpc_handle_t *hdl, const char *str); +extern int zutil_pool_active(libpc_handle_t *hdl, const char *name, + uint64_t guid, boolean_t *isactive); +extern nvlist_t *zutil_refresh_config(libpc_handle_t *hdl, nvlist_t *tryconfig); +extern int zutil_error_fmt(libpc_handle_t *hdl, const char *error, + const char *fmt, ...); + +extern nvlist_t *zpool_find_import_impl(libpc_handle_t *hdl, + importargs_t *iarg); #ifdef __cplusplus } #endif diff --git a/lib/libzutil/Makefile.am b/lib/libzutil/Makefile.am index 720b843ab97d..daea8070c5e2 100644 --- a/lib/libzutil/Makefile.am +++ b/lib/libzutil/Makefile.am @@ -12,9 +12,15 @@ noinst_LTLIBRARIES = libzutil.la USER_C = \ zutil_device_path.c \ zutil_import.c \ + zutil_import.h \ zutil_nicenum.c \ zutil_pool.c +if BUILD_LINUX +USER_C += \ + os/linux/zutil_import_os.c +endif + nodist_libzutil_la_SOURCES = $(USER_C) libzutil_la_LIBADD = \ diff --git a/lib/libzutil/os/linux/zutil_import_os.c b/lib/libzutil/os/linux/zutil_import_os.c new file mode 100644 index 000000000000..caba2f9a795a --- /dev/null +++ b/lib/libzutil/os/linux/zutil_import_os.c @@ -0,0 +1,1055 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2015 Nexenta Systems, Inc. All rights reserved. + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018 by Delphix. All rights reserved. + * Copyright 2015 RackTop Systems. + * Copyright (c) 2016, Intel Corporation. + */ + +/* + * Pool import support functions. + * + * Used by zpool, ztest, zdb, and zhack to locate importable configs. Since + * these commands are expected to run in the global zone, we can assume + * that the devices are all readable when called. + * + * To import a pool, we rely on reading the configuration information from the + * ZFS label of each device. If we successfully read the label, then we + * organize the configuration information in the following hierarchy: + * + * pool guid -> toplevel vdev guid -> label txg + * + * Duplicate entries matching this same tuple will be discarded. Once we have + * examined every device, we pick the best label txg config for each toplevel + * vdev. We then arrange these toplevel vdevs into a complete pool config, and + * update any paths that have changed. Finally, we attempt to import the pool + * using our derived config, and record the results. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "../../zutil_import.h" + +#ifdef HAVE_LIBUDEV +#include +#include +#endif +#include + +#define DEV_BYID_PATH "/dev/disk/by-id/" + +static boolean_t +is_watchdog_dev(char *dev) +{ + /* For 'watchdog' dev */ + if (strcmp(dev, "watchdog") == 0) + return (B_TRUE); + + /* For 'watchdog */ + if (strstr(dev, "watchdog") == dev && isdigit(dev[8])) + return (B_TRUE); + + return (B_FALSE); +} + +static int +label_paths_impl(libpc_handle_t *hdl, nvlist_t *nvroot, uint64_t pool_guid, + uint64_t vdev_guid, char **path, char **devid) +{ + nvlist_t **child; + uint_t c, children; + uint64_t guid; + char *val; + int error; + + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0) { + for (c = 0; c < children; c++) { + error = label_paths_impl(hdl, child[c], + pool_guid, vdev_guid, path, devid); + if (error) + return (error); + } + return (0); + } + + if (nvroot == NULL) + return (0); + + error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid); + if ((error != 0) || (guid != vdev_guid)) + return (0); + + error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &val); + if (error == 0) + *path = val; + + error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &val); + if (error == 0) + *devid = val; + + return (0); +} + +/* + * Given a disk label fetch the ZPOOL_CONFIG_PATH and ZPOOL_CONFIG_DEVID + * and store these strings as config_path and devid_path respectively. + * The returned pointers are only valid as long as label remains valid. 
+ */ +static int +label_paths(libpc_handle_t *hdl, nvlist_t *label, char **path, char **devid) +{ + nvlist_t *nvroot; + uint64_t pool_guid; + uint64_t vdev_guid; + + *path = NULL; + *devid = NULL; + + if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) || + nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) || + nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid)) + return (ENOENT); + + return (label_paths_impl(hdl, nvroot, pool_guid, vdev_guid, path, + devid)); +} + +static void +zpool_open_func(void *arg) +{ + rdsk_node_t *rn = arg; + libpc_handle_t *hdl = rn->rn_hdl; + struct stat64 statbuf; + nvlist_t *config; + char *bname, *dupname; + uint64_t vdev_guid = 0; + int error; + int num_labels = 0; + int fd; + + /* + * Skip devices with well known prefixes there can be side effects + * when opening devices which need to be avoided. + * + * hpet - High Precision Event Timer + * watchdog - Watchdog must be closed in a special way. + */ + dupname = zutil_strdup(hdl, rn->rn_name); + bname = basename(dupname); + error = ((strcmp(bname, "hpet") == 0) || is_watchdog_dev(bname)); + free(dupname); + if (error) + return; + + /* + * Ignore failed stats. We only want regular files and block devices. + */ + if (stat64(rn->rn_name, &statbuf) != 0 || + (!S_ISREG(statbuf.st_mode) && !S_ISBLK(statbuf.st_mode))) + return; + + /* + * Preferentially open using O_DIRECT to bypass the block device + * cache which may be stale for multipath devices. An EINVAL errno + * indicates O_DIRECT is unsupported so fallback to just O_RDONLY. 
+ */ + fd = open(rn->rn_name, O_RDONLY | O_DIRECT); + if ((fd < 0) && (errno == EINVAL)) + fd = open(rn->rn_name, O_RDONLY); +#ifdef notyet + if ((fd < 0) && (errno == EACCES)) + hdl->lpc_open_access_error = B_TRUE; +#endif + if (fd < 0) + return; + + /* + * This file is too small to hold a zpool + */ + if (S_ISREG(statbuf.st_mode) && statbuf.st_size < SPA_MINDEVSIZE) { + (void) close(fd); + return; + } + + error = zpool_read_label(fd, &config, &num_labels); + if (error != 0) { + (void) close(fd); + return; + } + + if (num_labels == 0) { + (void) close(fd); + nvlist_free(config); + return; + } + + /* + * Check that the vdev is for the expected guid. Additional entries + * are speculatively added based on the paths stored in the labels. + * Entries with valid paths but incorrect guids must be removed. + */ + error = nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid); + if (error || (rn->rn_vdev_guid && rn->rn_vdev_guid != vdev_guid)) { + (void) close(fd); + nvlist_free(config); + return; + } + + (void) close(fd); + + rn->rn_config = config; + rn->rn_num_labels = num_labels; + + /* + * Add additional entries for paths described by this label. + */ + if (rn->rn_labelpaths) { + char *path = NULL; + char *devid = NULL; + rdsk_node_t *slice; + avl_index_t where; + int error; + + if (label_paths(rn->rn_hdl, rn->rn_config, &path, &devid)) + return; + + /* + * Allow devlinks to stabilize so all paths are available. 
+ */ + zpool_label_disk_wait(rn->rn_name, DISK_LABEL_WAIT); + + if (path != NULL) { + slice = zutil_alloc(hdl, sizeof (rdsk_node_t)); + slice->rn_name = zutil_strdup(hdl, path); + slice->rn_vdev_guid = vdev_guid; + slice->rn_avl = rn->rn_avl; + slice->rn_hdl = hdl; + slice->rn_order = IMPORT_ORDER_PREFERRED_1; + slice->rn_labelpaths = B_FALSE; + pthread_mutex_lock(rn->rn_lock); + if (avl_find(rn->rn_avl, slice, &where)) { + pthread_mutex_unlock(rn->rn_lock); + free(slice->rn_name); + free(slice); + } else { + avl_insert(rn->rn_avl, slice, where); + pthread_mutex_unlock(rn->rn_lock); + zpool_open_func(slice); + } + } + + if (devid != NULL) { + slice = zutil_alloc(hdl, sizeof (rdsk_node_t)); + error = asprintf(&slice->rn_name, "%s%s", + DEV_BYID_PATH, devid); + if (error == -1) { + free(slice); + return; + } + + slice->rn_vdev_guid = vdev_guid; + slice->rn_avl = rn->rn_avl; + slice->rn_hdl = hdl; + slice->rn_order = IMPORT_ORDER_PREFERRED_2; + slice->rn_labelpaths = B_FALSE; + pthread_mutex_lock(rn->rn_lock); + if (avl_find(rn->rn_avl, slice, &where)) { + pthread_mutex_unlock(rn->rn_lock); + free(slice->rn_name); + free(slice); + } else { + avl_insert(rn->rn_avl, slice, where); + pthread_mutex_unlock(rn->rn_lock); + zpool_open_func(slice); + } + } + } +} + +static char * +zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = { + "/dev/disk/by-vdev", /* Custom rules, use first if they exist */ + "/dev/mapper", /* Use multipath devices before components */ + "/dev/disk/by-partlabel", /* Single unique entry set by user */ + "/dev/disk/by-partuuid", /* Generated partition uuid */ + "/dev/disk/by-label", /* Custom persistent labels */ + "/dev/disk/by-uuid", /* Single unique entry and persistent */ + "/dev/disk/by-id", /* May be multiple entries and persistent */ + "/dev/disk/by-path", /* Encodes physical location and persistent */ + "/dev" /* UNSAFE device names will change */ +}; + +const char * const * +zpool_default_search_paths(size_t *count) +{ + *count = 
DEFAULT_IMPORT_PATH_SIZE; + return ((const char * const *)zpool_default_import_path); +} + +/* + * Given a full path to a device determine if that device appears in the + * import search path. If it does return the first match and store the + * index in the passed 'order' variable, otherwise return an error. + */ +static int +zfs_path_order(char *name, int *order) +{ + int i = 0, error = ENOENT; + char *dir, *env, *envdup; + + env = getenv("ZPOOL_IMPORT_PATH"); + if (env) { + envdup = strdup(env); + dir = strtok(envdup, ":"); + while (dir) { + if (strncmp(name, dir, strlen(dir)) == 0) { + *order = i; + error = 0; + break; + } + dir = strtok(NULL, ":"); + i++; + } + free(envdup); + } else { + for (i = 0; i < DEFAULT_IMPORT_PATH_SIZE; i++) { + if (strncmp(name, zpool_default_import_path[i], + strlen(zpool_default_import_path[i])) == 0) { + *order = i; + error = 0; + break; + } + } + } + + return (error); +} + +/* + * Use libblkid to quickly enumerate all known zfs devices. + */ +static int +zpool_find_import_blkid(libpc_handle_t *hdl, pthread_mutex_t *lock, + avl_tree_t **slice_cache) +{ + rdsk_node_t *slice; + blkid_cache cache; + blkid_dev_iterate iter; + blkid_dev dev; + avl_index_t where; + int error; + + *slice_cache = NULL; + + error = blkid_get_cache(&cache, NULL); + if (error != 0) + return (error); + + error = blkid_probe_all_new(cache); + if (error != 0) { + blkid_put_cache(cache); + return (error); + } + + iter = blkid_dev_iterate_begin(cache); + if (iter == NULL) { + blkid_put_cache(cache); + return (EINVAL); + } + + error = blkid_dev_set_search(iter, "TYPE", "zfs_member"); + if (error != 0) { + blkid_dev_iterate_end(iter); + blkid_put_cache(cache); + return (error); + } + + *slice_cache = zutil_alloc(hdl, sizeof (avl_tree_t)); + avl_create(*slice_cache, slice_cache_compare, sizeof (rdsk_node_t), + offsetof(rdsk_node_t, rn_node)); + + while (blkid_dev_next(iter, &dev) == 0) { + slice = zutil_alloc(hdl, sizeof (rdsk_node_t)); + slice->rn_name = 
zutil_strdup(hdl, blkid_dev_devname(dev)); + slice->rn_vdev_guid = 0; + slice->rn_lock = lock; + slice->rn_avl = *slice_cache; + slice->rn_hdl = hdl; + slice->rn_labelpaths = B_TRUE; + + error = zfs_path_order(slice->rn_name, &slice->rn_order); + if (error == 0) + slice->rn_order += IMPORT_ORDER_SCAN_OFFSET; + else + slice->rn_order = IMPORT_ORDER_DEFAULT; + + pthread_mutex_lock(lock); + if (avl_find(*slice_cache, slice, &where)) { + free(slice->rn_name); + free(slice); + } else { + avl_insert(*slice_cache, slice, where); + } + pthread_mutex_unlock(lock); + } + + blkid_dev_iterate_end(iter); + blkid_put_cache(cache); + + return (0); +} + +/* + * Given a list of directories to search, find all pools stored on disk. This + * includes partial pools which are not available to import. If no args are + * given (argc is 0), then the default directory (/dev/dsk) is searched. + * poolname or guid (but not both) are provided by the caller when trying + * to import a specific pool. + */ +nvlist_t * +zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg) +{ + nvlist_t *ret = NULL; + pool_list_t pools = { 0 }; + pool_entry_t *pe, *penext; + vdev_entry_t *ve, *venext; + config_entry_t *ce, *cenext; + name_entry_t *ne, *nenext; + pthread_mutex_t lock; + avl_tree_t *cache; + rdsk_node_t *slice; + void *cookie; + tpool_t *t; + + verify(iarg->poolname == NULL || iarg->guid == 0); + pthread_mutex_init(&lock, NULL); + + /* + * Locate pool member vdevs using libblkid or by directory scanning. + * On success a newly allocated AVL tree which is populated with an + * entry for each discovered vdev will be returned as the cache. + * It's the caller's responsibility to consume and destroy this tree. 
+ */ + if (iarg->scan || iarg->paths != 0) { + int dirs = iarg->paths; + char **dir = iarg->path; + + if (dirs == 0) { + dir = zpool_default_import_path; + dirs = DEFAULT_IMPORT_PATH_SIZE; + } + + if (zpool_find_import_scan(hdl, &lock, &cache, dir, dirs) != 0) + return (NULL); + } else { + if (zpool_find_import_blkid(hdl, &lock, &cache) != 0) + return (NULL); + } + + /* + * Create a thread pool to parallelize the process of reading and + * validating labels, a large number of threads can be used due to + * minimal contention. + */ + t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL); + for (slice = avl_first(cache); slice; + (slice = avl_walk(cache, slice, AVL_AFTER))) + (void) tpool_dispatch(t, zpool_open_func, slice); + + tpool_wait(t); + tpool_destroy(t); + + /* + * Process the cache, filtering out any entries which are not + * for the specified pool then adding matching label configs. + */ + cookie = NULL; + while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) { + if (slice->rn_config != NULL) { + nvlist_t *config = slice->rn_config; + boolean_t matched = B_TRUE; + boolean_t aux = B_FALSE; + int fd; + + /* + * Check if it's a spare or l2cache device. If it is, + * we need to skip the name and guid check since they + * don't exist on aux device label. + */ + if (iarg->poolname != NULL || iarg->guid != 0) { + uint64_t state; + aux = nvlist_lookup_uint64(config, + ZPOOL_CONFIG_POOL_STATE, &state) == 0 && + (state == POOL_STATE_SPARE || + state == POOL_STATE_L2CACHE); + } + + if (iarg->poolname != NULL && !aux) { + char *pname; + + matched = nvlist_lookup_string(config, + ZPOOL_CONFIG_POOL_NAME, &pname) == 0 && + strcmp(iarg->poolname, pname) == 0; + } else if (iarg->guid != 0 && !aux) { + uint64_t this_guid; + + matched = nvlist_lookup_uint64(config, + ZPOOL_CONFIG_POOL_GUID, &this_guid) == 0 && + iarg->guid == this_guid; + } + if (matched) { + /* + * Verify all remaining entries can be opened + * exclusively. 
This will prune all underlying + * multipath devices which otherwise could + * result in the vdev appearing as UNAVAIL. + * + * Under zdb, this step isn't required and + * would prevent a zdb -e of active pools with + * no cachefile. + */ + fd = open(slice->rn_name, O_RDONLY | O_EXCL); + if (fd >= 0 || iarg->can_be_active) { + if (fd >= 0) + close(fd); + add_config(hdl, &pools, + slice->rn_name, slice->rn_order, + slice->rn_num_labels, config); + } + } + nvlist_free(config); + } + free(slice->rn_name); + free(slice); + } + avl_destroy(cache); + free(cache); + pthread_mutex_destroy(&lock); + + ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy); + + for (pe = pools.pools; pe != NULL; pe = penext) { + penext = pe->pe_next; + for (ve = pe->pe_vdevs; ve != NULL; ve = venext) { + venext = ve->ve_next; + for (ce = ve->ve_configs; ce != NULL; ce = cenext) { + cenext = ce->ce_next; + nvlist_free(ce->ce_config); + free(ce); + } + free(ve); + } + free(pe); + } + + for (ne = pools.names; ne != NULL; ne = nenext) { + nenext = ne->ne_next; + free(ne->ne_name); + free(ne); + } + + return (ret); +} + +/* + * Linux persistent device strings for vdev labels + * + * based on libudev for consistency with libudev disk add/remove events + */ + +typedef struct vdev_dev_strs { + char vds_devid[128]; + char vds_devphys[128]; +} vdev_dev_strs_t; + +#ifdef HAVE_LIBUDEV +/* + * Obtain the persistent device id string (describes what) + * + * used by ZED vdev matching for auto-{online,expand,replace} + */ +int +zfs_device_get_devid(struct udev_device *dev, char *bufptr, size_t buflen) +{ + struct udev_list_entry *entry; + const char *bus; + char devbyid[MAXPATHLEN]; + + /* The bus based by-id path is preferred */ + bus = udev_device_get_property_value(dev, "ID_BUS"); + + if (bus == NULL) { + const char *dm_uuid; + + /* + * For multipath nodes use the persistent uuid based identifier + * + * Example: /dev/disk/by-id/dm-uuid-mpath-35000c5006304de3f + */ + dm_uuid = 
udev_device_get_property_value(dev, "DM_UUID"); + if (dm_uuid != NULL) { + (void) snprintf(bufptr, buflen, "dm-uuid-%s", dm_uuid); + return (0); + } + + /* + * For volumes use the persistent /dev/zvol/dataset identifier + */ + entry = udev_device_get_devlinks_list_entry(dev); + while (entry != NULL) { + const char *name; + + name = udev_list_entry_get_name(entry); + if (strncmp(name, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0) { + (void) strlcpy(bufptr, name, buflen); + return (0); + } + entry = udev_list_entry_get_next(entry); + } + + /* + * NVME 'by-id' symlinks are similar to bus case + */ + struct udev_device *parent; + + parent = udev_device_get_parent_with_subsystem_devtype(dev, + "nvme", NULL); + if (parent != NULL) + bus = "nvme"; /* continue with bus symlink search */ + else + return (ENODATA); + } + + /* + * locate the bus specific by-id link + */ + (void) snprintf(devbyid, sizeof (devbyid), "%s%s-", DEV_BYID_PATH, bus); + entry = udev_device_get_devlinks_list_entry(dev); + while (entry != NULL) { + const char *name; + + name = udev_list_entry_get_name(entry); + if (strncmp(name, devbyid, strlen(devbyid)) == 0) { + name += strlen(DEV_BYID_PATH); + (void) strlcpy(bufptr, name, buflen); + return (0); + } + entry = udev_list_entry_get_next(entry); + } + + return (ENODATA); +} + +/* + * Obtain the persistent physical location string (describes where) + * + * used by ZED vdev matching for auto-{online,expand,replace} + */ +int +zfs_device_get_physical(struct udev_device *dev, char *bufptr, size_t buflen) +{ + const char *physpath = NULL; + struct udev_list_entry *entry; + + /* + * Normal disks use ID_PATH for their physical path. + */ + physpath = udev_device_get_property_value(dev, "ID_PATH"); + if (physpath != NULL && strlen(physpath) > 0) { + (void) strlcpy(bufptr, physpath, buflen); + return (0); + } + + /* + * Device mapper devices are virtual and don't have a physical + * path. For them we use ID_VDEV instead, which is setup via the + * /etc/vdev_id.conf file. 
ID_VDEV provides a persistent path + * to a virtual device. If you don't have vdev_id.conf setup, + * you cannot use multipath autoreplace with device mapper. + */ + physpath = udev_device_get_property_value(dev, "ID_VDEV"); + if (physpath != NULL && strlen(physpath) > 0) { + (void) strlcpy(bufptr, physpath, buflen); + return (0); + } + + /* + * For ZFS volumes use the persistent /dev/zvol/dataset identifier + */ + entry = udev_device_get_devlinks_list_entry(dev); + while (entry != NULL) { + physpath = udev_list_entry_get_name(entry); + if (strncmp(physpath, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0) { + (void) strlcpy(bufptr, physpath, buflen); + return (0); + } + entry = udev_list_entry_get_next(entry); + } + + /* + * For all other devices fallback to using the by-uuid name. + */ + entry = udev_device_get_devlinks_list_entry(dev); + while (entry != NULL) { + physpath = udev_list_entry_get_name(entry); + if (strncmp(physpath, "/dev/disk/by-uuid", 17) == 0) { + (void) strlcpy(bufptr, physpath, buflen); + return (0); + } + entry = udev_list_entry_get_next(entry); + } + + return (ENODATA); +} + +/* + * A disk is considered a multipath whole disk when: + * DEVNAME key value has "dm-" + * DM_NAME key value has "mpath" prefix + * DM_UUID key exists + * ID_PART_TABLE_TYPE key does not exist or is not gpt + */ +static boolean_t +udev_mpath_whole_disk(struct udev_device *dev) +{ + const char *devname, *type, *uuid; + + devname = udev_device_get_property_value(dev, "DEVNAME"); + type = udev_device_get_property_value(dev, "ID_PART_TABLE_TYPE"); + uuid = udev_device_get_property_value(dev, "DM_UUID"); + + if ((devname != NULL && strncmp(devname, "/dev/dm-", 8) == 0) && + ((type == NULL) || (strcmp(type, "gpt") != 0)) && + (uuid != NULL)) { + return (B_TRUE); + } + + return (B_FALSE); +} + +static int +udev_device_is_ready(struct udev_device *dev) +{ +#ifdef HAVE_LIBUDEV_UDEV_DEVICE_GET_IS_INITIALIZED + return (udev_device_get_is_initialized(dev)); +#else + /* wait for DEVLINKS 
property to be initialized */ + return (udev_device_get_property_value(dev, "DEVLINKS") != NULL); +#endif +} +#endif /* HAVE_LIBUDEV */ + +/* + * Wait up to timeout_ms for udev to set up the device node. The device is + * considered ready when libudev determines it has been initialized, all of + * the device links have been verified to exist, and it has been allowed to + * settle. At this point the device can be accessed reliably. + * Depending on the complexity of the udev rules this process could take + * several seconds. + */ +int +zpool_label_disk_wait(const char *path, int timeout_ms) +{ +#ifdef HAVE_LIBUDEV + struct udev *udev; + struct udev_device *dev = NULL; + char nodepath[MAXPATHLEN]; + char *sysname = NULL; + int ret = ENODEV; + int settle_ms = 50; + long sleep_ms = 10; + hrtime_t start, settle; + + if ((udev = udev_new()) == NULL) + return (ENXIO); + + start = gethrtime(); + settle = 0; + + do { + if (sysname == NULL) { + if (realpath(path, nodepath) != NULL) { + sysname = strrchr(nodepath, '/') + 1; + } else { + (void) usleep(sleep_ms * MILLISEC); + continue; + } + } + + dev = udev_device_new_from_subsystem_sysname(udev, + "block", sysname); + if ((dev != NULL) && udev_device_is_ready(dev)) { + struct udev_list_entry *links, *link = NULL; + + ret = 0; + links = udev_device_get_devlinks_list_entry(dev); + + udev_list_entry_foreach(link, links) { + struct stat64 statbuf; + const char *name; + + name = udev_list_entry_get_name(link); + errno = 0; + if (stat64(name, &statbuf) == 0 && errno == 0) + continue; + + settle = 0; + ret = ENODEV; + break; + } + + if (ret == 0) { + if (settle == 0) { + settle = gethrtime(); + } else if (NSEC2MSEC(gethrtime() - settle) >= + settle_ms) { + udev_device_unref(dev); + break; + } + } + } + + udev_device_unref(dev); + (void) usleep(sleep_ms * MILLISEC); + + } while (NSEC2MSEC(gethrtime() - start) < timeout_ms); + + udev_unref(udev); + + return (ret); +#else + int settle_ms = 50; + long sleep_ms = 10; + 
hrtime_t start, settle; + struct stat64 statbuf; + + start = gethrtime(); + settle = 0; + + do { + errno = 0; + if ((stat64(path, &statbuf) == 0) && (errno == 0)) { + if (settle == 0) + settle = gethrtime(); + else if (NSEC2MSEC(gethrtime() - settle) >= settle_ms) + return (0); + } else if (errno != ENOENT) { + return (errno); + } + + usleep(sleep_ms * MILLISEC); + } while (NSEC2MSEC(gethrtime() - start) < timeout_ms); + + return (ENODEV); +#endif /* HAVE_LIBUDEV */ +} + +/* + * Encode the persistent devices strings + * used for the vdev disk label + */ +static int +encode_device_strings(const char *path, vdev_dev_strs_t *ds, + boolean_t wholedisk) +{ +#ifdef HAVE_LIBUDEV + struct udev *udev; + struct udev_device *dev = NULL; + char nodepath[MAXPATHLEN]; + char *sysname; + int ret = ENODEV; + hrtime_t start; + + if ((udev = udev_new()) == NULL) + return (ENXIO); + + /* resolve path to a runtime device node instance */ + if (realpath(path, nodepath) == NULL) + goto no_dev; + + sysname = strrchr(nodepath, '/') + 1; + + /* + * Wait up to 3 seconds for udev to set up the device node context + */ + start = gethrtime(); + do { + dev = udev_device_new_from_subsystem_sysname(udev, "block", + sysname); + if (dev == NULL) + goto no_dev; + if (udev_device_is_ready(dev)) + break; /* udev ready */ + + udev_device_unref(dev); + dev = NULL; + + if (NSEC2MSEC(gethrtime() - start) < 10) + (void) sched_yield(); /* yield/busy wait up to 10ms */ + else + (void) usleep(10 * MILLISEC); + + } while (NSEC2MSEC(gethrtime() - start) < (3 * MILLISEC)); + + if (dev == NULL) + goto no_dev; + + /* + * Only whole disks require extra device strings + */ + if (!wholedisk && !udev_mpath_whole_disk(dev)) + goto no_dev; + + ret = zfs_device_get_devid(dev, ds->vds_devid, sizeof (ds->vds_devid)); + if (ret != 0) + goto no_dev_ref; + + /* physical location string (optional) */ + if (zfs_device_get_physical(dev, ds->vds_devphys, + sizeof (ds->vds_devphys)) != 0) { + ds->vds_devphys[0] = '\0'; /* empty 
string --> not available */ + } + +no_dev_ref: + udev_device_unref(dev); +no_dev: + udev_unref(udev); + + return (ret); +#else + return (ENOENT); +#endif +} + +/* + * Update a leaf vdev's persistent device strings (Linux only) + * + * - only applies for a dedicated leaf vdev (aka whole disk) + * - updated during pool create|add|attach|import + * - used for matching device matching during auto-{online,expand,replace} + * - stored in a leaf disk config label (i.e. alongside 'path' NVP) + * - these strings are currently not used in kernel (i.e. for vdev_disk_open) + * + * single device node example: + * devid: 'scsi-MG03SCA300_350000494a8cb3d67-part1' + * phys_path: 'pci-0000:04:00.0-sas-0x50000394a8cb3d67-lun-0' + * + * multipath device node example: + * devid: 'dm-uuid-mpath-35000c5006304de3f' + * + * We also store the enclosure sysfs path for turning on enclosure LEDs + * (if applicable): + * vdev_enc_sysfs_path: '/sys/class/enclosure/11:0:1:0/SLOT 4' + */ +void +update_vdev_config_dev_strs(nvlist_t *nv) +{ + vdev_dev_strs_t vds; + char *env, *type, *path; + uint64_t wholedisk = 0; + char *upath, *spath; + + /* + * For the benefit of legacy ZFS implementations, allow + * for opting out of devid strings in the vdev label. + * + * example use: + * env ZFS_VDEV_DEVID_OPT_OUT=YES zpool import dozer + * + * explanation: + * Older ZFS on Linux implementations had issues when attempting to + * display pool config VDEV names if a "devid" NVP value is present + * in the pool's config. + * + * For example, a pool that originated on illumos platform would + * have a devid value in the config and "zpool status" would fail + * when listing the config. + * + * A pool can be stripped of any "devid" values on import or + * prevented from adding them on zpool create|add by setting + * ZFS_VDEV_DEVID_OPT_OUT. 
+ */ + env = getenv("ZFS_VDEV_DEVID_OPT_OUT"); + if (env && (strtoul(env, NULL, 0) > 0 || + !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) { + (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID); + (void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH); + return; + } + + if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0 || + strcmp(type, VDEV_TYPE_DISK) != 0) { + return; + } + if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0) + return; + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk); + + /* + * Update device string values in the config nvlist. + */ + if (encode_device_strings(path, &vds, (boolean_t)wholedisk) == 0) { + (void) nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vds.vds_devid); + if (vds.vds_devphys[0] != '\0') { + (void) nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH, + vds.vds_devphys); + } + + /* Add enclosure sysfs path (if disk is in an enclosure). */ + upath = zfs_get_underlying_path(path); + spath = zfs_get_enclosure_sysfs_path(upath); + if (spath) + nvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, + spath); + else + nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH); + + free(upath); + free(spath); + } else { + /* Clear out any stale entries. 
*/ + (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID); + (void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH); + (void) nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH); + } +} diff --git a/lib/libzutil/zutil_import.c b/lib/libzutil/zutil_import.c index 28733cc747cc..591697bbb98e 100644 --- a/lib/libzutil/zutil_import.c +++ b/lib/libzutil/zutil_import.c @@ -52,10 +52,6 @@ #include #include #include -#ifdef HAVE_LIBUDEV -#include -#include -#endif #include #include #include @@ -67,34 +63,29 @@ #include #include -#include #include #include #include -#define IMPORT_ORDER_PREFERRED_1 1 -#define IMPORT_ORDER_PREFERRED_2 2 -#define IMPORT_ORDER_SCAN_OFFSET 10 -#define IMPORT_ORDER_DEFAULT 100 -#define DEFAULT_IMPORT_PATH_SIZE 9 +#include "zutil_import.h" -#define EZFS_BADCACHE "invalid or missing cache file" -#define EZFS_BADPATH "must be an absolute path" -#define EZFS_NOMEM "out of memory" -#define EZFS_EACESS "some devices require root privileges" +#ifdef HAVE_LIBUDEV +#include +#include +#endif -typedef struct libpc_handle { +struct libpc_handle { boolean_t lpc_printerr; boolean_t lpc_open_access_error; boolean_t lpc_desc_active; char lpc_desc[1024]; const pool_config_ops_t *lpc_ops; void *lpc_lib_handle; -} libpc_handle_t; +}; /*PRINTFLIKE2*/ -static void -zfs_error_aux(libpc_handle_t *hdl, const char *fmt, ...) +void +zutil_error_aux(libpc_handle_t *hdl, const char *fmt, ...) { va_list ap; @@ -127,8 +118,8 @@ zfs_verror(libpc_handle_t *hdl, const char *error, const char *fmt, va_list ap) } /*PRINTFLIKE3*/ -static int -zfs_error_fmt(libpc_handle_t *hdl, const char *error, const char *fmt, ...) +int +zutil_error_fmt(libpc_handle_t *hdl, const char *error, const char *fmt, ...) { va_list ap; @@ -141,539 +132,39 @@ zfs_error_fmt(libpc_handle_t *hdl, const char *error, const char *fmt, ...) 
return (-1); } -static int -zfs_error(libpc_handle_t *hdl, const char *error, const char *msg) +int +zutil_error(libpc_handle_t *hdl, const char *error, const char *msg) { - return (zfs_error_fmt(hdl, error, "%s", msg)); + return (zutil_error_fmt(hdl, error, "%s", msg)); } -static int -no_memory(libpc_handle_t *hdl) +int +zutil_no_memory(libpc_handle_t *hdl) { - zfs_error(hdl, EZFS_NOMEM, "internal error"); + zutil_error(hdl, EZFS_NOMEM, "internal error"); exit(1); } -static void * -zfs_alloc(libpc_handle_t *hdl, size_t size) +void * +zutil_alloc(libpc_handle_t *hdl, size_t size) { void *data; if ((data = calloc(1, size)) == NULL) - (void) no_memory(hdl); + (void) zutil_no_memory(hdl); return (data); } -static char * -zfs_strdup(libpc_handle_t *hdl, const char *str) +char * +zutil_strdup(libpc_handle_t *hdl, const char *str) { char *ret; if ((ret = strdup(str)) == NULL) - (void) no_memory(hdl); - - return (ret); -} - -/* - * Intermediate structures used to gather configuration information. 
- */ -typedef struct config_entry { - uint64_t ce_txg; - nvlist_t *ce_config; - struct config_entry *ce_next; -} config_entry_t; - -typedef struct vdev_entry { - uint64_t ve_guid; - config_entry_t *ve_configs; - struct vdev_entry *ve_next; -} vdev_entry_t; - -typedef struct pool_entry { - uint64_t pe_guid; - vdev_entry_t *pe_vdevs; - struct pool_entry *pe_next; -} pool_entry_t; - -typedef struct name_entry { - char *ne_name; - uint64_t ne_guid; - uint64_t ne_order; - uint64_t ne_num_labels; - struct name_entry *ne_next; -} name_entry_t; - -typedef struct pool_list { - pool_entry_t *pools; - name_entry_t *names; -} pool_list_t; - -#define ZVOL_ROOT "/dev/zvol" -#define DEV_BYID_PATH "/dev/disk/by-id/" - -/* - * Linux persistent device strings for vdev labels - * - * based on libudev for consistency with libudev disk add/remove events - */ - -typedef struct vdev_dev_strs { - char vds_devid[128]; - char vds_devphys[128]; -} vdev_dev_strs_t; - -#ifdef HAVE_LIBUDEV -/* - * Obtain the persistent device id string (describes what) - * - * used by ZED vdev matching for auto-{online,expand,replace} - */ -int -zfs_device_get_devid(struct udev_device *dev, char *bufptr, size_t buflen) -{ - struct udev_list_entry *entry; - const char *bus; - char devbyid[MAXPATHLEN]; - - /* The bus based by-id path is preferred */ - bus = udev_device_get_property_value(dev, "ID_BUS"); - - if (bus == NULL) { - const char *dm_uuid; - - /* - * For multipath nodes use the persistent uuid based identifier - * - * Example: /dev/disk/by-id/dm-uuid-mpath-35000c5006304de3f - */ - dm_uuid = udev_device_get_property_value(dev, "DM_UUID"); - if (dm_uuid != NULL) { - (void) snprintf(bufptr, buflen, "dm-uuid-%s", dm_uuid); - return (0); - } - - /* - * For volumes use the persistent /dev/zvol/dataset identifier - */ - entry = udev_device_get_devlinks_list_entry(dev); - while (entry != NULL) { - const char *name; - - name = udev_list_entry_get_name(entry); - if (strncmp(name, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 
0) { - (void) strlcpy(bufptr, name, buflen); - return (0); - } - entry = udev_list_entry_get_next(entry); - } - - /* - * NVME 'by-id' symlinks are similar to bus case - */ - struct udev_device *parent; - - parent = udev_device_get_parent_with_subsystem_devtype(dev, - "nvme", NULL); - if (parent != NULL) - bus = "nvme"; /* continue with bus symlink search */ - else - return (ENODATA); - } - - /* - * locate the bus specific by-id link - */ - (void) snprintf(devbyid, sizeof (devbyid), "%s%s-", DEV_BYID_PATH, bus); - entry = udev_device_get_devlinks_list_entry(dev); - while (entry != NULL) { - const char *name; - - name = udev_list_entry_get_name(entry); - if (strncmp(name, devbyid, strlen(devbyid)) == 0) { - name += strlen(DEV_BYID_PATH); - (void) strlcpy(bufptr, name, buflen); - return (0); - } - entry = udev_list_entry_get_next(entry); - } - - return (ENODATA); -} - -/* - * Obtain the persistent physical location string (describes where) - * - * used by ZED vdev matching for auto-{online,expand,replace} - */ -int -zfs_device_get_physical(struct udev_device *dev, char *bufptr, size_t buflen) -{ - const char *physpath = NULL; - struct udev_list_entry *entry; - - /* - * Normal disks use ID_PATH for their physical path. - */ - physpath = udev_device_get_property_value(dev, "ID_PATH"); - if (physpath != NULL && strlen(physpath) > 0) { - (void) strlcpy(bufptr, physpath, buflen); - return (0); - } - - /* - * Device mapper devices are virtual and don't have a physical - * path. For them we use ID_VDEV instead, which is setup via the - * /etc/vdev_id.conf file. ID_VDEV provides a persistent path - * to a virtual device. If you don't have vdev_id.conf setup, - * you cannot use multipath autoreplace with device mapper. 
- */ - physpath = udev_device_get_property_value(dev, "ID_VDEV"); - if (physpath != NULL && strlen(physpath) > 0) { - (void) strlcpy(bufptr, physpath, buflen); - return (0); - } - - /* - * For ZFS volumes use the persistent /dev/zvol/dataset identifier - */ - entry = udev_device_get_devlinks_list_entry(dev); - while (entry != NULL) { - physpath = udev_list_entry_get_name(entry); - if (strncmp(physpath, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0) { - (void) strlcpy(bufptr, physpath, buflen); - return (0); - } - entry = udev_list_entry_get_next(entry); - } - - /* - * For all other devices fallback to using the by-uuid name. - */ - entry = udev_device_get_devlinks_list_entry(dev); - while (entry != NULL) { - physpath = udev_list_entry_get_name(entry); - if (strncmp(physpath, "/dev/disk/by-uuid", 17) == 0) { - (void) strlcpy(bufptr, physpath, buflen); - return (0); - } - entry = udev_list_entry_get_next(entry); - } - - return (ENODATA); -} - -/* - * A disk is considered a multipath whole disk when: - * DEVNAME key value has "dm-" - * DM_NAME key value has "mpath" prefix - * DM_UUID key exists - * ID_PART_TABLE_TYPE key does not exist or is not gpt - */ -static boolean_t -udev_mpath_whole_disk(struct udev_device *dev) -{ - const char *devname, *type, *uuid; - - devname = udev_device_get_property_value(dev, "DEVNAME"); - type = udev_device_get_property_value(dev, "ID_PART_TABLE_TYPE"); - uuid = udev_device_get_property_value(dev, "DM_UUID"); - - if ((devname != NULL && strncmp(devname, "/dev/dm-", 8) == 0) && - ((type == NULL) || (strcmp(type, "gpt") != 0)) && - (uuid != NULL)) { - return (B_TRUE); - } - - return (B_FALSE); -} - -static int -udev_device_is_ready(struct udev_device *dev) -{ -#ifdef HAVE_LIBUDEV_UDEV_DEVICE_GET_IS_INITIALIZED - return (udev_device_get_is_initialized(dev)); -#else - /* wait for DEVLINKS property to be initialized */ - return (udev_device_get_property_value(dev, "DEVLINKS") != NULL); -#endif -} -#endif /* HAVE_LIBUDEV */ - -/* - * Wait up to 
timeout_ms for udev to set up the device node. The device is - * considered ready when libudev determines it has been initialized, all of - * the device links have been verified to exist, and it has been allowed to - * settle. At this point the device the device can be accessed reliably. - * Depending on the complexity of the udev rules this process could take - * several seconds. - */ -int -zpool_label_disk_wait(const char *path, int timeout_ms) -{ -#ifdef HAVE_LIBUDEV - struct udev *udev; - struct udev_device *dev = NULL; - char nodepath[MAXPATHLEN]; - char *sysname = NULL; - int ret = ENODEV; - int settle_ms = 50; - long sleep_ms = 10; - hrtime_t start, settle; - - if ((udev = udev_new()) == NULL) - return (ENXIO); - - start = gethrtime(); - settle = 0; - - do { - if (sysname == NULL) { - if (realpath(path, nodepath) != NULL) { - sysname = strrchr(nodepath, '/') + 1; - } else { - (void) usleep(sleep_ms * MILLISEC); - continue; - } - } - - dev = udev_device_new_from_subsystem_sysname(udev, - "block", sysname); - if ((dev != NULL) && udev_device_is_ready(dev)) { - struct udev_list_entry *links, *link = NULL; - - ret = 0; - links = udev_device_get_devlinks_list_entry(dev); - - udev_list_entry_foreach(link, links) { - struct stat64 statbuf; - const char *name; - - name = udev_list_entry_get_name(link); - errno = 0; - if (stat64(name, &statbuf) == 0 && errno == 0) - continue; - - settle = 0; - ret = ENODEV; - break; - } - - if (ret == 0) { - if (settle == 0) { - settle = gethrtime(); - } else if (NSEC2MSEC(gethrtime() - settle) >= - settle_ms) { - udev_device_unref(dev); - break; - } - } - } - - udev_device_unref(dev); - (void) usleep(sleep_ms * MILLISEC); - - } while (NSEC2MSEC(gethrtime() - start) < timeout_ms); - - udev_unref(udev); - - return (ret); -#else - int settle_ms = 50; - long sleep_ms = 10; - hrtime_t start, settle; - struct stat64 statbuf; - - start = gethrtime(); - settle = 0; - - do { - errno = 0; - if ((stat64(path, &statbuf) == 0) && (errno == 0)) { 
- if (settle == 0) - settle = gethrtime(); - else if (NSEC2MSEC(gethrtime() - settle) >= settle_ms) - return (0); - } else if (errno != ENOENT) { - return (errno); - } - - usleep(sleep_ms * MILLISEC); - } while (NSEC2MSEC(gethrtime() - start) < timeout_ms); - - return (ENODEV); -#endif /* HAVE_LIBUDEV */ -} - -/* - * Encode the persistent devices strings - * used for the vdev disk label - */ -static int -encode_device_strings(const char *path, vdev_dev_strs_t *ds, - boolean_t wholedisk) -{ -#ifdef HAVE_LIBUDEV - struct udev *udev; - struct udev_device *dev = NULL; - char nodepath[MAXPATHLEN]; - char *sysname; - int ret = ENODEV; - hrtime_t start; - - if ((udev = udev_new()) == NULL) - return (ENXIO); - - /* resolve path to a runtime device node instance */ - if (realpath(path, nodepath) == NULL) - goto no_dev; - - sysname = strrchr(nodepath, '/') + 1; - - /* - * Wait up to 3 seconds for udev to set up the device node context - */ - start = gethrtime(); - do { - dev = udev_device_new_from_subsystem_sysname(udev, "block", - sysname); - if (dev == NULL) - goto no_dev; - if (udev_device_is_ready(dev)) - break; /* udev ready */ - - udev_device_unref(dev); - dev = NULL; - - if (NSEC2MSEC(gethrtime() - start) < 10) - (void) sched_yield(); /* yield/busy wait up to 10ms */ - else - (void) usleep(10 * MILLISEC); - - } while (NSEC2MSEC(gethrtime() - start) < (3 * MILLISEC)); - - if (dev == NULL) - goto no_dev; - - /* - * Only whole disks require extra device strings - */ - if (!wholedisk && !udev_mpath_whole_disk(dev)) - goto no_dev; - - ret = zfs_device_get_devid(dev, ds->vds_devid, sizeof (ds->vds_devid)); - if (ret != 0) - goto no_dev_ref; - - /* physical location string (optional) */ - if (zfs_device_get_physical(dev, ds->vds_devphys, - sizeof (ds->vds_devphys)) != 0) { - ds->vds_devphys[0] = '\0'; /* empty string --> not available */ - } - -no_dev_ref: - udev_device_unref(dev); -no_dev: - udev_unref(udev); + (void) zutil_no_memory(hdl); return (ret); -#else - return 
(ENOENT); -#endif -} - -/* - * Update a leaf vdev's persistent device strings (Linux only) - * - * - only applies for a dedicated leaf vdev (aka whole disk) - * - updated during pool create|add|attach|import - * - used for matching device matching during auto-{online,expand,replace} - * - stored in a leaf disk config label (i.e. alongside 'path' NVP) - * - these strings are currently not used in kernel (i.e. for vdev_disk_open) - * - * single device node example: - * devid: 'scsi-MG03SCA300_350000494a8cb3d67-part1' - * phys_path: 'pci-0000:04:00.0-sas-0x50000394a8cb3d67-lun-0' - * - * multipath device node example: - * devid: 'dm-uuid-mpath-35000c5006304de3f' - * - * We also store the enclosure sysfs path for turning on enclosure LEDs - * (if applicable): - * vdev_enc_sysfs_path: '/sys/class/enclosure/11:0:1:0/SLOT 4' - */ -void -update_vdev_config_dev_strs(nvlist_t *nv) -{ - vdev_dev_strs_t vds; - char *env, *type, *path; - uint64_t wholedisk = 0; - char *upath, *spath; - - /* - * For the benefit of legacy ZFS implementations, allow - * for opting out of devid strings in the vdev label. - * - * example use: - * env ZFS_VDEV_DEVID_OPT_OUT=YES zpool import dozer - * - * explanation: - * Older ZFS on Linux implementations had issues when attempting to - * display pool config VDEV names if a "devid" NVP value is present - * in the pool's config. - * - * For example, a pool that originated on illumos platform would - * have a devid value in the config and "zpool status" would fail - * when listing the config. - * - * A pool can be stripped of any "devid" values on import or - * prevented from adding them on zpool create|add by setting - * ZFS_VDEV_DEVID_OPT_OUT. 
- */ - env = getenv("ZFS_VDEV_DEVID_OPT_OUT"); - if (env && (strtoul(env, NULL, 0) > 0 || - !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) { - (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID); - (void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH); - return; - } - - if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0 || - strcmp(type, VDEV_TYPE_DISK) != 0) { - return; - } - if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0) - return; - (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk); - - /* - * Update device string values in config nvlist - */ - if (encode_device_strings(path, &vds, (boolean_t)wholedisk) == 0) { - (void) nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vds.vds_devid); - if (vds.vds_devphys[0] != '\0') { - (void) nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH, - vds.vds_devphys); - } - - /* Add enclosure sysfs path (if disk is in an enclosure) */ - upath = zfs_get_underlying_path(path); - spath = zfs_get_enclosure_sysfs_path(upath); - if (spath) - nvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, - spath); - else - nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH); - - free(upath); - free(spath); - } else { - /* clear out any stale entries */ - (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID); - (void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH); - (void) nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH); - } } /* @@ -709,7 +200,7 @@ fix_paths(libpc_handle_t *hdl, nvlist_t *nv, name_entry_t *names) * use the lowest order device which corresponds to the first match * while traversing the ZPOOL_IMPORT_PATH search path. */ - verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0); + guid = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID); if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0) path = NULL; @@ -762,7 +253,7 @@ fix_paths(libpc_handle_t *hdl, nvlist_t *nv, name_entry_t *names) /* * Add the given configuration to the list of known devices. 
*/ -static int +int add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path, int order, int num_labels, nvlist_t *config) { @@ -781,10 +272,10 @@ add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path, &state) == 0 && (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) && nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) { - if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) + if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL) return (-1); - if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { + if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) { free(ne); return (-1); } @@ -826,7 +317,7 @@ add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path, } if (pe == NULL) { - if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) { + if ((pe = zutil_alloc(hdl, sizeof (pool_entry_t))) == NULL) { return (-1); } pe->pe_guid = pool_guid; @@ -844,7 +335,7 @@ add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path, } if (ve == NULL) { - if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) { + if ((ve = zutil_alloc(hdl, sizeof (vdev_entry_t))) == NULL) { return (-1); } ve->ve_guid = top_guid; @@ -863,7 +354,7 @@ add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path, } if (ce == NULL) { - if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) { + if ((ce = zutil_alloc(hdl, sizeof (config_entry_t))) == NULL) { return (-1); } ce->ce_txg = txg; @@ -878,10 +369,10 @@ add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path, * mappings so that we can fix up the configuration as necessary before * doing the import. 
*/ - if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) + if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL) return (-1); - if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { + if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) { free(ne); return (-1); } @@ -895,27 +386,6 @@ add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path, return (0); } -static int -pool_active(libpc_handle_t *hdl, const char *name, uint64_t guid, - boolean_t *isactive) -{ - ASSERT(hdl->lpc_ops->pco_pool_active != NULL); - - int error = hdl->lpc_ops->pco_pool_active(hdl->lpc_lib_handle, name, - guid, isactive); - - return (error); -} - -static nvlist_t * -refresh_config(libpc_handle_t *hdl, nvlist_t *tryconfig) -{ - ASSERT(hdl->lpc_ops->pco_refresh_config != NULL); - - return (hdl->lpc_ops->pco_refresh_config(hdl->lpc_lib_handle, - tryconfig)); -} - /* * Determine if the vdev id is a hole in the namespace. */ @@ -940,7 +410,7 @@ vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id) * pass to fix up any incorrect paths, and then add it to the main list to * return to the user. 
*/ -static nvlist_t * +nvlist_t * get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok, nvlist_t *policy) { @@ -1096,7 +566,7 @@ get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok, if (id >= children) { nvlist_t **newchild; - newchild = zfs_alloc(hdl, (id + 1) * + newchild = zutil_alloc(hdl, (id + 1) * sizeof (nvlist_t *)); if (newchild == NULL) goto nomem; @@ -1128,7 +598,7 @@ get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok, } else if (max_id > children) { nvlist_t **newchild; - newchild = zfs_alloc(hdl, (max_id) * + newchild = zutil_alloc(hdl, (max_id) * sizeof (nvlist_t *)); if (newchild == NULL) goto nomem; @@ -1266,7 +736,7 @@ get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok, verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) == 0); - if (pool_active(hdl, name, guid, &isactive) != 0) + if (zutil_pool_active(hdl, name, guid, &isactive) != 0) goto error; if (isactive) { @@ -1281,7 +751,7 @@ get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok, goto nomem; } - if ((nvl = refresh_config(hdl, config)) == NULL) { + if ((nvl = zutil_refresh_config(hdl, config)) == NULL) { nvlist_free(config); config = NULL; continue; @@ -1346,7 +816,7 @@ get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok, return (ret); nomem: - (void) no_memory(hdl); + (void) zutil_no_memory(hdl); error: nvlist_free(config); nvlist_free(ret); @@ -1357,360 +827,32 @@ get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok, return (NULL); } -/* - * Return the offset of the given label. - */ -static uint64_t -label_offset(uint64_t size, int l) -{ - ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0); - return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? - 0 : size - VDEV_LABELS * sizeof (vdev_label_t))); -} /* - * Given a file descriptor, read the label information and return an nvlist - * describing the configuration, if there is one. 
The number of valid - * labels found will be returned in num_labels when non-NULL. + * Sorted by full path and then vdev guid to allow for multiple entries with + * the same full path name. This is required because it's possible to + * have multiple block devices with labels that refer to the same + * ZPOOL_CONFIG_PATH yet have different vdev guids. In this case both + * entries need to be added to the cache. Scenarios where this can occur + * include overwritten pool labels, devices which are visible from multiple + * hosts and multipath devices. */ int -zpool_read_label(int fd, nvlist_t **config, int *num_labels) +slice_cache_compare(const void *arg1, const void *arg2) { - struct stat64 statbuf; - int l, count = 0; - vdev_label_t *label; - nvlist_t *expected_config = NULL; - uint64_t expected_guid = 0, size; - int error; + const char *nm1 = ((rdsk_node_t *)arg1)->rn_name; + const char *nm2 = ((rdsk_node_t *)arg2)->rn_name; + uint64_t guid1 = ((rdsk_node_t *)arg1)->rn_vdev_guid; + uint64_t guid2 = ((rdsk_node_t *)arg2)->rn_vdev_guid; + int rv; - *config = NULL; - - if (fstat64_blk(fd, &statbuf) == -1) - return (0); - size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t); - - error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label)); - if (error) - return (-1); - - for (l = 0; l < VDEV_LABELS; l++) { - uint64_t state, guid, txg; - - if (pread64(fd, label, sizeof (vdev_label_t), - label_offset(size, l)) != sizeof (vdev_label_t)) - continue; - - if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist, - sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) - continue; - - if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID, - &guid) != 0 || guid == 0) { - nvlist_free(*config); - continue; - } - - if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE, - &state) != 0 || state > POOL_STATE_L2CACHE) { - nvlist_free(*config); - continue; - } - - if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && - (nvlist_lookup_uint64(*config, 
ZPOOL_CONFIG_POOL_TXG, - &txg) != 0 || txg == 0)) { - nvlist_free(*config); - continue; - } - - if (expected_guid) { - if (expected_guid == guid) - count++; - - nvlist_free(*config); - } else { - expected_config = *config; - expected_guid = guid; - count++; - } - } - - if (num_labels != NULL) - *num_labels = count; - - free(label); - *config = expected_config; - - return (0); -} - -typedef struct rdsk_node { - char *rn_name; /* Full path to device */ - int rn_order; /* Preferred order (low to high) */ - int rn_num_labels; /* Number of valid labels */ - uint64_t rn_vdev_guid; /* Expected vdev guid when set */ - libpc_handle_t *rn_hdl; - nvlist_t *rn_config; /* Label config */ - avl_tree_t *rn_avl; - avl_node_t rn_node; - pthread_mutex_t *rn_lock; - boolean_t rn_labelpaths; -} rdsk_node_t; - -/* - * Sorted by full path and then vdev guid to allow for multiple entries with - * the same full path name. This is required because it's possible to - * have multiple block devices with labels that refer to the same - * ZPOOL_CONFIG_PATH yet have different vdev guids. In this case both - * entries need to be added to the cache. Scenarios where this can occur - * include overwritten pool labels, devices which are visible from multiple - * hosts and multipath devices. 
- */ -static int -slice_cache_compare(const void *arg1, const void *arg2) -{ - const char *nm1 = ((rdsk_node_t *)arg1)->rn_name; - const char *nm2 = ((rdsk_node_t *)arg2)->rn_name; - uint64_t guid1 = ((rdsk_node_t *)arg1)->rn_vdev_guid; - uint64_t guid2 = ((rdsk_node_t *)arg2)->rn_vdev_guid; - int rv; - - rv = AVL_ISIGN(strcmp(nm1, nm2)); - if (rv) - return (rv); + rv = AVL_ISIGN(strcmp(nm1, nm2)); + if (rv) + return (rv); return (AVL_CMP(guid1, guid2)); } -static boolean_t -is_watchdog_dev(char *dev) -{ - /* For 'watchdog' dev */ - if (strcmp(dev, "watchdog") == 0) - return (B_TRUE); - - /* For 'watchdog */ - if (strstr(dev, "watchdog") == dev && isdigit(dev[8])) - return (B_TRUE); - - return (B_FALSE); -} - -static int -label_paths_impl(libpc_handle_t *hdl, nvlist_t *nvroot, uint64_t pool_guid, - uint64_t vdev_guid, char **path, char **devid) -{ - nvlist_t **child; - uint_t c, children; - uint64_t guid; - char *val; - int error; - - if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, - &child, &children) == 0) { - for (c = 0; c < children; c++) { - error = label_paths_impl(hdl, child[c], - pool_guid, vdev_guid, path, devid); - if (error) - return (error); - } - return (0); - } - - if (nvroot == NULL) - return (0); - - error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid); - if ((error != 0) || (guid != vdev_guid)) - return (0); - - error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &val); - if (error == 0) - *path = val; - - error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &val); - if (error == 0) - *devid = val; - - return (0); -} - -/* - * Given a disk label fetch the ZPOOL_CONFIG_PATH and ZPOOL_CONFIG_DEVID - * and store these strings as config_path and devid_path respectively. - * The returned pointers are only valid as long as label remains valid. 
- */ -static int -label_paths(libpc_handle_t *hdl, nvlist_t *label, char **path, char **devid) -{ - nvlist_t *nvroot; - uint64_t pool_guid; - uint64_t vdev_guid; - - *path = NULL; - *devid = NULL; - - if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) || - nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) || - nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid)) - return (ENOENT); - - return (label_paths_impl(hdl, nvroot, pool_guid, vdev_guid, path, - devid)); -} - -static void -zpool_open_func(void *arg) -{ - rdsk_node_t *rn = arg; - libpc_handle_t *hdl = rn->rn_hdl; - struct stat64 statbuf; - nvlist_t *config; - char *bname, *dupname; - uint64_t vdev_guid = 0; - int error; - int num_labels = 0; - int fd; - - /* - * Skip devices with well known prefixes there can be side effects - * when opening devices which need to be avoided. - * - * hpet - High Precision Event Timer - * watchdog - Watchdog must be closed in a special way. - */ - dupname = zfs_strdup(hdl, rn->rn_name); - bname = basename(dupname); - error = ((strcmp(bname, "hpet") == 0) || is_watchdog_dev(bname)); - free(dupname); - if (error) - return; - - /* - * Ignore failed stats. We only want regular files and block devices. - */ - if (stat64(rn->rn_name, &statbuf) != 0 || - (!S_ISREG(statbuf.st_mode) && !S_ISBLK(statbuf.st_mode))) - return; - - /* - * Preferentially open using O_DIRECT to bypass the block device - * cache which may be stale for multipath devices. An EINVAL errno - * indicates O_DIRECT is unsupported so fallback to just O_RDONLY. 
- */ - fd = open(rn->rn_name, O_RDONLY | O_DIRECT); - if ((fd < 0) && (errno == EINVAL)) - fd = open(rn->rn_name, O_RDONLY); - - if ((fd < 0) && (errno == EACCES)) - hdl->lpc_open_access_error = B_TRUE; - - if (fd < 0) - return; - - /* - * This file is too small to hold a zpool - */ - if (S_ISREG(statbuf.st_mode) && statbuf.st_size < SPA_MINDEVSIZE) { - (void) close(fd); - return; - } - - error = zpool_read_label(fd, &config, &num_labels); - if (error != 0) { - (void) close(fd); - return; - } - - if (num_labels == 0) { - (void) close(fd); - nvlist_free(config); - return; - } - - /* - * Check that the vdev is for the expected guid. Additional entries - * are speculatively added based on the paths stored in the labels. - * Entries with valid paths but incorrect guids must be removed. - */ - error = nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid); - if (error || (rn->rn_vdev_guid && rn->rn_vdev_guid != vdev_guid)) { - (void) close(fd); - nvlist_free(config); - return; - } - - (void) close(fd); - - rn->rn_config = config; - rn->rn_num_labels = num_labels; - - /* - * Add additional entries for paths described by this label. - */ - if (rn->rn_labelpaths) { - char *path = NULL; - char *devid = NULL; - rdsk_node_t *slice; - avl_index_t where; - int error; - - if (label_paths(rn->rn_hdl, rn->rn_config, &path, &devid)) - return; - - /* - * Allow devlinks to stabilize so all paths are available. 
- */ - zpool_label_disk_wait(rn->rn_name, DISK_LABEL_WAIT); - - if (path != NULL) { - slice = zfs_alloc(hdl, sizeof (rdsk_node_t)); - slice->rn_name = zfs_strdup(hdl, path); - slice->rn_vdev_guid = vdev_guid; - slice->rn_avl = rn->rn_avl; - slice->rn_hdl = hdl; - slice->rn_order = IMPORT_ORDER_PREFERRED_1; - slice->rn_labelpaths = B_FALSE; - pthread_mutex_lock(rn->rn_lock); - if (avl_find(rn->rn_avl, slice, &where)) { - pthread_mutex_unlock(rn->rn_lock); - free(slice->rn_name); - free(slice); - } else { - avl_insert(rn->rn_avl, slice, where); - pthread_mutex_unlock(rn->rn_lock); - zpool_open_func(slice); - } - } - - if (devid != NULL) { - slice = zfs_alloc(hdl, sizeof (rdsk_node_t)); - error = asprintf(&slice->rn_name, "%s%s", - DEV_BYID_PATH, devid); - if (error == -1) { - free(slice); - return; - } - - slice->rn_vdev_guid = vdev_guid; - slice->rn_avl = rn->rn_avl; - slice->rn_hdl = hdl; - slice->rn_order = IMPORT_ORDER_PREFERRED_2; - slice->rn_labelpaths = B_FALSE; - pthread_mutex_lock(rn->rn_lock); - if (avl_find(rn->rn_avl, slice, &where)) { - pthread_mutex_unlock(rn->rn_lock); - free(slice->rn_name); - free(slice); - } else { - avl_insert(rn->rn_avl, slice, where); - pthread_mutex_unlock(rn->rn_lock); - zpool_open_func(slice); - } - } - } -} - static void zpool_find_import_scan_add_slice(libpc_handle_t *hdl, pthread_mutex_t *lock, avl_tree_t *cache, const char *path, const char *name, int order) @@ -1718,7 +860,7 @@ zpool_find_import_scan_add_slice(libpc_handle_t *hdl, pthread_mutex_t *lock, avl_index_t where; rdsk_node_t *slice; - slice = zfs_alloc(hdl, sizeof (rdsk_node_t)); + slice = zutil_alloc(hdl, sizeof (rdsk_node_t)); if (asprintf(&slice->rn_name, "%s/%s", path, name) == -1) { free(slice); return; @@ -1754,8 +896,8 @@ zpool_find_import_scan_dir(libpc_handle_t *hdl, pthread_mutex_t *lock, if (error == ENOENT) return (0); - zfs_error_aux(hdl, strerror(error)); - (void) zfs_error_fmt(hdl, EZFS_BADPATH, dgettext( + zutil_error_aux(hdl, strerror(error)); + 
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext( TEXT_DOMAIN, "cannot resolve path '%s'"), dir); return (error); } @@ -1763,8 +905,8 @@ zpool_find_import_scan_dir(libpc_handle_t *hdl, pthread_mutex_t *lock, dirp = opendir(path); if (dirp == NULL) { error = errno; - zfs_error_aux(hdl, strerror(error)); - (void) zfs_error_fmt(hdl, EZFS_BADPATH, + zutil_error_aux(hdl, strerror(error)); + (void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(TEXT_DOMAIN, "cannot open '%s'"), path); return (error); } @@ -1799,8 +941,8 @@ zpool_find_import_scan_path(libpc_handle_t *hdl, pthread_mutex_t *lock, * whole path because if it's a symlink, we want the * path of the symlink not where it points to. */ - d = zfs_strdup(hdl, dir); - b = zfs_strdup(hdl, dir); + d = zutil_strdup(hdl, dir); + b = zutil_strdup(hdl, dir); dpath = dirname(d); name = basename(b); @@ -1811,8 +953,8 @@ zpool_find_import_scan_path(libpc_handle_t *hdl, pthread_mutex_t *lock, goto out; } - zfs_error_aux(hdl, strerror(error)); - (void) zfs_error_fmt(hdl, EZFS_BADPATH, dgettext( + zutil_error_aux(hdl, strerror(error)); + (void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext( TEXT_DOMAIN, "cannot resolve path '%s'"), dir); goto out; } @@ -1828,7 +970,7 @@ zpool_find_import_scan_path(libpc_handle_t *hdl, pthread_mutex_t *lock, /* * Scan a list of directories for zfs devices. 
*/ -static int +int zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock, avl_tree_t **slice_cache, char **dir, int dirs) { @@ -1838,7 +980,7 @@ zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock, int i, error; *slice_cache = NULL; - cache = zfs_alloc(hdl, sizeof (avl_tree_t)); + cache = zutil_alloc(hdl, sizeof (avl_tree_t)); avl_create(cache, slice_cache_compare, sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node)); @@ -1850,8 +992,8 @@ zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock, if (error == ENOENT) continue; - zfs_error_aux(hdl, strerror(error)); - (void) zfs_error_fmt(hdl, EZFS_BADPATH, dgettext( + zutil_error_aux(hdl, strerror(error)); + (void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext( TEXT_DOMAIN, "cannot resolve path '%s'"), dir[i]); goto error; } @@ -1886,289 +1028,113 @@ zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock, return (error); } -static char * -zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = { - "/dev/disk/by-vdev", /* Custom rules, use first if they exist */ - "/dev/mapper", /* Use multipath devices before components */ - "/dev/disk/by-partlabel", /* Single unique entry set by user */ - "/dev/disk/by-partuuid", /* Generated partition uuid */ - "/dev/disk/by-label", /* Custom persistent labels */ - "/dev/disk/by-uuid", /* Single unique entry and persistent */ - "/dev/disk/by-id", /* May be multiple entries and persistent */ - "/dev/disk/by-path", /* Encodes physical location and persistent */ - "/dev" /* UNSAFE device names will change */ -}; +#define ZVOL_ROOT "/dev/zvol" -const char * const * -zpool_default_search_paths(size_t *count) +int +zutil_pool_active(libpc_handle_t *hdl, const char *name, uint64_t guid, + boolean_t *isactive) { - *count = DEFAULT_IMPORT_PATH_SIZE; - return ((const char * const *)zpool_default_import_path); -} + ASSERT(hdl->lpc_ops->pco_pool_active != NULL); -/* - * Given a full path to a device determine if that device appears in the - * import 
search path. If it does return the first match and store the - * index in the passed 'order' variable, otherwise return an error. - */ -static int -zfs_path_order(char *name, int *order) -{ - int i = 0, error = ENOENT; - char *dir, *env, *envdup; - - env = getenv("ZPOOL_IMPORT_PATH"); - if (env) { - envdup = strdup(env); - dir = strtok(envdup, ":"); - while (dir) { - if (strncmp(name, dir, strlen(dir)) == 0) { - *order = i; - error = 0; - break; - } - dir = strtok(NULL, ":"); - i++; - } - free(envdup); - } else { - for (i = 0; i < DEFAULT_IMPORT_PATH_SIZE; i++) { - if (strncmp(name, zpool_default_import_path[i], - strlen(zpool_default_import_path[i])) == 0) { - *order = i; - error = 0; - break; - } - } - } + int error = hdl->lpc_ops->pco_pool_active(hdl->lpc_lib_handle, name, + guid, isactive); return (error); } +nvlist_t * +zutil_refresh_config(libpc_handle_t *hdl, nvlist_t *tryconfig) +{ + ASSERT(hdl->lpc_ops->pco_refresh_config != NULL); + + return (hdl->lpc_ops->pco_refresh_config(hdl->lpc_lib_handle, + tryconfig)); +} /* - * Use libblkid to quickly enumerate all known zfs devices. + * Return the offset of the given label. 
*/ -static int -zpool_find_import_blkid(libpc_handle_t *hdl, pthread_mutex_t *lock, - avl_tree_t **slice_cache) +static uint64_t +label_offset(uint64_t size, int l) { - rdsk_node_t *slice; - blkid_cache cache; - blkid_dev_iterate iter; - blkid_dev dev; - avl_index_t where; - int error; - - *slice_cache = NULL; - - error = blkid_get_cache(&cache, NULL); - if (error != 0) - return (error); - - error = blkid_probe_all_new(cache); - if (error != 0) { - blkid_put_cache(cache); - return (error); - } - - iter = blkid_dev_iterate_begin(cache); - if (iter == NULL) { - blkid_put_cache(cache); - return (EINVAL); - } - - error = blkid_dev_set_search(iter, "TYPE", "zfs_member"); - if (error != 0) { - blkid_dev_iterate_end(iter); - blkid_put_cache(cache); - return (error); - } - - *slice_cache = zfs_alloc(hdl, sizeof (avl_tree_t)); - avl_create(*slice_cache, slice_cache_compare, sizeof (rdsk_node_t), - offsetof(rdsk_node_t, rn_node)); - - while (blkid_dev_next(iter, &dev) == 0) { - slice = zfs_alloc(hdl, sizeof (rdsk_node_t)); - slice->rn_name = zfs_strdup(hdl, blkid_dev_devname(dev)); - slice->rn_vdev_guid = 0; - slice->rn_lock = lock; - slice->rn_avl = *slice_cache; - slice->rn_hdl = hdl; - slice->rn_labelpaths = B_TRUE; - - error = zfs_path_order(slice->rn_name, &slice->rn_order); - if (error == 0) - slice->rn_order += IMPORT_ORDER_SCAN_OFFSET; - else - slice->rn_order = IMPORT_ORDER_DEFAULT; - - pthread_mutex_lock(lock); - if (avl_find(*slice_cache, slice, &where)) { - free(slice->rn_name); - free(slice); - } else { - avl_insert(*slice_cache, slice, where); - } - pthread_mutex_unlock(lock); - } - - blkid_dev_iterate_end(iter); - blkid_put_cache(cache); - - return (0); + ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0); + return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? + 0 : size - VDEV_LABELS * sizeof (vdev_label_t))); } /* - * Given a list of directories to search, find all pools stored on disk. 
This - * includes partial pools which are not available to import. If no args are - * given (argc is 0), then the default directory (/dev/dsk) is searched. - * poolname or guid (but not both) are provided by the caller when trying - * to import a specific pool. + * Given a file descriptor, read the label information and return an nvlist + * describing the configuration, if there is one. The number of valid + * labels found will be returned in num_labels when non-NULL. */ -static nvlist_t * -zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg) +int +zpool_read_label(int fd, nvlist_t **config, int *num_labels) { - nvlist_t *ret = NULL; - pool_list_t pools = { 0 }; - pool_entry_t *pe, *penext; - vdev_entry_t *ve, *venext; - config_entry_t *ce, *cenext; - name_entry_t *ne, *nenext; - pthread_mutex_t lock; - avl_tree_t *cache; - rdsk_node_t *slice; - void *cookie; - tpool_t *t; + struct stat64 statbuf; + int l, count = 0; + vdev_label_t *label; + nvlist_t *expected_config = NULL; + uint64_t expected_guid = 0, size; + int error; - verify(iarg->poolname == NULL || iarg->guid == 0); - pthread_mutex_init(&lock, NULL); + *config = NULL; - /* - * Locate pool member vdevs using libblkid or by directory scanning. - * On success a newly allocated AVL tree which is populated with an - * entry for each discovered vdev will be returned as the cache. - * It's the callers responsibility to consume and destroy this tree. 
- */ - if (iarg->scan || iarg->paths != 0) { - int dirs = iarg->paths; - char **dir = iarg->path; + if (fstat64_blk(fd, &statbuf) == -1) + return (0); + size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t); - if (dirs == 0) { - dir = zpool_default_import_path; - dirs = DEFAULT_IMPORT_PATH_SIZE; - } + error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label)); + if (error) + return (-1); - if (zpool_find_import_scan(hdl, &lock, &cache, dir, dirs) != 0) - return (NULL); - } else { - if (zpool_find_import_blkid(hdl, &lock, &cache) != 0) - return (NULL); - } + for (l = 0; l < VDEV_LABELS; l++) { + uint64_t state, guid, txg; - /* - * Create a thread pool to parallelize the process of reading and - * validating labels, a large number of threads can be used due to - * minimal contention. - */ - t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL); - for (slice = avl_first(cache); slice; - (slice = avl_walk(cache, slice, AVL_AFTER))) - (void) tpool_dispatch(t, zpool_open_func, slice); + if (pread64(fd, label, sizeof (vdev_label_t), + label_offset(size, l)) != sizeof (vdev_label_t)) + continue; - tpool_wait(t); - tpool_destroy(t); + if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist, + sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) + continue; - /* - * Process the cache, filtering out any entries which are not - * for the specified pool then adding matching label configs. - */ - cookie = NULL; - while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) { - if (slice->rn_config != NULL) { - nvlist_t *config = slice->rn_config; - boolean_t matched = B_TRUE; - boolean_t aux = B_FALSE; - int fd; + if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID, + &guid) != 0 || guid == 0) { + nvlist_free(*config); + continue; + } - /* - * Check if it's a spare or l2cache device. If it is, - * we need to skip the name and guid check since they - * don't exist on aux device label. 
- */ - if (iarg->poolname != NULL || iarg->guid != 0) { - uint64_t state; - aux = nvlist_lookup_uint64(config, - ZPOOL_CONFIG_POOL_STATE, &state) == 0 && - (state == POOL_STATE_SPARE || - state == POOL_STATE_L2CACHE); - } + if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE, + &state) != 0 || state > POOL_STATE_L2CACHE) { + nvlist_free(*config); + continue; + } - if (iarg->poolname != NULL && !aux) { - char *pname; + if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && + (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG, + &txg) != 0 || txg == 0)) { + nvlist_free(*config); + continue; + } - matched = nvlist_lookup_string(config, - ZPOOL_CONFIG_POOL_NAME, &pname) == 0 && - strcmp(iarg->poolname, pname) == 0; - } else if (iarg->guid != 0 && !aux) { - uint64_t this_guid; + if (expected_guid) { + if (expected_guid == guid) + count++; - matched = nvlist_lookup_uint64(config, - ZPOOL_CONFIG_POOL_GUID, &this_guid) == 0 && - iarg->guid == this_guid; - } - if (matched) { - /* - * Verify all remaining entries can be opened - * exclusively. This will prune all underlying - * multipath devices which otherwise could - * result in the vdev appearing as UNAVAIL. - * - * Under zdb, this step isn't required and - * would prevent a zdb -e of active pools with - * no cachefile. 
- */ - fd = open(slice->rn_name, O_RDONLY | O_EXCL); - if (fd >= 0 || iarg->can_be_active) { - if (fd >= 0) - close(fd); - add_config(hdl, &pools, - slice->rn_name, slice->rn_order, - slice->rn_num_labels, config); - } - } - nvlist_free(config); - } - free(slice->rn_name); - free(slice); - } - avl_destroy(cache); - free(cache); - pthread_mutex_destroy(&lock); - - ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy); - - for (pe = pools.pools; pe != NULL; pe = penext) { - penext = pe->pe_next; - for (ve = pe->pe_vdevs; ve != NULL; ve = venext) { - venext = ve->ve_next; - for (ce = ve->ve_configs; ce != NULL; ce = cenext) { - cenext = ce->ce_next; - nvlist_free(ce->ce_config); - free(ce); - } - free(ve); + nvlist_free(*config); + } else { + expected_config = *config; + expected_guid = guid; + count++; } - free(pe); } - for (ne = pools.names; ne != NULL; ne = nenext) { - nenext = ne->ne_next; - free(ne->ne_name); - free(ne); - } + if (num_labels != NULL) + *num_labels = count; - return (ret); + free(label); + *config = expected_config; + + return (0); } /* @@ -2193,21 +1159,21 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile, verify(poolname == NULL || guid == 0); if ((fd = open(cachefile, O_RDONLY)) < 0) { - zfs_error_aux(hdl, "%s", strerror(errno)); - (void) zfs_error(hdl, EZFS_BADCACHE, + zutil_error_aux(hdl, "%s", strerror(errno)); + (void) zutil_error(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN, "failed to open cache file")); return (NULL); } if (fstat64(fd, &statbuf) != 0) { - zfs_error_aux(hdl, "%s", strerror(errno)); + zutil_error_aux(hdl, "%s", strerror(errno)); (void) close(fd); - (void) zfs_error(hdl, EZFS_BADCACHE, + (void) zutil_error(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN, "failed to get size of cache file")); return (NULL); } - if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) { + if ((buf = zutil_alloc(hdl, statbuf.st_size)) == NULL) { (void) close(fd); return (NULL); } @@ -2215,7 +1181,7 @@ 
zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile, if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { (void) close(fd); free(buf); - (void) zfs_error(hdl, EZFS_BADCACHE, + (void) zutil_error(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN, "failed to read cache file contents")); return (NULL); @@ -2225,7 +1191,7 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile, if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) { free(buf); - (void) zfs_error(hdl, EZFS_BADCACHE, + (void) zutil_error(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN, "invalid or corrupt cache file contents")); return (NULL); @@ -2238,7 +1204,7 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile, * state. */ if (nvlist_alloc(&pools, 0, 0) != 0) { - (void) no_memory(hdl); + (void) zutil_no_memory(hdl); nvlist_free(raw); return (NULL); } @@ -2255,7 +1221,7 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile, if (guid != 0 && guid != this_guid) continue; - if (pool_active(hdl, name, this_guid, &active) != 0) { + if (zutil_pool_active(hdl, name, this_guid, &active) != 0) { nvlist_free(raw); nvlist_free(pools); return (NULL); @@ -2266,20 +1232,20 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile, if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE, cachefile) != 0) { - (void) no_memory(hdl); + (void) zutil_no_memory(hdl); nvlist_free(raw); nvlist_free(pools); return (NULL); } - if ((dst = refresh_config(hdl, src)) == NULL) { + if ((dst = zutil_refresh_config(hdl, src)) == NULL) { nvlist_free(raw); nvlist_free(pools); return (NULL); } if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) { - (void) no_memory(hdl); + (void) zutil_no_memory(hdl); nvlist_free(dst); nvlist_free(raw); nvlist_free(pools); @@ -2313,7 +1279,7 @@ zpool_search_import(void *hdl, importargs_t *import, if ((pools == NULL || nvlist_empty(pools)) && handle.lpc_open_access_error && geteuid() != 0) { - (void) zfs_error(&handle, EZFS_EACESS, 
dgettext(TEXT_DOMAIN, + (void) zutil_error(&handle, EZFS_EACESS, dgettext(TEXT_DOMAIN, "no pools found")); } diff --git a/lib/libzutil/zutil_import.h b/lib/libzutil/zutil_import.h new file mode 100644 index 000000000000..3abe3bd469e4 --- /dev/null +++ b/lib/libzutil/zutil_import.h @@ -0,0 +1,99 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2015 Nexenta Systems, Inc. All rights reserved. + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018 by Delphix. All rights reserved. + * Copyright 2015 RackTop Systems. + * Copyright (c) 2016, Intel Corporation. + */ +#ifndef _LIBZUTIL_ZUTIL_IMPORT_H_ +#define _LIBZUTIL_ZUTIL_IMPORT_H_ + +#define EZFS_BADCACHE "invalid or missing cache file" +#define EZFS_BADPATH "must be an absolute path" +#define EZFS_NOMEM "out of memory" +#define EZFS_EACESS "some devices require root privileges" + +#define IMPORT_ORDER_PREFERRED_1 1 +#define IMPORT_ORDER_PREFERRED_2 2 +#define IMPORT_ORDER_SCAN_OFFSET 10 +#define IMPORT_ORDER_DEFAULT 100 +#define DEFAULT_IMPORT_PATH_SIZE 9 + +/* + * Intermediate structures used to gather configuration information. 
+ */ +typedef struct config_entry { + uint64_t ce_txg; + nvlist_t *ce_config; + struct config_entry *ce_next; +} config_entry_t; + +typedef struct vdev_entry { + uint64_t ve_guid; + config_entry_t *ve_configs; + struct vdev_entry *ve_next; +} vdev_entry_t; + +typedef struct pool_entry { + uint64_t pe_guid; + vdev_entry_t *pe_vdevs; + struct pool_entry *pe_next; +} pool_entry_t; + +typedef struct name_entry { + char *ne_name; + uint64_t ne_guid; + uint64_t ne_order; + uint64_t ne_num_labels; + struct name_entry *ne_next; +} name_entry_t; + +typedef struct pool_list { + pool_entry_t *pools; + name_entry_t *names; +} pool_list_t; + +int add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path, + int order, int num_labels, nvlist_t *config); +nvlist_t *get_configs(libpc_handle_t *hdl, pool_list_t *pl, + boolean_t active_ok, nvlist_t *policy); + +typedef struct rdsk_node { + char *rn_name; /* Full path to device */ + int rn_order; /* Preferred order (low to high) */ + int rn_num_labels; /* Number of valid labels */ + uint64_t rn_vdev_guid; /* Expected vdev guid when set */ + libpc_handle_t *rn_hdl; + nvlist_t *rn_config; /* Label config */ + avl_tree_t *rn_avl; + avl_node_t rn_node; + pthread_mutex_t *rn_lock; + boolean_t rn_labelpaths; +} rdsk_node_t; + +int slice_cache_compare(const void *, const void *); + +int zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock, + avl_tree_t **slice_cache, char **dir, int dirs); + +#endif /* _LIBZUTIL_ZUTIL_IMPORT_H_ */ diff --git a/lib/libzutil/zutil_nicenum.c b/lib/libzutil/zutil_nicenum.c index 9a81011fca02..fab4f49b820e 100644 --- a/lib/libzutil/zutil_nicenum.c +++ b/lib/libzutil/zutil_nicenum.c @@ -23,10 +23,25 @@ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. */ +#include #include #include #include +/* + * Return 1 if "str" is a number string, 0 otherwise. Works for integer and + * floating point numbers. 
+ */ +int +zfs_isnumber(const char *str) +{ + for (; *str; str++) + if (!(isdigit(*str) || (*str == '.'))) + return (0); + + return (1); +} + /* * Convert a number to an appropriately human-readable output. */