From 044579aa4cba17d16aab61cdab010c941ba528b2 Mon Sep 17 00:00:00 2001 From: Haakan T Johansson Date: Thu, 1 Dec 2016 00:06:03 +0100 Subject: [PATCH] zpool status -k shows kind of vdevs: ssd, hdd or mixed. --- cmd/zpool/zpool_main.c | 20 +++++++++++++++----- man/man8/zpool.8 | 13 +++++++++++-- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c index 474a191ba1a6..7b8d15cef512 100644 --- a/cmd/zpool/zpool_main.c +++ b/cmd/zpool/zpool_main.c @@ -340,7 +340,7 @@ get_usage(zpool_help_t idx) { case HELP_SCRUB: return (gettext("\tscrub [-s] ...\n")); case HELP_STATUS: - return (gettext("\tstatus [-gLPvxD] [-T d|u] [pool] ... " + return (gettext("\tstatus [-gkLPvxD] [-T d|u] [pool] ... " "[interval [count]]\n")); case HELP_UPGRADE: return (gettext("\tupgrade\n" @@ -1538,6 +1538,7 @@ typedef struct status_cbdata { boolean_t cb_explain; boolean_t cb_first; boolean_t cb_dedup_stats; + boolean_t cb_kind; boolean_t cb_print_status; vdev_cmd_data_list_t *vcdl; } status_cbdata_t; @@ -1601,8 +1602,13 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name, zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf)); zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf)); (void) printf(" %5s %5s %5s", rbuf, wbuf, cbuf); + } else { + (void) printf(" "); } + if (cb->cb_kind) + (void) printf(" %4s", kind_mark(nv)); + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, ¬present) == 0) { verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); @@ -6042,9 +6048,9 @@ status_callback(zpool_handle_t *zhp, void *data) cbp->cb_namewidth = 10; (void) printf(gettext("config:\n\n")); - (void) printf(gettext("\t%-*s %-8s %5s %5s %5s\n"), + (void) printf(gettext("\t%-*s %-8s %5s %5s %5s%s\n"), cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE", - "CKSUM"); + "CKSUM", cbp->cb_kind ? 
" KIND" : ""); print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0, B_FALSE); @@ -6104,10 +6110,11 @@ status_callback(zpool_handle_t *zhp, void *data) } /* - * zpool status [-c CMD] [-gLPvx] [-T d|u] [pool] ... [interval [count]] + * zpool status [-c CMD] [-gkLPvx] [-T d|u] [pool] ... [interval [count]] * * -c CMD For each vdev, run command CMD * -g Display guid for individual vdev name. + * -k Display kind of device, e.g. solid-state or mixed. * -L Follow links when resolving vdev path name. * -P Display full path for vdev name. * -v Display complete error logs @@ -6128,7 +6135,7 @@ zpool_do_status(int argc, char **argv) char *cmd = NULL; /* check options */ - while ((c = getopt(argc, argv, "c:gLPvxDT:")) != -1) { + while ((c = getopt(argc, argv, "c:gkLPvxDT:")) != -1) { switch (c) { case 'c': cmd = optarg; @@ -6136,6 +6143,9 @@ zpool_do_status(int argc, char **argv) case 'g': cb.cb_name_flags |= VDEV_NAME_GUID; break; + case 'k': + cb.cb_kind = B_TRUE; + break; case 'L': cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; break; diff --git a/man/man8/zpool.8 b/man/man8/zpool.8 index 10e08121badd..e31b1b3e96f6 100644 --- a/man/man8/zpool.8 +++ b/man/man8/zpool.8 @@ -159,7 +159,7 @@ zpool \- configures ZFS storage pools .LP .nf -\fBzpool status\fR [\fB-c\fR \fBCMD\fR] [\fB-gLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] +\fBzpool status\fR [\fB-c\fR \fBCMD\fR] [\fB-gkLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] .fi .LP @@ -2108,7 +2108,7 @@ Sets the specified property for \fInewpool\fR. See the “Properties” section .sp .ne 2 .na -\fBzpool status\fR [\fB-c\fR \fBCMD\fR] [\fB-gLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] +\fBzpool status\fR [\fB-c\fR \fBCMD\fR] [\fB-gkLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] .ad .sp .6 .RS 4n @@ -2142,6 +2142,15 @@ in parallel for each vdev for performance. Display vdev GUIDs instead of the normal device names. 
These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands. .RE +.sp +.ne 2 +.na +\fB\fB-k\fR\fR +.ad +.RS 12n +Display the kind of device a pool or vdev is based on: ssd, hdd or file. Mixed mirror vdevs that have both ssd (or file) and hdd members are marked "mix". A pool is considered mixed if all members are at least mixed, i.e. no pure hdd. +.RE + .sp .ne 2 .na