diff --git a/include/sys/mmp.h b/include/sys/mmp.h index 1ce685f9c38e..63dcb3aed829 100644 --- a/include/sys/mmp.h +++ b/include/sys/mmp.h @@ -43,6 +43,7 @@ typedef struct mmp_thread { uberblock_t mmp_ub; /* last ub written by sync */ zio_t *mmp_zio_root; /* root of mmp write zios */ uint64_t mmp_kstat_id; /* unique id for next MMP write kstat */ + int mmp_skip_error; /* reason for last skipped write */ } mmp_thread_t; @@ -58,6 +59,20 @@ extern ulong_t zfs_multihost_interval; extern uint_t zfs_multihost_fail_intervals; extern uint_t zfs_multihost_import_intervals; +/* + * If MMP is unable to find a leaf vdev to write an MMP block to, + * it will return one of the below error codes. + * + * MMP_WRITE_PENDING At least one writeable leaf vdev was found, but it had a + * pending MMP write. + * MMP_WRITE_TREE_RO At least one leaf was found, but none were writeable. + */ +typedef enum mmp_error { + MMP_WRITE_TREE_RO = EHWPOISON + 1, + MMP_WRITE_PENDING, + MMP_WRITE_OTHER +} mmp_error_t; + #ifdef __cplusplus } #endif diff --git a/include/sys/spa.h b/include/sys/spa.h index 7b529330fa84..62832eff0e72 100644 --- a/include/sys/spa.h +++ b/include/sys/spa.h @@ -887,10 +887,12 @@ extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t, struct dsl_pool *); extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *); extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs); +extern int spa_mmp_history_set_skip(spa_t *spa, uint64_t mmp_kstat_id); extern int spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error, hrtime_t duration); -extern void spa_mmp_history_add(uint64_t txg, uint64_t timestamp, - uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id); +extern void *spa_mmp_history_add(spa_t *spa, uint64_t txg, uint64_t timestamp, + uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id, + int error); /* Pool configuration locks */ extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw); diff --git 
a/module/zfs/mmp.c b/module/zfs/mmp.c index a668e8dbdb23..2db0bf9be2f4 100644 --- a/module/zfs/mmp.c +++ b/module/zfs/mmp.c @@ -201,27 +201,35 @@ mmp_thread_stop(spa_t *spa) } /* - * Choose a leaf vdev to write an MMP block to. It must not have an - * outstanding mmp write (if so then there is a problem, and a new write will - * also block). If there is no usable leaf in this subtree return NULL, - * otherwise return a pointer to the leaf. - * - * When walking the subtree, a random child is chosen as the starting point so - * that when the tree is healthy, the leaf chosen will be random with even - * distribution. If there are unhealthy vdevs in the tree, the distribution - * will be really poor only if a large proportion of the vdevs are unhealthy, - * in which case there are other more pressing problems. + * Flags for recording the state of the vdev tree with regards to MMP. */ +typedef enum mmp_vdev_state_flag { + MMP_FAIL_NOT_WRITABLE = (1 << 0), + MMP_FAIL_WRITE_PENDING = (1 << 1), +} mmp_vdev_state_flag_t; + static vdev_t * -mmp_random_leaf(vdev_t *vd) +mmp_random_leaf_impl(vdev_t *vd, int *fail_mask) { int child_idx; - if (!vdev_writeable(vd)) + if (!vdev_writeable(vd)) { + *fail_mask |= MMP_FAIL_NOT_WRITABLE; return (NULL); + } + + if (vd->vdev_ops->vdev_op_leaf) { + vdev_t *ret; - if (vd->vdev_ops->vdev_op_leaf) - return (vd->vdev_mmp_pending == 0 ? vd : NULL); + if (vd->vdev_mmp_pending != 0) { + *fail_mask |= MMP_FAIL_WRITE_PENDING; + ret = NULL; + } else { + ret = vd; + } + + return (ret); + } child_idx = spa_get_random(vd->vdev_children); for (int offset = vd->vdev_children; offset > 0; offset--) { @@ -229,7 +237,7 @@ mmp_random_leaf(vdev_t *vd) vdev_t *child = vd->vdev_child[(child_idx + offset) % vd->vdev_children]; - leaf = mmp_random_leaf(child); + leaf = mmp_random_leaf_impl(child, fail_mask); if (leaf) return (leaf); } @@ -237,6 +245,40 @@ mmp_random_leaf(vdev_t *vd) return (NULL); } +/* + * Choose a leaf vdev to write an MMP block to. 
It must not have an + * outstanding mmp write (if so then there is a problem, and a new write will + * also block). If there is no usable leaf in this subtree return NULL, + * otherwise return a pointer to the leaf. + * + * When walking the subtree, a random child is chosen as the starting point so + * that when the tree is healthy, the leaf chosen will be random with even + * distribution. If there are unhealthy vdevs in the tree, the distribution + * will be really poor only if a large proportion of the vdevs are unhealthy, + * in which case there are other more pressing problems. + */ + +static int +mmp_random_leaf(vdev_t *in_vd, vdev_t **out_vd) +{ + int fail_mask = 0; + int error = 0; + vdev_t *vd = mmp_random_leaf_impl(in_vd, &fail_mask); + + if (vd == NULL) { + if (fail_mask & MMP_FAIL_WRITE_PENDING) + error = MMP_WRITE_PENDING; + else if (fail_mask & MMP_FAIL_NOT_WRITABLE) + error = MMP_WRITE_TREE_RO; + else + error = MMP_WRITE_OTHER; + } else { + *out_vd = vd; + } + + return (error); +} + static void mmp_write_done(zio_t *zio) { @@ -319,8 +361,8 @@ mmp_write_uberblock(spa_t *spa) int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL; mmp_thread_t *mmp = &spa->spa_mmp; uberblock_t *ub; - vdev_t *vd; - int label; + vdev_t *vd = NULL; + int label, error; uint64_t offset; hrtime_t lock_acquire_time = gethrtime(); @@ -330,13 +372,33 @@ mmp_write_uberblock(spa_t *spa) zfs_dbgmsg("SCL_STATE acquisition took %llu ns\n", (u_longlong_t)lock_acquire_time); - vd = mmp_random_leaf(spa->spa_root_vdev); - if (vd == NULL) { + error = mmp_random_leaf(spa->spa_root_vdev, &vd); + + mutex_enter(&mmp->mmp_io_lock); + + /* + * spa_mmp_history has two types of entries: + * issued mmp write: records time issued, error status, etc. + * skipped mmp write: an mmp write could not be issued because no + * suitable leaf vdev was available. See comment + * above struct spa_mmp_history for details. 
+ */ + + if (error) { + if (mmp->mmp_skip_error == error) { + spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1); + } else { + mmp->mmp_skip_error = error; + spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg, + gethrestime_sec(), mmp->mmp_delay, NULL, 0, + mmp->mmp_kstat_id++, error); + } + mutex_exit(&mmp->mmp_io_lock); spa_config_exit(spa, SCL_STATE, FTAG); return; } - mutex_enter(&mmp->mmp_io_lock); + mmp->mmp_skip_error = 0; if (mmp->mmp_zio_root == NULL) mmp->mmp_zio_root = zio_root(spa, NULL, NULL, @@ -347,13 +409,14 @@ mmp_write_uberblock(spa_t *spa) ub->ub_mmp_magic = MMP_MAGIC; ub->ub_mmp_delay = mmp->mmp_delay; vd->vdev_mmp_pending = gethrtime(); - vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id++; + vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id; zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags); abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE); abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd)); abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t)); + mmp->mmp_kstat_id++; mutex_exit(&mmp->mmp_io_lock); offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) - @@ -364,8 +427,8 @@ mmp_write_uberblock(spa_t *spa) VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp, flags | ZIO_FLAG_DONT_PROPAGATE); - spa_mmp_history_add(ub->ub_txg, ub->ub_timestamp, ub->ub_mmp_delay, vd, - label, vd->vdev_mmp_kstat_id); + (void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp, + ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0); zio_nowait(zio); } diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c index c560d642f46c..b4b996ba6bf5 100644 --- a/module/zfs/spa_stats.c +++ b/module/zfs/spa_stats.c @@ -718,18 +718,28 @@ spa_io_history_destroy(spa_t *spa) */ /* - * MMP statistics - Information exported regarding each MMP update + * MMP statistics - Information exported regarding attempted MMP writes + * For MMP writes issued, fields used as per comments below. 
+ * For MMP writes skipped, an entry represents a span of time when + * writes were skipped for same reason (error from mmp_random_leaf). + * Differences are: + * timestamp time first write skipped, if >1 skipped in a row + * mmp_delay delay value at timestamp + * vdev_guid number of writes skipped + * io_error one of enum mmp_error + * duration time span (ns) of skipped writes */ typedef struct spa_mmp_history { uint64_t mmp_kstat_id; /* unique # for updates */ uint64_t txg; /* txg of last sync */ - uint64_t timestamp; /* UTC time of of last sync */ - uint64_t mmp_delay; /* nanosec since last MMP write */ + uint64_t timestamp; /* UTC time MMP write issued */ + uint64_t mmp_delay; /* mmp_thread.mmp_delay at timestamp */ uint64_t vdev_guid; /* unique ID of leaf vdev */ char *vdev_path; - uint64_t vdev_label; /* vdev label */ + int vdev_label; /* vdev label */ int io_error; /* error status of MMP write */ + hrtime_t error_start; /* hrtime of start of error period */ hrtime_t duration; /* time from submission to completion */ list_node_t smh_link; } spa_mmp_history_t; @@ -748,8 +758,8 @@ spa_mmp_history_data(char *buf, size_t size, void *data) { spa_mmp_history_t *smh = (spa_mmp_history_t *)data; - (void) snprintf(buf, size, "%-10llu %-10llu %-10llu %-6lld %-10lld " - "%-12llu %-24llu %-10llu %s\n", + (void) snprintf(buf, size, "%-10llu %-10llu %10llu %6lld %10lld " + "%12llu %-24llu %-10lld %s\n", (u_longlong_t)smh->mmp_kstat_id, (u_longlong_t)smh->txg, (u_longlong_t)smh->timestamp, (longlong_t)smh->io_error, (longlong_t)smh->duration, (u_longlong_t)smh->mmp_delay, @@ -869,8 +879,42 @@ spa_mmp_history_destroy(spa_t *spa) mutex_destroy(&ssh->lock); } +/* + * Set duration in existing "skip" record to how long we have waited for a leaf + * vdev to become available. + * + * Important that we start search at the beginning of the list where new + * records are inserted, so this is normally an O(1) operation. 
+ */ +int +spa_mmp_history_set_skip(spa_t *spa, uint64_t mmp_kstat_id) +{ + spa_stats_history_t *ssh = &spa->spa_stats.mmp_history; + spa_mmp_history_t *smh; + int error = ENOENT; + + if (zfs_multihost_history == 0 && ssh->size == 0) + return (0); + + mutex_enter(&ssh->lock); + for (smh = list_head(&ssh->list); smh != NULL; + smh = list_next(&ssh->list, smh)) { + if (smh->mmp_kstat_id == mmp_kstat_id) { + ASSERT3U(smh->io_error, !=, 0); + smh->duration = gethrtime() - smh->error_start; + smh->vdev_guid++; + error = 0; + break; + } + } + mutex_exit(&ssh->lock); + + return (error); +} + /* * Set MMP write duration and error status in existing record. + * See comment re: search order above spa_mmp_history_set_skip(). */ int spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error, @@ -887,6 +931,7 @@ spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error, for (smh = list_head(&ssh->list); smh != NULL; smh = list_next(&ssh->list, smh)) { if (smh->mmp_kstat_id == mmp_kstat_id) { + ASSERT(smh->io_error == 0); smh->io_error = io_error; smh->duration = duration; error = 0; @@ -899,29 +944,39 @@ spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error, } /* - * Add a new MMP write to historical record. + * Add a new MMP historical record. + * error == 0 : a write was issued. + * error != 0 : a write was not issued because no leaves were found. 
*/ -void -spa_mmp_history_add(uint64_t txg, uint64_t timestamp, uint64_t mmp_delay, - vdev_t *vd, int label, uint64_t mmp_kstat_id) +void * +spa_mmp_history_add(spa_t *spa, uint64_t txg, uint64_t timestamp, + uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id, + int error) { - spa_t *spa = vd->vdev_spa; spa_stats_history_t *ssh = &spa->spa_stats.mmp_history; spa_mmp_history_t *smh, *rm; if (zfs_multihost_history == 0 && ssh->size == 0) - return; + return (NULL); smh = kmem_zalloc(sizeof (spa_mmp_history_t), KM_SLEEP); smh->txg = txg; smh->timestamp = timestamp; smh->mmp_delay = mmp_delay; - smh->vdev_guid = vd->vdev_guid; - if (vd->vdev_path) - smh->vdev_path = strdup(vd->vdev_path); + if (vd) { + smh->vdev_guid = vd->vdev_guid; + if (vd->vdev_path) + smh->vdev_path = strdup(vd->vdev_path); + } smh->vdev_label = label; smh->mmp_kstat_id = mmp_kstat_id; + if (error) { + smh->io_error = error; + smh->error_start = gethrtime(); + smh->vdev_guid = 1; + } + mutex_enter(&ssh->lock); list_insert_head(&ssh->list, smh); @@ -936,6 +991,7 @@ spa_mmp_history_add(uint64_t txg, uint64_t timestamp, uint64_t mmp_delay, } mutex_exit(&ssh->lock); + return ((void *)smh); } void