Multi-modifier protection (MMP)
Add a multihost=on|off pool property to control MMP.  When enabled,
a new thread writes uberblocks to the last slot in each label, at a
set frequency, to indicate to other hosts that the pool is actively
imported.  These uberblocks are copies of the last synced uberblock
with an updated timestamp.  The property defaults to off.
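
As an illustration of the mechanism, a minimal, self-contained C model
of the writer loop is below.  It uses stand-in types and omits the
label I/O entirely; none of these names come from the actual mmp.c
implementation:

#include <stdint.h>
#include <time.h>
#include <unistd.h>

struct mmp_ub {                     /* stand-in for uberblock_t */
        uint64_t txg;
        uint64_t timestamp;
        uint64_t mmp_delay;         /* smoothed ns between MMP writes */
};

static void
mmp_writer_model(const struct mmp_ub *last_synced, unsigned interval_ms,
    volatile int *exiting)
{
        while (!*exiting) {
                struct mmp_ub ub = *last_synced;  /* copy last synced ub */

                ub.timestamp = (uint64_t)time(NULL);  /* freshen stamp */
                /*
                 * Here the real thread writes ub to the last uberblock
                 * slot of a vdev label; the device I/O is omitted in
                 * this sketch.
                 */
                usleep(interval_ms * 1000u);
        }
}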

During tryimport, find the "best" uberblock (newest txg and timestamp)
repeatedly, checking for change in the found uberblock.  Include the
results of the activity test in the config returned by tryimport.
These results are reported to the user by "zpool import".
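
The activity test itself reduces to scanning for the best uberblock
twice, separated by the test duration, and comparing the results.  A
self-contained C model of that comparison (the name is an assumption,
not the spa.c code):

#include <stdint.h>

/*
 * Returns nonzero if the pool appears actively imported elsewhere:
 * the best uberblock advanced between the two scans.
 */
static int
mmp_pool_appears_active(uint64_t txg0, uint64_t ts0,
    uint64_t txg1, uint64_t ts1)
{
        return (txg1 != txg0 || ts1 != ts0);
}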

Allow the user to control the period between MMP writes, and the
duration of the activity test on import, via a new module parameter
zfs_multihost_interval.  The period is specified in milliseconds.  The
activity test duration is calculated from this value, and from the
mmp_delay in the "best" uberblock found initially.
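
Illustrative arithmetic only — the exact formula lives in the import
code, and the factor of 10 below is an assumption: the test waits long
enough to observe several MMP writes, based on the larger of the two
inputs.

#include <stdint.h>

static uint64_t
activity_test_duration_ms(uint64_t multihost_interval_ms,
    uint64_t mmp_delay_ns)
{
        uint64_t per_write_ms = mmp_delay_ns / 1000000;  /* ns -> ms */

        if (per_write_ms < multihost_interval_ms)
                per_write_ms = multihost_interval_ms;
        return (10 * per_write_ms);  /* assumed number of intervals */
}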

Add a kstat interface to export statistics about Multi-Modifier
Protection (MMP) updates.  Include the last synced txg number, the
timestamp, the delay since the last MMP update, the VDEV GUID, the VDEV
label that received the last MMP update, and the VDEV path.  Abbreviated
output below.

$ cat /proc/spl/kstat/zfs/mypool/multihost
31 0 0x01 10 880 105092382393521 105144180101111
txg   timestamp  mmp_delay   vdev_guid   vdev_label vdev_path
20468    261337  250274925   68396651780       3    /dev/sda
20468    261339  252023374   6267402363293     1    /dev/sdc
20468    261340  252000858   6698080955233     1    /dev/sdx
20468    261341  251980635   783892869810      2    /dev/sdy
20468    261342  253385953   8923255792467     3    /dev/sdd
20468    261344  253336622   042125143176      0    /dev/sdab
20468    261345  253310522   1200778101278     2    /dev/sde
20468    261346  253286429   0950576198362     2    /dev/sdt
20468    261347  253261545   96209817917       3    /dev/sds
20468    261349  253238188   8555725937673     3    /dev/sdb

Add a new tunable, zfs_multihost_history, to specify the number of MMP
updates for which history is stored.  By default it is set to zero,
meaning that no MMP statistics are stored.
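
For example, to keep the most recent 100 entries at runtime on a Linux
build (the module-parameter path below is the standard ZFS on Linux
location; verify on your system):

$ echo 100 | sudo tee /sys/module/zfs/parameters/zfs_multihost_history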

When using ztest to generate activity for automated tests of the MMP
function, some of ztest's own test functions interfere with the test.
For example, the pool is exported to run zdb and then imported again.
Add a new ztest option, -M, to alter ztest's behavior and prevent this.

Add new tests to verify the new functionality.  Tests provided by
Giuseppe Di Natale.

Reviewed-by: Matthew Ahrens <[email protected]>
Reviewed-by: Giuseppe Di Natale <[email protected]>
Reviewed-by: Ned Bass <[email protected]>
Reviewed-by: Andreas Dilger <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Olaf Faaland <[email protected]>
Closes #745
Closes #6279
ofaaland authored and behlendorf committed Jul 13, 2017
1 parent 34ae0ae commit 379ca9c
Showing 56 changed files with 2,646 additions and 250 deletions.
131 changes: 40 additions & 91 deletions cmd/zdb/zdb.c
@@ -2165,6 +2165,13 @@ dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));

(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (ub->ub_mmp_magic == MMP_MAGIC)
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);

if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
@@ -2529,6 +2536,11 @@ dump_label_uberblocks(label_t *label, uint64_t ashift, int label_num)
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;

if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;

print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
@@ -4125,89 +4137,6 @@ zdb_embedded_block(char *thing)
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
}

static boolean_t
pool_match(nvlist_t *cfg, char *tgt)
{
uint64_t v, guid = strtoull(tgt, NULL, 0);
char *s;

if (guid != 0) {
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
return (v == guid);
} else {
if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
return (strcmp(s, tgt) == 0);
}
return (B_FALSE);
}

static char *
find_zpool(char **target, nvlist_t **configp, int dirc, char **dirv)
{
nvlist_t *pools;
nvlist_t *match = NULL;
char *name = NULL;
char *sepp = NULL;
char sep = '\0';
int count = 0;
importargs_t args = { 0 };

args.paths = dirc;
args.path = dirv;
args.can_be_active = B_TRUE;

if ((sepp = strpbrk(*target, "/@")) != NULL) {
sep = *sepp;
*sepp = '\0';
}

pools = zpool_search_import(g_zfs, &args);

if (pools != NULL) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, configp) == 0);
if (pool_match(*configp, *target)) {
count++;
if (match != NULL) {
/* print previously found config */
if (name != NULL) {
(void) printf("%s\n", name);
dump_nvlist(match, 8);
name = NULL;
}
(void) printf("%s\n",
nvpair_name(elem));
dump_nvlist(*configp, 8);
} else {
match = *configp;
name = nvpair_name(elem);
}
}
}
}
if (count > 1)
(void) fatal("\tMatched %d pools - use pool GUID "
"instead of pool name or \n"
"\tpool name part of a dataset name to select pool", count);

if (sepp)
*sepp = sep;
/*
* If pool GUID was specified for pool id, replace it with pool name
*/
if (name && (strstr(*target, name) != *target)) {
int sz = 1 + strlen(name) + ((sepp) ? strlen(sepp) : 0);

*target = umem_alloc(sz, UMEM_NOFAIL);
(void) snprintf(*target, sz, "%s%s", name, sepp ? sepp : "");
}

*configp = name ? match : NULL;

return (name);
}

int
main(int argc, char **argv)
{
@@ -4424,21 +4353,31 @@ main(int argc, char **argv)
target = argv[0];

if (dump_opt['e']) {
importargs_t args = { 0 };
nvlist_t *cfg = NULL;
char *name = find_zpool(&target, &cfg, nsearch, searchdirs);

error = ENOENT;
if (name) {
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;

error = zpool_tryimport(g_zfs, target, &cfg, &args);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_REWIND_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
error = spa_import(name, cfg, NULL, flags);

/*
* Disable the activity check to allow examination of
* active pools.
*/
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
error = spa_import(target, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}

@@ -4453,6 +4392,16 @@

if (error == 0) {
if (target_is_spa || dump_opt['R']) {
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);

error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
54 changes: 10 additions & 44 deletions cmd/zhack/zhack.c
@@ -121,16 +121,11 @@ space_delta_cb(dmu_object_type_t bonustype, void *data,
* Target is the dataset whose pool we want to open.
*/
static void
import_pool(const char *target, boolean_t readonly)
zhack_import(char *target, boolean_t readonly)
{
nvlist_t *config;
nvlist_t *pools;
int error;
char *sepp;
spa_t *spa;
nvpair_t *elem;
nvlist_t *props;
char *name;
int error;

kernel_init(readonly ? FREAD : (FREAD | FWRITE));
g_zfs = libzfs_init();
@@ -139,43 +134,14 @@ import_pool(const char *target, boolean_t readonly)
dmu_objset_register_type(DMU_OST_ZFS, space_delta_cb);

g_readonly = readonly;

/*
* If we only want readonly access, it's OK if we find
* a potentially-active (ie, imported into the kernel) pool from the
* default cachefile.
*/
if (readonly && spa_open(target, &spa, FTAG) == 0) {
spa_close(spa, FTAG);
return;
}

g_importargs.unique = B_TRUE;
g_importargs.can_be_active = readonly;
g_pool = strdup(target);
if ((sepp = strpbrk(g_pool, "/@")) != NULL)
*sepp = '\0';
g_importargs.poolname = g_pool;
pools = zpool_search_import(g_zfs, &g_importargs);

if (nvlist_empty(pools)) {
if (!g_importargs.can_be_active) {
g_importargs.can_be_active = B_TRUE;
if (zpool_search_import(g_zfs, &g_importargs) != NULL ||
spa_open(target, &spa, FTAG) == 0) {
fatal(spa, FTAG, "cannot import '%s': pool is "
"active; run " "\"zpool export %s\" "
"first\n", g_pool, g_pool);
}
}

fatal(NULL, FTAG, "cannot import '%s': no such pool "
"available\n", g_pool);
}

elem = nvlist_next_nvpair(pools, NULL);
name = nvpair_name(elem);
VERIFY(nvpair_value_nvlist(elem, &config) == 0);
error = zpool_tryimport(g_zfs, target, &config, &g_importargs);
if (error)
fatal(NULL, FTAG, "cannot import '%s': %s", target,
libzfs_error_description(g_zfs));

props = NULL;
if (readonly) {
@@ -185,22 +151,22 @@
}

zfeature_checks_disable = B_TRUE;
error = spa_import(name, config, props, ZFS_IMPORT_NORMAL);
error = spa_import(target, config, props, ZFS_IMPORT_NORMAL);
zfeature_checks_disable = B_FALSE;
if (error == EEXIST)
error = 0;

if (error)
fatal(NULL, FTAG, "can't import '%s': %s", name,
fatal(NULL, FTAG, "can't import '%s': %s", target,
strerror(error));
}

static void
zhack_spa_open(const char *target, boolean_t readonly, void *tag, spa_t **spa)
zhack_spa_open(char *target, boolean_t readonly, void *tag, spa_t **spa)
{
int err;

import_pool(target, readonly);
zhack_import(target, readonly);

zfeature_checks_disable = B_TRUE;
err = spa_open(target, spa, tag);