Fix and disable block statistics during scrub.
Block statistics were calculated when scrub I/O was issued, which in the
case of sorted scrub counted ditto blocks several times, while embedded
blocks were not counted at all.  This change moves the accounting from
the issue stage to the scan stage, which fixes both problems and also
avoids the pool-wide lock and the contention it created.

Since these statistics are quite specific and are not currently exposed
anywhere, disable their calculation by default to avoid wasting CPU time.

Signed-off-by: Alexander Motin <[email protected]>
Sponsored-By: iXsystems, Inc.
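
The heart of the rework, visible in the diff below, is splitting the old
count_block() into two functions: count_block(), called once per block
pointer at scan time to update the per-level/per-type histogram, and
count_block_issued(), a single atomic add to the pool's issued-bytes
counter at issue time, charging either the whole block (all == B_TRUE) or
only the first DVA when just one copy will actually be read.  Because the
histogram is now updated from the scan stage rather than from many
concurrent I/O issues, the zab_lock mutex and its contention can be
dropped.  Below is a minimal, self-contained model of the issue-stage
accounting (an editor's sketch, not ZFS code: blkptr_model_t,
MODEL_MAX_DVAS, and the sizes are invented here, while count_block_issued,
the "all" flag, and spa_scan_pass_issued mirror names from the diff):

/*
 * Editor's sketch of the new issue-stage accounting; simplified types.
 * Build with: cc -O2 model.c
 */
#include <stdint.h>
#include <stdio.h>

#define	MODEL_MAX_DVAS	3

typedef struct {
	uint64_t dva_asize[MODEL_MAX_DVAS];	/* allocated bytes per copy */
	int ndvas;				/* number of copies (dittos) */
} blkptr_model_t;

static uint64_t spa_scan_pass_issued;	/* pool-wide issued-bytes counter */

static void
count_block_issued(const blkptr_model_t *bp, int all)
{
	/*
	 * all != 0: the block is fully handled here, charge every DVA;
	 * all == 0: only the first copy will be read, charge DVA 0 only.
	 */
	uint64_t asize = bp->dva_asize[0];

	if (all) {
		for (int i = 1; i < bp->ndvas; i++)
			asize += bp->dva_asize[i];
	}
	/* One lock-free atomic add replaces the old mutex-protected path. */
	__atomic_add_fetch(&spa_scan_pass_issued, asize, __ATOMIC_RELAXED);
}

int
main(void)
{
	blkptr_model_t bp = { { 4096, 4096, 4096 }, 3 };	/* 3 dittos */

	count_block_issued(&bp, 1);	/* e.g. block outside scrub txg range */
	count_block_issued(&bp, 0);	/* e.g. freed block in a sorted scan */
	printf("issued: %llu bytes\n",
	    (unsigned long long)spa_scan_pass_issued);
	return (0);
}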
amotin committed Jun 22, 2022
1 parent d51f4ea commit 678b7e6
Showing 1 changed file with 29 additions and 31 deletions.
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -129,6 +129,7 @@ static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
 static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
 
 extern int zfs_vdev_async_write_active_min_dirty_percent;
+static int zfs_scan_blkstats;
 
 /*
  * By default zfs will check to ensure it is not over the hard memory
@@ -793,14 +794,19 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
 
 	/* back to the generic stuff */
 
-	if (dp->dp_blkstats == NULL) {
-		dp->dp_blkstats =
-		    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
-		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
-		    MUTEX_DEFAULT, NULL);
+	if (zfs_scan_blkstats) {
+		if (dp->dp_blkstats == NULL) {
+			dp->dp_blkstats =
+			    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
+		}
+		memset(&dp->dp_blkstats->zab_type, 0,
+		    sizeof (dp->dp_blkstats->zab_type));
+	} else {
+		if (dp->dp_blkstats) {
+			vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
+			dp->dp_blkstats = NULL;
+		}
 	}
-	memset(&dp->dp_blkstats->zab_type, 0,
-	    sizeof (dp->dp_blkstats->zab_type));
 
 	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
 		ot = DMU_OT_ZAP_OTHER;
@@ -3810,10 +3816,8 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
 }
 
 static void
-count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
+count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
 {
-	int i;
-
 	/*
 	 * Don't count embedded bp's, since we already did the work of
 	 * scanning these when we scanned the containing block.
@@ -3828,28 +3832,21 @@ count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
 	 * zio code will only try the first one unless there is an issue.
 	 * Therefore, we should only count the first DVA for these IOs.
 	 */
-	if (scn->scn_is_sorted) {
-		atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
-		    DVA_GET_ASIZE(&bp->blk_dva[0]));
-	} else {
-		spa_t *spa = scn->scn_dp->dp_spa;
-
-		for (i = 0; i < BP_GET_NDVAS(bp); i++) {
-			atomic_add_64(&spa->spa_scan_pass_issued,
-			    DVA_GET_ASIZE(&bp->blk_dva[i]));
-		}
-	}
+	atomic_add_64(&spa->spa_scan_pass_issued,
+	    all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
+}
+
+static __noinline void
+count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
+{
 	/*
 	 * If we resume after a reboot, zab will be NULL; don't record
 	 * incomplete stats in that case.
 	 */
 	if (zab == NULL)
 		return;
 
-	mutex_enter(&zab->zab_lock);
-
-	for (i = 0; i < 4; i++) {
+	for (int i = 0; i < 4; i++) {
 		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
 		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
 
@@ -3884,8 +3881,6 @@ count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
 			break;
 		}
 	}
-
-	mutex_exit(&zab->zab_lock);
 }
 
 static void
@@ -3989,10 +3984,10 @@ dsl_scan_scrub_cb(dsl_pool_t *dp,
 	boolean_t needs_io = B_FALSE;
 	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
 
-
+	count_block(dp->dp_blkstats, bp);
 	if (phys_birth <= scn->scn_phys.scn_min_txg ||
 	    phys_birth >= scn->scn_phys.scn_max_txg) {
-		count_block(scn, dp->dp_blkstats, bp);
+		count_block_issued(spa, bp, B_TRUE);
 		return (0);
 	}
 
@@ -4032,7 +4027,7 @@ dsl_scan_scrub_cb(dsl_pool_t *dp,
 	if (needs_io && !zfs_no_scrub_io) {
 		dsl_scan_enqueue(dp, bp, zio_flags, zb);
 	} else {
-		count_block(scn, dp->dp_blkstats, bp);
+		count_block_issued(spa, bp, B_TRUE);
 	}
 
 	/* do not relocate this block */
@@ -4106,7 +4101,7 @@ scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
 	}
 
 	ASSERT(pio != NULL);
-	count_block(scn, dp->dp_blkstats, bp);
+	count_block_issued(spa, bp, queue == NULL);
 	zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
 	    queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
 }
@@ -4342,7 +4337,7 @@ dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
 
 	/* count the block as though we issued it */
 	sio2bp(sio, &tmpbp);
-	count_block(scn, dp->dp_blkstats, &tmpbp);
+	count_block_issued(spa, &tmpbp, B_FALSE);
 
 	sio_free(sio);
 }
@@ -4433,6 +4428,9 @@ ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
 	"Enable processing of the free_bpobj");
 
+ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
+	"Enable block statistics calculation during scrub");
+
 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
 	"Fraction of RAM for scan hard limit");
