Skip to content

Commit

Permalink
Maximize txg size to improve throughput
Browse files Browse the repository at this point in the history
Added a new module parameter zfs_txg_quiesce_advance to control the
timing of quiescing open txg.

The open txg can be quiesced into the pipeline even if there is a txg
still syncing. When the dirty data in the syncing txg drops below
zfs_txg_quiesce_advance, which also means the sync is about to
complete, quiesce the open txg into the pipeline.
A value of 0 means the open txg is only quiesced once all the data in
the previous txg has been synced. Even when all dirty data has been
written out, the sync thread may not return immediately; it may be
blocked on a mutex lock. Thus, even with a value of 0, this still helps
minimize idle time of the sync stage.
Recommended value: 0 - 5% of zfs_dirty_data_max in bytes.

I also move txg_kick() call into dsl_pool_dirty_space() and
dsl_pool_undirty_space(), so that txg_kick() can also be triggered by
undirty routines.

Addressed race condition of tx->tx_open_txg in txg_sync_thread().

Some other style fixes regarding code review.

Signed-off-by: jxdking <[email protected]>
  • Loading branch information
jxdking committed May 17, 2021
1 parent 184f4cd commit 6b40c56
Show file tree
Hide file tree
Showing 4 changed files with 52 additions and 22 deletions.
2 changes: 1 addition & 1 deletion include/sys/dsl_pool.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ struct dsl_deadlist;
extern unsigned long zfs_dirty_data_max;
extern unsigned long zfs_dirty_data_max_max;
extern int zfs_dirty_data_sync_percent;
extern unsigned long zfs_txg_quiesce_advance;
extern int zfs_dirty_data_max_percent;
extern int zfs_dirty_data_max_max_percent;
extern int zfs_delay_min_dirty_percent;
Expand Down Expand Up @@ -171,7 +172,6 @@ void dsl_pool_mos_diduse_space(dsl_pool_t *dp,
void dsl_pool_ckpoint_diduse_space(dsl_pool_t *dp,
int64_t used, int64_t comp, int64_t uncomp);
boolean_t dsl_pool_need_dirty_delay(dsl_pool_t *dp);
boolean_t dsl_pool_need_dirty_sync(dsl_pool_t *dp, uint64_t txg);
void dsl_pool_config_enter(dsl_pool_t *dp, void *tag);
void dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag);
void dsl_pool_config_exit(dsl_pool_t *dp, void *tag);
Expand Down
4 changes: 0 additions & 4 deletions module/zfs/dmu_tx.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>
#include <sys/txg.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
uint64_t arg1, uint64_t arg2);
Expand Down Expand Up @@ -1056,9 +1055,6 @@ dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)

txg_rele_to_quiesce(&tx->tx_txgh);

if (dsl_pool_need_dirty_sync(tx->tx_pool, tx->tx_txg)) {
txg_kick(tx->tx_pool, tx->tx_txg);
}
return (0);
}

Expand Down
49 changes: 40 additions & 9 deletions module/zfs/dsl_pool.c
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,16 @@ int zfs_dirty_data_max_max_percent = 25;
*/
int zfs_dirty_data_sync_percent = 20;

/*
* The open txg can be quiesced into the pipeline even if there is a txg still
* syncing. When the dirty data in the syncing txg is below
* zfs_txg_quiesce_advance, which also means the sync is about to complete,
* quiesce the open txg into the pipeline.
* 0 means only quiesce the open txg when all the data in the previous txg
* is synced.
*/
unsigned long zfs_txg_quiesce_advance = 0;

/*
* Once there is this amount of dirty data, the dmu_tx_delay() will kick in
* and delay each transaction.
Expand Down Expand Up @@ -899,27 +909,32 @@ dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
uint64_t delay_min_bytes =
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
uint64_t dirty;

mutex_enter(&dp->dp_lock);
dirty = dp->dp_dirty_total;
uint64_t dirty = dp->dp_dirty_total;
mutex_exit(&dp->dp_lock);

return (dirty > delay_min_bytes);
}

boolean_t
static boolean_t
dsl_pool_need_dirty_sync(dsl_pool_t *dp, uint64_t txg)
{
uint64_t dirty;
ASSERT(MUTEX_HELD(&dp->dp_lock));

uint64_t dirty_min_bytes =
zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
uint64_t dirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
uint64_t total = dp->dp_dirty_total;

mutex_enter(&dp->dp_lock);
dirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
mutex_exit(&dp->dp_lock);

return (dirty > dirty_min_bytes);
/*
* Only quiesce new transaction group when previous syncing is
* getting close to completion, so that quiescing completed just
* in time for it. That's the time when the dirty data in
* syncing txg shrinks below zfs_txg_quiesce_advance.
*/
return (dirty > dirty_min_bytes &&
total - dirty <= zfs_txg_quiesce_advance);
}

void
Expand All @@ -929,7 +944,11 @@ dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
mutex_enter(&dp->dp_lock);
dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
dsl_pool_dirty_delta(dp, space);
boolean_t needsync = dsl_pool_need_dirty_sync(dp, tx->tx_txg);
mutex_exit(&dp->dp_lock);

if (needsync)
txg_kick(dp, tx->tx_txg);
}
}

Expand All @@ -949,7 +968,16 @@ dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
ASSERT3U(dp->dp_dirty_total, >=, space);
dsl_pool_dirty_delta(dp, -space);

/* Assuming txg + 1 is in open stage, check if it needs to be synced. */
boolean_t needsync = dsl_pool_need_dirty_sync(dp, txg + 1);
mutex_exit(&dp->dp_lock);
/*
* Pass txg + 1 into txg_kick. Inside txg_kick(), it will kick only
* if txg + 1 is actually in open stage.
*/
if (needsync)
txg_kick(dp, txg + 1);
}

/* ARGSUSED */
Expand Down Expand Up @@ -1411,6 +1439,9 @@ ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD,
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, INT, ZMOD_RW,
"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");

ZFS_MODULE_PARAM(zfs, zfs_, txg_quiesce_advance, ULONG, ZMOD_RW,
"Threshold of the dirty data in syncing txg to quiesce open txg");

ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW,
"How quickly delay approaches infinity");

Expand Down
19 changes: 11 additions & 8 deletions module/zfs/txg.c
Original file line number Diff line number Diff line change
Expand Up @@ -531,8 +531,6 @@ txg_sync_thread(void *arg)
clock_t timeout = zfs_txg_timeout * hz;
clock_t timer;
uint64_t txg;
uint64_t dirty_min_bytes =
zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;

/*
* We sync when we're scanning, there's someone waiting
Expand All @@ -543,8 +541,7 @@ txg_sync_thread(void *arg)
while (!dsl_scan_active(dp->dp_scan) &&
!tx->tx_exiting && timer > 0 &&
tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
!txg_has_quiesced_to_sync(dp) &&
dp->dp_dirty_total < dirty_min_bytes) {
!txg_has_quiesced_to_sync(dp)) {
dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
Expand All @@ -557,9 +554,11 @@ txg_sync_thread(void *arg)
* prompting it to do so if necessary.
*/
while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
cv_broadcast(&tx->tx_quiesce_more_cv);
if (!txg_is_quiescing(dp)) {
if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg + 1)
tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
cv_broadcast(&tx->tx_quiesce_more_cv);
}
txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
}

Expand Down Expand Up @@ -779,6 +778,7 @@ txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
* If there isn't a txg quiescing in the pipeline, push the txg
* through the pipeline by quiescing the open txg.
* It is fine there is a txg still syncing.
* Pass in the txg number of the transaction that should be closed and synced.
*/
void
txg_kick(dsl_pool_t *dp, uint64_t txg)
Expand All @@ -787,8 +787,11 @@ txg_kick(dsl_pool_t *dp, uint64_t txg)

ASSERT(!dsl_pool_config_held(dp));

if (txg != tx->tx_open_txg ||
tx->tx_quiesce_txg_waiting > tx->tx_open_txg)
return;

mutex_enter(&tx->tx_sync_lock);
txg = txg == 0 ? tx->tx_open_txg : txg;
if (txg == tx->tx_open_txg &&
!txg_is_quiescing(dp) &&
tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
Expand Down

0 comments on commit 6b40c56

Please sign in to comment.