From 1e344288b8e9123fa95a0f1d496b2c286580bca2 Mon Sep 17 00:00:00 2001
From: GeLiXin <47034221@qq.com>
Date: Mon, 16 May 2016 19:51:01 +0800
Subject: [PATCH] Fix: segmentation fault in write throttle while importing

Signed-off-by: Ge Lixin <47034221@qq.com>
---
 module/zfs/vdev_queue.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index af8af67de70d..bfe5b05a474e 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -249,20 +249,31 @@ static int
 vdev_queue_max_async_writes(spa_t *spa)
 {
 	int writes;
-	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
+	uint64_t dirty = 0;
+	dsl_pool_t *dp = spa_get_dsl(spa);
 	uint64_t min_bytes = zfs_dirty_data_max *
 	    zfs_vdev_async_write_active_min_dirty_percent / 100;
 	uint64_t max_bytes = zfs_dirty_data_max *
 	    zfs_vdev_async_write_active_max_dirty_percent / 100;
 
+	/*
+	 * Async writes may be issued before the pool finishes its
+	 * initialization, in which case we can't get the dirty data
+	 * statistics from the spa. Typically this happens when a
+	 * self-healing zio is issued by a mirror vdev during import.
+	 * Push data out as fast as possible to speed up initialization.
+	 */
+	if (dp == NULL)
+		return (zfs_vdev_async_write_max_active);
+
 	/*
 	 * Sync tasks correspond to interactive user actions. To reduce the
 	 * execution time of those actions we push data out as fast as possible.
 	 */
-	if (spa_has_pending_synctask(spa)) {
+	if (spa_has_pending_synctask(spa))
 		return (zfs_vdev_async_write_max_active);
-	}
 
+	dirty = dp->dp_dirty_total;
 	if (dirty < min_bytes)
		return (zfs_vdev_async_write_min_active);
 	if (dirty > max_bytes