From 53a46edc7c25e9add02c2c4edeb45472a7616b1e Mon Sep 17 00:00:00 2001
From: GeLiXin <47034221@qq.com>
Date: Mon, 16 May 2016 19:51:01 +0800
Subject: [PATCH] Fix: segmentation fault of write throttle while importing

Signed-off-by: Ge Lixin <47034221@qq.com>
---
 module/zfs/vdev_queue.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index e828ce9176f5..2dba84ecd975 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -249,12 +249,23 @@ static int
 vdev_queue_max_async_writes(spa_t *spa)
 {
 	int writes;
-	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
+	uint64_t dirty = 0;
 	uint64_t min_bytes = zfs_dirty_data_max *
 	    zfs_vdev_async_write_active_min_dirty_percent / 100;
 	uint64_t max_bytes = zfs_dirty_data_max *
 	    zfs_vdev_async_write_active_max_dirty_percent / 100;
 
+	/*
+	 * Async writes may be issued before the pool finishes the process
+	 * of initialization, which means we can't get the dirty data
+	 * statistics from the spa. Typically this happens when a
+	 * self-healing zio is issued by the mirror vdev while importing.
+	 * We push data out as fast as possible to speed up initialization.
+	 */
+	if (spa->spa_dsl_pool == NULL) {
+		return (zfs_vdev_async_write_max_active);
+	}
+
 	/*
 	 * Sync tasks correspond to interactive user actions. To reduce the
 	 * execution time of those actions we push data out as fast as possible.
@@ -263,6 +274,7 @@ vdev_queue_max_async_writes(spa_t *spa)
 		return (zfs_vdev_async_write_max_active);
 	}
 
+	dirty = spa->spa_dsl_pool->dp_dirty_total;
 	if (dirty < min_bytes)
 		return (zfs_vdev_async_write_min_active);
 	if (dirty > max_bytes)