more code cleanups
Signed-off-by: Tom Caputi <[email protected]>
Tom Caputi committed Jun 22, 2017
1 parent f9111e8 commit 6f49f97
Showing 5 changed files with 21 additions and 13 deletions.
1 change: 0 additions & 1 deletion include/sys/zfs_context.h
@@ -648,7 +648,6 @@ extern void delay(clock_t ticks);
#define MSEC_TO_TICK(msec) ((msec) / (MILLISEC / hz))
#define USEC_TO_TICK(usec) ((usec) / (MICROSEC / hz))
#define NSEC_TO_TICK(usec) ((usec) / (NANOSEC / hz))
-#define TICK_TO_MSEC(tick) ((tick) * (1000 / hz))

#define gethrestime_sec() time(NULL)
#define gethrestime(t) \
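For context, the surviving macros above convert wall-clock units into scheduler ticks by dividing by the per-tick duration; the deleted TICK_TO_MSEC went the other direction and is presumably no longer referenced. A minimal standalone sketch of how the conversions behave (the hz value here is an assumption; the real header takes it from the platform):

#include <stdio.h>

/* Illustrative stand-ins; in zfs_context.h these come from the platform. */
#define hz       100    /* assumed ticks per second */
#define MILLISEC 1000
#define MICROSEC 1000000

#define MSEC_TO_TICK(msec) ((msec) / (MILLISEC / hz))
#define USEC_TO_TICK(usec) ((usec) / (MICROSEC / hz))

int
main(void)
{
	/* 500 ms at 100 ticks/sec is 50 ticks; 20000 us is 2 ticks */
	printf("%d %d\n", MSEC_TO_TICK(500), USEC_TO_TICK(20000));
	return (0);
}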
21 changes: 12 additions & 9 deletions module/zfs/dsl_scan.c
Original file line number Diff line number Diff line change
@@ -434,7 +434,8 @@ dsl_scan_enqueue_impl(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
/*
* Given a set of I/O parameters as discovered by the metadata traversal
* process, attempts to place the I/O into the sorted queues (if allowed),
-* or immediately executes the I/O. The dummy flag can be set to
+* or immediately executes the I/O. The dummy flag can be set to indicate
+* this IO has already been done and a placeholder should be used instead.
*/
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
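The completed comment describes a placeholder ("dummy") mode for I/O that has already been issued. As a rough standalone illustration of that idea (all names and types below are invented for the sketch, not the ZFS ones):

#include <stdbool.h>
#include <stdint.h>

typedef struct scan_entry {
	uint64_t se_offset;	/* location the I/O covers */
	bool se_placeholder;	/* already done; tracked for accounting only */
} scan_entry_t;

/*
 * Hypothetical enqueue: when dummy is set the entry is recorded as a
 * placeholder so queue accounting stays consistent without the I/O
 * being issued a second time.
 */
static void
enqueue_entry(scan_entry_t *e, uint64_t offset, bool dummy)
{
	e->se_offset = offset;
	e->se_placeholder = dummy;
}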
@@ -1491,7 +1492,7 @@ dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
}

out:
-arc_buf_destroy(buf, &buf);
+arc_buf_destroy(buf, private);
scan_prefetch_ctx_rele(spc, scn);
}
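This one-line fix swaps the destroy tag from the stack-local &buf to the private pointer the callback received: arc_buf_destroy() expects the same tag the buffer was handed out under. A sketch of the pass-through convention (the callback name and its exact signature are assumptions):

static void
example_read_done(zio_t *zio, arc_buf_t *buf, void *private)
{
	/* ... consume the buffer contents ... */

	/*
	 * Release the buffer with the tag it was acquired under
	 * (private), not a pointer to a local variable.
	 */
	arc_buf_destroy(buf, private);
}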

@@ -2611,13 +2612,13 @@ scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
int64_t bytes_issued = 0;
-boolean_t pauseed = B_FALSE;
+boolean_t paused = B_FALSE;

while ((sio = list_head(io_list)) != NULL) {
blkptr_t bp;

if (scan_io_queue_check_pause(scn)) {
-pauseed = B_TRUE;
+paused = B_TRUE;
break;
}

@@ -2645,7 +2646,7 @@ scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
ASSERT3U(queue->q_zio_bytes, >=, bytes_issued);
atomic_add_64(&queue->q_zio_bytes, -bytes_issued);

-return (pauseed);
+return (paused);
}
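Renamed variable aside, scan_io_queue_issue() is a drain loop: pop entries, stop early when a pause is requested, and report whether it paused so the caller can requeue the remainder. The same shape as a standalone sketch (stub predicates, plain C list):

#include <stdbool.h>
#include <stddef.h>

typedef struct node {
	struct node *next;
} node_t;

static bool pause_requested(void) { return (false); }	/* stub */
static void issue_one(node_t *n) { (void) n; }		/* stub */

/* Returns true if we stopped early; unissued nodes remain on *head. */
static bool
drain_until_paused(node_t **head)
{
	bool paused = false;
	node_t *n;

	while ((n = *head) != NULL) {
		if (pause_requested()) {
			paused = true;
			break;
		}
		*head = n->next;
		issue_one(n);
	}
	return (paused);
}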

/*
@@ -2749,18 +2750,20 @@ scan_io_queues_run_one(void *arg)
scan_io_t *sio = NULL;
list_t sio_list;
const range_seg_t *rs;
-boolean_t pauseed = B_FALSE;
+boolean_t paused = B_FALSE;

ASSERT(queue->q_scn->scn_is_sorted);

list_create(&sio_list, sizeof (scan_io_t),
offsetof(scan_io_t, sio_nodes.sio_list_node));
mutex_enter(q_lock);

+/* reset per-queue scan statistics for this txg */
queue->q_avg_seg_size_this_txg = 0;
queue->q_segs_this_txg = 0;
queue->q_avg_zio_size_this_txg = 0;
queue->q_zios_this_txg = 0;
+queue->q_dummy_zios_this_txg = 0;

/* loop until we have run out of time or zios */
while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
@@ -2795,18 +2798,18 @@

/* issuing zio's can take a long time so drop the queue lock. */
mutex_exit(q_lock);
-pauseed = scan_io_queue_issue(queue, &sio_list);
+paused = scan_io_queue_issue(queue, &sio_list);
mutex_enter(q_lock);

/* invalidate the in-flight I/O range */
bzero(&queue->q_issuing_rs, sizeof (queue->q_issuing_rs));
cv_broadcast(&queue->q_cv);

/*
-* If we were pauseed in the middle of processing, requeue
+* If we were paused in the middle of processing, requeue
* any unfinished zios and exit.
*/
-if (pauseed)
+if (paused)
break;
}
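The tail of this loop shows a drop-the-lock-around-slow-work idiom: record the range being issued, release q_lock while the zios are submitted, retake it, clear the in-flight range, and broadcast on the condition variable so waiters recheck. Condensed into POSIX primitives (structure and names assumed):

#include <pthread.h>
#include <stdint.h>
#include <string.h>

typedef struct queue {
	pthread_mutex_t q_lock;
	pthread_cond_t q_cv;
	struct { uint64_t start, end; } q_issuing;	/* in-flight range */
} queue_t;

static void
issue_range(queue_t *q, uint64_t start, uint64_t end)
{
	/* caller holds q_lock; publish what is about to go in flight */
	q->q_issuing.start = start;
	q->q_issuing.end = end;

	/* issuing can take a long time, so drop the lock around it */
	pthread_mutex_unlock(&q->q_lock);
	/* ... submit the I/O covering [start, end) ... */
	pthread_mutex_lock(&q->q_lock);

	/* invalidate the in-flight range and wake anyone waiting on it */
	memset(&q->q_issuing, 0, sizeof (q->q_issuing));
	pthread_cond_broadcast(&q->q_cv);
}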

6 changes: 4 additions & 2 deletions tests/zfs-tests/tests/functional/events/events_001_pos.ksh
@@ -107,13 +107,15 @@ run_and_verify -p "$MPOOL" \
-e "sysevent.fs.zfs.resilver_start" \
-e "sysevent.fs.zfs.resilver_finish" \
-e "sysevent.fs.zfs.config_sync" \
"zpool replace -f $MPOOL $VDEV1 $VDEV4"
"zpool replace -f $MPOOL $VDEV1 $VDEV4 && \
while ! is_pool_resilvered $MPOOL; do sleep 1; done"

# Scrub a pool.
run_and_verify -p "$MPOOL" \
-e "sysevent.fs.zfs.scrub_start" \
-e "sysevent.fs.zfs.scrub_finish" \
"zpool scrub $MPOOL"
"zpool scrub $MPOOL && \
while ! is_pool_scrubbed $MPOOL; do sleep 1; done"

# Export then import a pool (may change to a pool_export event)
run_and_verify -p "$MPOOL" \
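Both test hunks change the verified command so it blocks until the asynchronous resilver or scrub actually finishes, guaranteeing the finish event has been posted before run_and_verify inspects the log. The shell loops are a poll-until-done pattern; the same shape in C, as a generic sketch (the predicate and timeout are assumptions):

#include <stdbool.h>
#include <unistd.h>

/* Poll done() once a second until it returns true or we time out. */
static bool
wait_for(bool (*done)(void), unsigned int timeout_secs)
{
	while (timeout_secs-- > 0) {
		if (done())
			return (true);
		sleep(1);
	}
	return (false);
}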
5 changes: 5 additions & 0 deletions tests/zfs-tests/tests/functional/events/events_002_pos.ksh
@@ -79,8 +79,13 @@ zed_stop

# 4. Generate additional events.
log_must cp -f $ZEDLET_DIR/zed.debug.log $ZEDLET_DIR/zed.debug.log.old
+
log_must zpool offline $MPOOL $VDEV1
log_must zpool online $MPOOL $VDEV1
+while ! is_pool_resilvered $MPOOL; do
+	sleep 1
+done
+
log_must zpool scrub $MPOOL

# Wait for the scrub to wrap, or is_healthy will be wrong.
1 change: 0 additions & 1 deletion
@@ -51,7 +51,6 @@ function run_and_verify
zedlog=${zedlog:-$ZEDLET_DIR/zed.debug.log}
fullcmd="$1"
cmd=$(echo $fullcmd | awk '{print $1}')
-subcmd=$(echo $fullcmd | awk '{print $2}')

# If we aren't running zpool or zfs, something is wrong
[[ $cmd == "zpool" || $cmd == "zfs" ]] || \
Expand Down
