Rename rangelock_ functions to zfs_rangelock_ #9402

Merged: 1 commit, Oct 3, 2019
include/sys/zfs_rlock.h (10 changes: 5 additions & 5 deletions)

@@ -66,13 +66,13 @@ typedef struct locked_range {
 	uint8_t lr_read_wanted; /* reader wants to lock this range */
 } locked_range_t;
 
-void rangelock_init(rangelock_t *, rangelock_cb_t *, void *);
-void rangelock_fini(rangelock_t *);
+void zfs_rangelock_init(rangelock_t *, rangelock_cb_t *, void *);
+void zfs_rangelock_fini(rangelock_t *);
 
-locked_range_t *rangelock_enter(rangelock_t *,
+locked_range_t *zfs_rangelock_enter(rangelock_t *,
     uint64_t, uint64_t, rangelock_type_t);
-void rangelock_exit(locked_range_t *);
-void rangelock_reduce(locked_range_t *, uint64_t, uint64_t);
+void zfs_rangelock_exit(locked_range_t *);
+void zfs_rangelock_reduce(locked_range_t *, uint64_t, uint64_t);
 
 #ifdef __cplusplus
 }
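The rename is purely mechanical; every signature is unchanged apart from the zfs_ prefix. For reference, a minimal sketch of the call pattern these declarations imply, assuming the ZFS kernel headers are available (illustrative, not a standalone program):

#include <sys/zfs_rlock.h>

/*
 * Minimal sketch of the renamed API, using only the declarations
 * above. A real rangelock_t lives inside a znode or zvol_state_t.
 */
static void
rangelock_sketch(rangelock_t *rl)
{
	/* No callback, as in the zvol usage below. */
	zfs_rangelock_init(rl, NULL, NULL);

	/* Block until bytes [0, 4096) can be read-locked. */
	locked_range_t *lr = zfs_rangelock_enter(rl, 0, 4096, RL_READER);

	/* ... access the protected range ... */

	zfs_rangelock_exit(lr);
	zfs_rangelock_fini(rl);
}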
module/os/linux/zfs/zfs_vnops.c (34 changes: 17 additions & 17 deletions)

@@ -485,7 +485,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	/*
 	 * Lock the range against changes.
 	 */
-	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
+	locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
 	    uio->uio_loffset, uio->uio_resid, RL_READER);
 
 	/*
@@ -558,7 +558,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
 	task_io_account_read(nread);
 out:
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	ZFS_EXIT(zfsvfs);
 	return (error);
@@ -672,7 +672,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		 * Obtain an appending range lock to guarantee file append
 		 * semantics. We reset the write offset once we have the lock.
 		 */
-		lr = rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
+		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
 		woff = lr->lr_offset;
 		if (lr->lr_length == UINT64_MAX) {
 			/*
@@ -689,11 +689,11 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		 * this write, then this range lock will lock the entire file
 		 * so that we can re-write the block safely.
 		 */
-		lr = rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
+		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
 	}
 
 	if (woff >= limit) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		ZFS_EXIT(zfsvfs);
 		return (SET_ERROR(EFBIG));
 	}
@@ -811,7 +811,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 			new_blksz = MIN(end_size, max_blksz);
 		}
 		zfs_grow_blocksize(zp, new_blksz, tx);
-		rangelock_reduce(lr, woff, n);
+		zfs_rangelock_reduce(lr, woff, n);
 	}
 
 	/*
@@ -950,7 +950,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	}
 
 	zfs_inode_update(zp);
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	/*
 	 * If we're in replay mode, or we made no progress, return error.
@@ -1003,7 +1003,7 @@ zfs_get_done(zgd_t *zgd, int error)
 	if (zgd->zgd_db)
 		dmu_buf_rele(zgd->zgd_db, zgd);
 
-	rangelock_exit(zgd->zgd_lr);
+	zfs_rangelock_exit(zgd->zgd_lr);
 
 	/*
 	 * Release the vnode asynchronously as we currently have the
@@ -1064,7 +1064,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 	 * we don't have to write the data twice.
 	 */
 	if (buf != NULL) { /* immediate write */
-		zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
+		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
 		    offset, size, RL_READER);
 		/* test for truncation needs to be done while range locked */
 		if (offset >= zp->z_size) {
@@ -1086,12 +1086,12 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 			size = zp->z_blksz;
 			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
 			offset -= blkoff;
-			zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
+			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
 			    offset, size, RL_READER);
 			if (zp->z_blksz == size)
 				break;
 			offset += blkoff;
-			rangelock_exit(zgd->zgd_lr);
+			zfs_rangelock_exit(zgd->zgd_lr);
 		}
 		/* test for truncation needs to be done while range locked */
 		if (lr->lr_offset >= zp->z_size)
@@ -4517,22 +4517,22 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	redirty_page_for_writepage(wbc, pp);
 	unlock_page(pp);
 
-	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
+	locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
 	    pgoff, pglen, RL_WRITER);
 	lock_page(pp);
 
 	/* Page mapping changed or it was no longer dirty, we're done */
 	if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
 		unlock_page(pp);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		ZFS_EXIT(zfsvfs);
 		return (0);
 	}
 
 	/* Another process started write block if required */
 	if (PageWriteback(pp)) {
 		unlock_page(pp);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 
 		if (wbc->sync_mode != WB_SYNC_NONE) {
 			if (PageWriteback(pp))
@@ -4546,7 +4546,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	/* Clear the dirty flag the required locks are held */
 	if (!clear_page_dirty_for_io(pp)) {
 		unlock_page(pp);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		ZFS_EXIT(zfsvfs);
 		return (0);
 	}
@@ -4573,7 +4573,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 		__set_page_dirty_nobuffers(pp);
 		ClearPageError(pp);
 		end_page_writeback(pp);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		ZFS_EXIT(zfsvfs);
 		return (err);
 	}
@@ -4600,7 +4600,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	    zfs_putpage_commit_cb, pp);
 	dmu_tx_commit(tx);
 
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	if (wbc->sync_mode != WB_SYNC_NONE) {
 		/*
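The zfs_write() hunks above carry the most interesting usage of the renamed API: RL_APPEND resolves the true write offset under the lock, and a whole-file lock taken for a possible block-size change is later narrowed with zfs_rangelock_reduce(). A condensed sketch of that flow, with the DMU transaction logic from zfs_write() elided (illustrative only):

#include <sys/zfs_rlock.h>

/*
 * Sketch of the O_APPEND pattern from zfs_write() above.
 * Illustrative only; error handling and the tx logic are elided.
 */
static void
append_write_sketch(rangelock_t *rl, uint64_t n)
{
	/*
	 * RL_APPEND locks starting at end-of-file; the returned lock
	 * tells us the offset the append will actually land at.
	 */
	locked_range_t *lr = zfs_rangelock_enter(rl, 0, n, RL_APPEND);
	uint64_t woff = lr->lr_offset;

	if (lr->lr_length == UINT64_MAX) {
		/*
		 * The entire file was locked because the block size may
		 * need to grow; once that is settled, narrow the lock to
		 * just the range being written so other I/O can proceed.
		 */
		zfs_rangelock_reduce(lr, woff, n);
	}

	/* ... perform the write at woff ... */

	zfs_rangelock_exit(lr);
}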
module/os/linux/zfs/zfs_znode.c (28 changes: 14 additions & 14 deletions)

@@ -129,7 +129,7 @@ zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
 	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
 	rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
 
-	rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
+	zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
 
 	zp->z_dirlocks = NULL;
 	zp->z_acl_cached = NULL;
@@ -151,7 +151,7 @@ zfs_znode_cache_destructor(void *buf, void *arg)
 	rw_destroy(&zp->z_name_lock);
 	mutex_destroy(&zp->z_acl_lock);
 	rw_destroy(&zp->z_xattr_lock);
-	rangelock_fini(&zp->z_rangelock);
+	zfs_rangelock_fini(&zp->z_rangelock);
 
 	ASSERT(zp->z_dirlocks == NULL);
 	ASSERT(zp->z_acl_cached == NULL);
@@ -1475,13 +1475,13 @@ zfs_extend(znode_t *zp, uint64_t end)
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	lr = rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
+	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end <= zp->z_size) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (0);
 	}
 	tx = dmu_tx_create(zfsvfs->z_os);
@@ -1511,7 +1511,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (error);
 	}
 
@@ -1523,7 +1523,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
 	    &zp->z_size, sizeof (zp->z_size), tx));
 
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	dmu_tx_commit(tx);
 
@@ -1592,13 +1592,13 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 	/*
 	 * Lock the range being freed.
 	 */
-	lr = rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
+	lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (off >= zp->z_size) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (0);
 	}
 
@@ -1648,7 +1648,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 			    page_len);
 		}
 	}
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	return (error);
 }
@@ -1674,20 +1674,20 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	lr = rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
+	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end >= zp->z_size) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (0);
 	}
 
 	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
 	    DMU_OBJECT_END);
 	if (error) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (error);
 	}
 	tx = dmu_tx_create(zfsvfs->z_os);
@@ -1697,7 +1697,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (error);
 	}
 
@@ -1713,7 +1713,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
 
 	dmu_tx_commit(tx);
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	return (0);
 }
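zfs_extend() and zfs_trunc() follow one rule visible in these hunks: any operation that changes z_size first takes a writer lock over the entire file. A condensed sketch of that shape, with the transaction and SA-update details elided (illustrative only):

#include <sys/zfs_rlock.h>

/*
 * Condensed shape of zfs_extend()/zfs_trunc() above: size changes
 * write-lock the whole file. Illustrative only; the dmu_tx and SA
 * update logic from the real functions is elided.
 */
static int
resize_sketch(znode_t *zp, uint64_t end)
{
	locked_range_t *lr;

	/* Lock [0, UINT64_MAX) so no reader or writer can race the resize. */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

	if (end <= zp->z_size) {
		/* Already at (or past) the desired length. */
		zfs_rangelock_exit(lr);
		return (0);
	}

	/* ... assign a tx and update z_size under the lock ... */

	zfs_rangelock_exit(lr);
	return (0);
}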
module/os/linux/zfs/zvol_os.c (14 changes: 7 additions & 7 deletions)

@@ -142,7 +142,7 @@ zvol_write(void *arg)
 		if (error)
 			break;
 	}
-	rangelock_exit(zvr->lr);
+	zfs_rangelock_exit(zvr->lr);
 
 	int64_t nwritten = start_resid - uio.uio_resid;
 	dataset_kstats_update_write_kstats(&zv->zv_zso->zvo_kstat, nwritten);
@@ -213,7 +213,7 @@ zvol_discard(void *arg)
 		    ZVOL_OBJ, start, size);
 	}
 unlock:
-	rangelock_exit(zvr->lr);
+	zfs_rangelock_exit(zvr->lr);
 
 	if (error == 0 && sync)
 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
@@ -259,7 +259,7 @@ zvol_read(void *arg)
 			break;
 		}
 	}
-	rangelock_exit(zvr->lr);
+	zfs_rangelock_exit(zvr->lr);
 
 	int64_t nread = start_resid - uio.uio_resid;
 	dataset_kstats_update_read_kstats(&zv->zv_zso->zvo_kstat, nread);
@@ -344,7 +344,7 @@ zvol_request(struct request_queue *q, struct bio *bio)
 		 * are asynchronous, we take it here synchronously to make
 		 * sure overlapped I/Os are properly ordered.
 		 */
-		zvr->lr = rangelock_enter(&zv->zv_rangelock, offset, size,
+		zvr->lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, size,
 		    RL_WRITER);
 		/*
 		 * Sync writes and discards execute zil_commit() which may need
@@ -383,7 +383,7 @@ zvol_request(struct request_queue *q, struct bio *bio)
 
 		rw_enter(&zv->zv_suspend_lock, RW_READER);
 
-		zvr->lr = rangelock_enter(&zv->zv_rangelock, offset, size,
+		zvr->lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, size,
 		    RL_READER);
 		if (zvol_request_sync || taskq_dispatch(zvol_taskq,
 		    zvol_read, zvr, TQ_SLEEP) == TASKQID_INVALID)
@@ -799,7 +799,7 @@ zvol_alloc(dev_t dev, const char *name)
 	zv->zv_open_count = 0;
 	strlcpy(zv->zv_name, name, MAXNAMELEN);
 
-	rangelock_init(&zv->zv_rangelock, NULL, NULL);
+	zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
 	rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
 
 	zv->zv_zso->zvo_disk->major = zvol_major;
@@ -861,7 +861,7 @@ zvol_free(zvol_state_t *zv)
 	ASSERT(zv->zv_zso->zvo_disk->private_data == NULL);
 
 	rw_destroy(&zv->zv_suspend_lock);
-	rangelock_fini(&zv->zv_rangelock);
+	zfs_rangelock_fini(&zv->zv_rangelock);
 
 	del_gendisk(zv->zv_zso->zvo_disk);
 	blk_cleanup_queue(zv->zv_zso->zvo_queue);
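The zvol hunks illustrate the ordering guarantee mentioned in the zvol_request() comment above: the range lock is taken synchronously in the request path, before the I/O is dispatched to a taskq, and the async worker drops it on completion. A sketch of that pattern follows; the zv_request_sketch_t type and the *_sketch names are hypothetical stand-ins for the real zv_request structure and handlers:

#include <sys/zfs_rlock.h>
#include <sys/taskq.h>

/* Hypothetical stand-in for the real per-request structure. */
typedef struct zv_request_sketch {
	locked_range_t *lr;
	/* ... bio, zvol state, etc. ... */
} zv_request_sketch_t;

static void
zvol_worker_sketch(void *arg)
{
	zv_request_sketch_t *zvr = arg;

	/* ... perform the read or write ... */

	/* Drop the range lock only once the I/O is done. */
	zfs_rangelock_exit(zvr->lr);
}

static void
zvol_dispatch_sketch(rangelock_t *rl, taskq_t *tq,
    zv_request_sketch_t *zvr, uint64_t offset, uint64_t size)
{
	/*
	 * Taking the lock here, before dispatch, orders overlapping
	 * requests even though the I/O itself runs asynchronously.
	 */
	zvr->lr = zfs_rangelock_enter(rl, offset, size, RL_WRITER);
	(void) taskq_dispatch(tq, zvol_worker_sketch, zvr, TQ_SLEEP);
}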