Switch KM_SLEEP to KM_PUSHPAGE
Differences between how paging is done on Solaris and Linux can cause
deadlocks if KM_SLEEP is used in any of the following contexts:

  * The txg_sync thread
  * The zvol write/discard threads
  * The zpl_putpage() VFS callback

This is because KM_SLEEP allows direct reclaim, which may result in
the VM calling back into the filesystem or block layer to write out
pages.  If a lock is held over this operation, the potential exists to
deadlock the system.  To ensure forward progress, all memory allocations
in these contexts must use KM_PUSHPAGE, which disallows performing any
I/O to satisfy the memory allocation.
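
A minimal sketch of the resulting pattern (hypothetical example_* names,
assuming the SPL kmem API; this is not code from the commit), modeled on
the allocations changed below:

/*
 * Hypothetical example: an allocation made while a lock is held on the
 * txg_sync, zvol, or zpl_putpage() path.  KM_SLEEP would permit direct
 * reclaim, which can re-enter the filesystem or block layer and
 * deadlock on el_lock; KM_PUSHPAGE may still sleep, but it will not
 * initiate I/O to satisfy the allocation.
 */
static void
example_sync_append(example_list_t *el, const blkptr_t *bp)
{
        example_entry_t *ee;

        mutex_enter(&el->el_lock);
        ee = kmem_zalloc(sizeof (example_entry_t), KM_PUSHPAGE);
        ee->ee_blk = *bp;
        list_insert_tail(&el->el_list, ee);
        mutex_exit(&el->el_lock);
}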

Previously, this behavior was achieved by setting PF_MEMALLOC on the
thread.  However, that resulted in unexpected side effects such as the
exhaustion of pages in ZONE_DMA.  This approach touches more of the zfs
code, but it is more consistent with the right way to handle these cases
under Linux.
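
For contrast, a minimal sketch of the earlier approach (hypothetical
thread function, not code from the commits listed below), which relied
on the Linux PF_MEMALLOC task flag:

/*
 * Hypothetical illustration of the previous approach.  Setting
 * PF_MEMALLOC on the task disables direct reclaim for every allocation
 * the thread makes, but it also lets those allocations dip into the
 * kernel's emergency reserves, which is how ZONE_DMA pages could be
 * exhausted.
 */
static void
example_sync_thread(void *arg)
{
        current->flags |= PF_MEMALLOC;

        /* ... perform sync work; no allocation here enters direct reclaim ... */

        current->flags &= ~PF_MEMALLOC;
}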

This patch lays the groundwork for safely reverting the following
commits, which used PF_MEMALLOC:

  21ade34 Disable direct reclaim for z_wr_* threads
  cfc9a5c Fix zpl_writepage() deadlock
  eec8164 Fix ASSERTION(!dsl_pool_sync_context(tx->tx_pool))

Signed-off-by: Richard Yao <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #726
ryao authored and behlendorf committed Aug 27, 2012
1 parent 991fc1d commit b8d06fc
Showing 45 changed files with 187 additions and 185 deletions.
14 changes: 7 additions & 7 deletions include/sys/dbuf.h
@@ -345,13 +345,13 @@ boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
} \
_NOTE(CONSTCOND) } while (0)

#define dprintf_dbuf_bp(db, bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
sprintf_blkptr(__blkbuf, bp); \
dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
#define dprintf_dbuf_bp(db, bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_PUSHPAGE); \
sprintf_blkptr(__blkbuf, bp); \
dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
_NOTE(CONSTCOND) } while (0)

#define DBUF_VERIFY(db) dbuf_verify(db)
2 changes: 1 addition & 1 deletion include/sys/dsl_dataset.h
@@ -271,7 +271,7 @@ int dsl_destroy_inconsistent(const char *dsname, void *arg);
#ifdef ZFS_DEBUG
#define dprintf_ds(ds, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__ds_name = kmem_alloc(MAXNAMELEN, KM_SLEEP); \
char *__ds_name = kmem_alloc(MAXNAMELEN, KM_PUSHPAGE); \
dsl_dataset_name(ds, __ds_name); \
dprintf("ds=%s " fmt, __ds_name, __VA_ARGS__); \
kmem_free(__ds_name, MAXNAMELEN); \
2 changes: 1 addition & 1 deletion include/sys/dsl_dir.h
@@ -150,7 +150,7 @@ timestruc_t dsl_dir_snap_cmtime(dsl_dir_t *dd);
#define dprintf_dd(dd, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__ds_name = kmem_alloc(MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, \
KM_SLEEP); \
KM_PUSHPAGE); \
dsl_dir_name(dd, __ds_name); \
dprintf("dd=%s " fmt, __ds_name, __VA_ARGS__); \
kmem_free(__ds_name, MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); \
12 changes: 6 additions & 6 deletions include/sys/spa.h
@@ -690,12 +690,12 @@ extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
extern void spa_event_notify(spa_t *spa, vdev_t *vdev, const char *name);

#ifdef ZFS_DEBUG
#define dprintf_bp(bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
sprintf_blkptr(__blkbuf, (bp)); \
dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
#define dprintf_bp(bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_PUSHPAGE); \
sprintf_blkptr(__blkbuf, (bp)); \
dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
_NOTE(CONSTCOND) } while (0)
#else
2 changes: 1 addition & 1 deletion module/zcommon/zprop_common.c
@@ -171,7 +171,7 @@ zprop_iter_common(zprop_func func, void *cb, boolean_t show_all,
size = num_props * sizeof (zprop_desc_t *);

#if defined(_KERNEL)
order = kmem_alloc(size, KM_SLEEP);
order = kmem_alloc(size, KM_PUSHPAGE);
#else
if ((order = malloc(size)) == NULL)
return (ZPROP_CONT);
2 changes: 1 addition & 1 deletion module/zfs/arc.c
@@ -3547,7 +3547,7 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
ASSERT(hdr->b_acb == NULL);
if (l2arc)
hdr->b_flags |= ARC_L2CACHE;
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_PUSHPAGE);
callback->awcb_ready = ready;
callback->awcb_done = done;
callback->awcb_private = private;
2 changes: 1 addition & 1 deletion module/zfs/bplist.c
@@ -44,7 +44,7 @@ bplist_destroy(bplist_t *bpl)
void
bplist_append(bplist_t *bpl, const blkptr_t *bp)
{
bplist_entry_t *bpe = kmem_alloc(sizeof (*bpe), KM_SLEEP);
bplist_entry_t *bpe = kmem_alloc(sizeof (*bpe), KM_PUSHPAGE);

mutex_enter(&bpl->bpl_lock);
bpe->bpe_blk = *bp;
6 changes: 3 additions & 3 deletions module/zfs/dbuf.c
@@ -298,7 +298,7 @@ dbuf_init(void)
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel */
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
#else
h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
@@ -1719,7 +1719,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_type != DMU_OT_NONE);

db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
db = kmem_cache_alloc(dbuf_cache, KM_PUSHPAGE);

db->db_objset = os;
db->db.db_object = dn->dn_object;
@@ -2019,7 +2019,7 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
int error;

dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) *
DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);
__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);

error = __dbuf_hold_impl(dh);
2 changes: 1 addition & 1 deletion module/zfs/ddt.c
@@ -504,7 +504,7 @@ ddt_get_dedup_stats(spa_t *spa, ddt_stat_t *dds_total)
ddt_histogram_t *ddh_total;

/* XXX: Move to a slab */
ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_PUSHPAGE);
ddt_get_dedup_histogram(spa, ddh_total);
ddt_histogram_stat(dds_total, ddh_total);
kmem_free(ddh_total, sizeof (ddt_histogram_t));
12 changes: 6 additions & 6 deletions module/zfs/dmu.c
@@ -381,7 +381,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
}
nblks = 1;
}
dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP | KM_NODEBUG);
dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_PUSHPAGE | KM_NODEBUG);

if (dn->dn_objset->os_dsl_dataset)
dp = dn->dn_objset->os_dsl_dataset->ds_dir->dd_pool;
@@ -863,11 +863,11 @@ dmu_xuio_init(xuio_t *xuio, int nblk)
uio_t *uio = &xuio->xu_uio;

uio->uio_iovcnt = nblk;
uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_PUSHPAGE);

priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_PUSHPAGE);
priv->cnt = nblk;
priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_PUSHPAGE);
priv->iovp = uio->uio_iov;
XUIO_XUZC_PRIV(xuio) = priv;

@@ -1431,7 +1431,7 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
return (EIO); /* Make zl_get_data do txg_waited_synced() */
}

dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_PUSHPAGE);
dsa->dsa_dr = NULL;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
@@ -1555,7 +1555,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
mutex_exit(&db->db_mtx);

dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_PUSHPAGE);
dsa->dsa_dr = dr;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
2 changes: 1 addition & 1 deletion module/zfs/dmu_objset.c
@@ -262,7 +262,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,

ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
os = kmem_zalloc(sizeof (objset_t), KM_PUSHPAGE);
os->os_dsl_dataset = ds;
os->os_spa = spa;
os->os_rootbp = bp;
6 changes: 3 additions & 3 deletions module/zfs/dmu_traverse.c
@@ -361,9 +361,9 @@ traverse_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *rootbp,
zbookmark_t *czb;
int err;

td = kmem_alloc(sizeof(traverse_data_t), KM_SLEEP);
pd = kmem_zalloc(sizeof(prefetch_data_t), KM_SLEEP);
czb = kmem_alloc(sizeof(zbookmark_t), KM_SLEEP);
td = kmem_alloc(sizeof(traverse_data_t), KM_PUSHPAGE);
pd = kmem_zalloc(sizeof(prefetch_data_t), KM_PUSHPAGE);
czb = kmem_alloc(sizeof(zbookmark_t), KM_PUSHPAGE);

td->td_spa = spa;
td->td_objset = ds ? ds->ds_object : 0;
6 changes: 3 additions & 3 deletions module/zfs/dmu_tx.c
@@ -63,7 +63,7 @@ static kstat_t *dmu_tx_ksp;
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_PUSHPAGE);
tx->tx_dir = dd;
if (dd)
tx->tx_pool = dd->dd_pool;
@@ -141,7 +141,7 @@ dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
}
}

txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_PUSHPAGE);
txh->txh_tx = tx;
txh->txh_dnode = dn;
#ifdef DEBUG_DMU_TX
@@ -1241,7 +1241,7 @@ dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
dmu_tx_callback_t *dcb;

dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_PUSHPAGE);

dcb->dcb_func = func;
dcb->dcb_data = data;
2 changes: 1 addition & 1 deletion module/zfs/dmu_zfetch.c
@@ -699,7 +699,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
if (cur_streams >= max_streams) {
return;
}
newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
newstream = kmem_zalloc(sizeof (zstream_t), KM_PUSHPAGE);
}

newstream->zst_offset = zst.zst_offset;
6 changes: 3 additions & 3 deletions module/zfs/dnode.c
@@ -372,7 +372,7 @@ static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object, dnode_handle_t *dnh)
{
dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_PUSHPAGE);

ASSERT(!POINTER_IS_VALID(dn->dn_objset));
dn->dn_moved = 0;
@@ -1491,7 +1491,7 @@ dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
} else if (blkid > rp->fr_blkid && endblk < fr_endblk) {
/* clear a chunk out of this range */
free_range_t *new_rp =
kmem_alloc(sizeof (free_range_t), KM_SLEEP);
kmem_alloc(sizeof (free_range_t), KM_PUSHPAGE);

new_rp->fr_blkid = endblk;
new_rp->fr_nblks = fr_endblk - endblk;
@@ -1669,7 +1669,7 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];

/* Add new range to dn_ranges */
rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
rp = kmem_alloc(sizeof (free_range_t), KM_PUSHPAGE);
rp->fr_blkid = blkid;
rp->fr_nblks = nblks;
found = avl_find(tree, rp, &where);
2 changes: 1 addition & 1 deletion module/zfs/dsl_dataset.c
@@ -390,7 +390,7 @@ dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
if (ds == NULL) {
dsl_dataset_t *winner = NULL;

ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_PUSHPAGE);
ds->ds_dbuf = dbuf;
ds->ds_object = dsobj;
ds->ds_phys = dbuf->db_data;
6 changes: 4 additions & 2 deletions module/zfs/dsl_deadlist.c
@@ -80,7 +80,9 @@ dsl_deadlist_load_tree(dsl_deadlist_t *dl)
for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
dsl_deadlist_entry_t *dle;

dle = kmem_alloc(sizeof (*dle), KM_PUSHPAGE);
dle->dle_mintxg = strtonum(za.za_name, NULL);
VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os,
za.za_first_integer));
@@ -215,7 +217,7 @@ dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)

dsl_deadlist_load_tree(dl);

dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
dle = kmem_alloc(sizeof (*dle), KM_PUSHPAGE);
dle->dle_mintxg = mintxg;
obj = bpobj_alloc(dl->dl_os, SPA_MAXBLOCKSIZE, tx);
VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
10 changes: 5 additions & 5 deletions module/zfs/dsl_dir.c
@@ -96,7 +96,7 @@ dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
if (dd == NULL) {
dsl_dir_t *winner;

dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
dd = kmem_zalloc(sizeof (dsl_dir_t), KM_PUSHPAGE);
dd->dd_object = ddobj;
dd->dd_dbuf = dbuf;
dd->dd_pool = dp;
@@ -791,7 +791,7 @@ dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
asize - ref_rsrv);
mutex_exit(&dd->dd_lock);

tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr = kmem_zalloc(sizeof (struct tempreserve), KM_PUSHPAGE);
tr->tr_ds = dd;
tr->tr_size = asize;
list_insert_tail(tr_list, tr);
@@ -825,7 +825,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
return (0);
}

tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
tr_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
list_create(tr_list, sizeof (struct tempreserve),
offsetof(struct tempreserve, tr_node));
ASSERT3S(asize, >, 0);
@@ -835,7 +835,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
if (err == 0) {
struct tempreserve *tr;

tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr = kmem_zalloc(sizeof (struct tempreserve), KM_PUSHPAGE);
tr->tr_size = lsize;
list_insert_tail(tr_list, tr);

@@ -851,7 +851,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
if (err == 0) {
struct tempreserve *tr;

tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr = kmem_zalloc(sizeof (struct tempreserve), KM_PUSHPAGE);
tr->tr_dp = dd->dd_pool;
tr->tr_size = asize;
list_insert_tail(tr_list, tr);
6 changes: 3 additions & 3 deletions module/zfs/dsl_prop.c
@@ -247,9 +247,9 @@ dsl_prop_register(dsl_dataset_t *ds, const char *propname,
return (err);
}

cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_SLEEP);
cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_PUSHPAGE);
cbr->cbr_ds = ds;
cbr->cbr_propname = kmem_alloc(strlen(propname)+1, KM_SLEEP);
cbr->cbr_propname = kmem_alloc(strlen(propname)+1, KM_PUSHPAGE);
(void) strcpy((char *)cbr->cbr_propname, propname);
cbr->cbr_func = callback;
cbr->cbr_arg = cbarg;
@@ -534,7 +534,7 @@ dsl_prop_changed_notify(dsl_pool_t *dp, uint64_t ddobj,
}
mutex_exit(&dd->dd_lock);

za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);
for (zap_cursor_init(&zc, mos,
dd->dd_phys->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, za) == 0;
2 changes: 1 addition & 1 deletion module/zfs/lzjb.c
@@ -56,7 +56,7 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
uint16_t *hp;
uint16_t *lempel;

lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_PUSHPAGE);
while (src < (uchar_t *)s_start + s_len) {
if ((copymask <<= 1) == (1 << NBBY)) {
if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
10 changes: 5 additions & 5 deletions module/zfs/metaslab.c
@@ -102,7 +102,7 @@ metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
metaslab_class_t *mc;

mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);

mc->mc_spa = spa;
mc->mc_rotor = NULL;
@@ -217,7 +217,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
metaslab_group_t *mg;

mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
@@ -422,9 +422,9 @@ metaslab_pp_load(space_map_t *sm)
space_seg_t *ss;

ASSERT(sm->sm_ppd == NULL);
sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_PUSHPAGE);

sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_PUSHPAGE);
avl_create(sm->sm_pp_root, metaslab_segsize_compare,
sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

@@ -725,7 +725,7 @@ metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
vdev_t *vd = mg->mg_vd;
metaslab_t *msp;

msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

msp->ms_smo_syncing = *smo;