From d95ec0dbcdf5153ace0f9c5b87f3f194e517db56 Mon Sep 17 00:00:00 2001 From: Andrew Innes Date: Sun, 2 Oct 2022 17:56:28 +0800 Subject: [PATCH] Replace 'long' with os defined ZFS_MODULE_LONG for LLP64 on windows Unfortunately, Windows defines 'long' as 32-bit even on x64 compiles. We create two new macros ZFS_MODULE_LONG and ZFS_MODULE_ULONG. These two will be 'long' on Unix, and let the toolchain handle the size of it. On Windows the two macros are defined as 'int64_t'/'uint64_t'. Signed-off-by: Andrew Innes Co-Authored-By: Jorgen Lundman --- include/os/freebsd/spl/sys/mod_os.h | 4 ++++ include/os/linux/kernel/linux/mod_compat.h | 4 ++++ include/sys/dmu_zfetch.h | 2 +- include/sys/dsl_deadlist.h | 2 +- include/sys/dsl_pool.h | 8 +++---- include/sys/mmp.h | 2 +- include/sys/spa.h | 6 ++--- include/sys/zcp.h | 4 ++-- include/sys/zfs_context.h | 4 ++++ include/sys/zfs_ioctl_impl.h | 2 +- module/os/freebsd/zfs/vdev_file.c | 4 ++-- module/os/linux/zfs/vdev_file.c | 4 ++-- module/zfs/arc.c | 28 +++++++++++----------- module/zfs/dbuf.c | 4 ++-- module/zfs/dmu.c | 2 +- module/zfs/dmu_zfetch.c | 2 +- module/zfs/dsl_deadlist.c | 2 +- module/zfs/dsl_pool.c | 8 +++---- module/zfs/dsl_scan.c | 8 +++---- module/zfs/metaslab.c | 7 +++--- module/zfs/mmp.c | 2 +- module/zfs/spa.c | 2 +- module/zfs/spa_checkpoint.c | 2 +- module/zfs/spa_log_spacemap.c | 18 +++++++------- module/zfs/spa_misc.c | 6 ++--- module/zfs/vdev_indirect.c | 4 ++-- module/zfs/vdev_initialize.c | 6 ++--- module/zfs/vdev_rebuild.c | 4 ++-- module/zfs/zcp.c | 4 ++-- module/zfs/zfs_ioctl.c | 4 ++-- module/zfs/zfs_log.c | 2 +- module/zfs/zfs_vnops.c | 2 +- module/zfs/zil.c | 2 +- 33 files changed, 89 insertions(+), 76 deletions(-) diff --git a/include/os/freebsd/spl/sys/mod_os.h b/include/os/freebsd/spl/sys/mod_os.h index e2815ce9e543..69f8920ad4ff 100644 --- a/include/os/freebsd/spl/sys/mod_os.h +++ b/include/os/freebsd/spl/sys/mod_os.h @@ -31,6 +31,10 @@ #include +/* Other platforms have long as 32-bit */ 
+#define ZFS_MODULE_LONG long +#define ZFS_MODULE_ULONG unsigned long + #define ZMOD_RW CTLFLAG_RWTUN #define ZMOD_RD CTLFLAG_RDTUN diff --git a/include/os/linux/kernel/linux/mod_compat.h b/include/os/linux/kernel/linux/mod_compat.h index a091bbfe179d..963cb54e7542 100644 --- a/include/os/linux/kernel/linux/mod_compat.h +++ b/include/os/linux/kernel/linux/mod_compat.h @@ -48,6 +48,10 @@ typedef struct kernel_param zfs_kernel_param_t; /* END CSTYLED */ #define STRING charp +/* Other platforms have long as 32-bit */ +#define ZFS_MODULE_LONG long +#define ZFS_MODULE_ULONG unsigned long + enum scope_prefix_types { zfs, zfs_arc, diff --git a/include/sys/dmu_zfetch.h b/include/sys/dmu_zfetch.h index ad3bc040756c..f8c3c1545ecb 100644 --- a/include/sys/dmu_zfetch.h +++ b/include/sys/dmu_zfetch.h @@ -36,7 +36,7 @@ extern "C" { #endif -extern unsigned long zfetch_array_rd_sz; +extern ZFS_MODULE_ULONG zfetch_array_rd_sz; struct dnode; /* so we can reference dnode */ diff --git a/include/sys/dsl_deadlist.h b/include/sys/dsl_deadlist.h index a94bba56ff7a..b1fb93370b83 100644 --- a/include/sys/dsl_deadlist.h +++ b/include/sys/dsl_deadlist.h @@ -84,7 +84,7 @@ typedef struct livelist_condense_entry { boolean_t cancelled; } livelist_condense_entry_t; -extern unsigned long zfs_livelist_max_entries; +extern ZFS_MODULE_ULONG zfs_livelist_max_entries; extern int zfs_livelist_min_percent_shared; typedef int deadlist_iter_t(void *args, dsl_deadlist_entry_t *dle); diff --git a/include/sys/dsl_pool.h b/include/sys/dsl_pool.h index 9364106d94b7..bbe9ee678fb2 100644 --- a/include/sys/dsl_pool.h +++ b/include/sys/dsl_pool.h @@ -57,13 +57,13 @@ struct dsl_scan; struct dsl_crypto_params; struct dsl_deadlist; -extern unsigned long zfs_dirty_data_max; -extern unsigned long zfs_dirty_data_max_max; -extern unsigned long zfs_wrlog_data_max; +extern ZFS_MODULE_ULONG zfs_dirty_data_max; +extern ZFS_MODULE_ULONG zfs_dirty_data_max_max; +extern ZFS_MODULE_ULONG zfs_wrlog_data_max; extern uint_t 
zfs_dirty_data_max_percent; extern uint_t zfs_dirty_data_max_max_percent; extern uint_t zfs_delay_min_dirty_percent; -extern unsigned long zfs_delay_scale; +extern ZFS_MODULE_ULONG zfs_delay_scale; /* These macros are for indexing into the zfs_all_blkstats_t. */ #define DMU_OT_DEFERRED DMU_OT_NONE diff --git a/include/sys/mmp.h b/include/sys/mmp.h index ce9c4496a04f..05cf64fc008c 100644 --- a/include/sys/mmp.h +++ b/include/sys/mmp.h @@ -64,7 +64,7 @@ extern void mmp_signal_all_threads(void); /* Global tuning */ extern int param_set_multihost_interval(ZFS_MODULE_PARAM_ARGS); -extern ulong_t zfs_multihost_interval; +extern ZFS_MODULE_ULONG zfs_multihost_interval; extern uint_t zfs_multihost_fail_intervals; extern uint_t zfs_multihost_import_intervals; diff --git a/include/sys/spa.h b/include/sys/spa.h index 3e68cb8c6511..74d49b83a2aa 100644 --- a/include/sys/spa.h +++ b/include/sys/spa.h @@ -1218,9 +1218,9 @@ int param_set_deadman_failmode(ZFS_MODULE_PARAM_ARGS); extern spa_mode_t spa_mode_global; extern int zfs_deadman_enabled; -extern unsigned long zfs_deadman_synctime_ms; -extern unsigned long zfs_deadman_ziotime_ms; -extern unsigned long zfs_deadman_checktime_ms; +extern ZFS_MODULE_ULONG zfs_deadman_synctime_ms; +extern ZFS_MODULE_ULONG zfs_deadman_ziotime_ms; +extern ZFS_MODULE_ULONG zfs_deadman_checktime_ms; extern kmem_cache_t *zio_buf_cache[]; extern kmem_cache_t *zio_data_buf_cache[]; diff --git a/include/sys/zcp.h b/include/sys/zcp.h index f0a78f9cb5c4..f7433932af6b 100644 --- a/include/sys/zcp.h +++ b/include/sys/zcp.h @@ -33,8 +33,8 @@ extern "C" { #define ZCP_RUN_INFO_KEY "runinfo" -extern unsigned long zfs_lua_max_instrlimit; -extern unsigned long zfs_lua_max_memlimit; +extern ZFS_MODULE_ULONG zfs_lua_max_instrlimit; +extern ZFS_MODULE_ULONG zfs_lua_max_memlimit; int zcp_argerror(lua_State *, int, const char *, ...); diff --git a/include/sys/zfs_context.h b/include/sys/zfs_context.h index d29d7118ff00..4eadc94453f4 100644 --- 
a/include/sys/zfs_context.h +++ b/include/sys/zfs_context.h @@ -210,6 +210,10 @@ typedef struct zfs_kernel_param { const char *name; /* unused stub */ } zfs_kernel_param_t; +/* Other platforms have long as 32-bit */ +#define ZFS_MODULE_LONG long +#define ZFS_MODULE_ULONG unsigned long + #define ZFS_MODULE_PARAM(scope_prefix, name_prefix, name, type, perm, desc) #define ZFS_MODULE_PARAM_ARGS void #define ZFS_MODULE_PARAM_CALL(scope_prefix, name_prefix, name, setfunc, \ diff --git a/include/sys/zfs_ioctl_impl.h b/include/sys/zfs_ioctl_impl.h index 0bf9fa6ff193..d3129afeb237 100644 --- a/include/sys/zfs_ioctl_impl.h +++ b/include/sys/zfs_ioctl_impl.h @@ -24,7 +24,7 @@ #define _ZFS_IOCTL_IMPL_H_ extern kmutex_t zfsdev_state_lock; -extern unsigned long zfs_max_nvlist_src_size; +extern ZFS_MODULE_ULONG zfs_max_nvlist_src_size; typedef int zfs_ioc_legacy_func_t(zfs_cmd_t *); typedef int zfs_ioc_func_t(const char *, nvlist_t *, nvlist_t *); diff --git a/module/os/freebsd/zfs/vdev_file.c b/module/os/freebsd/zfs/vdev_file.c index 73cc6aa48c0b..653ad0a51cbd 100644 --- a/module/os/freebsd/zfs/vdev_file.c +++ b/module/os/freebsd/zfs/vdev_file.c @@ -40,8 +40,8 @@ static taskq_t *vdev_file_taskq; -static unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT; -static unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT; +static ZFS_MODULE_ULONG vdev_file_logical_ashift = SPA_MINBLOCKSHIFT; +static ZFS_MODULE_ULONG vdev_file_physical_ashift = SPA_MINBLOCKSHIFT; void vdev_file_init(void) diff --git a/module/os/linux/zfs/vdev_file.c b/module/os/linux/zfs/vdev_file.c index 46e412f6eeb4..aa4a57da5485 100644 --- a/module/os/linux/zfs/vdev_file.c +++ b/module/os/linux/zfs/vdev_file.c @@ -53,8 +53,8 @@ static taskq_t *vdev_file_taskq; * impact the vdev_ashift setting which can only be set at vdev creation * time. 
*/ -static unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT; -static unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT; +static ZFS_MODULE_ULONG vdev_file_logical_ashift = SPA_MINBLOCKSHIFT; +static ZFS_MODULE_ULONG vdev_file_physical_ashift = SPA_MINBLOCKSHIFT; static void vdev_file_hold(vdev_t *vd) diff --git a/module/zfs/arc.c b/module/zfs/arc.c index 33865f715b0f..1d5c3d12b59b 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -421,10 +421,10 @@ boolean_t arc_warm; */ unsigned long zfs_arc_max = 0; unsigned long zfs_arc_min = 0; -unsigned long zfs_arc_meta_limit = 0; -unsigned long zfs_arc_meta_min = 0; -static unsigned long zfs_arc_dnode_limit = 0; -static unsigned long zfs_arc_dnode_reduce_percent = 10; +ZFS_MODULE_ULONG zfs_arc_meta_limit = 0; +ZFS_MODULE_ULONG zfs_arc_meta_min = 0; +static ZFS_MODULE_ULONG zfs_arc_dnode_limit = 0; +static ZFS_MODULE_ULONG zfs_arc_dnode_reduce_percent = 10; static uint_t zfs_arc_grow_retry = 0; static uint_t zfs_arc_shrink_shift = 0; static uint_t zfs_arc_p_min_shift = 0; @@ -449,12 +449,12 @@ int zfs_compressed_arc_enabled = B_TRUE; * ARC will evict meta buffers that exceed arc_meta_limit. This * tunable make arc_meta_limit adjustable for different workloads. */ -static unsigned long zfs_arc_meta_limit_percent = 75; +static ZFS_MODULE_ULONG zfs_arc_meta_limit_percent = 75; /* * Percentage that can be consumed by dnodes of ARC meta buffers. 
*/ -static unsigned long zfs_arc_dnode_limit_percent = 10; +static ZFS_MODULE_ULONG zfs_arc_dnode_limit_percent = 10; /* * These tunables are Linux-specific @@ -781,12 +781,12 @@ uint64_t zfs_crc64_table[256]; #define L2ARC_FEED_TYPES 4 /* L2ARC Performance Tunables */ -unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */ -unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */ -unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */ -unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST; -unsigned long l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ -unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */ +ZFS_MODULE_ULONG l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */ +ZFS_MODULE_ULONG l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */ +ZFS_MODULE_ULONG l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */ +ZFS_MODULE_ULONG l2arc_headroom_boost = L2ARC_HEADROOM_BOOST; +ZFS_MODULE_ULONG l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ +ZFS_MODULE_ULONG l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */ int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ int l2arc_feed_again = B_TRUE; /* turbo warmup */ int l2arc_norw = B_FALSE; /* no reads during writes */ @@ -909,7 +909,7 @@ static int l2arc_mfuonly = 0; * will vary depending of how well the specific device handles * these commands. */ -static unsigned long l2arc_trim_ahead = 0; +static ZFS_MODULE_ULONG l2arc_trim_ahead = 0; /* * Performance tuning of L2ARC persistence: @@ -925,7 +925,7 @@ static unsigned long l2arc_trim_ahead = 0; * not to waste space. */ static int l2arc_rebuild_enabled = B_TRUE; -static unsigned long l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024; +static ZFS_MODULE_ULONG l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024; /* L2ARC persistence rebuild control routines. 
*/ void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen); diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index db1123d37d98..7c7e0dad27c2 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -227,8 +227,8 @@ typedef struct dbuf_cache { dbuf_cache_t dbuf_caches[DB_CACHE_MAX]; /* Size limits for the caches */ -static unsigned long dbuf_cache_max_bytes = ULONG_MAX; -static unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX; +static ZFS_MODULE_ULONG dbuf_cache_max_bytes = ULONG_MAX; +static ZFS_MODULE_ULONG dbuf_metadata_cache_max_bytes = ULONG_MAX; /* Set the default sizes of the caches to log2 fraction of arc size */ static uint_t dbuf_cache_shift = 5; diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index 9e67eb51f415..038f0d65e44f 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -70,7 +70,7 @@ static int zfs_nopwrite_enabled = 1; * will wait until the next TXG. * A value of zero will disable this throttle. */ -static unsigned long zfs_per_txg_dirty_frees_percent = 30; +static ZFS_MODULE_ULONG zfs_per_txg_dirty_frees_percent = 30; /* * Enable/disable forcing txg sync when dirty checking for holes with lseek(). 
diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c index 101d2ee7b7a2..512264902788 100644 --- a/module/zfs/dmu_zfetch.c +++ b/module/zfs/dmu_zfetch.c @@ -58,7 +58,7 @@ unsigned int zfetch_max_distance = 64 * 1024 * 1024; /* max bytes to prefetch indirects for per stream (default 64MB) */ unsigned int zfetch_max_idistance = 64 * 1024 * 1024; /* max number of bytes in an array_read in which we allow prefetching (1MB) */ -unsigned long zfetch_array_rd_sz = 1024 * 1024; +ZFS_MODULE_ULONG zfetch_array_rd_sz = 1024 * 1024; typedef struct zfetch_stats { kstat_named_t zfetchstat_hits; diff --git a/module/zfs/dsl_deadlist.c b/module/zfs/dsl_deadlist.c index 1ecae0fe3865..0f32453d0420 100644 --- a/module/zfs/dsl_deadlist.c +++ b/module/zfs/dsl_deadlist.c @@ -92,7 +92,7 @@ * will be loaded into memory and shouldn't take up an inordinate amount of * space. We settled on ~500000 entries, corresponding to roughly 128M. */ -unsigned long zfs_livelist_max_entries = 500000; +ZFS_MODULE_ULONG zfs_livelist_max_entries = 500000; /* * We can approximate how much of a performance gain a livelist will give us diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c index 4fd3722a051e..a0bc136ec200 100644 --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@ -99,8 +99,8 @@ * capped at zfs_dirty_data_max_max. It can also be overridden with a module * parameter. */ -unsigned long zfs_dirty_data_max = 0; -unsigned long zfs_dirty_data_max_max = 0; +ZFS_MODULE_ULONG zfs_dirty_data_max = 0; +ZFS_MODULE_ULONG zfs_dirty_data_max_max = 0; uint_t zfs_dirty_data_max_percent = 10; uint_t zfs_dirty_data_max_max_percent = 25; @@ -109,7 +109,7 @@ uint_t zfs_dirty_data_max_max_percent = 25; * when approaching the limit until log data is cleared out after txg sync. * It only counts TX_WRITE log with WR_COPIED or WR_NEED_COPY. 
*/ -unsigned long zfs_wrlog_data_max = 0; +ZFS_MODULE_ULONG zfs_wrlog_data_max = 0; /* * If there's at least this much dirty data (as a percentage of @@ -138,7 +138,7 @@ uint_t zfs_delay_min_dirty_percent = 60; * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the * multiply in dmu_tx_delay(). */ -unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000; +ZFS_MODULE_ULONG zfs_delay_scale = 1000 * 1000 * 1000 / 2000; /* * This determines the number of threads used by the dp_sync_taskq. diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c index f0cd1feaf55b..e3ee05898eb9 100644 --- a/module/zfs/dsl_scan.c +++ b/module/zfs/dsl_scan.c @@ -147,13 +147,13 @@ static int zfs_scan_strict_mem_lim = B_FALSE; * overload the drives with I/O, since that is protected by * zfs_vdev_scrub_max_active. */ -static unsigned long zfs_scan_vdev_limit = 4 << 20; +static ZFS_MODULE_ULONG zfs_scan_vdev_limit = 4 << 20; static uint_t zfs_scan_issue_strategy = 0; /* don't queue & sort zios, go direct */ static int zfs_scan_legacy = B_FALSE; -static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ +static ZFS_MODULE_ULONG zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ /* * fill_weight is non-tunable at runtime, so we copy it at module init from @@ -192,9 +192,9 @@ static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */ static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE; /* max number of blocks to free in a single TXG */ -static unsigned long zfs_async_block_max_blocks = ULONG_MAX; +static ZFS_MODULE_ULONG zfs_async_block_max_blocks = ULONG_MAX; /* max number of dedup blocks to free in a single TXG */ -static unsigned long zfs_max_async_dedup_frees = 100000; +static ZFS_MODULE_ULONG zfs_max_async_dedup_frees = 100000; /* set to disable resilver deferring */ static int zfs_resilver_disable_defer = B_FALSE; diff --git a/module/zfs/metaslab.c 
b/module/zfs/metaslab.c index efcfeecd778e..c559c78c92db 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -51,12 +51,12 @@ * operation, we will try to write this amount of data to each disk before * moving on to the next top-level vdev. */ -static unsigned long metaslab_aliquot = 1024 * 1024; +static ZFS_MODULE_ULONG metaslab_aliquot = 1024 * 1024; /* * For testing, make some blocks above a certain size be gang blocks. */ -unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1; +ZFS_MODULE_ULONG metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1; /* * In pools where the log space map feature is not enabled we touch @@ -286,7 +286,8 @@ static const int max_disabled_ms = 3; * Time (in seconds) to respect ms_max_size when the metaslab is not loaded. * To avoid 64-bit overflow, don't set above UINT32_MAX. */ -static unsigned long zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */ +static ZFS_MODULE_ULONG zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; +/* 1 hour */ /* * Maximum percentage of memory to use on storing loaded metaslabs. If loading diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c index 92fd6c422330..a2d1afbc2d5f 100644 --- a/module/zfs/mmp.c +++ b/module/zfs/mmp.c @@ -156,7 +156,7 @@ * vary with the I/O load and this observed value is the ub_mmp_delay which is * stored in the uberblock. The minimum allowed value is 100 ms. */ -ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL; +ZFS_MODULE_ULONG zfs_multihost_interval = MMP_DEFAULT_INTERVAL; /* * Used to control the duration of the activity test on import. Smaller values diff --git a/module/zfs/spa.c b/module/zfs/spa.c index cc367745e486..b3678349ac69 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -218,7 +218,7 @@ static int spa_load_print_vdev_tree = B_FALSE; * there are also risks of performing an inadvertent rewind as we might be * missing all the vdevs with the latest uberblocks. 
*/ -unsigned long zfs_max_missing_tvds = 0; +ZFS_MODULE_ULONG zfs_max_missing_tvds = 0; /* * The parameters below are similar to zfs_max_missing_tvds but are only diff --git a/module/zfs/spa_checkpoint.c b/module/zfs/spa_checkpoint.c index a837b1ce97ec..2d7141be98ec 100644 --- a/module/zfs/spa_checkpoint.c +++ b/module/zfs/spa_checkpoint.c @@ -158,7 +158,7 @@ * amount of checkpointed data that has been freed within them while * the pool had a checkpoint. */ -static unsigned long zfs_spa_discard_memory_limit = 16 * 1024 * 1024; +static ZFS_MODULE_ULONG zfs_spa_discard_memory_limit = 16 * 1024 * 1024; int spa_checkpoint_get_stats(spa_t *spa, pool_checkpoint_stat_t *pcs) diff --git a/module/zfs/spa_log_spacemap.c b/module/zfs/spa_log_spacemap.c index 4ecce8214f6a..bddcaa0d38e9 100644 --- a/module/zfs/spa_log_spacemap.c +++ b/module/zfs/spa_log_spacemap.c @@ -188,13 +188,13 @@ static const unsigned long zfs_log_sm_blksz = 1ULL << 17; * (thus the _ppm suffix; reads as "parts per million"). As an example, * the default of 1000 allows 0.1% of memory to be used. */ -static unsigned long zfs_unflushed_max_mem_ppm = 1000; +static ZFS_MODULE_ULONG zfs_unflushed_max_mem_ppm = 1000; /* * Specific hard-limit in memory that ZFS allows to be used for * unflushed changes. */ -static unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30; +static ZFS_MODULE_ULONG zfs_unflushed_max_mem_amt = 1ULL << 30; /* * The following tunable determines the number of blocks that can be used for @@ -243,33 +243,33 @@ static unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30; * provide upper and lower bounds for the log block limit. * [see zfs_unflushed_log_block_{min,max}] */ -static unsigned long zfs_unflushed_log_block_pct = 400; +static ZFS_MODULE_ULONG zfs_unflushed_log_block_pct = 400; /* * If the number of metaslabs is small and our incoming rate is high, we could * get into a situation that we are flushing all our metaslabs every TXG. Thus * we always allow at least this many log blocks. 
*/ -static unsigned long zfs_unflushed_log_block_min = 1000; +static ZFS_MODULE_ULONG zfs_unflushed_log_block_min = 1000; /* * If the log becomes too big, the import time of the pool can take a hit in * terms of performance. Thus we have a hard limit in the size of the log in * terms of blocks. */ -static unsigned long zfs_unflushed_log_block_max = (1ULL << 17); +static ZFS_MODULE_ULONG zfs_unflushed_log_block_max = (1ULL << 18); /* * Also we have a hard limit in the size of the log in terms of dirty TXGs. */ -static unsigned long zfs_unflushed_log_txg_max = 1000; +static ZFS_MODULE_ULONG zfs_unflushed_log_txg_max = 1000; /* * Max # of rows allowed for the log_summary. The tradeoff here is accuracy and * stability of the flushing algorithm (longer summary) vs its runtime overhead * (smaller summary is faster to traverse). */ -static unsigned long zfs_max_logsm_summary_length = 10; +static ZFS_MODULE_ULONG zfs_max_logsm_summary_length = 10; /* * Tunable that sets the lower bound on the metaslabs to flush every TXG. @@ -282,7 +282,7 @@ static unsigned long zfs_max_logsm_summary_length = 10; * The point of this tunable is to be used in extreme cases where we really * want to flush more metaslabs than our adaptable heuristic plans to flush. */ -static unsigned long zfs_min_metaslabs_to_flush = 1; +static ZFS_MODULE_ULONG zfs_min_metaslabs_to_flush = 1; /* * Tunable that specifies how far in the past do we want to look when trying to @@ -293,7 +293,7 @@ static unsigned long zfs_min_metaslabs_to_flush = 1; * average over all the blocks that we walk * [see spa_estimate_incoming_log_blocks]. */ -static unsigned long zfs_max_log_walking = 5; +static ZFS_MODULE_ULONG zfs_max_log_walking = 5; /* * This tunable exists solely for testing purposes. 
It ensures that the log diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c index 102070013404..12c2feb5b50e 100644 --- a/module/zfs/spa_misc.c +++ b/module/zfs/spa_misc.c @@ -304,20 +304,20 @@ int zfs_free_leak_on_eio = B_FALSE; * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting * in one of three behaviors controlled by zfs_deadman_failmode. */ -unsigned long zfs_deadman_synctime_ms = 600000UL; /* 10 min. */ +ZFS_MODULE_ULONG zfs_deadman_synctime_ms = 600000UL; /* 10 min. */ /* * This value controls the maximum amount of time zio_wait() will block for an * outstanding IO. By default this is 300 seconds at which point the "hung" * behavior will be applied as described for zfs_deadman_synctime_ms. */ -unsigned long zfs_deadman_ziotime_ms = 300000UL; /* 5 min. */ +ZFS_MODULE_ULONG zfs_deadman_ziotime_ms = 300000UL; /* 5 min. */ /* * Check time in milliseconds. This defines the frequency at which we check * for hung I/O. */ -unsigned long zfs_deadman_checktime_ms = 60000UL; /* 1 min. */ +ZFS_MODULE_ULONG zfs_deadman_checktime_ms = 60000UL; /* 1 min. */ /* * By default the deadman is enabled. diff --git a/module/zfs/vdev_indirect.c b/module/zfs/vdev_indirect.c index 0ca0c245e952..6c15730eeb4d 100644 --- a/module/zfs/vdev_indirect.c +++ b/module/zfs/vdev_indirect.c @@ -189,14 +189,14 @@ static uint_t zfs_condense_indirect_obsolete_pct = 25; * consumed by the obsolete space map; the default of 1GB is small enough * that we typically don't mind "wasting" it. */ -static unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024; +static ZFS_MODULE_ULONG zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024; /* * Don't bother condensing if the mapping uses less than this amount of * memory. The default of 128KB is considered a "trivial" amount of * memory and not worth reducing. 
*/ -static unsigned long zfs_condense_min_mapping_bytes = 128 * 1024; +static ZFS_MODULE_ULONG zfs_condense_min_mapping_bytes = 128 * 1024; /* * This is used by the test suite so that it can ensure that certain diff --git a/module/zfs/vdev_initialize.c b/module/zfs/vdev_initialize.c index 965fb7ef0593..157d19008a29 100644 --- a/module/zfs/vdev_initialize.c +++ b/module/zfs/vdev_initialize.c @@ -37,16 +37,16 @@ * Value that is written to disk during initialization. */ #ifdef _ILP32 -static unsigned long zfs_initialize_value = 0xdeadbeefUL; +static ZFS_MODULE_ULONG zfs_initialize_value = 0xdeadbeefUL; #else -static unsigned long zfs_initialize_value = 0xdeadbeefdeadbeeeULL; +static ZFS_MODULE_ULONG zfs_initialize_value = 0xdeadbeefdeadbeeeULL; #endif /* maximum number of I/Os outstanding per leaf vdev */ static const int zfs_initialize_limit = 1; /* size of initializing writes; default 1MiB, see zfs_remove_max_segment */ -static unsigned long zfs_initialize_chunk_size = 1024 * 1024; +static ZFS_MODULE_ULONG zfs_initialize_chunk_size = 1024 * 1024; static boolean_t vdev_initialize_should_stop(vdev_t *vd) diff --git a/module/zfs/vdev_rebuild.c b/module/zfs/vdev_rebuild.c index 1ce578e228d8..c52c010e4c14 100644 --- a/module/zfs/vdev_rebuild.c +++ b/module/zfs/vdev_rebuild.c @@ -103,7 +103,7 @@ * Size of rebuild reads; defaults to 1MiB per data disk and is capped at * SPA_MAXBLOCKSIZE. */ -static unsigned long zfs_rebuild_max_segment = 1024 * 1024; +static ZFS_MODULE_ULONG zfs_rebuild_max_segment = 1024 * 1024; /* * Maximum number of parallelly executed bytes per leaf vdev caused by a @@ -121,7 +121,7 @@ static unsigned long zfs_rebuild_max_segment = 1024 * 1024; * With a value of 32MB the sequential resilver write rate was measured at * 800MB/s sustained while rebuilding to a distributed spare. 
*/ -static unsigned long zfs_rebuild_vdev_limit = 32 << 20; +static ZFS_MODULE_ULONG zfs_rebuild_vdev_limit = 32 << 20; /* * Automatically start a pool scrub when the last active sequential resilver diff --git a/module/zfs/zcp.c b/module/zfs/zcp.c index fe90242ca40d..455a8d3870d0 100644 --- a/module/zfs/zcp.c +++ b/module/zfs/zcp.c @@ -109,8 +109,8 @@ #define ZCP_NVLIST_MAX_DEPTH 20 static const uint64_t zfs_lua_check_instrlimit_interval = 100; -unsigned long zfs_lua_max_instrlimit = ZCP_MAX_INSTRLIMIT; -unsigned long zfs_lua_max_memlimit = ZCP_MAX_MEMLIMIT; +ZFS_MODULE_ULONG zfs_lua_max_instrlimit = ZCP_MAX_INSTRLIMIT; +ZFS_MODULE_ULONG zfs_lua_max_memlimit = ZCP_MAX_MEMLIMIT; /* * Forward declarations for mutually recursive functions diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index c3266c09306b..4f4b5a61c0bb 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -229,14 +229,14 @@ static zfsdev_state_t *zfsdev_state_list; * for zc->zc_nvlist_src_size, since we will need to allocate that much memory. * Defaults to 0=auto which is handled by platform code. */ -unsigned long zfs_max_nvlist_src_size = 0; +ZFS_MODULE_ULONG zfs_max_nvlist_src_size = 0; /* * When logging the output nvlist of an ioctl in the on-disk history, limit * the logged size to this many bytes. This must be less than DMU_MAX_ACCESS. * This applies primarily to zfs_ioc_channel_program(). */ -static unsigned long zfs_history_output_max = 1024 * 1024; +static ZFS_MODULE_ULONG zfs_history_output_max = 1024 * 1024; uint_t zfs_fsyncer_key; uint_t zfs_allow_log_key; diff --git a/module/zfs/zfs_log.c b/module/zfs/zfs_log.c index c92044337bce..f93f64711823 100644 --- a/module/zfs/zfs_log.c +++ b/module/zfs/zfs_log.c @@ -525,7 +525,7 @@ zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp, * called as soon as the write is on stable storage (be it via a DMU sync or a * ZIL commit). 
*/ -static long zfs_immediate_write_sz = 32768; +static ZFS_MODULE_LONG zfs_immediate_write_sz = 32768; void zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype, diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 57f03f116273..c69a0e075e34 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -176,7 +176,7 @@ zfs_access(znode_t *zp, int mode, int flag, cred_t *cr) return (error); } -static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */ +static ZFS_MODULE_ULONG zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */ /* * Read bytes from specified file into supplied buffer. diff --git a/module/zfs/zil.c b/module/zfs/zil.c index dc5b8018e16e..05e64d58f22c 100644 --- a/module/zfs/zil.c +++ b/module/zfs/zil.c @@ -132,7 +132,7 @@ static int zil_nocacheflush = 0; * Any writes above that will be executed with lower (asynchronous) priority * to limit potential SLOG device abuse by single active ZIL writer. */ -static unsigned long zil_slog_bulk = 768 * 1024; +static ZFS_MODULE_ULONG zil_slog_bulk = 768 * 1024; static kmem_cache_t *zil_lwb_cache; static kmem_cache_t *zil_zcw_cache;