diff --git a/cmd/zfs/zfs_iter.c b/cmd/zfs/zfs_iter.c
index b7ddf63dfe22..a0a80d481648 100644
--- a/cmd/zfs/zfs_iter.c
+++ b/cmd/zfs/zfs_iter.c
@@ -218,6 +218,13 @@ zfs_sort_only_by_name(const zfs_sort_column_t *sc)
 	    sc->sc_prop == ZFS_PROP_NAME);
 }
 
+int
+zfs_sort_only_by_createtxg(const zfs_sort_column_t *sc)
+{
+	return (sc != NULL && sc->sc_next == NULL &&
+	    sc->sc_prop == ZFS_PROP_CREATETXG);
+}
+
 static int
 zfs_compare(const void *larg, const void *rarg)
 {
@@ -301,7 +308,7 @@ zfs_sort(const void *larg, const void *rarg, void *data)
 	for (psc = sc; psc != NULL; psc = psc->sc_next) {
 		char lbuf[ZFS_MAXPROPLEN], rbuf[ZFS_MAXPROPLEN];
 		char *lstr, *rstr;
-		uint64_t lnum, rnum;
+		uint64_t lnum = 0, rnum = 0;
 		boolean_t lvalid, rvalid;
 		int ret = 0;
 
@@ -352,11 +359,9 @@ zfs_sort(const void *larg, const void *rarg, void *data)
 			    zfs_get_type(r), B_FALSE);
 
 			if (lvalid)
-				(void) zfs_prop_get_numeric(l, psc->sc_prop,
-				    &lnum, NULL, NULL, 0);
+				lnum = zfs_prop_get_int(l, psc->sc_prop);
 			if (rvalid)
-				(void) zfs_prop_get_numeric(r, psc->sc_prop,
-				    &rnum, NULL, NULL, 0);
+				rnum = zfs_prop_get_int(r, psc->sc_prop);
 		}
 
 		if (!lvalid && !rvalid)
diff --git a/cmd/zfs/zfs_iter.h b/cmd/zfs/zfs_iter.h
index d77002dec29b..effb22ded3fc 100644
--- a/cmd/zfs/zfs_iter.h
+++ b/cmd/zfs/zfs_iter.h
@@ -53,6 +53,7 @@ int zfs_for_each(int, char **, int options, zfs_type_t,
 int zfs_add_sort_column(zfs_sort_column_t **, const char *, boolean_t);
 void zfs_free_sort_columns(zfs_sort_column_t *);
 int zfs_sort_only_by_name(const zfs_sort_column_t *);
+int zfs_sort_only_by_createtxg(const zfs_sort_column_t *);
 
 #ifdef __cplusplus
 }
diff --git a/cmd/zfs/zfs_main.c b/cmd/zfs/zfs_main.c
index 6e77b57f4218..75649f597ef2 100644
--- a/cmd/zfs/zfs_main.c
+++ b/cmd/zfs/zfs_main.c
@@ -3655,11 +3655,14 @@ found3:;
 	argv += optind;
 
 	/*
-	 * If we are only going to list snapshot names and sort by name,
-	 * then we can use faster version.
+	 * If we are only going to list snapshot names and sort by name or
+	 * by createtxg, then we can use the faster version.
 	 */
-	if (strcmp(fields, "name") == 0 && zfs_sort_only_by_name(sortcol))
+	if (strcmp(fields, "name") == 0 &&
+	    (zfs_sort_only_by_name(sortcol) ||
+	    zfs_sort_only_by_createtxg(sortcol))) {
 		flags |= ZFS_ITER_SIMPLE;
+	}
 
 	/*
 	 * If "-o space" and no types were specified, don't display snapshots.
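Reviewer note: the `ZFS_ITER_SIMPLE` fast path only works for `-s createtxg` because simple snapshot handles now carry the creation TXG (see the libzfs_dataset.c and zfs_ioctl.c hunks near the end of this diff). A rough, illustrative C sketch of the resulting lookup on the libzfs side — this is not code from the patch, just a reading aid:

```c
/*
 * Illustrative only: reading createtxg from a handle produced by the
 * simple snapshot iterator.  Because zfs_ioc_snapshot_list_next() now
 * fills in dds_creation_txg and make_dataset_simple_handle_zc() copies
 * zc_objset_stats into the handle, this resolves from the cached
 * dmustats instead of issuing another property ioctl.
 */
static uint64_t
snap_createtxg(zfs_handle_t *zhp)
{
	return (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG));
}
```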
diff --git a/cmd/zfs_object_agent/server/src/main.rs b/cmd/zfs_object_agent/server/src/main.rs
index c676e2e38f8e..a92f18f4151f 100644
--- a/cmd/zfs_object_agent/server/src/main.rs
+++ b/cmd/zfs_object_agent/server/src/main.rs
@@ -118,6 +118,10 @@ enum Commands {
         #[clap(short = 'b', long)]
         bucket: String,
 
+        /// Credentials profile in ~/.aws/credentials
+        #[clap(short = 'p', long)]
+        profile: Option<String>,
+
         /// AWS access key id
         #[clap(
             short = 'i',
@@ -125,7 +129,9 @@
             alias = "aws_access_key_id",
             requires = "aws-secret-access-key",
             required_unless_present = "aws-instance-profile",
-            conflicts_with = "aws-instance-profile"
+            required_unless_present = "profile",
+            conflicts_with = "aws-instance-profile",
+            conflicts_with = "profile"
         )]
         aws_access_key_id: Option<String>,
 
@@ -136,12 +142,19 @@
             alias = "aws_secret_access_key",
             requires = "aws-access-key-id",
             required_unless_present = "aws-instance-profile",
-            conflicts_with = "aws-instance-profile"
+            required_unless_present = "profile",
+            conflicts_with = "aws-instance-profile",
+            conflicts_with = "profile"
         )]
         aws_secret_access_key: Option<String>,
 
         /// Use IAM instance profile
-        #[clap(short = 'm', long, alias = "aws_instance_profile")]
+        #[clap(
+            short = 'm',
+            long,
+            alias = "aws_instance_profile",
+            conflicts_with = "profile"
+        )]
         aws_instance_profile: bool,
     },
     /// test connectivity blob
@@ -154,15 +167,26 @@
         #[clap(short = 'b', long)]
         bucket: String,
 
+        /// Credentials profile in ~/.azure/credentials
+        #[clap(
+            short = 'p',
+            long,
+            conflicts_with = "azure-account",
+            conflicts_with = "azure-key",
+            conflicts_with = "managed-identity"
+        )]
+        profile: Option<String>,
+
         /// Azure-Blob account name
-        #[clap(short = 'a', long)]
-        azure_account: String,
+        #[clap(short = 'a', long, required_unless_present = "profile")]
+        azure_account: Option<String>,
 
         /// Azure-Blob secret key
         #[clap(
             short = 's',
             long,
             required_unless_present = "managed-identity",
+            required_unless_present = "profile",
             conflicts_with = "managed-identity"
         )]
         azure_key: Option<String>,
@@ -172,6 +196,7 @@
             short = 'm',
             long,
             required_unless_present = "azure-key",
+            required_unless_present = "profile",
             conflicts_with = "azure-key"
         )]
         managed_identity: bool,
@@ -211,6 +236,7 @@
             endpoint,
             region,
             bucket,
+            profile,
             aws_access_key_id,
             aws_secret_access_key,
             aws_instance_profile,
@@ -220,6 +246,8 @@
                 region,
                 credentials: if aws_instance_profile {
                     S3Credentials::InstanceProfile
+                } else if let Some(p) = profile {
+                    S3Credentials::Profile(p)
                 } else {
                     S3Credentials::Key {
                         aws_access_key_id: aws_access_key_id.unwrap(),
@@ -234,6 +262,7 @@
         Some(Commands::TestConnectivityBlob {
             endpoint,
             bucket,
+            profile,
             azure_account,
             azure_key,
             managed_identity,
@@ -241,10 +270,14 @@
             let protocol = ObjectAccessProtocol::Blob {
                 endpoint,
                 credentials: if managed_identity {
-                    BlobCredentials::ManagedCredentials { azure_account }
+                    BlobCredentials::ManagedCredentials {
+                        azure_account: azure_account.unwrap(),
+                    }
+                } else if profile.is_some() {
+                    BlobCredentials::Profile(profile.unwrap())
                 } else {
                     BlobCredentials::Key {
-                        azure_account,
+                        azure_account: azure_account.unwrap(),
                         azure_key: azure_key.unwrap(),
                     }
                 },
@@ -329,13 +362,15 @@ mod test {
             Some(Commands::TestConnectivityBlob {
                 endpoint,
                 bucket,
+                profile,
                 azure_account,
                 azure_key,
                 managed_identity,
             }) => {
                 assert_eq!(endpoint.unwrap(), "foo");
                 assert_eq!(&bucket, "bar");
-                assert_eq!(azure_account, "azure-account");
+                assert!(profile.is_none());
+                assert_eq!(azure_account.unwrap(), "azure-account");
                 assert_eq!(azure_key.unwrap(), "super-secret-key");
                 assert!(!managed_identity);
             }
             _ => panic!("wrong subcommand"),
         }
@@ -352,13 +387,15 @@
             Some(Commands::TestConnectivityBlob {
                 endpoint,
                 bucket,
+                profile,
                 azure_account,
                 azure_key,
                 managed_identity,
             }) => {
                 assert_eq!(endpoint.unwrap(), "foo");
                 assert_eq!(&bucket, "bar");
-                assert_eq!(azure_account, "azure-account");
+                assert!(profile.is_none());
+                assert_eq!(azure_account.unwrap(), "azure-account");
                 assert!(azure_key.is_none());
                 assert!(managed_identity);
             }
@@ -366,6 +403,28 @@
         }
     }
 
+    #[test]
+    fn test_connectivity_blob_profile() {
+        let cli = pos("zfs_object_agent test-connectivity-blob -e foo -b bar -p profile");
+        match cli.command {
+            Some(Commands::TestConnectivityBlob {
+                endpoint,
+                bucket,
+                profile,
+                azure_account,
+                azure_key,
+                managed_identity,
+            }) => {
+                assert_eq!(endpoint.unwrap(), "foo");
+                assert_eq!(&bucket, "bar");
+                assert_eq!(profile.unwrap(), "profile");
+                assert!(azure_account.is_none());
+                assert!(azure_key.is_none());
+                assert!(!managed_identity);
+            }
+            _ => panic!("wrong subcommand"),
+        }
+    }
     #[test]
     fn test_connectivity_default_protocol() {
         let cli =
@@ -375,6 +434,7 @@
                 endpoint,
                 region,
                 bucket,
+                profile,
                 aws_access_key_id,
                 aws_secret_access_key,
                 aws_instance_profile,
@@ -382,6 +442,7 @@
                 assert_eq!(endpoint, "foo");
                 assert_eq!(region, "bar");
                 assert_eq!(&bucket, "baz");
+                assert!(profile.is_none());
                 assert!(aws_access_key_id.is_none());
                 assert!(aws_secret_access_key.is_none());
                 assert!(aws_instance_profile);
@@ -392,6 +453,31 @@
 
     #[test]
     fn test_connectivity_profile() {
+        let cli = pos("zfs_object_agent test-connectivity-s3 -e foo -r bar -b baz -p profile");
+        match cli.command {
+            Some(Commands::TestConnectivityS3 {
+                endpoint,
+                region,
+                bucket,
+                profile,
+                aws_access_key_id,
+                aws_secret_access_key,
+                aws_instance_profile,
+            }) => {
+                assert_eq!(endpoint, "foo");
+                assert_eq!(region, "bar");
+                assert_eq!(&bucket, "baz");
+                assert_eq!(profile.unwrap(), "profile");
+                assert!(aws_access_key_id.is_none());
+                assert!(aws_secret_access_key.is_none());
+                assert!(!aws_instance_profile);
+            }
+            _ => panic!("wrong subcommand"),
+        }
+    }
+
+    #[test]
+    fn test_connectivity_instance_profile() {
         let cli = pos(
             "zfs_object_agent test-connectivity-s3 -e foo -r bar -b baz --aws-instance-profile",
         );
@@ -400,6 +486,7 @@
                 endpoint,
                 region,
                 bucket,
+                profile,
                 aws_access_key_id,
                 aws_secret_access_key,
                 aws_instance_profile,
@@ -407,6 +494,7 @@
                 assert_eq!(endpoint, "foo");
                 assert_eq!(region, "bar");
                 assert_eq!(&bucket, "baz");
+                assert!(profile.is_none());
                 assert!(aws_access_key_id.is_none());
                 assert!(aws_secret_access_key.is_none());
                 assert!(aws_instance_profile);
@@ -423,6 +511,7 @@
                 endpoint,
                 region,
                 bucket,
+                profile,
                 aws_access_key_id,
                 aws_secret_access_key,
                 aws_instance_profile,
@@ -430,6 +519,7 @@
                 assert_eq!(endpoint, "foo");
                 assert_eq!(region, "bar");
                 assert_eq!(&bucket, "baz");
+                assert!(profile.is_none());
                 assert_eq!(aws_access_key_id.unwrap(), "abcd");
                 assert_eq!(aws_secret_access_key.unwrap(), "1234");
                 assert!(!aws_instance_profile);
@@ -450,6 +540,9 @@
     fn test_connectivity_s3_param_conflicts() {
         neg("zfs_object_agent test-connectivity-s3 -e foo -r bar -b baz -i abcd -s 1234 --aws-instance-profile");
         neg("zfs_object_agent test-connectivity-s3 -e foo -r bar -b baz -i abcd --aws-instance-profile");
+        neg("zfs_object_agent test-connectivity-s3 -e foo -r bar -b baz -p profile --aws-instance-profile");
+        neg("zfs_object_agent test-connectivity-s3 -e foo -r bar -b baz -s 1234 -p profile");
+        neg("zfs_object_agent test-connectivity-s3 -e foo -r bar -b baz -i abcd -p profile");
     }
 
     #[test]
@@ -464,6 +557,9 @@ mod test {
     #[test]
     fn test_connectivity_blob_param_conflicts() {
         neg("zfs_object_agent test-connectivity-blob -e foo -b baz -a abcd -s 1234 --managed_identity");
+        neg("zfs_object_agent test-connectivity-blob -e foo -b baz -a abcd -p profile");
+        neg("zfs_object_agent test-connectivity-blob -e foo -b baz -s 123 -p profile");
+        neg("zfs_object_agent test-connectivity-blob -e foo -b baz -p profile --managed_identity");
     }
 
     #[test]
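Reviewer note: the new `-p`/`--profile` flag is mutually exclusive with the explicit key options and with instance-profile/managed-identity, and it feeds straight into the new `S3Credentials::Profile` / `BlobCredentials::Profile` variants in `main()`. A hypothetical extra test case in the same style as the ones above — it reuses this module's `pos()` helper and `Commands` enum; the test name and the `--profile prod` invocation are made up for illustration, not part of the patch:

```rust
#[test]
fn test_connectivity_s3_profile_long_flag() {
    // clap derives the long form `--profile` from the field name, so the
    // long spelling should parse exactly like `-p`.
    let cli = pos("zfs_object_agent test-connectivity-s3 -e foo -r bar -b baz --profile prod");
    match cli.command {
        Some(Commands::TestConnectivityS3 { profile, .. }) => {
            assert_eq!(profile.unwrap(), "prod");
        }
        _ => panic!("wrong subcommand"),
    }
}
```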
StatCount or StatBytes) is a saturating_sub() difference.disk_stats.push(self_stat - other_stat); } - - difference - } -} - -impl IoStats { - pub fn max_name_len(&self) -> usize { - self.disk_stats - .iter() - .map(|stats| stats.name.len()) - .max() - .unwrap_or_default() + Some(difference) } } diff --git a/cmd/zfs_object_agent/zcache/src/iostat.rs b/cmd/zfs_object_agent/zcache/src/iostat.rs index f928d73b3761..be3e5f66272c 100644 --- a/cmd/zfs_object_agent/zcache/src/iostat.rs +++ b/cmd/zfs_object_agent/zcache/src/iostat.rs @@ -1,6 +1,9 @@ //! The iostat subcommand for zcache. use std::cmp::max; +use std::collections::hash_map::DefaultHasher; +use std::hash::Hash; +use std::hash::Hasher; use std::sync::atomic::Ordering::Relaxed; use std::thread::sleep; use std::time::Duration; @@ -335,11 +338,7 @@ impl IoStatDisplay { debug!("{latest:?}"); - if previous.disk_stats.is_empty() - || latest.cache_runtime_id == previous.cache_runtime_id - { - let delta = &latest - &previous; - + if let Some(delta) = latest.checked_sub(&previous) { match &self.histogram_name { None => self.display_iostat_default(iteration, &delta), Some(name) => self.display_iostat_histogram(name, &delta), @@ -347,7 +346,7 @@ impl IoStatDisplay { // Flush stdout in case output is redirected to a file flush_stdout()?; } else { - info!("object agent restarted"); + info!("object agent restarted or disks changed"); } iteration += 1; @@ -371,11 +370,19 @@ impl IoStatDisplay { } fn insert_summary_disk(disk_stats: &mut IoStats) { - let mut summary = DiskIoStats::new("summary".to_string()); + let mut summary = DiskIoStats { + name: "summary".to_string(), + instance: 0, + stats: Default::default(), + }; + let mut hasher = DefaultHasher::new(); for disk in &disk_stats.disk_stats { summary += disk; + disk.instance.hash(&mut hasher); } + // Since the summay disk is compared first, make its instance depend on every disk + summary.instance = hasher.finish(); disk_stats.disk_stats.insert(0, summary); } } diff --git a/cmd/zfs_object_agent/zettacache/src/open.rs b/cmd/zfs_object_agent/zettacache/src/open.rs index d738d28960d7..8a2791d9a70c 100644 --- a/cmd/zfs_object_agent/zettacache/src/open.rs +++ b/cmd/zfs_object_agent/zettacache/src/open.rs @@ -183,15 +183,14 @@ fn is_valid_cache( match primary { Some(disk) => { - let disks_from_primary = disk - .superblock - .primary - .as_ref() - .unwrap() - .disks - .iter() - .map(|(k, v)| (*k, v.guid)) - .collect::>(); + let disks_from_primary = match &disk.superblock.primary { + Some(primary) => primary + .disks + .iter() + .map(|(k, v)| (*k, v.guid)) + .collect::>(), + None => return false, + }; // Only retain disks with IDs that are part of the primary block disks.retain( diff --git a/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs b/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs index b895720cf95c..cfb33d07734c 100644 --- a/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs +++ b/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs @@ -53,6 +53,7 @@ tunable! { // Try to keep this amount of slabs available for normal (i.e. BlockAllocator) use. Ideally // this will be enough space to absorb all the writes that can occur between index merges. + // This should be less than TARGET_FREE_BLOCKS_PCT. 
diff --git a/cmd/zfs_object_agent/zcache/src/iostat.rs b/cmd/zfs_object_agent/zcache/src/iostat.rs
index f928d73b3761..be3e5f66272c 100644
--- a/cmd/zfs_object_agent/zcache/src/iostat.rs
+++ b/cmd/zfs_object_agent/zcache/src/iostat.rs
@@ -1,6 +1,9 @@
 //! The iostat subcommand for zcache.
 
 use std::cmp::max;
+use std::collections::hash_map::DefaultHasher;
+use std::hash::Hash;
+use std::hash::Hasher;
 use std::sync::atomic::Ordering::Relaxed;
 use std::thread::sleep;
 use std::time::Duration;
@@ -335,11 +338,7 @@ impl IoStatDisplay {
 
             debug!("{latest:?}");
 
-            if previous.disk_stats.is_empty()
-                || latest.cache_runtime_id == previous.cache_runtime_id
-            {
-                let delta = &latest - &previous;
-
+            if let Some(delta) = latest.checked_sub(&previous) {
                 match &self.histogram_name {
                     None => self.display_iostat_default(iteration, &delta),
                     Some(name) => self.display_iostat_histogram(name, &delta),
@@ -347,7 +346,7 @@ impl IoStatDisplay {
                 // Flush stdout in case output is redirected to a file
                 flush_stdout()?;
             } else {
-                info!("object agent restarted");
+                info!("object agent restarted or disks changed");
             }
 
             iteration += 1;
@@ -371,11 +370,19 @@ impl IoStatDisplay {
     }
 
     fn insert_summary_disk(disk_stats: &mut IoStats) {
-        let mut summary = DiskIoStats::new("summary".to_string());
+        let mut summary = DiskIoStats {
+            name: "summary".to_string(),
+            instance: 0,
+            stats: Default::default(),
+        };
+        let mut hasher = DefaultHasher::new();
         for disk in &disk_stats.disk_stats {
             summary += disk;
+            disk.instance.hash(&mut hasher);
         }
+        // Since the summary disk is compared first, make its instance depend on every disk
+        summary.instance = hasher.finish();
         disk_stats.disk_stats.insert(0, summary);
     }
 }
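Reviewer note on the iostat.rs change above: the synthetic "summary" row needs an `instance` of its own because `checked_sub()` compares it first (it sits at index 0). Deriving it by hashing every member disk's random instance means two summaries only compare equal when they were built from the same set of disks. A minimal, self-contained sketch of that idea — the function name is illustrative, and `DefaultHasher` is only stable within a single process, which is all the iostat loop needs since it summarizes both samples itself:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative helper, not the agent's API: fold each disk's random
// instance id into one value for the synthetic "summary" disk.
fn summary_instance(disk_instances: &[u64]) -> u64 {
    let mut hasher = DefaultHasher::new();
    for instance in disk_instances {
        instance.hash(&mut hasher);
    }
    hasher.finish()
}

fn main() {
    let a = summary_instance(&[11, 22, 33]);
    let b = summary_instance(&[11, 22, 33]);
    let c = summary_instance(&[11, 22, 44]); // one disk replaced
    assert_eq!(a, b); // same disks => same summary instance
    assert_ne!(a, c); // different disks => summaries are not comparable
}
```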
diff --git a/cmd/zfs_object_agent/zettacache/src/open.rs b/cmd/zfs_object_agent/zettacache/src/open.rs
index d738d28960d7..8a2791d9a70c 100644
--- a/cmd/zfs_object_agent/zettacache/src/open.rs
+++ b/cmd/zfs_object_agent/zettacache/src/open.rs
@@ -183,15 +183,14 @@ fn is_valid_cache(
 
     match primary {
         Some(disk) => {
-            let disks_from_primary = disk
-                .superblock
-                .primary
-                .as_ref()
-                .unwrap()
-                .disks
-                .iter()
-                .map(|(k, v)| (*k, v.guid))
-                .collect::<HashMap<_, _>>();
+            let disks_from_primary = match &disk.superblock.primary {
+                Some(primary) => primary
+                    .disks
+                    .iter()
+                    .map(|(k, v)| (*k, v.guid))
+                    .collect::<HashMap<_, _>>(),
+                None => return false,
+            };
 
             // Only retain disks with IDs that are part of the primary block
             disks.retain(
diff --git a/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs b/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs
index b895720cf95c..cfb33d07734c 100644
--- a/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs
+++ b/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs
@@ -53,6 +53,7 @@ tunable! {
 
     // Try to keep this amount of slabs available for normal (i.e. BlockAllocator) use. Ideally
     // this will be enough space to absorb all the writes that can occur between index merges.
+    // This should be less than TARGET_FREE_BLOCKS_PCT.
     static ref TARGET_AVAILABLE_SLABS_PCT: Percent = Percent::new(2.0);
 }
 
diff --git a/cmd/zfs_object_agent/zettacache/src/zettacache/mod.rs b/cmd/zfs_object_agent/zettacache/src/zettacache/mod.rs
index f09994227a42..f780fc100ce1 100644
--- a/cmd/zfs_object_agent/zettacache/src/zettacache/mod.rs
+++ b/cmd/zfs_object_agent/zettacache/src/zettacache/mod.rs
@@ -113,7 +113,8 @@ tunable! {
     // keep at least 5% of the cache "free".  We need to have slop for the rebalance code to be
     // able to consolidate slabs (to create empty slabs) to accomodate block size changes in the
     // workload.  This target includes both free blocks within the BlockAllocator, and free slabs
-    // within the SlabAllocator which are available to the BlockAllocator.
+    // within the SlabAllocator which are available to the BlockAllocator.  This should be more
+    // than TARGET_AVAILABLE_SLABS_PCT.
     static ref TARGET_FREE_BLOCKS_PCT: Percent = Percent::new(5.0);
 
     // Keep the total footprint for the pending changes and index cache data at about 12% of
diff --git a/cmd/zfs_object_agent/zettaobject/src/object_based_log.rs b/cmd/zfs_object_agent/zettaobject/src/object_based_log.rs
index 9d4c35eb1387..8c83a0d79a77 100644
--- a/cmd/zfs_object_agent/zettaobject/src/object_based_log.rs
+++ b/cmd/zfs_object_agent/zettaobject/src/object_based_log.rs
@@ -1,11 +1,11 @@
 use std::marker::PhantomData;
+use std::mem;
 use std::sync::Arc;
 use std::time::Instant;
 
 use anyhow::Context;
 use anyhow::Result;
 use futures::future;
-use futures::future::join_all;
 use futures::stream;
 use futures::stream::StreamExt;
 use futures_core::Stream;
@@ -183,23 +183,6 @@ impl ObjectBasedLog {
         }
     }
 
-    /*
-    pub fn verify_clean_shutdown(&mut self) {
-        // Make sure there are no objects past the logical end of the log
-        self.recovered = true;
-    }
-    */
-
-    pub fn to_phys(&self) -> ObjectBasedLogPhys {
-        ObjectBasedLogPhys {
-            generation: self.generation,
-            num_chunks: self.num_chunks,
-            num_entries: self.num_entries,
-            key: self.name.clone(),
-            entry_type: PhantomData,
-        }
-    }
-
     pub fn append(&mut self, txg: Txg, entry: T) {
         // XXX assert that txg is the same as the txg for the other pending entries?
         self.pending_entries.push(entry);
@@ -221,8 +204,6 @@
         self.num_chunks += 1;
         self.num_entries += chunk.entries.len() as u64;
 
-        // XXX cloning name, would be nice if we could find a way to
-        // reference them from the spawned task (use Arc)
         let shared_state = self.shared_state.clone();
         let name = self.name.clone();
         let handle = measure!("ObjectBasedLog::initiate_flush()").spawn(async move {
@@ -233,16 +214,21 @@
         assert!(self.pending_entries.is_empty());
     }
 
-    pub async fn flush(&mut self, txg: Txg) {
+    pub async fn flush(&mut self, txg: Txg) -> ObjectBasedLogPhys {
         if !self.pending_entries.is_empty() {
             self.initiate_flush(txg);
         }
-        let wait_for = self.pending_flushes.split_off(0);
-        let join_result = join_all(wait_for).await;
-        for r in join_result {
-            r.unwrap();
+        for jh in mem::take(&mut self.pending_flushes) {
+            jh.await.unwrap();
         }
         self.num_flushed_chunks = self.num_chunks;
+        ObjectBasedLogPhys {
+            generation: self.generation,
+            num_chunks: self.num_chunks,
+            num_entries: self.num_entries,
+            key: self.name.clone(),
+            entry_type: PhantomData,
+        }
     }
 
     pub async fn clear(&mut self, txg: Txg) {
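Reviewer note: the shape of the new `flush()` is worth a second look — it both drains the outstanding flush handles and returns the `ObjectBasedLogPhys`, so a caller can no longer grab a stale `to_phys()` snapshot while flushes are still in flight. A minimal, self-contained sketch of the drain-and-await idiom; the `Flusher` type and the tokio usage here are illustrative, not the agent's code:

```rust
use std::mem;
use tokio::task::JoinHandle;

// Illustrative only: drain the queued handles with mem::take(), await each
// one, and only then hand back a value describing the flushed state.
struct Flusher {
    pending_flushes: Vec<JoinHandle<()>>,
    num_flushed: u64,
}

impl Flusher {
    async fn flush(&mut self) -> u64 {
        // mem::take() moves the handles out and leaves an empty Vec behind,
        // replacing the old split_off(0) + join_all() dance.
        for handle in mem::take(&mut self.pending_flushes) {
            handle.await.unwrap();
            self.num_flushed += 1;
        }
        self.num_flushed // stand-in for returning the *Phys description
    }
}

#[tokio::main]
async fn main() {
    let mut f = Flusher {
        pending_flushes: (0..3).map(|_| tokio::spawn(async {})).collect(),
        num_flushed: 0,
    };
    assert_eq!(f.flush().await, 3);
    assert!(f.pending_flushes.is_empty());
}
```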
diff --git a/cmd/zfs_object_agent/zettaobject/src/pool.rs b/cmd/zfs_object_agent/zettaobject/src/pool.rs
index 54e9f9031172..e5c56437891d 100644
--- a/cmd/zfs_object_agent/zettaobject/src/pool.rs
+++ b/cmd/zfs_object_agent/zettaobject/src/pool.rs
@@ -23,7 +23,7 @@ use bytes::Bytes;
 use bytesize::ByteSize;
 use derivative::Derivative;
 use futures::future;
-use futures::future::join3;
+use futures::future::join;
 use futures::future::Future;
 use futures::stream;
 use futures::stream::*;
@@ -470,13 +470,18 @@ struct ReclaimLog {
 }
 
 impl ReclaimLog {
-    fn to_phys(&self) -> ReclaimLogPhys {
+    async fn flush(&mut self, txg: Txg) -> ReclaimLogPhys {
+        let (pending_frees_log, object_size_log) = join(
+            self.pending_frees_log.flush(txg),
+            self.object_size_log.flush(txg),
+        )
+        .await;
         ReclaimLogPhys {
             num_bits: self.num_bits,
             prefix: self.prefix,
-            pending_frees_log: self.pending_frees_log.to_phys(),
+            pending_frees_log,
             pending_free_bytes: self.pending_free_bytes,
-            object_size_log: self.object_size_log.to_phys(),
+            object_size_log,
         }
     }
 }
@@ -544,10 +549,16 @@ struct ReclaimInfo {
 }
 
 impl ReclaimInfo {
-    fn to_phys(&self) -> ReclaimInfoPhys {
+    async fn flush(&mut self, txg: Txg) -> ReclaimInfoPhys {
         ReclaimInfoPhys {
             indirect_table: self.indirect_table.log_ids.clone(),
-            reclaim_logs: self.reclaim_logs.iter().map(|log| log.to_phys()).collect(),
+            reclaim_logs: self
+                .reclaim_logs
+                .iter_mut()
+                .map(|log| log.flush(txg))
+                .collect::<FuturesOrdered<_>>()
+                .collect()
+                .await,
         }
     }
 }
@@ -555,7 +566,6 @@
 type WriteCallback = Box;
 
 /// state that's modified while syncing a txg
-//#[derive(Debug)]
 struct PoolSyncingState {
     // Note: some objects may contain additional (adjacent) blocks, if they have
     // been consolidated but this fact is not yet represented in the log.  A
@@ -1341,29 +1351,19 @@
             }
         }
 
-        let frees_log_stream = FuturesUnordered::new();
-        let size_log_stream = FuturesUnordered::new();
-        for log in syncing_state.reclaim_info.reclaim_logs.iter_mut() {
-            frees_log_stream.push(log.pending_frees_log.flush(txg));
-            size_log_stream.push(log.object_size_log.flush(txg));
-        }
-
-        join3(
+        let (storage_object_log, reclaim_info) = join(
             syncing_state.storage_object_log.flush(txg),
-            frees_log_stream.count(),
-            size_log_stream.count(),
+            syncing_state.reclaim_info.flush(txg),
         )
         .await;
 
-        syncing_state.storage_object_log.flush(txg).await;
-
         // write uberblock
         let u = UberblockPhys {
             guid: state.shared_state.guid,
             txg,
             date: SystemTime::now(),
-            storage_object_log: syncing_state.storage_object_log.to_phys(),
-            reclaim_info: syncing_state.reclaim_info.to_phys(),
+            storage_object_log,
+            reclaim_info,
             next_block: syncing_state.next_block(),
             obsolete_objects: syncing_state.object_deleter.phys(),
             zfs_uberblock,
diff --git a/lib/libzfs/libzfs_dataset.c b/lib/libzfs/libzfs_dataset.c
index 9af8677e39a6..3c70191ae371 100644
--- a/lib/libzfs/libzfs_dataset.c
+++ b/lib/libzfs/libzfs_dataset.c
@@ -527,6 +527,7 @@ make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc)
 	zhp->zfs_head_type = pzhp->zfs_type;
 	zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
 	zhp->zpool_hdl = zpool_handle(zhp);
+	zhp->zfs_dmustats = zc->zc_objset_stats;
 
 	return (zhp);
 }
@@ -2308,6 +2309,19 @@ get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
 		*val = zhp->zfs_dmustats.dds_redacted;
 		break;
 
+	case ZFS_PROP_CREATETXG:
+		/*
+		 * We can directly read the createtxg property from the zfs
+		 * handle for Filesystem, Snapshot and Volume types.
+		 */
+		if ((zhp->zfs_type == ZFS_TYPE_FILESYSTEM) ||
+		    (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) ||
+		    (zhp->zfs_type == ZFS_TYPE_VOLUME)) {
+			*val = zhp->zfs_dmustats.dds_creation_txg;
+			break;
+		}
+		zfs_fallthrough;
+
 	default:
 		switch (zfs_prop_get_type(prop)) {
 		case PROP_TYPE_NUMBER:
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index c0016532d88e..df0a006fc10b 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -2321,6 +2321,8 @@ zfs_ioc_snapshot_list_next(zfs_cmd_t *zc)
 	}
 
 	if (zc->zc_simple) {
+		zc->zc_objset_stats.dds_creation_txg =
+		    dsl_get_creationtxg(ds);
 		dsl_dataset_rele(ds, FTAG);
 		break;
 	}
diff --git a/tests/zfs-tests/tests/functional/io/io_uring.ksh b/tests/zfs-tests/tests/functional/io/io_uring.ksh
index 601445f6a43d..47e439d0f4d5 100755
--- a/tests/zfs-tests/tests/functional/io/io_uring.ksh
+++ b/tests/zfs-tests/tests/functional/io/io_uring.ksh
@@ -40,7 +40,7 @@
 
 verify_runnable "global"
 
-if [[ $(linux_version) -lt $(linux_version "5.1") ]]; then
+if ! $(grep -q "CONFIG_IO_URING=y" /boot/config-$(uname -r)); then
 	log_unsupported "Requires io_uring support"
 fi
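Reviewer note on the pool.rs restructuring above: `sync` now makes a single pass — the storage object log and the reclaim logs are flushed concurrently with `join()`, and the uberblock is built from the `*Phys` values those flushes return. That also removes the redundant second `storage_object_log.flush(txg)` the old code issued after `join3()`. A self-contained sketch of the pattern; the types and numbers below are illustrative only, not the pool's real code:

```rust
use futures::future::join;

// Stand-ins for the real *Phys types; only the shape of the flow matters.
struct LogPhys(u64);
struct ReclaimPhys(u64);
struct Uberblock {
    storage_object_log: LogPhys,
    reclaim_info: ReclaimPhys,
}

async fn flush_storage_object_log() -> LogPhys {
    LogPhys(7)
}

async fn flush_reclaim_info() -> ReclaimPhys {
    ReclaimPhys(3)
}

#[tokio::main]
async fn main() {
    // Both flushes run concurrently; the uberblock is assembled directly from
    // what they return, so there is no separate to_phys() step to keep in sync.
    let (storage_object_log, reclaim_info) = join(
        flush_storage_object_log(),
        flush_reclaim_info(),
    )
    .await;
    let u = Uberblock { storage_object_log, reclaim_info };
    assert_eq!(u.storage_object_log.0 + u.reclaim_info.0, 10);
}
```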