diff --git a/src/iter/test.rs b/src/iter/test.rs
index e920986a5..d2a6c1dee 100644
--- a/src/iter/test.rs
+++ b/src/iter/test.rs
@@ -447,53 +447,89 @@ fn check_cmp_gt_to_seq() {
 
 #[test]
 fn check_cmp_short_circuit() {
+    // We only use a single thread in order to make the short-circuit behavior deterministic.
+    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
     let a = vec![0; 1024];
     let mut b = a.clone();
     b[42] = 1;
 
-    let counter = AtomicUsize::new(0);
-    let result = a
-        .par_iter()
-        .inspect(|_| {
-            counter.fetch_add(1, Ordering::SeqCst);
-        })
-        .cmp(&b);
-    assert!(result == ::std::cmp::Ordering::Less);
-    assert!(counter.load(Ordering::SeqCst) < a.len()); // should not have visited every single one
+    pool.install(|| {
+        let expected = ::std::cmp::Ordering::Less;
+        assert_eq!(a.par_iter().cmp(&b), expected);
+
+        for len in 1..10 {
+            let counter = AtomicUsize::new(0);
+            let result = a
+                .par_iter()
+                .with_max_len(len)
+                .inspect(|_| {
+                    counter.fetch_add(1, Ordering::SeqCst);
+                })
+                .cmp(&b);
+            assert_eq!(result, expected);
+            // should not have visited every single one
+            assert!(counter.into_inner() < a.len());
+        }
+    });
 }
 
 #[test]
 fn check_partial_cmp_short_circuit() {
+    // We only use a single thread to make the short-circuit behavior deterministic.
+    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
     let a = vec![0; 1024];
     let mut b = a.clone();
     b[42] = 1;
 
-    let counter = AtomicUsize::new(0);
-    let result = a
-        .par_iter()
-        .inspect(|_| {
-            counter.fetch_add(1, Ordering::SeqCst);
-        })
-        .partial_cmp(&b);
-    assert!(result == Some(::std::cmp::Ordering::Less));
-    assert!(counter.load(Ordering::SeqCst) < a.len()); // should not have visited every single one
+    pool.install(|| {
+        let expected = Some(::std::cmp::Ordering::Less);
+        assert_eq!(a.par_iter().partial_cmp(&b), expected);
+
+        for len in 1..10 {
+            let counter = AtomicUsize::new(0);
+            let result = a
+                .par_iter()
+                .with_max_len(len)
+                .inspect(|_| {
+                    counter.fetch_add(1, Ordering::SeqCst);
+                })
+                .partial_cmp(&b);
+            assert_eq!(result, expected);
+            // should not have visited every single one
+            assert!(counter.into_inner() < a.len());
+        }
+    });
 }
 
 #[test]
 fn check_partial_cmp_nan_short_circuit() {
+    // We only use a single thread to make the short-circuit behavior deterministic.
+    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
     let a = vec![0.0; 1024];
     let mut b = a.clone();
     b[42] = f64::NAN;
 
-    let counter = AtomicUsize::new(0);
-    let result = a
-        .par_iter()
-        .inspect(|_| {
-            counter.fetch_add(1, Ordering::SeqCst);
-        })
-        .partial_cmp(&b);
-    assert!(result == None);
-    assert!(counter.load(Ordering::SeqCst) < a.len()); // should not have visited every single one
+    pool.install(|| {
+        let expected = None;
+        assert_eq!(a.par_iter().partial_cmp(&b), expected);
+
+        for len in 1..10 {
+            let counter = AtomicUsize::new(0);
+            let result = a
+                .par_iter()
+                .with_max_len(len)
+                .inspect(|_| {
+                    counter.fetch_add(1, Ordering::SeqCst);
+                })
+                .partial_cmp(&b);
+            assert_eq!(result, expected);
+            // should not have visited every single one
+            assert!(counter.into_inner() < a.len());
+        }
+    });
 }
 
 #[test]
diff --git a/tests/iter_panic.rs b/tests/iter_panic.rs
index 0497dc870..f40071287 100644
--- a/tests/iter_panic.rs
+++ b/tests/iter_panic.rs
@@ -1,6 +1,7 @@
 extern crate rayon;
 
 use rayon::prelude::*;
+use rayon::ThreadPoolBuilder;
 use std::ops::Range;
 use std::panic::{self, UnwindSafe};
 use std::sync::atomic::{AtomicUsize, Ordering};
@@ -22,26 +23,32 @@ fn iter_panic() {
 
 #[test]
 fn iter_panic_fuse() {
-    fn count(iter: impl ParallelIterator + UnwindSafe) -> usize {
-        let count = AtomicUsize::new(0);
-        let result = panic::catch_unwind(|| {
-            iter.for_each(|_| {
-                count.fetch_add(1, Ordering::Relaxed);
+    // We only use a single thread in order to make the behavior
+    // of 'panic_fuse' deterministic
+    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
+    pool.install(|| {
+        fn count(iter: impl ParallelIterator + UnwindSafe) -> usize {
+            let count = AtomicUsize::new(0);
+            let result = panic::catch_unwind(|| {
+                iter.for_each(|_| {
+                    count.fetch_add(1, Ordering::Relaxed);
+                });
             });
-        });
-        assert!(result.is_err());
-        count.into_inner()
-    }
-
-    // Without `panic_fuse()`, we'll reach every item except the panicking one.
-    let expected = ITER.len() - 1;
-    let iter = ITER.into_par_iter().with_max_len(1);
-    assert_eq!(count(iter.clone().inspect(check)), expected);
-
-    // With `panic_fuse()` anywhere in the chain, we'll reach fewer items.
-    assert!(count(iter.clone().inspect(check).panic_fuse()) < expected);
-    assert!(count(iter.clone().panic_fuse().inspect(check)) < expected);
-
-    // Try in reverse to be sure we hit the producer case.
-    assert!(count(iter.clone().panic_fuse().inspect(check).rev()) < expected);
+            assert!(result.is_err());
+            count.into_inner()
+        }
+
+        // Without `panic_fuse()`, we'll reach every item except the panicking one.
+        let expected = ITER.len() - 1;
+        let iter = ITER.into_par_iter().with_max_len(1);
+        assert_eq!(count(iter.clone().inspect(check)), expected);
+
+        // With `panic_fuse()` anywhere in the chain, we'll reach fewer items.
+        assert!(count(iter.clone().inspect(check).panic_fuse()) < expected);
+        assert!(count(iter.clone().panic_fuse().inspect(check)) < expected);
+
+        // Try in reverse to be sure we hit the producer case.
+        assert!(count(iter.clone().panic_fuse().inspect(check).rev()) < expected);
+    });
 }