Skip to content

Commit

Permalink
Merge #674
Browse files Browse the repository at this point in the history
674: Make test 'iter_panic_fuse' deterministic r=cuviper a=Aaron1011

Fixes #667

'panic_fuse' will only stop other threads 'as soon as possible' -
if the other threads are fast enough, they might end up processing
the entire rest of the iterator.

This commit changes the test 'iter_panic_fuse' to properly take this
into account, by creating a custom threadpool with only 1 thread.
This makes the test deterministic - with only one thread, the panic
is guaranteed to be observed when the next item is processed, causing
the desired early exit.

Co-authored-by: Aaron Hill <[email protected]>
Co-authored-by: Josh Stone <[email protected]>
  • Loading branch information
3 people committed Jul 18, 2019
2 parents 2b5227c + 2a2c5a8 commit 45a1c33
Show file tree
Hide file tree
Showing 2 changed files with 91 additions and 48 deletions.
90 changes: 63 additions & 27 deletions src/iter/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -447,53 +447,89 @@ fn check_cmp_gt_to_seq() {

#[test]
fn check_cmp_short_circuit() {
    // We only use a single thread in order to make the short-circuit behavior deterministic.
    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();

    // `a` and `b` are equal except at index 42, so comparison can stop early.
    let a = vec![0; 1024];
    let mut b = a.clone();
    b[42] = 1;

    pool.install(|| {
        let expected = ::std::cmp::Ordering::Less;
        assert_eq!(a.par_iter().cmp(&b), expected);

        // Try several split granularities to exercise different producer shapes.
        for len in 1..10 {
            let counter = AtomicUsize::new(0);
            let result = a
                .par_iter()
                .with_max_len(len)
                .inspect(|_| {
                    counter.fetch_add(1, Ordering::SeqCst);
                })
                .cmp(&b);
            assert_eq!(result, expected);
            // should not have visited every single one
            assert!(counter.into_inner() < a.len());
        }
    });
}

#[test]
fn check_partial_cmp_short_circuit() {
    // We only use a single thread to make the short-circuit behavior deterministic.
    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();

    // `a` and `b` differ only at index 42, so comparison can stop early.
    let a = vec![0; 1024];
    let mut b = a.clone();
    b[42] = 1;

    pool.install(|| {
        let expected = Some(::std::cmp::Ordering::Less);
        assert_eq!(a.par_iter().partial_cmp(&b), expected);

        // Try several split granularities to exercise different producer shapes.
        for len in 1..10 {
            let counter = AtomicUsize::new(0);
            let result = a
                .par_iter()
                .with_max_len(len)
                .inspect(|_| {
                    counter.fetch_add(1, Ordering::SeqCst);
                })
                .partial_cmp(&b);
            assert_eq!(result, expected);
            // should not have visited every single one
            assert!(counter.into_inner() < a.len());
        }
    });
}

#[test]
fn check_partial_cmp_nan_short_circuit() {
    // We only use a single thread to make the short-circuit behavior deterministic.
    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();

    // A NaN at index 42 makes the comparison incomparable (`None`), allowing an early stop.
    let a = vec![0.0; 1024];
    let mut b = a.clone();
    b[42] = f64::NAN;

    pool.install(|| {
        let expected = None;
        assert_eq!(a.par_iter().partial_cmp(&b), expected);

        // Try several split granularities to exercise different producer shapes.
        for len in 1..10 {
            let counter = AtomicUsize::new(0);
            let result = a
                .par_iter()
                .with_max_len(len)
                .inspect(|_| {
                    counter.fetch_add(1, Ordering::SeqCst);
                })
                .partial_cmp(&b);
            assert_eq!(result, expected);
            // should not have visited every single one
            assert!(counter.into_inner() < a.len());
        }
    });
}

#[test]
Expand Down
49 changes: 28 additions & 21 deletions tests/iter_panic.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
extern crate rayon;

use rayon::prelude::*;
use rayon::ThreadPoolBuilder;
use std::ops::Range;
use std::panic::{self, UnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
Expand All @@ -22,26 +23,32 @@ fn iter_panic() {

#[test]
fn iter_panic_fuse() {
    // We only use a single thread in order to make the behavior
    // of 'panic_fuse' deterministic
    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();

    pool.install(|| {
        // Runs the iterator to completion (expecting a panic somewhere),
        // and returns how many items were actually visited.
        fn count(iter: impl ParallelIterator + UnwindSafe) -> usize {
            let count = AtomicUsize::new(0);
            let result = panic::catch_unwind(|| {
                iter.for_each(|_| {
                    count.fetch_add(1, Ordering::Relaxed);
                });
            });
            assert!(result.is_err());
            count.into_inner()
        }

        // Without `panic_fuse()`, we'll reach every item except the panicking one.
        let expected = ITER.len() - 1;
        let iter = ITER.into_par_iter().with_max_len(1);
        assert_eq!(count(iter.clone().inspect(check)), expected);

        // With `panic_fuse()` anywhere in the chain, we'll reach fewer items.
        assert!(count(iter.clone().inspect(check).panic_fuse()) < expected);
        assert!(count(iter.clone().panic_fuse().inspect(check)) < expected);

        // Try in reverse to be sure we hit the producer case.
        assert!(count(iter.clone().panic_fuse().inspect(check).rev()) < expected);
    });
}

0 comments on commit 45a1c33

Please sign in to comment.