From d7c17fb6316246d84fe571e363031b399f201d70 Mon Sep 17 00:00:00 2001
From: Will Shanks
Date: Tue, 26 Dec 2023 21:35:22 -0500
Subject: [PATCH] Suppress more numpy warnings from uncertainties (#1350)

This PR is a follow-up to
https://github.com/Qiskit-Extensions/qiskit-experiments/pull/1070. It
suppresses numpy warnings during uncertainties array creation in more
places in the code.

Something changed recently in numpy so that it now emits warnings in
places where it used to suppress them, in particular when using
`numpy.vectorize`, which wraps user-level Python code inside of numpy's
C code and seems to lose track of the warning state. See
https://github.com/numpy/numpy/issues/21416 for more information.
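
As a minimal, self-contained sketch of the pattern applied throughout this
patch (the array values are arbitrary and only for illustration; whether the
warning actually fires depends on the installed numpy and uncertainties
versions):

    import numpy as np
    from uncertainties import unumpy as unp

    nominals = np.array([[1.0, 2.0], [3.0, 4.0]])
    # NaN std devs are the sentinel for "no standard deviation computed"
    std_devs = np.full_like(nominals, np.nan)

    # Creating uncertainties arrays with NaN std_devs, and extracting the
    # std_devs again through numpy.vectorize, can raise invalid-value
    # floating point warnings. np.errstate(invalid="ignore") silences only
    # those warnings, and only for the enclosed block.
    with np.errstate(invalid="ignore"):
        datum = unp.uarray(nominals, std_devs)
        errors = unp.std_devs(datum)

    print(errors)  # all-NaN array, without the RuntimeWarning

Scoping the suppression to a `with` block rather than changing the global
error state keeps other floating point warnings visible.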
---
 .../curve_analysis/curve_analysis.py          |  6 ++-
 .../curve_analysis/curve_data.py              | 11 +++--
 qiskit_experiments/data_processing/nodes.py   | 10 +++-
 test/data_processing/test_data_processing.py  | 11 +++--
 test/data_processing/test_nodes.py            | 46 ++++++++++++-------
 5 files changed, 56 insertions(+), 28 deletions(-)

diff --git a/qiskit_experiments/curve_analysis/curve_analysis.py b/qiskit_experiments/curve_analysis/curve_analysis.py
index 50b3f9c9c8..d2eea799cc 100644
--- a/qiskit_experiments/curve_analysis/curve_analysis.py
+++ b/qiskit_experiments/curve_analysis/curve_analysis.py
@@ -231,7 +231,11 @@ def _run_data_processing(
         )
         processed_values = self.options.data_processor(to_process)
         source["yval"] = unp.nominal_values(processed_values).flatten()
-        source["yerr"] = unp.std_devs(processed_values).flatten()
+        with np.errstate(invalid="ignore"):
+            # For averaged data, the processed std dev will be NaN.
+            # Setting std_devs to NaN will trigger floating point exceptions
+            # which we can ignore. See https://stackoverflow.com/q/75656026
+            source["yerr"] = unp.std_devs(processed_values).flatten()
         source["category"] = category

         table = ScatterTable(data=source)
diff --git a/qiskit_experiments/curve_analysis/curve_data.py b/qiskit_experiments/curve_analysis/curve_data.py
index 62214e9d9b..4f5ee74380 100644
--- a/qiskit_experiments/curve_analysis/curve_data.py
+++ b/qiskit_experiments/curve_analysis/curve_data.py
@@ -235,10 +235,13 @@ def ufloat_params(self) -> Dict[str, uncertainties.UFloat]:
             )
         else:
             # Invalid covariance matrix. Std dev is set to nan, i.e. not computed.
-            ufloat_fitvals = uarray(
-                nominal_values=[self.params[name] for name in self.var_names],
-                std_devs=np.full(len(self.var_names), np.nan),
-            )
+            with np.errstate(invalid="ignore"):
+                # Setting std_devs to NaN will trigger floating point exceptions
+                # which we can ignore. See https://stackoverflow.com/q/75656026
+                ufloat_fitvals = uarray(
+                    nominal_values=[self.params[name] for name in self.var_names],
+                    std_devs=np.full(len(self.var_names), np.nan),
+                )
         # Combine fixed params and fitting variables into a single dictionary
         # Fixed parameter has zero std_dev
         ufloat_params = {}
diff --git a/qiskit_experiments/data_processing/nodes.py b/qiskit_experiments/data_processing/nodes.py
index bb31a42dfc..9bb4ed183b 100644
--- a/qiskit_experiments/data_processing/nodes.py
+++ b/qiskit_experiments/data_processing/nodes.py
@@ -85,7 +85,10 @@ def _process(self, data: np.ndarray) -> np.ndarray:

         reduced_array = np.mean(data, axis=ax)
         nominals = unp.nominal_values(reduced_array)
-        errors = unp.std_devs(reduced_array)
+        with np.errstate(invalid="ignore"):
+            # Setting std_devs to NaN will trigger floating point exceptions
+            # which we can ignore. See https://stackoverflow.com/q/75656026
+            errors = unp.std_devs(reduced_array)

         if np.any(np.isnan(errors)):
             # replace empty elements with SEM
@@ -781,7 +784,10 @@ def _process(self, data: np.ndarray) -> np.ndarray:

             p_mean = alpha_posterior[0] / alpha_sum
             p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)
-            probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))
+            with np.errstate(invalid="ignore"):
+                # Setting std_devs to NaN will trigger floating point exceptions
+                # which we can ignore. See https://stackoverflow.com/q/75656026
+                probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))

         return probabilities

diff --git a/test/data_processing/test_data_processing.py b/test/data_processing/test_data_processing.py
index 206bf0d4e0..273450dfe0 100644
--- a/test/data_processing/test_data_processing.py
+++ b/test/data_processing/test_data_processing.py
@@ -418,10 +418,13 @@ def test_json_trained(self):
             unp.nominal_values(loaded_out),
         )

-        np.testing.assert_array_almost_equal(
-            unp.std_devs(ref_out),
-            unp.std_devs(loaded_out),
-        )
+        with np.errstate(invalid="ignore"):
+            # Setting std_devs to NaN will trigger floating point exceptions
+            # which we can ignore. See https://stackoverflow.com/q/75656026
+            np.testing.assert_array_almost_equal(
+                unp.std_devs(ref_out),
+                unp.std_devs(loaded_out),
+            )


 class TestIQSingleAvg(BaseDataProcessorTest):
diff --git a/test/data_processing/test_nodes.py b/test/data_processing/test_nodes.py
index 6f92477e52..4587c984b0 100644
--- a/test/data_processing/test_nodes.py
+++ b/test/data_processing/test_nodes.py
@@ -43,7 +43,10 @@ class TestAveraging(BaseDataProcessorTest):

     def test_simple(self):
         """Simple test of averaging. Standard error of mean is generated."""
-        datum = unp.uarray([[1, 2], [3, 4], [5, 6]], np.full((3, 2), np.nan))
+        with np.errstate(invalid="ignore"):
+            # Setting std_devs to NaN will trigger floating point exceptions
+            # which we can ignore. See https://stackoverflow.com/q/75656026
+            datum = unp.uarray([[1, 2], [3, 4], [5, 6]], np.full((3, 2), np.nan))

         node = AverageData(axis=1)
         processed_data = node(data=datum)
@@ -85,16 +88,19 @@ def test_with_error(self):

     def test_with_error_partly_non_error(self):
         """Compute error propagation. Some elements have no error."""
-        datum = unp.uarray(
-            [
-                [1, 2, 3, 4, 5, 6],
-                [1, 2, 3, 4, 5, 6],
-            ],
-            [
-                [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
-                [np.nan, 0.2, 0.3, 0.4, 0.5, 0.6],
-            ],
-        )
+        with np.errstate(invalid="ignore"):
+            # Setting std_devs to NaN will trigger floating point exceptions
+            # which we can ignore. See https://stackoverflow.com/q/75656026
+            datum = unp.uarray(
+                [
+                    [1, 2, 3, 4, 5, 6],
+                    [1, 2, 3, 4, 5, 6],
+                ],
+                [
+                    [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
+                    [np.nan, 0.2, 0.3, 0.4, 0.5, 0.6],
+                ],
+            )

         node = AverageData(axis=1)
         processed_data = node(data=datum)
@@ -130,7 +136,10 @@ def test_iq_averaging(self):
         )
         iq_std = np.full_like(iq_data, np.nan)

-        self.create_experiment_data(unp.uarray(iq_data, iq_std), single_shot=True)
+        with np.errstate(invalid="ignore"):
+            # Setting std_devs to NaN will trigger floating point exceptions
+            # which we can ignore. See https://stackoverflow.com/q/75656026
+            self.create_experiment_data(unp.uarray(iq_data, iq_std), single_shot=True)

         avg_iq = AverageData(axis=0)
         processed_data = avg_iq(data=np.asarray(self.iq_experiment.data(0)["memory"]))
@@ -188,11 +197,14 @@ def test_simple(self):
             decimal=-8,
         )

-        np.testing.assert_array_almost_equal(
-            unp.std_devs(processed),
-            unp.std_devs(expected),
-            decimal=-8,
-        )
+        with np.errstate(invalid="ignore"):
+            # Setting std_devs to NaN will trigger floating point exceptions
+            # which we can ignore. See https://stackoverflow.com/q/75656026
+            np.testing.assert_array_almost_equal(
+                unp.std_devs(processed),
+                unp.std_devs(expected),
+                decimal=-8,
+            )


 class TestNormalize(QiskitExperimentsTestCase):