From 13473af89c5771527e76e1447e22e65b78e8848b Mon Sep 17 00:00:00 2001 From: Naoki Kanazawa Date: Sat, 31 Jul 2021 00:39:49 +0900 Subject: [PATCH] CurveAnalysis refactoring (#265) Co-authored-by: Christopher J. Wood --- docs/_ext/custom_styles/styles.py | 5 +- .../calibration_management/update_library.py | 57 ++- qiskit_experiments/curve_analysis/__init__.py | 15 +- .../curve_analysis/curve_analysis.py | 371 ++++++++++-------- .../curve_analysis_result_data.py | 65 --- .../curve_analysis/curve_data.py | 81 +++- .../curve_analysis/curve_fit.py | 32 +- qiskit_experiments/curve_analysis/utils.py | 71 ---- .../curve_analysis/visualization.py | 22 +- .../calibration/analysis/drag_analysis.py | 35 +- .../analysis/fine_amplitude_analysis.py | 347 ++++++++-------- .../analysis/oscillation_analysis.py | 42 +- .../library/calibration/rabi.py | 10 + .../characterization/ef_spectroscopy.py | 10 + .../characterization/qubit_spectroscopy.py | 6 +- .../characterization/resonance_analysis.py | 47 +-- .../library/characterization/t1_analysis.py | 2 + .../characterization/t2ramsey_analysis.py | 2 + .../interleaved_rb_analysis.py | 96 ++--- .../randomized_benchmarking/rb_analysis.py | 129 +++--- test/calibration/experiments/test_drag.py | 15 +- .../experiments/test_fine_amplitude.py | 12 +- test/calibration/experiments/test_rabi.py | 9 +- test/calibration/test_update_library.py | 22 +- test/curve_analysis/test_curve_fit.py | 40 +- test/curve_analysis/test_curve_fitting.py | 4 +- test/test_qubit_spectroscopy.py | 37 +- 27 files changed, 761 insertions(+), 823 deletions(-) delete mode 100644 qiskit_experiments/curve_analysis/curve_analysis_result_data.py delete mode 100644 qiskit_experiments/curve_analysis/utils.py diff --git a/docs/_ext/custom_styles/styles.py b/docs/_ext/custom_styles/styles.py index 91d2dfaa3c..5d2db35ce1 100644 --- a/docs/_ext/custom_styles/styles.py +++ b/docs/_ext/custom_styles/styles.py @@ -216,11 +216,14 @@ def _extra_sections(self) -> Dict[str, List[str]]: analysis_option_desc = [] if analysis_class: + default_analysis_options = self._target_cls._default_analysis_options().__dict__ + analysis_docs_config = copy.copy(self._config) analysis_docs_config.napoleon_custom_sections = [("analysis options", "args")] analysis_option = _generate_options_documentation( current_class=analysis_class, method_name="_default_options", + target_args=list(default_analysis_options.keys()), config=analysis_docs_config, indent=self._indent, ) @@ -230,7 +233,7 @@ def _extra_sections(self) -> Dict[str, List[str]]: analysis_option_desc.append("") analysis_option_desc.extend( _format_default_options( - defaults=analysis_class._default_options().__dict__, + defaults=default_analysis_options, indent=self._indent, ) ) diff --git a/qiskit_experiments/calibration_management/update_library.py b/qiskit_experiments/calibration_management/update_library.py index bd87ea1a34..fcaeacf93f 100644 --- a/qiskit_experiments/calibration_management/update_library.py +++ b/qiskit_experiments/calibration_management/update_library.py @@ -20,6 +20,7 @@ from qiskit.circuit import Parameter from qiskit.pulse import ScheduleBlock +from qiskit_experiments.curve_analysis.curve_analysis import PARAMS_ENTRY_PREFIX from qiskit_experiments.framework.experiment_data import ExperimentData from qiskit_experiments.calibration_management.backend_calibrations import BackendCalibrations from qiskit_experiments.calibration_management.calibrations import Calibrations @@ -101,7 +102,7 @@ def update( exp_data: ExperimentData, parameter: str, schedule: 
Optional[Union[ScheduleBlock, str]], - result_index: int = -1, + result_index: Optional[int] = None, group: str = "default", ): """Update the calibrations based on the data. @@ -112,23 +113,27 @@ def update( parameter: The name of the parameter in the calibrations to update. schedule: The ScheduleBlock instance or the name of the instance to which the parameter is attached. - result_index: The result index to use, defaults to -1. + result_index: The result index to use. By default search entry by name. group: The calibrations group to update. Defaults to "default." Raises: CalibrationError: If the analysis result does not contain a frequency variable. """ + if result_index is None: + result = [ + r for r in exp_data.analysis_results() if r.name.startswith(PARAMS_ENTRY_PREFIX) + ][0] + else: + result = exp_data.analysis_results(index=result_index) - result = exp_data.analysis_results(result_index).extra - - if cls.__fit_parameter__ not in result["popt_keys"]: + if cls.__fit_parameter__ not in result.extra["popt_keys"]: raise CalibrationError( f"{cls.__name__} updates from analysis classes " f"which report {cls.__fit_parameter__} in popt." ) param = parameter - value = result["popt"][result["popt_keys"].index(cls.__fit_parameter__)] + value = result.value.value[result.extra["popt_keys"].index(cls.__fit_parameter__)] cls._add_parameter_value( calibrations, exp_data, value, param, schedule=schedule, group=group @@ -146,14 +151,29 @@ def update( cls, calibrations: BackendCalibrations, exp_data: ExperimentData, - parameter: str = BackendCalibrations.__qubit_freq_parameter__, - result_index: int = -1, + result_index: Optional[int] = None, group: str = "default", **options, ): - """Update a qubit frequency from, e.g., QubitSpectroscopy.""" + """Update a qubit frequency from, e.g., QubitSpectroscopy + + The value of the amplitude must be derived from the fit so the base method cannot be used. + + Args: + calibrations: The calibrations to update. + exp_data: The experiment data from which to update. + result_index: The result index to use. By default search entry by name. + group: The calibrations group to update. Defaults to "default." + options: Trailing options. + + """ super().update( - calibrations, exp_data, parameter, schedule=None, result_index=result_index, group=group + calibrations=calibrations, + exp_data=exp_data, + parameter=calibrations.__qubit_freq_parameter__, + schedule=None, + result_index=result_index, + group=group, ) @@ -172,7 +192,7 @@ def update( cls, calibrations: Calibrations, exp_data: ExperimentData, - result_index: int = -1, + result_index: Optional[int] = None, group: str = "default", angles_schedules: List[Tuple[float, str, Union[str, ScheduleBlock]]] = None, **options, @@ -184,7 +204,7 @@ def update( Args: calibrations: The calibrations to update. exp_data: The experiment data from which to update. - result_index: The result index to use, defaults to -1. + result_index: The result index to use. By default search entry by name. group: The calibrations group to update. Defaults to "default." angles_schedules: A list of tuples specifying which angle to update for which pulse schedule. 
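When ``result_index`` is ``None``, the updaters above locate the fit summary by its ``PARAMS_ENTRY_PREFIX`` name and index into ``popt_keys``. A minimal sketch of that lookup, assuming ``exp_data`` is a completed ``ExperimentData``; the helper name below is illustrative and not part of the library::

    from qiskit_experiments.curve_analysis.curve_analysis import PARAMS_ENTRY_PREFIX

    def get_fit_parameter(exp_data, param_name):
        """Illustrative helper: read one fit parameter from the overview entry."""
        # The overview entry is named "@Parameters_<AnalysisClassName>".
        entry = next(
            res
            for res in exp_data.analysis_results()
            if res.name.startswith(PARAMS_ENTRY_PREFIX)
        )
        index = entry.extra["popt_keys"].index(param_name)
        # entry.value is a FitVal whose value / stderr hold the popt / popt_err arrays.
        return entry.value.value[index], entry.value.stderr[index]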
Each tuple is of the form: (angle, parameter_name, @@ -202,11 +222,15 @@ def update( if angles_schedules is None: angles_schedules = [(np.pi, "amp", "xp")] - result = exp_data.analysis_results(result_index).extra + if result_index is None: + result = [ + r for r in exp_data.analysis_results() if r.name.startswith(PARAMS_ENTRY_PREFIX) + ][0] + else: + result = exp_data.analysis_results(index=result_index) if isinstance(exp_data.experiment, Rabi): - freq = result["popt"][result["popt_keys"].index("freq")] - rate = 2 * np.pi * freq + rate = 2 * np.pi * result.value.value[result.extra["popt_keys"].index("freq")] for angle, param, schedule in angles_schedules: qubits = exp_data.data(0)["metadata"]["qubits"] @@ -217,10 +241,9 @@ def update( cls._add_parameter_value(calibrations, exp_data, value, param, schedule, group) elif isinstance(exp_data.experiment, FineAmplitude): - d_theta = result["popt"][result["popt_keys"].index("d_theta")] + d_theta = result.value.value[result.extra["popt_keys"].index("d_theta")] for target_angle, param, schedule in angles_schedules: - qubits = exp_data.data(0)["metadata"]["qubits"] prev_amp = calibrations.get_parameter_value(param, qubits, schedule, group=group) diff --git a/qiskit_experiments/curve_analysis/__init__.py b/qiskit_experiments/curve_analysis/__init__.py index ffddead054..78e8e07790 100644 --- a/qiskit_experiments/curve_analysis/__init__.py +++ b/qiskit_experiments/curve_analysis/__init__.py @@ -23,9 +23,10 @@ :toctree: ../stubs/ CurveAnalysis - CurveAnalysisResultData SeriesDef CurveData + FitData + ParameterRepr Functions ========= @@ -71,18 +72,9 @@ plot_curve_fit plot_errorbar plot_scatter - -Utility -******* -.. autosummary:: - :toctree: ../stubs/ - - get_opt_error - get_opt_value """ from .curve_analysis import CurveAnalysis -from .curve_analysis_result_data import CurveAnalysisResultData -from .curve_data import CurveData, SeriesDef +from .curve_data import CurveData, SeriesDef, FitData, ParameterRepr from .curve_fit import ( curve_fit, multi_curve_fit, @@ -90,6 +82,5 @@ process_multi_curve_data, ) from .visualization import plot_curve_fit, plot_errorbar, plot_scatter -from .utils import get_opt_error, get_opt_value from . import guess from . 
import fit_function diff --git a/qiskit_experiments/curve_analysis/curve_analysis.py b/qiskit_experiments/curve_analysis/curve_analysis.py index 9ae3583db9..241a919b89 100644 --- a/qiskit_experiments/curve_analysis/curve_analysis.py +++ b/qiskit_experiments/curve_analysis/curve_analysis.py @@ -16,30 +16,37 @@ # pylint: disable=invalid-name import dataclasses +import functools import inspect from abc import ABC -import functools from typing import Any, Dict, List, Tuple, Callable, Union, Optional import numpy as np from qiskit.providers.options import Options +from qiskit.providers import Backend -from qiskit_experiments.framework import BaseAnalysis, ExperimentData, AnalysisResultData, FitVal -from qiskit_experiments.data_processing import DataProcessor -from qiskit_experiments.data_processing.exceptions import DataProcessorError -from qiskit_experiments.exceptions import AnalysisError -from qiskit_experiments.matplotlib import pyplot, requires_matplotlib, HAS_MATPLOTLIB -from qiskit_experiments.data_processing.processor_library import get_processor - -from qiskit_experiments.curve_analysis.curve_data import CurveData, SeriesDef -from qiskit_experiments.curve_analysis.curve_analysis_result_data import CurveAnalysisResultData +from qiskit_experiments.curve_analysis.curve_data import ( + CurveData, + SeriesDef, + FitData, + ParameterRepr, +) from qiskit_experiments.curve_analysis.curve_fit import multi_curve_fit from qiskit_experiments.curve_analysis.visualization import ( plot_scatter, plot_errorbar, plot_curve_fit, ) -from qiskit_experiments.curve_analysis.utils import get_opt_value, get_opt_error +from qiskit_experiments.data_processing import DataProcessor +from qiskit_experiments.data_processing.exceptions import DataProcessorError +from qiskit_experiments.data_processing.processor_library import get_processor +from qiskit_experiments.exceptions import AnalysisError +from qiskit_experiments.framework import BaseAnalysis, ExperimentData, AnalysisResultData, FitVal +from qiskit_experiments.matplotlib import pyplot, requires_matplotlib, HAS_MATPLOTLIB + + +PARAMS_ENTRY_PREFIX = "@Parameters_" +DATA_ENTRY_PREFIX = "@Data_" class CurveAnalysis(BaseAnalysis, ABC): @@ -65,6 +72,11 @@ class CurveAnalysis(BaseAnalysis, ABC): - ``plot_fit_uncertainty``: A Boolean signaling whether to plot fit uncertainty for this series in the plot. + - ``__fixed_parameters__``: A list of parameter names fixed during the fitting. + These parameters should be provided in some way. For example, you can provide + them via experiment options or analysis options. Parameter names should be + used in the ``fit_func`` in the series definition. + See the Examples below for more details. @@ -86,7 +98,6 @@ class AnalysisExample(CurveAnalysis): ), ] - **A fitting for two exponential decay curve with partly shared parameter** In this type of experiment, the analysis deals with two curves. @@ -192,6 +203,18 @@ class AnalysisExample(CurveAnalysis): Override :meth:`~self._format_data`. For example, here you can apply smoothing to y values, remove outlier, or apply filter function to the data. + - Create extra data from fit result: + Override :meth:`~self._extra_database_entry`. You need to return a list of + :class:`~qiskit_experiments.framework.analysis_result_data.AnalysisResultData` + object. This returns an empty list by default. + + - Customize fit quality evaluation: + Override :meth:`~self._evaluate_quality`. This value will be shown in the + database. 
You can determine the quality represented by the predefined string + "good" or "bad" based on fit result, + such as parameter uncertainty and reduced chi-squared value. + This returns ``None`` by default. This means evaluation is not performed. + - Customize post-analysis data processing: Override :meth:`~self._post_analysis`. For example, here you can calculate new entity from fit values, such as EPC of RB experiment. @@ -266,6 +289,9 @@ def __init__(self): #: List[CurveData]: Processed experiment data set. self.__processed_data_set = list() + #: Backend: backend object used for experimentation + self.__backend = None + # Add expected options to instance variable so that every method can access to. for key in self._default_options().__dict__: setattr(self, f"__{key}", None) @@ -291,8 +317,13 @@ def _default_options(cls) -> Options: xlabel (str): X label of fit result figure. ylabel (str): Y label of fit result figure. ylim (Tuple[float, float]): Min and max height limit of fit plot. - fit_reports (Dict[str, str]): Mapping of fit parameters and representation - in the fit report. + result_parameters (List[Union[str, ParameterRepr]): Parameters reported in the + database as a dedicated entry. This is a list of parameter representation + which is either string or ParameterRepr object. If you provide more + information other than name, you can specify + ``[ParameterRepr("alpha", "\u03B1", "a.u.")]`` for example. + The parameter name should be defined in the series definition. + Representation should be printable in standard output, i.e. no latex syntax. return_data_points (bool): Set ``True`` to return formatted XY data. """ return Options( @@ -307,12 +338,16 @@ def _default_options(cls) -> Options: xlabel=None, ylabel=None, ylim=None, - fit_reports=None, + result_parameters=None, return_data_points=False, ) @requires_matplotlib - def _create_figures(self, result_data: CurveAnalysisResultData) -> List["Figure"]: + def _create_figures( + self, + fit_data: FitData, + analysis_results: List[AnalysisResultData], + ) -> List["Figure"]: """Create new figures with the fit result and raw data. Subclass can override this method to create different type of figures, but @@ -320,13 +355,12 @@ def _create_figures(self, result_data: CurveAnalysisResultData) -> List["Figure" works with ``DbExperimentData``. Args: - result_data: Result data containing fit parameters. + fit_data: Fit data set. + analysis_results: List of database entries. Returns: List of figures. 
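As a usage note for the ``result_parameters`` option documented above, parameters listed there are promoted to their own database entries, and ``ParameterRepr`` optionally renames them and attaches a unit. A short sketch mirroring the ``rabi.py`` change later in this patch::

    from qiskit.providers.options import Options
    from qiskit_experiments.curve_analysis import ParameterRepr

    # Inside an experiment's _default_analysis_options (as Rabi does below):
    options = Options()
    options.result_parameters = [ParameterRepr("freq", "rabi_rate")]
    # A plain string is also accepted; the entry is then named after the parameter:
    # options.result_parameters = ["freq"]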
""" - fit_available = all(key in result_data for key in ("popt", "popt_err", "xrange")) - axis = self._get_option("axis") if axis is None: figure = pyplot.figure(figsize=(8, 5)) @@ -334,24 +368,20 @@ def _create_figures(self, result_data: CurveAnalysisResultData) -> List["Figure" else: figure = axis.get_figure() - ymin, ymax = np.inf, -np.inf for series_def in self.__series__: - - # plot raw data - curve_data_raw = self._data(series_name=series_def.name, label="raw_data") - ymin = min(ymin, *curve_data_raw.y) - ymax = max(ymax, *curve_data_raw.y) - plot_scatter(xdata=curve_data_raw.x, ydata=curve_data_raw.y, ax=axis, zorder=0) + curve_data_fit = self._data(series_name=series_def.name, label="fit_ready") - # plot formatted data + # plot raw data if data is formatted + if not np.array_equal(curve_data_raw.y, curve_data_fit.y): + plot_scatter(xdata=curve_data_raw.x, ydata=curve_data_raw.y, ax=axis, zorder=0) + # plot formatted data curve_data_fit = self._data(series_name=series_def.name, label="fit_ready") if np.all(np.isnan(curve_data_fit.y_err)): sigma = None else: sigma = np.nan_to_num(curve_data_fit.y_err) - plot_errorbar( xdata=curve_data_fit.x, ydata=curve_data_fit.y, @@ -365,11 +395,10 @@ def _create_figures(self, result_data: CurveAnalysisResultData) -> List["Figure" ) # plot fit curve - - if fit_available: + if fit_data: plot_curve_fit( func=series_def.fit_func, - result=result_data, + result=fit_data, ax=axis, color=series_def.plot_color, zorder=2, @@ -377,7 +406,6 @@ def _create_figures(self, result_data: CurveAnalysisResultData) -> List["Figure" ) # format axis - if len(self.__series__) > 1: axis.legend(loc="center right") axis.set_xlabel(self._get_option("xlabel"), fontsize=16) @@ -388,28 +416,23 @@ def _create_figures(self, result_data: CurveAnalysisResultData) -> List["Figure" # automatic scaling y axis by actual data point. # note that y axis will be scaled by confidence interval by default. # sometimes we cannot see any data point if variance of parameters is too large. 
- - height = ymax - ymin - axis.set_ylim(ymin - 0.1 * height, ymax + 0.1 * height) + height = fit_data.y_range[1] - fit_data.y_range[0] + axis.set_ylim(fit_data.y_range[0] - 0.1 * height, fit_data.y_range[1] + 0.1 * height) # write analysis report + def _format_val(val): + if val < 1e-2 or val > 1e2: + return f"{val: .4e}" + return f"{val: .4f}" - fit_reports = self._get_option("fit_reports") - if fit_reports and fit_available: - # write fit status in the plot + if fit_data and analysis_results: analysis_description = "" - for par_name, label in fit_reports.items(): - try: - # fit value - pval = get_opt_value(result_data, par_name) - perr = get_opt_error(result_data, par_name) - except ValueError: - # maybe post processed value - pval = result_data[par_name] - perr = result_data[f"{par_name}_err"] - analysis_description += f"{label} = {pval: .3e}\u00B1{perr: .3e}\n" - chisq = result_data["reduced_chisq"] - analysis_description += f"Fit \u03C7-squared = {chisq: .4f}" + for res in analysis_results: + if isinstance(res.value, FitVal) and not res.name.startswith(PARAMS_ENTRY_PREFIX): + fitval = res.value + value_repr = f"{_format_val(fitval.value)} \u00B1 {_format_val(fitval.stderr)}" + analysis_description += f"{res.name} = {value_repr}\n" + analysis_description += r"Fit $\chi^2$ = " + f"{fit_data.reduced_chisq: .4f}" report_handler = axis.text( 0.60, @@ -511,18 +534,33 @@ def _format_data(self, data: CurveData) -> CurveData: metadata=data.metadata, ) - def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisResultData: + # pylint: disable=unused-argument + def _extra_database_entry(self, fit_data: FitData) -> List[AnalysisResultData]: """Calculate new quantity from the fit result. Subclasses can override this method to do post analysis. Args: - result_data: Result containing fit result. + fit_data: Fit result. + + Returns: + List of database entry created from the fit data. + """ + return [] + + # pylint: disable=unused-argument + def _evaluate_quality(self, fit_data: FitData) -> Union[str, None]: + """Evaluate quality of the fit result. + + Subclasses can override this method to do post analysis. + + Args: + fit_data: Fit result. Returns: - Updated result data containing the result of post analysis. + String that represents fit result quality. Usually "good" or "bad". """ - return result_data + return None def _extract_curves( self, experiment_data: ExperimentData, data_processor: Union[Callable, DataProcessor] @@ -706,6 +744,11 @@ def _physical_qubits(self) -> List[int]: # Ignore experiment metadata is not set or key is not found return None + @property + def _backend(self) -> Backend: + """Getter for backend object.""" + return self.__backend + def _experiment_options(self, index: int = -1) -> Dict[str, Any]: """Return the experiment options of given job index. @@ -862,11 +905,9 @@ def _run_analysis( is a list of any figures for the experiment. Raises: - AnalysisError: if the analysis fails. + AnalysisError: If the analysis fails. + DataProcessorError: When data processing failed. """ - result_data = CurveAnalysisResultData() - result_data["analysis_type"] = self.__class__.__name__ - figures = list() # # 1. Parse arguments @@ -901,6 +942,12 @@ def _run_analysis( except AttributeError: pass + # get backend + try: + self.__backend = experiment_data.backend + except AttributeError: + pass + try: # # 2. 
Setup data processor @@ -916,7 +963,7 @@ def _run_analysis( try: meas_level = run_options["meas_level"] except KeyError as ex: - raise AnalysisError( + raise DataProcessorError( f"Cannot process data without knowing the measurement level: {str(ex)}." ) from ex @@ -927,37 +974,41 @@ def _run_analysis( if isinstance(data_processor, DataProcessor) and not data_processor.is_trained: # Qiskit DataProcessor instance. May need calibration. - try: - data_processor.train(data=experiment_data.data()) - except DataProcessorError as ex: - raise AnalysisError( - f"DataProcessor calibration failed with error message: {str(ex)}." - ) from ex + data_processor.train(data=experiment_data.data()) # # 3. Extract curve entries from experiment data # - try: - self._extract_curves(experiment_data=experiment_data, data_processor=data_processor) - except DataProcessorError as ex: - raise AnalysisError( - f"Data extraction and formatting failed with error message: {str(ex)}." - ) from ex + self._extract_curves(experiment_data=experiment_data, data_processor=data_processor) # # 4. Run fitting # - try: - curve_fitter = self._get_option("curve_fitter") - formatted_data = self._data(label="fit_ready") - - # Generate fit options - fit_candidates = self._setup_fitting(**extra_options) - - # Fit for each fit parameter combination - if isinstance(fit_candidates, dict): - # Only single initial guess - fit_options = self._format_fit_options(**fit_candidates) + curve_fitter = self._get_option("curve_fitter") + formatted_data = self._data(label="fit_ready") + + # Generate fit options + fit_candidates = self._setup_fitting(**extra_options) + + # Fit for each fit parameter combination + if isinstance(fit_candidates, dict): + # Only single initial guess + fit_options = self._format_fit_options(**fit_candidates) + fit_result = curve_fitter( + funcs=[series_def.fit_func for series_def in self.__series__], + series=formatted_data.data_index, + xdata=formatted_data.x, + ydata=formatted_data.y, + sigma=formatted_data.y_err, + **fit_options, + ) + else: + # Multiple initial guesses + fit_options_candidates = [ + self._format_fit_options(**fit_options) for fit_options in fit_candidates + ] + fit_results = [] + for fit_options in fit_options_candidates: fit_result = curve_fitter( funcs=[series_def.fit_func for series_def in self.__series__], series=formatted_data.data_index, @@ -966,80 +1017,86 @@ def _run_analysis( sigma=formatted_data.y_err, **fit_options, ) - result_data.update(**fit_result) - else: - # Multiple initial guesses - fit_options_candidates = [ - self._format_fit_options(**fit_options) for fit_options in fit_candidates - ] - fit_results = [] - for fit_options in fit_options_candidates: - fit_result = curve_fitter( - funcs=[series_def.fit_func for series_def in self.__series__], - series=formatted_data.data_index, - xdata=formatted_data.x, - ydata=formatted_data.y, - sigma=formatted_data.y_err, - **fit_options, - ) - fit_results.append(fit_result) - if len(fit_results) == 0: - raise AnalysisError( - "All initial guesses and parameter boundaries failed to fit the data. " - "Please provide better initial guesses or fit parameter boundaries." 
- ) - # Sort by chi squared value - fit_results = sorted(fit_results, key=lambda r: r["reduced_chisq"]) - result_data.update(**fit_results[0]) - - result_data["success"] = True - - except AnalysisError as ex: - result_data["error_message"] = str(ex) - result_data["success"] = False + fit_results.append(fit_result) + if len(fit_results) == 0: + raise AnalysisError( + "All initial guesses and parameter boundaries failed to fit the data. " + "Please provide better initial guesses or fit parameter boundaries." + ) + # Sort by chi squared value + fit_result = sorted(fit_results, key=lambda r: r.reduced_chisq)[0] - else: - # - # 5. Post-process analysis data - # - result_data = self._post_analysis(result_data=result_data) - - finally: - # - # 6. Create figures - # - if self._get_option("plot") and HAS_MATPLOTLIB: - figures.extend(self._create_figures(result_data=result_data)) - - # - # 7. Optionally store raw data points - # - if self._get_option("return_data_points"): - raw_data_dict = dict() - for series_def in self.__series__: - series_data = self._data(series_name=series_def.name, label="raw_data") - raw_data_dict[series_def.name] = { - "xdata": series_data.x, - "ydata": series_data.y, - "sigma": series_data.y_err, - } - result_data["raw_data"] = raw_data_dict - - except AnalysisError as ex: - result_data["error_message"] = str(ex) - result_data["success"] = False - - # Convert to AnalysisResult - name = result_data.pop("analysis_type") - value = FitVal(result_data.get("popt"), result_data.get("popt_err")) - chisq = result_data.pop("chisq", None) or result_data.pop("reduced_chisq", None) - quality = result_data.pop("quality", None) - analysis_result = AnalysisResultData( - name=name, - value=value, - chisq=chisq, - quality=quality, - extra=result_data, - ) + except AnalysisError: + fit_result = None + + # + # 5. 
Create database entry + # + analysis_results = [] + if fit_result: + # pylint: disable=assignment-from-none + quality = self._evaluate_quality(fit_data=fit_result) + + # overview entry + analysis_results.append( + AnalysisResultData( + name=PARAMS_ENTRY_PREFIX + self.__class__.__name__, + value=FitVal(fit_result.popt, fit_result.popt_err), + chisq=fit_result.reduced_chisq, + quality=quality, + extra={ + "popt_keys": fit_result.popt_keys, + "dof": fit_result.dof, + "covariance_mat": fit_result.pcov, + }, + ) + ) + + # output special parameters + result_parameters = self._get_option("result_parameters") + if result_parameters: + for param_repr in result_parameters: + if isinstance(param_repr, ParameterRepr): + p_name = param_repr.name + p_repr = param_repr.repr or param_repr.name + unit = param_repr.unit + else: + p_name = param_repr + p_repr = param_repr + unit = None + result_entry = AnalysisResultData( + name=p_repr, + value=fit_result.fitval(p_name, unit), + chisq=fit_result.reduced_chisq, + quality=quality, + ) + analysis_results.append(result_entry) + + # add extra database entries + analysis_results.extend(self._extra_database_entry(fit_result)) + + if self._get_option("return_data_points"): + # save raw data points in the data base if option is set (default to false) + raw_data_dict = dict() + for series_def in self.__series__: + series_data = self._data(series_name=series_def.name, label="raw_data") + raw_data_dict[series_def.name] = { + "xdata": series_data.x, + "ydata": series_data.y, + "sigma": series_data.y_err, + } + raw_data_entry = AnalysisResultData( + name=DATA_ENTRY_PREFIX + self.__class__.__name__, + value=raw_data_dict, + ) + analysis_results.append(raw_data_entry) + + # + # 6. Create figures + # + if self._get_option("plot") and HAS_MATPLOTLIB: + figures = self._create_figures(fit_data=fit_result, analysis_results=analysis_results) + else: + figures = [] - return [analysis_result], figures + return analysis_results, figures diff --git a/qiskit_experiments/curve_analysis/curve_analysis_result_data.py b/qiskit_experiments/curve_analysis/curve_analysis_result_data.py deleted file mode 100644 index 8ddba90288..0000000000 --- a/qiskit_experiments/curve_analysis/curve_analysis_result_data.py +++ /dev/null @@ -1,65 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2021. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. -""" -Curve analysis result data class. -""" - - -class CurveAnalysisResultData(dict): - """Analysis data container for curve fit analysis. - - Class Attributes: - __keys_not_shown__: Data keys of analysis result which are not directly shown - in `__str__` method. By default, `pcov` (covariance matrix), - `raw_data` (raw x, y, sigma data points), `popt`, `popt_keys`, and `popt_err` - are not displayed. Fit parameters (popt) are formatted to - - .. code-block:: - - p0 = 1.2 ± 0.34 - p1 = 5.6 ± 0.78 - - rather showing raw key-value pairs - - .. 
code-block:: - - popt_keys = ["p0", "p1"] - popt = [1.2, 5.6] - popt_err = [0.34, 0.78] - - The covariance matrix and raw data points are not shown because they output - very long string usually doesn't fit in with the summary of the analysis object, - i.e. user wants to quickly get the over view of fit values and goodness of fit, - such as the chi-squared value and computer evaluated quality. - - However these non-displayed values are still kept and user can access to - these values with `result["raw_data"]` and `result["pcov"]` if necessary. - """ - - __keys_not_shown__ = "pcov", "raw_data", "popt", "popt_keys", "popt_err" - - def __str__(self): - out = "" - - if self.get("success"): - popt_keys = self.get("popt_keys") - popt = self.get("popt") - popt_err = self.get("popt_err") - - for key, value, error in zip(popt_keys, popt, popt_err): - out += f"\n - {key}: {value} \u00B1 {error}" - - for key, value in self.items(): - if key in self.__keys_not_shown__: - continue - out += f"\n- {key}: {value}" - return out diff --git a/qiskit_experiments/curve_analysis/curve_data.py b/qiskit_experiments/curve_analysis/curve_data.py index d113995c55..b729ad80b6 100644 --- a/qiskit_experiments/curve_analysis/curve_data.py +++ b/qiskit_experiments/curve_analysis/curve_data.py @@ -15,9 +15,11 @@ """ import dataclasses -from typing import Any, Dict, Callable, Union +from typing import Any, Dict, Callable, Union, List, Tuple, Optional import numpy as np +from qiskit_experiments.framework import FitVal + @dataclasses.dataclass(frozen=True) class SeriesDef: @@ -46,9 +48,86 @@ class SeriesDef: class CurveData: """Set of extracted experiment data.""" + # Name of this data set label: str + + # X data x: np.ndarray + + # Y data (measured data) y: np.ndarray + + # Error bar y_err: np.ndarray + + # Maping of data index to series index data_index: Union[np.ndarray, int] + + # Metadata associated with each data point. Generated from the circuit metadata. metadata: np.ndarray = None + + +@dataclasses.dataclass(frozen=True) +class FitData: + """Set of data generated by the fit function.""" + + # Order sensitive fit parameter values + popt: np.ndarray + + # Order sensitive parameter name list + popt_keys: List[str] + + # Order sensitive fit parameter uncertainty + popt_err: np.ndarray + + # Covariance matrix + pcov: np.ndarray + + # Reduced Chi-squared value of fit curve + reduced_chisq: float + + # Degree of freedom + dof: int + + # X data range + x_range: Tuple[float, float] + + # Y data range + y_range: Tuple[float, float] + + def fitval(self, key: str, unit: Optional[str] = None) -> FitVal: + """A helper method to get fit value object from parameter key name. + + Args: + key: Name of parameters to extract. + unit: Optional. Unit of this value. + + Returns: + FitVal object. + + Raises: + ValueError: When specified parameter is not defined. 
+ """ + try: + index = self.popt_keys.index(key) + return FitVal( + value=self.popt[index], + stderr=self.popt_err[index], + unit=unit, + ) + except ValueError as ex: + raise ValueError(f"Parameter {key} is not defined.") from ex + + +@dataclasses.dataclass +class ParameterRepr: + """Detailed description of fitting parameter.""" + + # Fitter argument name + name: str + + # Unicode representation + repr: Optional[str] = None + + # Unit + unit: Optional[str] = None diff --git a/qiskit_experiments/curve_analysis/curve_fit.py b/qiskit_experiments/curve_analysis/curve_fit.py index 13045c21e2..7021b07311 100644 --- a/qiskit_experiments/curve_analysis/curve_fit.py +++ b/qiskit_experiments/curve_analysis/curve_fit.py @@ -20,7 +20,7 @@ import scipy.optimize as opt from qiskit_experiments.exceptions import AnalysisError from qiskit_experiments.curve_analysis.data_processing import filter_data -from qiskit_experiments.curve_analysis.curve_analysis_result_data import CurveAnalysisResultData +from qiskit_experiments.curve_analysis.curve_data import FitData def curve_fit( @@ -31,7 +31,7 @@ def curve_fit( sigma: Optional[np.ndarray] = None, bounds: Optional[Union[Dict[str, Tuple[float, float]], Tuple[np.ndarray, np.ndarray]]] = None, **kwargs, -) -> CurveAnalysisResultData: +) -> FitData: r"""Perform a non-linear least squares to fit This solves the optimization problem @@ -142,20 +142,20 @@ def fit_func(x, *params): residues = residues / (sigma ** 2) reduced_chisq = np.sum(residues) / dof - # Compute xdata range for fit - xdata_range = [min(xdata), max(xdata)] + # Compute data range for fit + xdata_range = np.min(xdata), np.max(xdata) + ydata_range = np.min(ydata), np.max(ydata) - result = { - "popt": popt, - "popt_keys": param_keys, - "popt_err": popt_err, - "pcov": pcov, - "reduced_chisq": reduced_chisq, - "dof": dof, - "xrange": xdata_range, - } - - return CurveAnalysisResultData(result) + return FitData( + popt=popt, + popt_keys=param_keys, + popt_err=popt_err, + pcov=pcov, + reduced_chisq=reduced_chisq, + dof=dof, + x_range=xdata_range, + y_range=ydata_range, + ) def multi_curve_fit( @@ -168,7 +168,7 @@ def multi_curve_fit( weights: Optional[np.ndarray] = None, bounds: Optional[Union[Dict[str, Tuple[float, float]], Tuple[np.ndarray, np.ndarray]]] = None, **kwargs, -) -> CurveAnalysisResultData: +) -> FitData: r"""Perform a linearized multi-objective non-linear least squares fit. This solves the optimization problem diff --git a/qiskit_experiments/curve_analysis/utils.py b/qiskit_experiments/curve_analysis/utils.py deleted file mode 100644 index 16d2d8d304..0000000000 --- a/qiskit_experiments/curve_analysis/utils.py +++ /dev/null @@ -1,71 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2021. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -"""Analysis utility functions.""" - -from typing import Dict - - -def get_opt_value(result_data: Dict, param_name: str) -> float: - """A helper function to get parameter value from a result dictionary. - - Args: - result_data: Result data. - param_name: Name of parameter to extract. - - Returns: - Parameter value. 
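Since ``curve_fit`` and ``multi_curve_fit`` now return a ``FitData`` dataclass instead of a result dictionary, the ``get_opt_value``/``get_opt_error`` helpers deleted here are replaced by attribute access and ``FitData.fitval``. A sketch of the new access pattern, assuming ``fit_data`` was returned by ``curve_fit`` and the fit model defines a parameter named ``tau``::

    # Previously: tau = get_opt_value(result_data, "tau")
    #             tau_err = get_opt_error(result_data, "tau")
    tau = fit_data.fitval("tau", unit="s")  # FitVal with value, stderr and unit
    chi2 = fit_data.reduced_chisq           # goodness of fit
    xmin, xmax = fit_data.x_range           # data range, also used for plotting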
- - Raises: - KeyError: - - When the result does not contain parameter information. - ValueError: - - When specified parameter is not defined. - """ - try: - index = result_data["popt_keys"].index(param_name) - return result_data["popt"][index] - except KeyError as ex: - raise KeyError( - "Input result has not fit parameter information. " - "Please confirm if the fit is successfully completed." - ) from ex - except ValueError as ex: - raise ValueError(f"Parameter {param_name} is not defined.") from ex - - -def get_opt_error(result_data: Dict, param_name: str) -> float: - """A helper function to get error value from analysis result. - - Args: - result_data: Result data. - param_name: Name of parameter to extract. - - Returns: - Parameter error value. - - Raises: - KeyError: - - When the result does not contain parameter information. - ValueError: - - When specified parameter is not defined. - """ - try: - index = result_data["popt_keys"].index(param_name) - return result_data["popt_err"][index] - except KeyError as ex: - raise KeyError( - "Input result has not fit parameter information. " - "Please confirm if the fit is successfully completed." - ) from ex - except ValueError as ex: - raise ValueError(f"Parameter {param_name} is not defined.") from ex diff --git a/qiskit_experiments/curve_analysis/visualization.py b/qiskit_experiments/curve_analysis/visualization.py index 9d04f09180..6eaa69edbd 100644 --- a/qiskit_experiments/curve_analysis/visualization.py +++ b/qiskit_experiments/curve_analysis/visualization.py @@ -12,10 +12,11 @@ """ Plotting functions for experiment analysis """ -from typing import Callable, Optional, Dict +from typing import Callable, Optional import numpy as np from qiskit_experiments.matplotlib import pyplot, requires_matplotlib +from qiskit_experiments.curve_analysis.curve_data import FitData # pylint: disable = unused-import from qiskit_experiments.matplotlib import HAS_MATPLOTLIB @@ -24,7 +25,7 @@ @requires_matplotlib def plot_curve_fit( func: Callable, - result: Dict, + result: FitData, fit_uncertainty: bool = False, ax=None, num_fit_points: int = 100, @@ -38,7 +39,7 @@ def plot_curve_fit( Args: func: the fit function for curve_fit. - result: a result dictionary from curve_fit. + result: a fitting data set. fit_uncertainty: if True plot the fit uncertainty from popt_err. ax (matplotlib.axes.Axes): Optional, a matplotlib axes to add the plot to. num_fit_points: the number of points to plot for xrange. 
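With ``plot_curve_fit`` now taking a ``FitData`` as its ``result`` argument, a minimal plotting sketch; ``fit_func``, ``fit_data``, ``xdata`` and ``ydata`` are assumed to come from an earlier fit::

    import matplotlib.pyplot as plt
    from qiskit_experiments.curve_analysis import plot_curve_fit, plot_scatter

    figure = plt.figure(figsize=(8, 5))
    axis = figure.add_subplot(111)
    plot_scatter(xdata=xdata, ydata=ydata, ax=axis, zorder=0)
    plot_curve_fit(func=fit_func, result=fit_data, ax=axis, fit_uncertainty=True)
    axis.set_xlabel("x value")
    axis.set_ylabel("Signal (arb. units)")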
@@ -66,10 +67,17 @@ def plot_curve_fit( plot_opts["linewidth"] = 2 # Result data - fit_params = result["popt"] - param_keys = result.get("popt_keys") - fit_errors = result.get("popt_err") - xmin, xmax = result["xrange"] + if isinstance(result, dict): + # TODO: remove this after T1 T2 migration to curve analysis + fit_params = result["popt"] + param_keys = result["popt_keys"] + fit_errors = result["popt_err"] + xmin, xmax = result["x_range"] + else: + fit_params = result.popt + param_keys = result.popt_keys + fit_errors = result.popt_err + xmin, xmax = result.x_range # Plot fit data xs = np.linspace(xmin, xmax, num_fit_points) diff --git a/qiskit_experiments/library/calibration/analysis/drag_analysis.py b/qiskit_experiments/library/calibration/analysis/drag_analysis.py index 0c0c36aeb6..2860a2254f 100644 --- a/qiskit_experiments/library/calibration/analysis/drag_analysis.py +++ b/qiskit_experiments/library/calibration/analysis/drag_analysis.py @@ -15,17 +15,11 @@ from typing import Any, Dict, List, Union import numpy as np -from qiskit_experiments.curve_analysis import ( - CurveAnalysis, - CurveAnalysisResultData, - SeriesDef, - get_opt_value, - get_opt_error, -) +import qiskit_experiments.curve_analysis as curve from qiskit_experiments.curve_analysis.fit_function import cos -class DragCalAnalysis(CurveAnalysis): +class DragCalAnalysis(curve.CurveAnalysis): r"""Drag calibration analysis based on a fit to a cosine function. # section: fit_model @@ -62,7 +56,7 @@ class DragCalAnalysis(CurveAnalysis): """ __series__ = [ - SeriesDef( + curve.SeriesDef( fit_func=lambda x, amp, freq0, freq1, freq2, beta, base: cos( x, amp=amp, freq=freq0, phase=-2 * np.pi * freq0 * beta, baseline=base ), @@ -71,7 +65,7 @@ class DragCalAnalysis(CurveAnalysis): filter_kwargs={"series": 0}, plot_symbol="o", ), - SeriesDef( + curve.SeriesDef( fit_func=lambda x, amp, freq0, freq1, freq2, beta, base: cos( x, amp=amp, freq=freq1, phase=-2 * np.pi * freq1 * beta, baseline=base ), @@ -80,7 +74,7 @@ class DragCalAnalysis(CurveAnalysis): filter_kwargs={"series": 1}, plot_symbol="^", ), - SeriesDef( + curve.SeriesDef( fit_func=lambda x, amp, freq0, freq1, freq2, beta, base: cos( x, amp=amp, freq=freq2, phase=-2 * np.pi * freq2 * beta, baseline=base ), @@ -115,7 +109,7 @@ def _default_options(cls): "beta": None, "base": None, } - default_options.fit_reports = {"beta": "beta"} + default_options.result_parameters = ["beta"] default_options.xlabel = "Beta" default_options.ylabel = "Signal (arb. units)" @@ -187,7 +181,7 @@ def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any] return fit_options - def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisResultData: + def _evaluate_quality(self, fit_data: curve.FitData) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: @@ -195,20 +189,17 @@ def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisR - a DRAG parameter value within the first period of the lowest number of repetitions, - an error on the drag beta smaller than the beta. 
""" - - fit_beta = get_opt_value(result_data, "beta") - fit_freq0 = get_opt_value(result_data, "freq0") - fit_beta_err = get_opt_error(result_data, "beta") + fit_beta = fit_data.fitval("beta").value + fit_beta_err = fit_data.fitval("beta").stderr + fit_freq0 = fit_data.fitval("freq0").value criteria = [ - result_data["reduced_chisq"] < 3, + fit_data.reduced_chisq < 3, fit_beta < 1 / fit_freq0, fit_beta_err < abs(fit_beta), ] if all(criteria): - result_data["quality"] = "good" - else: - result_data["quality"] = "bad" + return "good" - return result_data + return "bad" diff --git a/qiskit_experiments/library/calibration/analysis/fine_amplitude_analysis.py b/qiskit_experiments/library/calibration/analysis/fine_amplitude_analysis.py index ca7e224a96..c88075232a 100644 --- a/qiskit_experiments/library/calibration/analysis/fine_amplitude_analysis.py +++ b/qiskit_experiments/library/calibration/analysis/fine_amplitude_analysis.py @@ -1,177 +1,170 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2021. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -"""Fine amplitude calibration analysis.""" - -from typing import Any, Dict, List, Union -import numpy as np - -from qiskit_experiments.exceptions import CalibrationError -from qiskit_experiments.curve_analysis import ( - CurveAnalysis, - CurveAnalysisResultData, - SeriesDef, - fit_function, - get_opt_value, -) - - -class FineAmplitudeAnalysis(CurveAnalysis): - r"""Fine amplitude analysis class based on a fit to a cosine function. - - # section: fit_model - - Analyse a fine amplitude calibration experiment by fitting the data to a cosine function. - The user must also specify the intended rotation angle per gate, here labeled, - :math:`{\rm apg}`. The parameter of interest in the - fit is the deviation from the intended rotation angle per gate labeled - :math:`{\rm d}\theta`. The fit function is - - .. math:: - y = \frac{{\rm amp}}{2}\cos\left(x[{\rm d}\theta + {\rm apg} ] \ - +{\rm phase\_offset}\right)+{\rm base} - - # section: fit_parameters - defpar \rm amp: - desc: Amplitude of the oscillation. - init_guess: The maximum y value less the minimum y value. - bounds: [-2, 2] scaled to the maximum signal value. - - defpar \rm base: - desc: Base line. - init_guess: The average of the data. - bounds: [-1, 1] scaled to the maximum signal value. - - defpar d\theta: - desc: The angle offset in the gate that we wish to measure. - init_guess: Multiple initial guesses are tried ranging from -a to a - where a is given by :code:`max(abs(angle_per_gate), np.pi / 2)`. - bounds: [-pi, pi]. - - # section: note - - The following is a list of fixed-valued parameters that enter the fit. - - * :math:`{\rm apg}` The angle per gate is set by the user, for example pi for a pi-pulse. - * :math:`{\rm phase\_offset}` The phase offset in the cosine oscillation, for example, - :math:`\pi/2` if a square-root of X gate is added before the repeated gates. 
- """ - - __series__ = [ - SeriesDef( - fit_func=lambda x, amp, d_theta, phase_offset, base, angle_per_gate: fit_function.cos( - x, - amp=0.5 * amp, - freq=(d_theta + angle_per_gate) / (2 * np.pi), - phase=phase_offset, - baseline=base, - ), - plot_color="blue", - ) - ] - - # The intended angle per gat of the gate being calibrated, e.g. pi for a pi-pulse. - __fixed_parameters__ = ["angle_per_gate", "phase_offset"] - - @classmethod - def _default_options(cls): - r"""Return the default analysis options. - - See :meth:`~qiskit_experiment.curve_analysis.CurveAnalysis._default_options` for - descriptions of analysis options. - - Analysis Options: - angle_per_gate (float): The ideal angle per repeated gate. - The user must set this option as it defaults to None. - phase_offset (float): A phase offset for the analysis. This phase offset will be - :math:`\pi/2` if the square-root of X gate is added before the repeated gates. - This is decided for the user in :meth:`set_schedule` depending on whether the - sx gate is included in the experiment. - number_of_guesses (int): The number of initial guesses to try. - max_good_angle_error (float): The maximum angle error for which the fit is - considered as good. Defaults to :math:`\pi/2`. - """ - default_options = super()._default_options() - default_options.p0 = {"amp": None, "d_theta": None, "phase": None, "base": None} - default_options.bounds = {"amp": None, "d_theta": None, "phase": None, "base": None} - default_options.fit_reports = {"d_theta": "d_theta"} - default_options.xlabel = "Number of gates (n)" - default_options.ylabel = "Population" - default_options.angle_per_gate = None - default_options.phase_offset = 0.0 - default_options.number_guesses = 21 - default_options.max_good_angle_error = np.pi / 2 - - return default_options - - def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any]]]: - """Fitter options.""" - user_p0 = self._get_option("p0") - user_bounds = self._get_option("bounds") - n_guesses = self._get_option("number_guesses") - - max_y, min_y = np.max(self._data().y), np.min(self._data().y) - b_guess = (max_y + min_y) / 2 - a_guess = max_y - min_y - - max_abs_y = np.max(np.abs(self._data().y)) - - # Base the initial guess on the intended angle_per_gate. - angle_per_gate = self._get_option("angle_per_gate") - - if angle_per_gate is None: - raise CalibrationError("The angle_per_gate was not specified in the analysis options.") - - guess_range = max(abs(angle_per_gate), np.pi / 2) - - fit_options = [] - - for angle in np.linspace(-guess_range, guess_range, n_guesses): - fit_option = { - "p0": { - "amp": user_p0["amp"] or a_guess, - "d_theta": angle, - "base": b_guess, - }, - "bounds": { - "amp": user_bounds.get("amp", None) or (-2 * max_abs_y, 2 * max_abs_y), - "d_theta": user_bounds.get("d_theta", None) or (-np.pi, np.pi), - "base": user_bounds.get("d_theta", None) or (-1 * max_abs_y, 1 * max_abs_y), - }, - } - - fit_options.append(fit_option) - - return fit_options - - def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisResultData: - """Algorithmic criteria for whether the fit is good or bad. - - A good fit has: - - a reduced chi-squared lower than three, - - a measured angle error that is smaller than the allowed maximum good angle error. - This quantity is set in the analysis options. 
- """ - fit_d_theta = get_opt_value(result_data, "d_theta") - max_good_angle_error = self._get_option("max_good_angle_error") - - criteria = [ - result_data["reduced_chisq"] < 3, - abs(fit_d_theta) < abs(max_good_angle_error), - ] - - if all(criteria): - result_data["quality"] = "good" - else: - result_data["quality"] = "bad" - - return result_data +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Fine amplitude calibration analysis.""" + +from typing import Any, Dict, List, Union +import numpy as np + +from qiskit_experiments.exceptions import CalibrationError +import qiskit_experiments.curve_analysis as curve + + +class FineAmplitudeAnalysis(curve.CurveAnalysis): + r"""Fine amplitude analysis class based on a fit to a cosine function. + + # section: fit_model + + Analyse a fine amplitude calibration experiment by fitting the data to a cosine function. + The user must also specify the intended rotation angle per gate, here labeled, + :math:`{\rm apg}`. The parameter of interest in the + fit is the deviation from the intended rotation angle per gate labeled + :math:`{\rm d}\theta`. The fit function is + + .. math:: + y = \frac{{\rm amp}}{2}\cos\left(x[{\rm d}\theta + {\rm apg} ] \ + +{\rm phase\_offset}\right)+{\rm base} + + # section: fit_parameters + defpar \rm amp: + desc: Amplitude of the oscillation. + init_guess: The maximum y value less the minimum y value. + bounds: [-2, 2] scaled to the maximum signal value. + + defpar \rm base: + desc: Base line. + init_guess: The average of the data. + bounds: [-1, 1] scaled to the maximum signal value. + + defpar d\theta: + desc: The angle offset in the gate that we wish to measure. + init_guess: Multiple initial guesses are tried ranging from -a to a + where a is given by :code:`max(abs(angle_per_gate), np.pi / 2)`. + bounds: [-pi, pi]. + + # section: note + + The following is a list of fixed-valued parameters that enter the fit. + + * :math:`{\rm apg}` The angle per gate is set by the user, for example pi for a pi-pulse. + * :math:`{\rm phase\_offset}` The phase offset in the cosine oscillation, for example, + :math:`\pi/2` if a square-root of X gate is added before the repeated gates. + """ + + __series__ = [ + curve.SeriesDef( + # pylint: disable=line-too-long + fit_func=lambda x, amp, d_theta, phase_offset, base, angle_per_gate: curve.fit_function.cos( + x, + amp=0.5 * amp, + freq=(d_theta + angle_per_gate) / (2 * np.pi), + phase=phase_offset, + baseline=base, + ), + plot_color="blue", + ) + ] + + # The intended angle per gat of the gate being calibrated, e.g. pi for a pi-pulse. + __fixed_parameters__ = ["angle_per_gate", "phase_offset"] + + @classmethod + def _default_options(cls): + r"""Return the default analysis options. + + See :meth:`~qiskit_experiment.curve_analysis.CurveAnalysis._default_options` for + descriptions of analysis options. + + Analysis Options: + angle_per_gate (float): The ideal angle per repeated gate. + The user must set this option as it defaults to None. + phase_offset (float): A phase offset for the analysis. 
This phase offset will be + :math:`\pi/2` if the square-root of X gate is added before the repeated gates. + This is decided for the user in :meth:`set_schedule` depending on whether the + sx gate is included in the experiment. + number_of_guesses (int): The number of initial guesses to try. + max_good_angle_error (float): The maximum angle error for which the fit is + considered as good. Defaults to :math:`\pi/2`. + """ + default_options = super()._default_options() + default_options.p0 = {"amp": None, "d_theta": None, "phase": None, "base": None} + default_options.bounds = {"amp": None, "d_theta": None, "phase": None, "base": None} + default_options.result_parameters = ["d_theta"] + default_options.xlabel = "Number of gates (n)" + default_options.ylabel = "Population" + default_options.angle_per_gate = None + default_options.phase_offset = 0.0 + default_options.number_guesses = 21 + default_options.max_good_angle_error = np.pi / 2 + + return default_options + + def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any]]]: + """Fitter options.""" + user_p0 = self._get_option("p0") + user_bounds = self._get_option("bounds") + n_guesses = self._get_option("number_guesses") + + max_y, min_y = np.max(self._data().y), np.min(self._data().y) + b_guess = (max_y + min_y) / 2 + a_guess = max_y - min_y + + max_abs_y = np.max(np.abs(self._data().y)) + + # Base the initial guess on the intended angle_per_gate. + angle_per_gate = self._get_option("angle_per_gate") + + if angle_per_gate is None: + raise CalibrationError("The angle_per_gate was not specified in the analysis options.") + + guess_range = max(abs(angle_per_gate), np.pi / 2) + + fit_options = [] + + for angle in np.linspace(-guess_range, guess_range, n_guesses): + fit_option = { + "p0": { + "amp": user_p0["amp"] or a_guess, + "d_theta": angle, + "base": b_guess, + }, + "bounds": { + "amp": user_bounds.get("amp", None) or (-2 * max_abs_y, 2 * max_abs_y), + "d_theta": user_bounds.get("d_theta", None) or (-np.pi, np.pi), + "base": user_bounds.get("d_theta", None) or (-1 * max_abs_y, 1 * max_abs_y), + }, + } + + fit_options.append(fit_option) + + return fit_options + + def _evaluate_quality(self, fit_data: curve.FitData) -> Union[str, None]: + """Algorithmic criteria for whether the fit is good or bad. + + A good fit has: + - a reduced chi-squared lower than three, + - a measured angle error that is smaller than the allowed maximum good angle error. + This quantity is set in the analysis options. 
+ """ + fit_d_theta = fit_data.fitval("d_theta").value + max_good_angle_error = self._get_option("max_good_angle_error") + + criteria = [ + fit_data.reduced_chisq < 3, + abs(fit_d_theta) < abs(max_good_angle_error), + ] + + if all(criteria): + return "good" + + return "bad" diff --git a/qiskit_experiments/library/calibration/analysis/oscillation_analysis.py b/qiskit_experiments/library/calibration/analysis/oscillation_analysis.py index 422d3e4626..b9893cd6c3 100644 --- a/qiskit_experiments/library/calibration/analysis/oscillation_analysis.py +++ b/qiskit_experiments/library/calibration/analysis/oscillation_analysis.py @@ -15,18 +15,10 @@ from typing import Any, Dict, List, Union import numpy as np -from qiskit_experiments.curve_analysis import ( - CurveAnalysis, - CurveAnalysisResultData, - SeriesDef, - fit_function, - guess, - get_opt_value, - get_opt_error, -) - - -class OscillationAnalysis(CurveAnalysis): +import qiskit_experiments.curve_analysis as curve + + +class OscillationAnalysis(curve.CurveAnalysis): r"""Oscillation analysis class based on a fit of the data to a cosine function. # section: fit_model @@ -62,8 +54,8 @@ class OscillationAnalysis(CurveAnalysis): """ __series__ = [ - SeriesDef( - fit_func=lambda x, amp, freq, phase, base: fit_function.cos( + curve.SeriesDef( + fit_func=lambda x, amp, freq, phase, base: curve.fit_function.cos( x, amp=amp, freq=freq, phase=phase, baseline=base ), plot_color="blue", @@ -80,7 +72,7 @@ def _default_options(cls): default_options = super()._default_options() default_options.p0 = {"amp": None, "freq": None, "phase": None, "base": None} default_options.bounds = {"amp": None, "freq": None, "phase": None, "base": None} - default_options.fit_reports = {"freq": "rate"} + default_options.result_parameters = ["freq"] default_options.xlabel = "Amplitude" default_options.ylabel = "Signal (arb. units)" @@ -95,9 +87,9 @@ def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any] max_abs_y = np.max(np.abs(curve_data.y)) - f_guess = guess.frequency(curve_data.x, curve_data.y) - b_guess = guess.constant_sinusoidal_offset(curve_data.y) - a_guess, _ = guess.max_height(curve_data.y - b_guess, absolute=True) + f_guess = curve.guess.frequency(curve_data.x, curve_data.y) + b_guess = curve.guess.constant_sinusoidal_offset(curve_data.y) + a_guess, _ = curve.guess.max_height(curve_data.y - b_guess, absolute=True) if user_p0["phase"] is not None: p_guesses = [user_p0["phase"]] @@ -125,7 +117,7 @@ def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any] return fit_options - def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisResultData: + def _evaluate_quality(self, fit_data: curve.FitData) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: @@ -134,18 +126,16 @@ def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisR - less than 10 full periods, and - an error on the fit frequency lower than the fit frequency. 
""" - fit_freq = get_opt_value(result_data, "freq") - fit_freq_err = get_opt_error(result_data, "freq") + fit_freq = fit_data.fitval("freq").value + fit_freq_err = fit_data.fitval("freq").stderr criteria = [ - result_data["reduced_chisq"] < 3, + fit_data.reduced_chisq < 3, 1.0 / 4.0 < fit_freq < 10.0, (fit_freq_err is None or (fit_freq_err < fit_freq)), ] if all(criteria): - result_data["quality"] = "good" - else: - result_data["quality"] = "bad" + return "good" - return result_data + return "bad" diff --git a/qiskit_experiments/library/calibration/rabi.py b/qiskit_experiments/library/calibration/rabi.py index 4b28195306..e78347ebce 100644 --- a/qiskit_experiments/library/calibration/rabi.py +++ b/qiskit_experiments/library/calibration/rabi.py @@ -23,6 +23,7 @@ from qiskit.providers.options import Options from qiskit_experiments.framework import BaseExperiment +from qiskit_experiments.curve_analysis import ParameterRepr from qiskit_experiments.library.calibration.analysis.oscillation_analysis import OscillationAnalysis from qiskit_experiments.exceptions import CalibrationError @@ -94,6 +95,7 @@ def _default_experiment_options(cls) -> Options: def _default_analysis_options(cls) -> Options: """Default analysis options.""" options = super()._default_analysis_options() + options.result_parameters = [ParameterRepr("freq", "rabi_rate")] options.normalization = True return options @@ -243,6 +245,14 @@ def _default_experiment_options(cls) -> Options: return options + @classmethod + def _default_analysis_options(cls) -> Options: + """Default analysis options.""" + options = super()._default_analysis_options() + options.result_parameters = [ParameterRepr("freq", "rabi_rate_12")] + + return options + def _default_gate_schedule(self, backend: Optional[Backend] = None): """Create the default schedule for the EFRabi gate with a frequency shift to the 1-2 transition.""" diff --git a/qiskit_experiments/library/characterization/ef_spectroscopy.py b/qiskit_experiments/library/characterization/ef_spectroscopy.py index 47134c14c1..c0baa9fad5 100644 --- a/qiskit_experiments/library/characterization/ef_spectroscopy.py +++ b/qiskit_experiments/library/characterization/ef_spectroscopy.py @@ -14,7 +14,9 @@ from qiskit import QuantumCircuit from qiskit.circuit import Gate +from qiskit.providers.options import Options +from qiskit_experiments.curve_analysis import ParameterRepr from qiskit_experiments.library.characterization.qubit_spectroscopy import QubitSpectroscopy @@ -33,6 +35,14 @@ class EFSpectroscopy(QubitSpectroscopy): """ + @classmethod + def _default_analysis_options(cls) -> Options: + """Default analysis options.""" + options = super()._default_analysis_options() + options.result_parameters = [ParameterRepr("freq", "f12", "Hz")] + + return options + def _template_circuit(self, freq_param) -> QuantumCircuit: """Return the template quantum circuit.""" circuit = QuantumCircuit(1) diff --git a/qiskit_experiments/library/characterization/qubit_spectroscopy.py b/qiskit_experiments/library/characterization/qubit_spectroscopy.py index 0eea0d4aa4..aff5377ea1 100644 --- a/qiskit_experiments/library/characterization/qubit_spectroscopy.py +++ b/qiskit_experiments/library/characterization/qubit_spectroscopy.py @@ -15,16 +15,17 @@ from typing import List, Optional, Tuple, Union import numpy as np +import qiskit.pulse as pulse from qiskit import QuantumCircuit from qiskit.circuit import Gate, Parameter from qiskit.exceptions import QiskitError from qiskit.providers import Backend -import qiskit.pulse as pulse -from 
qiskit.utils import apply_prefix from qiskit.providers.options import Options from qiskit.qobj.utils import MeasLevel +from qiskit.utils import apply_prefix from qiskit_experiments.framework import BaseExperiment +from qiskit_experiments.curve_analysis import ParameterRepr from qiskit_experiments.library.characterization.resonance_analysis import ResonanceAnalysis @@ -71,6 +72,7 @@ def _default_experiment_options(cls) -> Options: def _default_analysis_options(cls) -> Options: """Default analysis options.""" options = super()._default_analysis_options() + options.result_parameters = [ParameterRepr("freq", "f01", "Hz")] options.normalization = True return options diff --git a/qiskit_experiments/library/characterization/resonance_analysis.py b/qiskit_experiments/library/characterization/resonance_analysis.py index 4e7421620c..e7e7836594 100644 --- a/qiskit_experiments/library/characterization/resonance_analysis.py +++ b/qiskit_experiments/library/characterization/resonance_analysis.py @@ -15,19 +15,10 @@ from typing import Any, Dict, List, Union import numpy as np +import qiskit_experiments.curve_analysis as curve -from qiskit_experiments.curve_analysis import ( - CurveAnalysis, - CurveAnalysisResultData, - SeriesDef, - fit_function, - guess, - get_opt_value, - get_opt_error, -) - -class ResonanceAnalysis(CurveAnalysis): +class ResonanceAnalysis(curve.CurveAnalysis): r"""A class to analyze a resonance, typically seen as a peak. Overview @@ -66,8 +57,8 @@ class ResonanceAnalysis(CurveAnalysis): """ __series__ = [ - SeriesDef( - fit_func=lambda x, a, sigma, freq, b: fit_function.gaussian( + curve.SeriesDef( + fit_func=lambda x, a, sigma, freq, b: curve.fit_function.gaussian( x, amp=a, sigma=sigma, x0=freq, baseline=b ), plot_color="blue", @@ -84,7 +75,7 @@ def _default_options(cls): default_options = super()._default_options() default_options.p0 = {"a": None, "sigma": None, "freq": None, "b": None} default_options.bounds = {"a": None, "sigma": None, "freq": None, "b": None} - default_options.fit_reports = {"freq": "frequency"} + default_options.reporting_parameters = {"freq": ("frequency", "Hz")} default_options.normalization = True return default_options @@ -96,13 +87,15 @@ def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any] curve_data = self._data() - b_guess = guess.constant_spectral_offset(curve_data.y) + b_guess = curve.guess.constant_spectral_offset(curve_data.y) y_ = curve_data.y - b_guess - _, peak_idx = guess.max_height(y_, absolute=True) + _, peak_idx = curve.guess.max_height(y_, absolute=True) a_guess = curve_data.y[peak_idx] - b_guess f_guess = curve_data.x[peak_idx] - s_guess = guess.full_width_half_max(curve_data.x, y_, peak_idx) / np.sqrt(8 * np.log(2)) + s_guess = curve.guess.full_width_half_max(curve_data.x, y_, peak_idx) / np.sqrt( + 8 * np.log(2) + ) max_abs_y = np.max(np.abs(curve_data.y)) @@ -124,7 +117,7 @@ def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any] return fit_option - def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisResultData: + def _evaluate_quality(self, fit_data: curve.FitData) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. 
A good fit has: @@ -143,11 +136,11 @@ def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisR min_freq = np.min(curve_data.x) freq_increment = np.mean(np.diff(curve_data.x)) - fit_a = get_opt_value(result_data, "a") - fit_b = get_opt_value(result_data, "b") - fit_freq = get_opt_value(result_data, "freq") - fit_sigma = get_opt_value(result_data, "sigma") - fit_sigma_err = get_opt_error(result_data, "sigma") + fit_a = fit_data.fitval("a").value + fit_b = fit_data.fitval("b").value + fit_freq = fit_data.fitval("freq").value + fit_sigma = fit_data.fitval("sigma").value + fit_sigma_err = fit_data.fitval("sigma").stderr snr = abs(fit_a) / np.sqrt(abs(np.median(curve_data.y) - fit_b)) fit_width_ratio = fit_sigma / (max_freq - min_freq) @@ -156,14 +149,12 @@ def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisR min_freq <= fit_freq <= max_freq, 1.5 * freq_increment < fit_sigma, fit_width_ratio < 0.25, - result_data["reduced_chisq"] < 3, + fit_data.reduced_chisq < 3, (fit_sigma_err is None or fit_sigma_err < fit_sigma), snr > 2, ] if all(criteria): - result_data["quality"] = "good" - else: - result_data["quality"] = "bad" + return "good" - return result_data + return "bad" diff --git a/qiskit_experiments/library/characterization/t1_analysis.py b/qiskit_experiments/library/characterization/t1_analysis.py index 68f2345a5a..d55dd47918 100644 --- a/qiskit_experiments/library/characterization/t1_analysis.py +++ b/qiskit_experiments/library/characterization/t1_analysis.py @@ -15,6 +15,7 @@ from typing import Tuple, List +import dataclasses import numpy as np from qiskit.utils import apply_prefix @@ -111,6 +112,7 @@ def fit_fun(x, a, tau, c): init = {"a": amplitude_guess, "tau": t1_guess, "c": offset_guess} fit_result = curve_fit(fit_fun, xdata, ydata, init, sigma=sigma) + fit_result = dataclasses.asdict(fit_result) fit_result["circuit_unit"] = unit if unit == "dt": fit_result["dt"] = conversion_factor diff --git a/qiskit_experiments/library/characterization/t2ramsey_analysis.py b/qiskit_experiments/library/characterization/t2ramsey_analysis.py index 07eff0bb5b..0546c532f7 100644 --- a/qiskit_experiments/library/characterization/t2ramsey_analysis.py +++ b/qiskit_experiments/library/characterization/t2ramsey_analysis.py @@ -14,6 +14,7 @@ """ from typing import List, Optional, Tuple, Dict +import dataclasses import numpy as np from qiskit.utils import apply_prefix @@ -131,6 +132,7 @@ def _format_plot(ax, unit, fit_result, conversion_factor): fit_result = curve_fit( osc_fit_fun, xdata, ydata, p0=list(p0.values()), sigma=sigma, bounds=bounds ) + fit_result = dataclasses.asdict(fit_result) fit_result["circuit_unit"] = unit if unit == "dt": fit_result["dt"] = conversion_factor diff --git a/qiskit_experiments/library/randomized_benchmarking/interleaved_rb_analysis.py b/qiskit_experiments/library/randomized_benchmarking/interleaved_rb_analysis.py index 2fe01aa16b..cfcd9fdeef 100644 --- a/qiskit_experiments/library/randomized_benchmarking/interleaved_rb_analysis.py +++ b/qiskit_experiments/library/randomized_benchmarking/interleaved_rb_analysis.py @@ -17,13 +17,7 @@ import numpy as np from qiskit_experiments.framework import AnalysisResultData, FitVal -from qiskit_experiments.curve_analysis import ( - SeriesDef, - CurveAnalysisResultData, - fit_function, - get_opt_value, - get_opt_error, -) +import qiskit_experiments.curve_analysis as curve from .rb_analysis import RBAnalysis @@ -99,9 +93,9 @@ class InterleavedRBAnalysis(RBAnalysis): """ __series__ = [ - 
SeriesDef( + curve.SeriesDef( name="Standard", - fit_func=lambda x, a, alpha, alpha_c, b: fit_function.exponential_decay( + fit_func=lambda x, a, alpha, alpha_c, b: curve.fit_function.exponential_decay( x, amp=a, lamb=-1.0, base=alpha, baseline=b ), filter_kwargs={"interleaved": False}, @@ -109,9 +103,9 @@ class InterleavedRBAnalysis(RBAnalysis): plot_symbol=".", plot_fit_uncertainty=True, ), - SeriesDef( + curve.SeriesDef( name="Interleaved", - fit_func=lambda x, a, alpha, alpha_c, b: fit_function.exponential_decay( + fit_func=lambda x, a, alpha, alpha_c, b: curve.fit_function.exponential_decay( x, amp=a, lamb=-1.0, base=alpha * alpha_c, baseline=b ), filter_kwargs={"interleaved": True}, @@ -136,8 +130,7 @@ def _default_options(cls): "alpha_c": (0.0, 1.0), "b": (0.0, 1.0), } - default_options.fit_reports = {"alpha": "\u03B1", "alpha_c": "\u03B1$_c$", "EPC": "EPC"} - + default_options.result_parameters = ["alpha", "alpha_c"] return default_options def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any]]]: @@ -171,71 +164,36 @@ def _setup_fitting(self, **options) -> Union[Dict[str, Any], List[Dict[str, Any] return fit_option - def _post_analysis(self, result_data: CurveAnalysisResultData) -> CurveAnalysisResultData: + def _extra_database_entry(self, fit_data: curve.FitData) -> List[AnalysisResultData]: """Calculate EPC.""" - # Add EPC data nrb = 2 ** self._num_qubits scale = (nrb - 1) / nrb - alpha = get_opt_value(result_data, "alpha") - alpha_c = get_opt_value(result_data, "alpha_c") - alpha_c_err = get_opt_error(result_data, "alpha_c") + + alpha = fit_data.fitval("alpha") + alpha_c = fit_data.fitval("alpha_c") # Calculate epc_est (=r_c^est) - Eq. (4): - epc_est = scale * (1 - alpha_c) - epc_est_err = scale * alpha_c_err - result_data["EPC"] = epc_est - result_data["EPC_err"] = epc_est_err + epc = FitVal(value=scale * (1 - alpha_c.value), stderr=scale * alpha_c.stderr) # Calculate the systematic error bounds - Eq. 
(5): - systematic_err_1 = scale * (abs(alpha - alpha_c) + (1 - alpha)) + systematic_err_1 = scale * (abs(alpha.value - alpha_c.value) + (1 - alpha.value)) systematic_err_2 = ( - 2 * (nrb * nrb - 1) * (1 - alpha) / (alpha * nrb * nrb) - + 4 * (np.sqrt(1 - alpha)) * (np.sqrt(nrb * nrb - 1)) / alpha + 2 * (nrb * nrb - 1) * (1 - alpha.value) / (alpha.value * nrb * nrb) + + 4 * (np.sqrt(1 - alpha.value)) * (np.sqrt(nrb * nrb - 1)) / alpha.value ) systematic_err = min(systematic_err_1, systematic_err_2) - systematic_err_l = epc_est - systematic_err - systematic_err_r = epc_est + systematic_err - result_data["EPC_systematic_err"] = systematic_err - result_data["EPC_systematic_bounds"] = [max(systematic_err_l, 0), systematic_err_r] - - return result_data - - def _run_analysis(self, experiment_data, **options): - """Run analysis on circuit data.""" - - analysis_results, figures = super()._run_analysis(experiment_data, **options) - - # Manual formatting for analysis result - # This sort of post-processing should be refactored into CurveAnalysis - # so that it works with the AnalysisResult dataclasses - curve_result = analysis_results[0] - chisq = curve_result.chisq - quality = curve_result.quality - result_data = curve_result.extra - - alpha = FitVal(get_opt_value(result_data, "alpha"), get_opt_error(result_data, "alpha")) - analysis_results.append(AnalysisResultData("alpha", alpha, chisq=chisq, quality=quality)) - - alpha_c = FitVal( - get_opt_value(result_data, "alpha_c"), get_opt_error(result_data, "alpha_c") - ) - analysis_results.append( - AnalysisResultData("alpha_c", alpha_c, chisq=chisq, quality=quality) + systematic_err_l = epc.value - systematic_err + systematic_err_r = epc.value + systematic_err + + extra_data = AnalysisResultData( + name="EPC", + value=epc, + chisq=fit_data.reduced_chisq, + quality=self._evaluate_quality(fit_data), + extra={ + "EPC_systematic_err": systematic_err, + "EPC_systematic_bounds": [max(systematic_err_l, 0), systematic_err_r], + }, ) - if "EPC" in result_data: - extra = {} - for key in ["EPC_systematic_err", "EPC__systematic_bounds"]: - if key in result_data: - extra[key] = result_data[key] - epc = FitVal(result_data["EPC"], result_data.get("EPC_err")) - analysis_results.append( - AnalysisResultData( - "EPC", - epc, - chisq=chisq, - quality=quality, - extra=extra, - ) - ) - return analysis_results, figures + return [extra_data] diff --git a/qiskit_experiments/library/randomized_benchmarking/rb_analysis.py b/qiskit_experiments/library/randomized_benchmarking/rb_analysis.py index dbdf5385e3..102ef6964a 100644 --- a/qiskit_experiments/library/randomized_benchmarking/rb_analysis.py +++ b/qiskit_experiments/library/randomized_benchmarking/rb_analysis.py @@ -79,7 +79,7 @@ def _default_options(cls): default_options.bounds = {"a": (0.0, 1.0), "alpha": (0.0, 1.0), "b": (0.0, 1.0)} default_options.xlabel = "Clifford Length" default_options.ylabel = "P(0)" - default_options.fit_reports = {"alpha": "\u03B1", "EPC": "EPC"} + default_options.result_parameters = ["alpha"] default_options.error_dict = None default_options.epg_1_qubit = None default_options.gate_error_ratio = None @@ -146,88 +146,71 @@ def _format_data(self, data: curve.CurveData) -> curve.CurveData: data_index=mean_data_index, ) - def _post_analysis( - self, result_data: curve.CurveAnalysisResultData - ) -> curve.CurveAnalysisResultData: + def _extra_database_entry(self, fit_data: curve.FitData) -> List[AnalysisResultData]: """Calculate EPC.""" - alpha = curve.get_opt_value(result_data, "alpha") - alpha_err = 
curve.get_opt_error(result_data, "alpha") + extra_entries = [] + # Calculate EPC + alpha = fit_data.fitval("alpha") scale = (2 ** self._num_qubits - 1) / (2 ** self._num_qubits) - result_data["EPC"] = scale * (1 - alpha) - result_data["EPC_err"] = scale * alpha_err / alpha + epc = FitVal(value=scale * (1 - alpha.value), stderr=scale * alpha.stderr / alpha.value) + extra_entries.append( + AnalysisResultData( + name="EPC", + value=epc, + chisq=fit_data.reduced_chisq, + quality=self._evaluate_quality(fit_data), + ) + ) - # Add EPG data - gate_error_ratio = self._get_option("gate_error_ratio") - if gate_error_ratio is None: + # Calculate EPG + if not self._get_option("gate_error_ratio"): # we attempt to get the ratio from the backend properties - gate_error_ratio = self._get_option("error_dict") + if not self._get_option("error_dict"): + gate_error_ratio = RBUtils.get_error_dict_from_backend( + backend=self._backend, qubits=self._physical_qubits + ) + else: + gate_error_ratio = self._get_option("error_dict") + else: + gate_error_ratio = self._get_option("gate_error_ratio") + count_ops = [] for meta in self._data(label="raw_data").metadata: count_ops += meta.get("count_ops", []) + if len(count_ops) > 0 and gate_error_ratio is not None: gates_per_clifford = RBUtils.gates_per_clifford(count_ops) num_qubits = len(self._physical_qubits) - if num_qubits in [1, 2]: - if num_qubits == 1: - epg = RBUtils.calculate_1q_epg( - result_data["EPC"], - self._physical_qubits, - gate_error_ratio, - gates_per_clifford, - ) - elif self._num_qubits == 2: - epg_1_qubit = self._get_option("epg_1_qubit") - epg = RBUtils.calculate_2q_epg( - result_data["EPC"], - self._physical_qubits, - gate_error_ratio, - gates_per_clifford, - epg_1_qubit=epg_1_qubit, - ) - result_data["EPG"] = epg - return result_data - - def _run_analysis(self, experiment_data, **options): - """Run analysis on circuit data.""" - error_dict = options["error_dict"] - qubits = experiment_data.metadata()["physical_qubits"] - if not error_dict: - options["error_dict"] = RBUtils.get_error_dict_from_backend( - experiment_data.backend, qubits - ) - analysis_results, figures = super()._run_analysis(experiment_data, **options) - - # Manual formatting for analysis result - # This sort of post-processing should be refactored into CurveAnalysis - # so that it works with the AnalysisResult dataclasses - curve_result = analysis_results[0] - chisq = curve_result.chisq - quality = curve_result.quality - result_data = curve_result.extra - - alpha = FitVal( - curve.get_opt_value(result_data, "alpha"), curve.get_opt_error(result_data, "alpha") - ) - analysis_results.append(AnalysisResultData("alpha", alpha, chisq=chisq, quality=quality)) - if "EPC" in result_data: - analysis_results.append( - AnalysisResultData( - "EPC", - FitVal(result_data["EPC"], result_data.get("EPC_err")), - chisq=chisq, - quality=quality, + + if num_qubits == 1: + epg = RBUtils.calculate_1q_epg( + epc.value, + self._physical_qubits, + gate_error_ratio, + gates_per_clifford, ) - ) - # TODO: the EPG dict should be broken up into separate - # analysis results for each gate on each qubit - if "EPG" in result_data: - analysis_results.append( - AnalysisResultData( - "EPG", - result_data["EPG"], - chisq=chisq, - quality=quality, + elif num_qubits == 2: + epg_1_qubit = self._get_option("epg_1_qubit") + epg = RBUtils.calculate_2q_epg( + epc.value, + self._physical_qubits, + gate_error_ratio, + gates_per_clifford, + epg_1_qubit=epg_1_qubit, ) - ) - return analysis_results, figures + else: + # EPG 
calculation is not supported for more than 3 qubits RB + epg = None + + if epg: + extra_entries.append( + AnalysisResultData( + "EPG", + value=epg, + chisq=fit_data.reduced_chisq, + quality=self._evaluate_quality(fit_data), + ) + ) + + return extra_entries diff --git a/test/calibration/experiments/test_drag.py b/test/calibration/experiments/test_drag.py index cbfc910122..4eb58303b6 100644 --- a/test/calibration/experiments/test_drag.py +++ b/test/calibration/experiments/test_drag.py @@ -55,10 +55,9 @@ def test_end_to_end(self): drag.set_experiment_options(rp=self.x_plus, rm=self.x_minus) expdata = drag.run(backend) expdata.block_for_results() - result = expdata.analysis_results(0) - result_data = result.extra + result = expdata.analysis_results(1) - self.assertTrue(abs(result_data["popt"][4] - backend.ideal_beta) < test_tol) + self.assertTrue(abs(result.value.value - backend.ideal_beta) < test_tol) self.assertEqual(result.quality, "good") # Small leakage will make the curves very flat. @@ -70,13 +69,12 @@ def test_end_to_end(self): drag.set_run_options(meas_level=MeasLevel.KERNELED) exp_data = drag.run(backend) exp_data.block_for_results() - result = exp_data.analysis_results(0) - result_data = result.extra + result = exp_data.analysis_results(1) meas_level = exp_data.metadata()["job_metadata"][-1]["run_options"]["meas_level"] self.assertEqual(meas_level, MeasLevel.KERNELED) - self.assertTrue(abs(result_data["popt"][4] - backend.ideal_beta) < test_tol) + self.assertTrue(abs(result.value.value - backend.ideal_beta) < test_tol) self.assertEqual(result.quality, "good") # Large leakage will make the curves oscillate quickly. @@ -89,13 +87,12 @@ def test_end_to_end(self): drag.set_experiment_options(rp=self.x_plus, rm=self.x_minus) exp_data = drag.run(backend) exp_data.block_for_results() - result = exp_data.analysis_results(0) - result_data = result.extra + result = exp_data.analysis_results(1) meas_level = exp_data.metadata()["job_metadata"][-1]["run_options"]["meas_level"] self.assertEqual(meas_level, MeasLevel.CLASSIFIED) - self.assertTrue(abs(result_data["popt"][4] - backend.ideal_beta) < test_tol) + self.assertTrue(abs(result.value.value - backend.ideal_beta) < test_tol) self.assertEqual(result.quality, "good") diff --git a/test/calibration/experiments/test_fine_amplitude.py b/test/calibration/experiments/test_fine_amplitude.py index 36610b0717..100be9550e 100644 --- a/test/calibration/experiments/test_fine_amplitude.py +++ b/test/calibration/experiments/test_fine_amplitude.py @@ -48,10 +48,8 @@ def test_end_to_end_under_rotation(self): expdata = amp_cal.run(backend) expdata.block_for_results() - result = expdata.analysis_results(-1) - result_data = result.extra - - d_theta = result_data["popt"][result_data["popt_keys"].index("d_theta")] + result = expdata.analysis_results(1) + d_theta = result.value.value tol = 0.04 @@ -71,10 +69,8 @@ def test_end_to_end_over_rotation(self): expdata = amp_cal.run(backend) expdata.block_for_results() - result = expdata.analysis_results(-1) - result_data = result.extra - - d_theta = result_data["popt"][result_data["popt_keys"].index("d_theta")] + result = expdata.analysis_results(1) + d_theta = result.value.value tol = 0.04 diff --git a/test/calibration/experiments/test_rabi.py b/test/calibration/experiments/test_rabi.py index 86698aba64..bfb9dd66e5 100644 --- a/test/calibration/experiments/test_rabi.py +++ b/test/calibration/experiments/test_rabi.py @@ -108,9 +108,9 @@ def test_wrong_processor(self): rabi.set_run_options(shots=2) data = rabi.run(backend) 
data.block_for_results() - result = data.analysis_results(0) + result = data.analysis_results() - self.assertTrue(f"The input key {fail_key} was not found" in result.extra["error_message"]) + self.assertEqual(len(result), 0) class TestEFRabi(QiskitTestCase): @@ -131,11 +131,10 @@ def test_ef_rabi_end_to_end(self): rabi.set_experiment_options(amplitudes=np.linspace(-0.95, 0.95, 21)) expdata = rabi.run(backend) expdata.block_for_results() - result = expdata.analysis_results(0) - result_data = result.extra + result = expdata.analysis_results(1) self.assertEqual(result.quality, "good") - self.assertTrue(abs(result_data["popt"][1] - backend.rabi_rate) < test_tol) + self.assertTrue(abs(result.value.value - backend.rabi_rate) < test_tol) def test_ef_rabi_circuit(self): """Test the EFRabi experiment end to end.""" diff --git a/test/calibration/test_update_library.py b/test/calibration/test_update_library.py index c1df2ba186..12305aaf2b 100644 --- a/test/calibration/test_update_library.py +++ b/test/calibration/test_update_library.py @@ -27,7 +27,6 @@ from qiskit_experiments.exceptions import CalibrationError from qiskit_experiments.calibration_management.update_library import Frequency, Amplitude, Drag from qiskit_experiments.calibration_management.backend_calibrations import BackendCalibrations -from qiskit_experiments.curve_analysis import get_opt_value from qiskit_experiments.test.mock_iq_backend import DragBackend, MockFineAmp @@ -79,8 +78,8 @@ def test_amplitude(self): self.assertEqual(len(self.cals.parameters_table()), 4) # Now check the corresponding schedules - result = exp_data.analysis_results(-1).extra - rate = 2 * np.pi * result["popt"][1] + result = exp_data.analysis_results(1) + rate = 2 * np.pi * result.value.value amp = np.round(np.pi / rate, decimals=8) with pulse.build(name="xp") as expected: pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(self.qubit)) @@ -142,19 +141,17 @@ def test_frequency(self): spec.set_run_options(meas_level=MeasLevel.CLASSIFIED) exp_data = spec.run(backend) exp_data.block_for_results() - result = exp_data.analysis_results(0) - result_data = result.extra - - value = get_opt_value(result_data, "freq") + result = exp_data.analysis_results(1) + value = result.value.value self.assertTrue(freq01 + peak_offset - 2e6 < value < freq01 + peak_offset + 2e6) self.assertEqual(result.quality, "good") # Test the integration with the BackendCalibrations cals = BackendCalibrations(FakeAthens()) - self.assertNotEqual(cals.get_qubit_frequencies()[qubit], result_data["popt"][2]) + self.assertNotEqual(cals.get_qubit_frequencies()[qubit], value) Frequency.update(cals, exp_data) - self.assertEqual(cals.get_qubit_frequencies()[qubit], result_data["popt"][2]) + self.assertEqual(cals.get_qubit_frequencies()[qubit], value) class TestDragUpdate(QiskitTestCase): @@ -198,11 +195,10 @@ def test_drag(self): exp_data = drag.run(backend) exp_data.block_for_results() - result = exp_data.analysis_results(0) - result_data = result.extra + result = exp_data.analysis_results(1) # Test the fit for good measure. 
- self.assertTrue(abs(result_data["popt"][4] - backend.ideal_beta) < test_tol) + self.assertTrue(abs(result.value.value - backend.ideal_beta) < test_tol) self.assertEqual(result.quality, "good") # Check schedules pre-update @@ -212,5 +208,5 @@ def test_drag(self): Drag.update(cals, exp_data, parameter="β", schedule="xp") # Check schedules post-update - expected = x_plus.assign_parameters({beta: result_data["popt"][4], chan: 1}, inplace=False) + expected = x_plus.assign_parameters({beta: result.value.value, chan: 1}, inplace=False) self.assertEqual(cals.get_schedule("xp", qubit), expected) diff --git a/test/curve_analysis/test_curve_fit.py b/test/curve_analysis/test_curve_fit.py index 02aa37bfa5..f2396e9159 100644 --- a/test/curve_analysis/test_curve_fit.py +++ b/test/curve_analysis/test_curve_fit.py @@ -22,7 +22,7 @@ from qiskit.qobj.utils import MeasLevel from qiskit_experiments.framework import ExperimentData -from qiskit_experiments.curve_analysis import CurveAnalysis, SeriesDef, fit_function +from qiskit_experiments.curve_analysis import CurveAnalysis, SeriesDef, fit_function, ParameterRepr from qiskit_experiments.curve_analysis.data_processing import probability from qiskit_experiments.exceptions import AnalysisError @@ -297,18 +297,23 @@ def test_run_single_curve_analysis(self): ) default_opts = analysis._default_options() default_opts.p0 = {"p0": ref_p0, "p1": ref_p1, "p2": ref_p2, "p3": ref_p3} + default_opts.result_parameters = [ParameterRepr("p1", "parameter_name", "unit")] results, _ = analysis._run_analysis(test_data, **default_opts.__dict__) result = results[0] - extra = result.extra ref_popt = np.asarray([ref_p0, ref_p1, ref_p2, ref_p3]) # check result data - np.testing.assert_array_almost_equal(extra["popt"], ref_popt, decimal=self.err_decimal) - self.assertEqual(extra["dof"], 46) - self.assertListEqual(extra["xrange"], [0.1, 1.0]) - self.assertListEqual(extra["popt_keys"], ["p0", "p1", "p2", "p3"]) + np.testing.assert_array_almost_equal(result.value.value, ref_popt, decimal=self.err_decimal) + self.assertEqual(result.extra["dof"], 46) + self.assertListEqual(result.extra["popt_keys"], ["p0", "p1", "p2", "p3"]) + + # special entry formatted for database + result = results[1] + self.assertEqual(result.name, "parameter_name") + self.assertEqual(result.value.unit, "unit") + self.assertAlmostEqual(result.value.value, ref_p1, places=self.err_decimal) def test_run_single_curve_fail(self): """Test analysis returns status when it fails.""" @@ -339,10 +344,10 @@ def test_run_single_curve_fail(self): # Try to fit with infeasible parameter boundary. This should fail. results, _ = analysis._run_analysis(test_data, **default_opts.__dict__) - extra = results[0].extra - ref_result_keys = ["error_message", "raw_data", "success"] - self.assertSetEqual(set(extra.keys()), set(ref_result_keys)) - self.assertFalse(extra["success"]) + + # This returns only data point entry + self.assertEqual(len(results), 1) + self.assertEqual(results[0].name, "@Data_TestAnalysis") def test_run_two_curves_with_same_fitfunc(self): """Test analysis for two curves. 
Curves shares fit model.""" @@ -392,12 +397,12 @@ def test_run_two_curves_with_same_fitfunc(self): default_opts.p0 = {"p0": ref_p0, "p1": ref_p1, "p2": ref_p2, "p3": ref_p3, "p4": ref_p4} results, _ = analysis._run_analysis(test_data0, **default_opts.__dict__) - result = results[0].extra + result = results[0] ref_popt = np.asarray([ref_p0, ref_p1, ref_p2, ref_p3, ref_p4]) # check result data - np.testing.assert_array_almost_equal(result["popt"], ref_popt, decimal=self.err_decimal) + np.testing.assert_array_almost_equal(result.value.value, ref_popt, decimal=self.err_decimal) def test_run_two_curves_with_two_fitfuncs(self): """Test analysis for two curves. Curves shares fit parameters.""" @@ -446,12 +451,12 @@ def test_run_two_curves_with_two_fitfuncs(self): default_opts.p0 = {"p0": ref_p0, "p1": ref_p1, "p2": ref_p2, "p3": ref_p3} results, _ = analysis._run_analysis(test_data0, **default_opts.__dict__) - result = results[0].extra + result = results[0] ref_popt = np.asarray([ref_p0, ref_p1, ref_p2, ref_p3]) # check result data - np.testing.assert_array_almost_equal(result["popt"], ref_popt, decimal=self.err_decimal) + np.testing.assert_array_almost_equal(result.value.value, ref_popt, decimal=self.err_decimal) def test_run_fixed_parameters(self): """Test analysis when some of parameters are fixed.""" @@ -483,12 +488,12 @@ def test_run_fixed_parameters(self): default_opts.fixed_p2 = ref_p2 results, _ = analysis._run_analysis(test_data, **default_opts.__dict__) - result = results[0].extra + result = results[0] ref_popt = np.asarray([ref_p0, ref_p1, ref_p3]) # check result data - np.testing.assert_array_almost_equal(result["popt"], ref_popt, decimal=self.err_decimal) + np.testing.assert_array_almost_equal(result.value.value, ref_popt, decimal=self.err_decimal) def test_fixed_param_is_missing(self): """Test raising an analysis error when fixed parameter is missing.""" @@ -516,8 +521,9 @@ def test_fixed_param_is_missing(self): ) default_opts = analysis._default_options() - default_opts.p0 = {"p0": ref_p0, "p1": ref_p1, "p3": ref_p3} + # do not define fixed_p2 here + default_opts.p0 = {"p0": ref_p0, "p1": ref_p1, "p3": ref_p3} with self.assertRaises(AnalysisError): analysis._run_analysis(test_data, **default_opts.__dict__) diff --git a/test/curve_analysis/test_curve_fitting.py b/test/curve_analysis/test_curve_fitting.py index 8a94544abc..5e504a3b3d 100644 --- a/test/curve_analysis/test_curve_fitting.py +++ b/test/curve_analysis/test_curve_fitting.py @@ -91,7 +91,7 @@ def test_curve_fit(self): p0 = [0.6] bounds = ([0], [2]) sol = curve_fit(self.objective0, xdata, ydata, p0, sigma=sigma, bounds=bounds) - self.assertTrue(abs(sol["popt"][0] - 0.5) < 0.05) + self.assertTrue(abs(sol.popt[0] - 0.5) < 0.05) def test_multi_curve_fit(self): """Test multi_curve_fit function""" @@ -111,7 +111,7 @@ def test_multi_curve_fit(self): sol = multi_curve_fit( [self.objective0, self.objective1], series, xdata, ydata, p0, sigma=sigma, bounds=bounds ) - self.assertTrue(abs(sol["popt"][0] - 0.5) < 0.05) + self.assertTrue(abs(sol.popt[0] - 0.5) < 0.05) def test_mean_xy_data(self): """Test mean_xy_data function""" diff --git a/test/test_qubit_spectroscopy.py b/test/test_qubit_spectroscopy.py index 0f4641d0d5..81ef75cb8b 100644 --- a/test/test_qubit_spectroscopy.py +++ b/test/test_qubit_spectroscopy.py @@ -20,7 +20,6 @@ from qiskit.test import QiskitTestCase from qiskit_experiments.library import QubitSpectroscopy, EFSpectroscopy -from qiskit_experiments.curve_analysis import get_opt_value from 
qiskit_experiments.test.mock_iq_backend import MockIQBackend @@ -67,10 +66,8 @@ def test_spectroscopy_end2end_classified(self): spec.set_run_options(meas_level=MeasLevel.CLASSIFIED) expdata = spec.run(backend) expdata.block_for_results() - result = expdata.analysis_results(0) - result_data = result.extra - - value = get_opt_value(result_data, "freq") + result = expdata.analysis_results(1) + value = result.value.value self.assertTrue(4.999e9 < value < 5.001e9) self.assertEqual(result.quality, "good") @@ -82,10 +79,8 @@ def test_spectroscopy_end2end_classified(self): spec.set_run_options(meas_level=MeasLevel.CLASSIFIED) expdata = spec.run(backend) expdata.block_for_results() - result = expdata.analysis_results(0) - result_data = result.extra - - value = get_opt_value(result_data, "freq") + result = expdata.analysis_results(1) + value = result.value.value self.assertTrue(5.0049e9 < value < 5.0051e9) self.assertEqual(result.quality, "good") @@ -101,10 +96,8 @@ def test_spectroscopy_end2end_kerneled(self): spec = QubitSpectroscopy(qubit, frequencies, unit="MHz") expdata = spec.run(backend) expdata.block_for_results() - result = expdata.analysis_results(0) - result_data = result.extra - - value = get_opt_value(result_data, "freq") + result = expdata.analysis_results(1) + value = result.value.value self.assertTrue(freq01 - 2e6 < value < freq01 + 2e6) self.assertEqual(result.quality, "good") @@ -115,10 +108,8 @@ def test_spectroscopy_end2end_kerneled(self): spec = QubitSpectroscopy(qubit, frequencies, unit="MHz") expdata = spec.run(backend) expdata.block_for_results() - result = expdata.analysis_results(0) - result_data = result.extra - - value = get_opt_value(result_data, "freq") + result = expdata.analysis_results(1) + value = result.value.value self.assertTrue(freq01 + 3e6 < value < freq01 + 8e6) self.assertEqual(result.quality, "good") @@ -126,10 +117,8 @@ def test_spectroscopy_end2end_kerneled(self): spec.set_run_options(meas_return="avg") expdata = spec.run(backend) expdata.block_for_results() - result = expdata.analysis_results(0) - result_data = result.extra - - value = get_opt_value(result_data, "freq") + result = expdata.analysis_results(1) + value = result.value.value self.assertTrue(freq01 + 3e6 < value < freq01 + 8e6) self.assertEqual(result.quality, "good") @@ -148,10 +137,8 @@ def test_spectroscopy12_end2end_classified(self): spec.set_run_options(meas_level=MeasLevel.CLASSIFIED) expdata = spec.run(backend) expdata.block_for_results() - result = expdata.analysis_results(0) - result_data = result.extra - - value = get_opt_value(result_data, "freq") + result = expdata.analysis_results(1) + value = result.value.value self.assertTrue(freq01 - 2e6 < value < freq01 + 2e6) self.assertEqual(result.quality, "good")
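
The hunks above all move the analysis classes onto the same refactored pattern: fit quality is decided in `_evaluate_quality`, which receives a `curve.FitData` and returns "good" or "bad"; derived quantities are returned from `_extra_database_entry` as `AnalysisResultData` entries built from `FitVal` objects; and fit parameters destined for the database are declared through the `result_parameters` option, optionally renamed with `ParameterRepr`. The sketch below restates that pattern in one place. It is illustrative only: the class name `DecayAnalysis`, the fit model, and the derived `decay_rate` entry are hypothetical, the initial-guess hook (`_setup_fitting`) is omitted for brevity, and only option names and call signatures that appear in the diffs above are assumed to exist.

    # Illustrative sketch of the refactored CurveAnalysis hooks.
    # "DecayAnalysis", its parameters, and "decay_rate" are hypothetical;
    # the hooks, options, and signatures mirror the classes changed in this patch.
    from typing import List, Union

    import qiskit_experiments.curve_analysis as curve
    from qiskit_experiments.framework import AnalysisResultData, FitVal


    class DecayAnalysis(curve.CurveAnalysis):
        """Fit y = amp * base**x + baseline and report the decay parameter."""

        __series__ = [
            curve.SeriesDef(
                fit_func=lambda x, amp, base, baseline: curve.fit_function.exponential_decay(
                    x, amp=amp, lamb=-1.0, base=base, baseline=baseline
                ),
                plot_color="blue",
            )
        ]

        @classmethod
        def _default_options(cls):
            options = super()._default_options()
            options.p0 = {"amp": None, "base": None, "baseline": None}
            options.bounds = {"amp": None, "base": (0.0, 1.0), "baseline": None}
            # Parameters listed here become their own database entries,
            # replacing the old fit_reports dictionary.
            options.result_parameters = [curve.ParameterRepr("base", "decay_base")]
            return options

        def _evaluate_quality(self, fit_data: curve.FitData) -> Union[str, None]:
            # Quality is a string computed from the fit data,
            # no longer a field mutated on a result dictionary.
            if fit_data.reduced_chisq < 3:
                return "good"
            return "bad"

        def _extra_database_entry(self, fit_data: curve.FitData) -> List[AnalysisResultData]:
            # Derived quantities are returned as extra AnalysisResultData entries
            # instead of being stuffed into the curve-fit result's extra dict.
            base = fit_data.fitval("base")
            rate = FitVal(value=1 - base.value, stderr=base.stderr)
            return [
                AnalysisResultData(
                    name="decay_rate",
                    value=rate,
                    chisq=fit_data.reduced_chisq,
                    quality=self._evaluate_quality(fit_data),
                )
            ]

With entries produced this way, the updated tests above read the fitted value as `expdata.analysis_results(1).value.value` and its quality as `result.quality`, rather than digging positional `popt` values out of `result.extra`.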