
Address several warnings raised during tests #1351

Merged
merged 19 commits into from
Jan 19, 2024
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
Show all changes
19 commits
2 changes: 0 additions & 2 deletions qiskit_experiments/library/calibration/fine_drag_cal.py
@@ -66,8 +66,6 @@ def __init__(
auto_update=auto_update,
)

self.set_transpile_options(basis_gates=["sx", schedule_name, "rz"])

@classmethod
def _default_experiment_options(cls) -> Options:
"""Default experiment options.
111 changes: 60 additions & 51 deletions qiskit_experiments/test/fake_service.py
@@ -111,33 +111,38 @@ def create_experiment(
# backend - the query methods `experiment` and `experiments` are supposed to return
# an instantiated backend object, and not only the backend name. We assume that the fake
# service works with the fake backend (class FakeBackend).
self.exps = pd.concat(
row = pd.DataFrame(
[
self.exps,
pd.DataFrame(
[
{
"experiment_type": experiment_type,
"experiment_id": experiment_id,
"parent_id": parent_id,
"backend_name": backend_name,
"metadata": metadata,
"job_ids": job_ids,
"tags": tags,
"notes": notes,
"share_level": kwargs.get("share_level", None),
"device_components": [],
"start_datetime": datetime(2022, 1, 1)
+ timedelta(hours=len(self.exps)),
"figure_names": [],
"backend": FakeBackend(backend_name=backend_name),
}
],
columns=self.exps.columns,
),
{
"experiment_type": experiment_type,
"experiment_id": experiment_id,
"parent_id": parent_id,
"backend_name": backend_name,
"metadata": metadata,
"job_ids": job_ids,
"tags": tags,
"notes": notes,
"share_level": kwargs.get("share_level", None),
"device_components": [],
"start_datetime": datetime(2022, 1, 1) + timedelta(hours=len(self.exps)),
"figure_names": [],
"backend": FakeBackend(backend_name=backend_name),
}
],
ignore_index=True,
columns=self.exps.columns,
)
if len(self.exps) > 0:
self.exps = pd.concat(
[
self.exps,
row,
],
ignore_index=True,
)
else:
# Avoid the FutureWarning on concatenating empty DataFrames
# introduced in https://github.com/pandas-dev/pandas/pull/52532
self.exps = row

return experiment_id

@@ -293,35 +298,39 @@ def create_analysis_result(
# `IBMExperimentService.create_analysis_result`. Since `DbExperimentData` does not set it
# via kwargs (as it does with chisq), the user cannot control the time and the service
# alone decides about it. Here we've chosen to set the start date of the experiment.
self.results = pd.concat(
row = pd.DataFrame(
[
self.results,
pd.DataFrame(
[
{
"result_data": result_data,
"result_id": result_id,
"result_type": result_type,
"device_components": device_components,
"experiment_id": experiment_id,
"quality": quality,
"verified": verified,
"tags": tags,
"backend_name": self.exps.loc[self.exps.experiment_id == experiment_id]
.iloc[0]
.backend_name,
"chisq": kwargs.get("chisq", None),
"creation_datetime": self.exps.loc[
self.exps.experiment_id == experiment_id
]
.iloc[0]
.start_datetime,
}
]
),
],
ignore_index=True,
{
"result_data": result_data,
"result_id": result_id,
"result_type": result_type,
"device_components": device_components,
"experiment_id": experiment_id,
"quality": quality,
"verified": verified,
"tags": tags,
"backend_name": self.exps.loc[self.exps.experiment_id == experiment_id]
.iloc[0]
.backend_name,
"chisq": kwargs.get("chisq", None),
"creation_datetime": self.exps.loc[self.exps.experiment_id == experiment_id]
.iloc[0]
.start_datetime,
}
]
)
if len(self.results) > 0:
self.results = pd.concat(
[
self.results,
row,
],
ignore_index=True,
)
else:
# Avoid the FutureWarning on concatenating empty DataFrames
# introduced in https://github.com/pandas-dev/pandas/pull/52532
self.results = row

# a helper method for updating the experiment's device components, see usage below
def add_new_components(expcomps):
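Both hunks above follow the same pattern: build the new record as its own DataFrame and only call pd.concat when the accumulated table is non-empty, which avoids the pandas FutureWarning about concatenating empty or all-NA frames. The following standalone sketch illustrates the idea; the column names and helper are invented for illustration and are not part of the repository.

import pandas as pd

COLUMNS = ["experiment_id", "experiment_type"]  # hypothetical schema for this sketch


def append_row(table: pd.DataFrame, record: dict) -> pd.DataFrame:
    """Append ``record`` to ``table`` without triggering the pandas FutureWarning.

    Recent pandas versions warn when an empty (or all-NA) DataFrame takes part in
    ``pd.concat`` because dtype inference for that case is changing; building the
    row separately and skipping the concat for an empty table sidesteps the warning.
    """
    row = pd.DataFrame([record], columns=table.columns)
    if len(table) > 0:
        return pd.concat([table, row], ignore_index=True)
    # Empty accumulator: the new row simply becomes the table.
    return row


exps = pd.DataFrame(columns=COLUMNS)
exps = append_row(exps, {"experiment_id": "abc", "experiment_type": "T1"})
exps = append_row(exps, {"experiment_id": "def", "experiment_type": "T2"})
print(exps)

The helper returns a new DataFrame rather than mutating in place, which mirrors how the fake service reassigns self.exps and self.results above.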
2 changes: 1 addition & 1 deletion qiskit_experiments/test/pulse_backend.py
@@ -96,7 +96,7 @@ def __init__(
None,
name="PulseBackendV2",
description="A PulseBackend simulator",
online_date=datetime.datetime.utcnow(),
online_date=datetime.datetime.now(datetime.timezone.utc),
backend_version="0.0.1",
)

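The one-line change above follows the CPython deprecation of datetime.utcnow() in Python 3.12: the naive UTC timestamp is replaced by a timezone-aware one. A minimal illustration:

import datetime

# datetime.utcnow() is deprecated (Python 3.12) because it returns a naive
# timestamp; the replacement attaches an explicit UTC timezone.
aware_now = datetime.datetime.now(datetime.timezone.utc)
assert aware_now.tzinfo is datetime.timezone.utc
print(aware_now.isoformat())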
8 changes: 4 additions & 4 deletions test/calibration/test_calibrations.py
@@ -534,14 +534,14 @@ def test_default_schedules(self):
xp3 = self.cals.get_schedule("xp", (3,))

# Check that xp0 is Play(Gaussian(160, 0.15, 40), 0)
self.assertTrue(isinstance(xp0.instructions[0][1].pulse, Gaussian))
self.assertTrue(xp0.instructions[0][1].pulse.pulse_type == "Gaussian")
self.assertEqual(xp0.instructions[0][1].channel, DriveChannel(0))
self.assertEqual(xp0.instructions[0][1].pulse.amp, 0.15)
self.assertEqual(xp0.instructions[0][1].pulse.sigma, 40)
self.assertEqual(xp0.instructions[0][1].pulse.duration, 160)

# Check that xp3 is Play(Drag(160, 0.25, 40, 10), 3)
self.assertTrue(isinstance(xp3.instructions[0][1].pulse, Drag))
self.assertTrue(xp3.instructions[0][1].pulse.pulse_type == "Drag")
self.assertEqual(xp3.instructions[0][1].channel, DriveChannel(3))
self.assertEqual(xp3.instructions[0][1].pulse.amp, 0.25)
self.assertEqual(xp3.instructions[0][1].pulse.sigma, 40)
@@ -590,8 +590,8 @@ def test_replace_schedule(self):
# For completeness we check the schedule that comes out.
sched_cal = self.cals.get_schedule("xp", (3,))

self.assertTrue(isinstance(sched_cal.instructions[0][1].pulse, Drag))
self.assertTrue(isinstance(sched_cal.instructions[1][1].pulse, Drag))
self.assertTrue(sched_cal.instructions[0][1].pulse.pulse_type == "Drag")
self.assertTrue(sched_cal.instructions[1][1].pulse.pulse_type == "Drag")
self.assertEqual(sched_cal.instructions[0][1].pulse.amp, 0.125)
self.assertEqual(sched_cal.instructions[1][1].pulse.amp, 0.125)

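The assertions above moved from isinstance checks against Gaussian and Drag to comparing the pulse_type string, because Qiskit's parametric pulses are now constructed as SymbolicPulse objects and isinstance checks against the library classes emit deprecation warnings. A small sketch of the distinction, assuming a recent Qiskit with the symbolic pulse library:

from qiskit.pulse.library import Drag, Gaussian

gauss = Gaussian(duration=160, amp=0.15, sigma=40)
drag = Drag(duration=160, amp=0.25, sigma=40, beta=10)

# The constructors return SymbolicPulse objects, so the parametric family is
# recorded in the ``pulse_type`` attribute rather than in the Python type.
assert gauss.pulse_type == "Gaussian"
assert drag.pulse_type == "Drag"

# The pulse parameters remain accessible exactly as the tests expect.
assert gauss.amp == 0.15 and gauss.sigma == 40 and gauss.duration == 160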
15 changes: 8 additions & 7 deletions test/calibration/test_setup_library.py
@@ -25,7 +25,7 @@
from qiskit_experiments.framework.json import ExperimentEncoder, ExperimentDecoder


class TestLibrary(FixedFrequencyTransmon):
class MutableTestLibrary(FixedFrequencyTransmon):
"""A subclass designed for test_hash_warn.

This class ensures that FixedFrequencyTransmon is preserved if anything goes wrong
@@ -197,26 +197,27 @@ def test_hash_warn(self):
4. A warning is raised since the class definition has changed.
"""

lib1 = TestLibrary()
lib1 = MutableTestLibrary()
lib_data = json.dumps(lib1, cls=ExperimentEncoder)
lib2 = json.loads(lib_data, cls=ExperimentDecoder)

self.assertTrue(self._test_library_equivalence(lib1, lib2))

# stash the _build_schedules method to avoid breaking other tests
build_schedules = TestLibrary._build_schedules
build_schedules = MutableTestLibrary._build_schedules

def _my_build_schedules():
"""A dummy function to change the class behaviour."""
pass

# Change the schedule behaviour
TestLibrary._build_schedules = _my_build_schedules
MutableTestLibrary._build_schedules = _my_build_schedules

with self.assertWarns(UserWarning):
json.loads(lib_data, cls=ExperimentDecoder)

TestLibrary._build_schedules = build_schedules
try:
json.loads(lib_data, cls=ExperimentDecoder)
finally:
MutableTestLibrary._build_schedules = build_schedules

def _test_library_equivalence(self, lib1, lib2) -> bool:
"""Test if libraries are equivalent.
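The new version restores the patched _build_schedules inside a finally block, so the class is repaired even if the decode call raises. This is a general test-isolation pattern; the standalone sketch below uses invented names purely for illustration.

import unittest


class Widget:
    """A small stand-in class used only for this sketch."""

    def greet(self) -> str:
        return "hello"


class PatchRestoreTest(unittest.TestCase):
    """Illustrates restoring a monkeypatched class attribute in ``finally``."""

    def test_patched_behaviour(self):
        original = Widget.greet

        def fake_greet(self) -> str:
            return "patched"

        Widget.greet = fake_greet
        try:
            # Even if this assertion fails, the finally block puts the original
            # method back so later tests see an unmodified class.
            self.assertEqual(Widget().greet(), "patched")
        finally:
            Widget.greet = original


if __name__ == "__main__":
    unittest.main()

unittest.mock.patch.object achieves the same effect declaratively, but the explicit try/finally keeps the patch and its restoration visible in the test body.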
6 changes: 3 additions & 3 deletions test/framework/test_composite.py
@@ -20,10 +20,10 @@
from unittest import mock
from ddt import ddt, data

from qiskit import QuantumCircuit, Aer
from qiskit import QuantumCircuit
from qiskit.result import Result

from qiskit_aer import noise
from qiskit_aer import AerSimulator, noise

from qiskit_ibm_experiment import IBMExperimentService

@@ -915,7 +915,7 @@ def test_batch_transpile_options_integrated(self):
(`test_batch_transpiled_circuits` takes care of it) but that it's correctly called within
the entire flow of `BaseExperiment.run`.
"""
backend = Aer.get_backend("aer_simulator")
backend = AerSimulator()
noise_model = noise.NoiseModel()
noise_model.add_all_qubit_quantum_error(noise.depolarizing_error(0.5, 2), ["cx", "swap"])

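The qiskit.Aer re-export is deprecated (and later removed) in recent Qiskit releases, so the tests now import the simulator from qiskit_aer and construct it directly. A short usage sketch of the replacement, assuming qiskit-aer is installed; the circuit and error rate here are illustrative only.

from qiskit import QuantumCircuit, transpile
from qiskit_aer import AerSimulator, noise

# Directly instantiate the simulator instead of qiskit.Aer.get_backend("aer_simulator").
noise_model = noise.NoiseModel()
noise_model.add_all_qubit_quantum_error(noise.depolarizing_error(0.05, 2), ["cx"])
backend = AerSimulator(noise_model=noise_model)

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

counts = backend.run(transpile(qc, backend), shots=1024).result().get_counts()
print(counts)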
39 changes: 22 additions & 17 deletions test/library/quantum_volume/test_qv.py
@@ -13,13 +13,15 @@
"""
A Tester for the Quantum Volume experiment
"""
import warnings
from test.base import QiskitExperimentsTestCase
import json
import os
from uncertainties import UFloat

from qiskit.quantum_info.operators.predicates import matrix_equal
from qiskit_aer import AerSimulator

from qiskit import Aer
from qiskit_experiments.framework import ExperimentData
from qiskit_experiments.library import QuantumVolume
from qiskit_experiments.framework import ExperimentDecoder
@@ -102,19 +104,21 @@ def test_qv_sigma_decreasing(self):
Test that the sigma is decreasing after adding more trials
"""
num_of_qubits = 3
backend = Aer.get_backend("aer_simulator")
backend = AerSimulator()

qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED)
# set number of trials to a low number to make the test faster
qv_exp.set_experiment_options(trials=2)
expdata1 = qv_exp.run(backend)
self.assertExperimentDone(expdata1)
result_data1 = expdata1.analysis_results(0)
expdata2 = qv_exp.run(backend, analysis=None)
self.assertExperimentDone(expdata2)
expdata2.add_data(expdata1.data())
qv_exp.analysis.run(expdata2)
result_data2 = expdata2.analysis_results(0)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="Must use at least 100 trials")
qv_exp.set_experiment_options(trials=2)
expdata1 = qv_exp.run(backend)
self.assertExperimentDone(expdata1)
result_data1 = expdata1.analysis_results(0)
expdata2 = qv_exp.run(backend, analysis=None)
self.assertExperimentDone(expdata2)
expdata2.add_data(expdata1.data())
qv_exp.analysis.run(expdata2)
result_data2 = expdata2.analysis_results(0)

self.assertTrue(result_data1.extra["trials"] == 2, "number of trials is incorrect")
self.assertTrue(
@@ -139,14 +143,15 @@ def test_qv_failure_insufficient_trials(self):
insufficient_trials_data = json.load(json_file, cls=ExperimentDecoder)

num_of_qubits = 3
backend = Aer.get_backend("aer_simulator")
backend = AerSimulator()

qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED)
exp_data = ExperimentData(experiment=qv_exp, backend=backend)
exp_data.add_data(insufficient_trials_data)

qv_exp.analysis.run(exp_data)
qv_result = exp_data.analysis_results(1)
with self.assertWarns(UserWarning):
qv_exp.analysis.run(exp_data)
qv_result = exp_data.analysis_results(1)
self.assertTrue(
qv_result.extra["success"] is False and qv_result.value == 1,
"quantum volume is successful with less than 100 trials",
@@ -165,7 +170,7 @@ def test_qv_failure_insufficient_hop(self):
insufficient_hop_data = json.load(json_file, cls=ExperimentDecoder)

num_of_qubits = 4
backend = Aer.get_backend("aer_simulator")
backend = AerSimulator()

qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED)
exp_data = ExperimentData(experiment=qv_exp, backend=backend)
@@ -192,7 +197,7 @@ def test_qv_failure_insufficient_confidence(self):
insufficient_confidence_data = json.load(json_file, cls=ExperimentDecoder)

num_of_qubits = 4
backend = Aer.get_backend("aer_simulator")
backend = AerSimulator()

qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED)
exp_data = ExperimentData(experiment=qv_exp, backend=backend)
@@ -216,7 +221,7 @@ def test_qv_success(self):
successful_data = json.load(json_file, cls=ExperimentDecoder)

num_of_qubits = 4
backend = Aer.get_backend("aer_simulator")
backend = AerSimulator()

qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED)
exp_data = ExperimentData(experiment=qv_exp, backend=backend)
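Two warning-handling idioms are used in this file: warnings.catch_warnings() with a message filter to silence a warning the test triggers on purpose, and assertWarns to require that the analysis actually warns. A self-contained sketch of both idioms, with a toy function standing in for the quantum volume analysis:

import unittest
import warnings


def run_trials(trials: int) -> int:
    """Toy stand-in that warns when too few trials are requested."""
    if trials < 100:
        warnings.warn("Must use at least 100 trials for a reliable result", UserWarning)
    return trials


class WarningHandlingTest(unittest.TestCase):
    def test_expected_warning_is_silenced(self):
        # The low trial count is intentional here, so the warning is filtered
        # by message prefix to keep the test output clean.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="Must use at least 100 trials")
            self.assertEqual(run_trials(2), 2)

    def test_warning_is_required(self):
        # Conversely, assertWarns fails if the warning is *not* emitted.
        with self.assertWarns(UserWarning):
            run_trials(2)


if __name__ == "__main__":
    unittest.main()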
2 changes: 2 additions & 0 deletions test/library/tomography/test_process_tomography.py
@@ -587,6 +587,8 @@ def test_qpt_conditional_meas(self):
exp.analysis.set_options()
if fitter:
exp.analysis.set_options(fitter=fitter)
if "cvxpy" in fitter:
exp.analysis.set_options(fitter_options={"eps_abs": 3e-5})
fitdata = exp.analysis.run(expdata)
states = fitdata.analysis_results("state")
for state in states:
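For context on the added fitter_options line: eps_abs is an absolute convergence tolerance that is presumably forwarded by the cvxpy-based fitters to the underlying convex solver. A generic, hedged sketch of the same knob on a toy cvxpy problem (not the repository's fitter code), assuming cvxpy with the SCS solver is installed:

import cvxpy as cp
import numpy as np

# Tiny least-squares problem standing in for a tomography fit.
rng = np.random.default_rng(seed=0)
A = rng.normal(size=(8, 3))
b = rng.normal(size=8)
x = cp.Variable(3)
problem = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b)))

# Solver keyword arguments such as eps_abs are passed through to the backend
# solver (here SCS); a looser tolerance trades accuracy for speed and stability.
problem.solve(solver=cp.SCS, eps_abs=3e-5, eps_rel=3e-5)
print(x.value)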
2 changes: 2 additions & 0 deletions test/visualization/mock_plotter.py
@@ -278,5 +278,7 @@ def expected_supplementary_data_keys(cls) -> List[str]:
textbox_text: Text to draw in a textbox.
"""
return [
"report_text",
"supplementary_data_key",
"textbox_text",
]
5 changes: 4 additions & 1 deletion test/visualization/test_iq_plotter.py
@@ -13,6 +13,7 @@
Test IQ plotter.
"""

import warnings
from itertools import product
from test.base import QiskitExperimentsTestCase
from typing import Any, Dict, List, Tuple
@@ -126,7 +127,9 @@ def test_discriminator_trained(self, is_trained: bool):
plotter.set_supplementary_data(discriminator=discrim)

# Call figure() to generate discriminator image, if possible.
plotter.figure()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Discriminator was provided but")
plotter.figure()

# Assert that MockDiscriminator.predict() was/wasn't called, depending on whether it was trained
# or not.
2 changes: 1 addition & 1 deletion test/visualization/test_plotter.py
@@ -71,7 +71,7 @@ def test_supplementary_data_end_to_end(self):

expected_supplementary_data = {
"report_text": "Lorem ipsum",
"another_data_key": 3e9,
"supplementary_data_key": 3e9,
}

plotter.set_supplementary_data(**expected_supplementary_data)
6 changes: 5 additions & 1 deletion test/visualization/test_plotter_mpldrawer.py
@@ -101,7 +101,11 @@ def test_unit_scale(self, args):
def test_scale(self):
"""Test the xscale and yscale figure options."""
plotter = MockPlotter(MplDrawer(), plotting_enabled=True)
plotter.set_figure_options(xscale="quadratic", yscale="log")
plotter.set_figure_options(
xscale="quadratic",
yscale="log",
ylim=(0.1, 1.0),
)

plotter.figure()
ax = plotter.drawer._axis

Collaborator: Outside the scope of this PR, but we should probably handle the case of a log scale with a limit of 0 gracefully.

Collaborator (author): I decided to address this here (see aed5d30). I was able to find a fairly elegant solution by moving the setting of the axis scale earlier in the process (see the commit message). The tricky part is that the code wants to know the axis limits for the step that rescales the tick format (for example, adding a "k" and hiding the trailing 000), but matplotlib returns default limits of (0, 1) for axes that contain no data. If the code did not do that format rescaling, I would simply change it to not set limits when the user specified none, so that matplotlib could use its default limits.
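The review exchange above is about matplotlib behaviour rather than anything specific to this repository: an axis with no data reports default limits of (0, 1), and a log scale cannot include 0, which is why the test now passes an explicit positive ylim. A small sketch of that behaviour:

import matplotlib

matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt

fig, ax = plt.subplots()

# With no data plotted, matplotlib falls back to default data limits of (0, 1),
# so code that inspects ax.get_ylim() before drawing sees 0 at the lower end.
print(ax.get_ylim())  # (0.0, 1.0)

# Setting a log scale while the lower limit is 0 is the situation the reviewer
# flags, since log(0) is undefined.  Supplying an explicit positive lower bound,
# as the test now does via ylim=(0.1, 1.0), keeps the scale well defined.
ax.set_ylim(0.1, 1.0)
ax.set_yscale("log")
print(ax.get_ylim())  # (0.1, 1.0)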