Sgd shots #39

Merged · 10 commits · Jun 1, 2024
75 changes: 47 additions & 28 deletions main.py
@@ -2,7 +2,7 @@
import json
import logging
import pathlib
from typing import Optional
from functools import partial

import numpy as np

@@ -18,12 +18,13 @@
# boostvqe's
from boostvqe.ansatze import build_circuit
from boostvqe.plotscripts import plot_gradients, plot_loss
from boostvqe.shotnoise import loss_shots
from boostvqe.training_utils import vqe_loss
from boostvqe.utils import (
DBI_D_MATRIX,
DBI_ENERGIES,
DBI_FLUCTUATIONS,
DBI_STEPS,
DELTA,
FLUCTUATION_FILE,
GRADS_FILE,
HAMILTONIAN_FILE,
@@ -36,7 +37,6 @@
results_dump,
rotate_h_with_vqe,
train_vqe,
vqe_loss,
)

DEFAULT_DELTA = 0.5
@@ -49,37 +49,32 @@ def main(args):
"""VQE training."""
# set backend and number of classical threads

accuracy = args.accuracy

if accuracy == 0.0:
accuracy = None

if args.platform is not None:
qibo.set_backend(backend=args.backend, platform=args.platform)
else:
qibo.set_backend(backend=args.backend)
args.platform = GlobalBackend().platform

qibo.set_threads(args.nthreads)
if args.optimizer_options is None:
opt_options = {}
else:
opt_options = json.loads(args.optimizer_options)

# setup the results folder
logging.info("Set VQE")
path = pathlib.Path(create_folder(generate_path(args)))

ham = getattr(hamiltonians, args.hamiltonian)(nqubits=args.nqubits)
target_energy = float(min(ham.eigenvalues()))
circ0 = build_circuit(nqubits=args.nqubits, nlayers=args.nlayers)
target_energy = np.real(np.min(np.asarray(ham.eigenvalues())))
circ0 = build_circuit(
nqubits=args.nqubits,
nlayers=args.nlayers,
)
circ = circ0.copy(deep=True)
backend = ham.backend
zero_state = backend.zero_state(args.nqubits)

# build hamiltonian and variational quantum circuit
if args.shot_train:
loss = lambda params, circ, _ham: loss_shots(
params, circ, _ham, delta=DEFAULT_DELTA, nshots=args.nshots
)
else:
loss = vqe_loss
loss = partial(vqe_loss, delta=DELTA, nshots=args.nshots)

# fix numpy seed to ensure replicability of the experiment
np.random.seed(int(args.seed))
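Note: the old `shot_train` lambda is replaced by `functools.partial`, which binds the shot options once and returns a picklable, inspectable callable. A minimal sketch of the pattern, using a hypothetical stand-in for `vqe_loss` (the real signature lives in `boostvqe.training_utils`):

```python
from functools import partial

# Hypothetical stand-in for boostvqe.training_utils.vqe_loss; the real
# function evaluates the circuit energy, with shots when nshots is set.
def fake_vqe_loss(params, circ, ham, delta=0.5, nshots=None):
    noise = 0.0 if nshots is None else delta / nshots ** 0.5
    return sum(p * p for p in params) + noise  # toy energy surrogate

# Bind the experiment-wide options once; unlike a lambda, the result is
# picklable and exposes .func / .keywords for inspection and logging.
loss = partial(fake_vqe_loss, delta=0.5, nshots=10000)
print(loss([0.1, 0.2], None, None))  # only the varying arguments remain
print(loss.keywords)                 # {'delta': 0.5, 'nshots': 10000}
```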
@@ -90,7 +85,7 @@ def main(args):
# dbi lists
boost_energies, boost_fluctuations_dbi, boost_steps, boost_d_matrix = {}, {}, {}, {}
# hamiltonian history
hamiltonians_history = []
fun_eval, hamiltonians_history = [], []
hamiltonians_history.append(ham.matrix)
new_hamiltonian = ham
args.nboost += 1
@@ -115,13 +110,17 @@
niterations=args.boost_frequency,
nmessage=1,
loss=loss,
accuracy=accuracy,
training_options=opt_options,
)
# append results to global lists
params_history[b] = np.array(partial_params_history)
loss_history[b] = np.array(partial_loss_history)
grads_history[b] = np.array(partial_grads_history)
fluctuations[b] = np.array(partial_fluctuations)
# this works with scipy.optimize.minimize only
if args.optimizer not in ["sgd", "cma"]:
fun_eval.append(int(partial_results[2].nfev))

# build new hamiltonian using trained VQE
if b != args.nboost - 1:
new_hamiltonian_matrix = rotate_h_with_vqe(hamiltonian=ham, vqe=vqe)
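Note: `partial_results[2]` is assumed here to be a `scipy.optimize.OptimizeResult`, whose `nfev` field counts objective evaluations; `sgd` and `cma` return other result types without that attribute, hence the guard. A quick sketch of the pattern:

```python
# Sketch: collecting per-boost function-evaluation counts from
# scipy.optimize.minimize results, as done above for non-sgd/cma runs.
import numpy as np
from scipy.optimize import minimize

fun_eval = []
res = minimize(lambda x: float(np.sum(x**2)), x0=np.ones(4), method="Powell")
fun_eval.append(int(res.nfev))  # total objective evaluations this boost
print(fun_eval, float(res.fun), bool(res.success))
```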
@@ -176,16 +175,34 @@ def main(args):
initial_parameters = np.zeros(len(initial_parameters))
circ.set_parameters(initial_parameters)

# reduce the learning rate after DBI has been applied
if "learning_rate" in opt_options:
opt_options["learning_rate"] *= args.decay_rate_lr

best_loss = min(np.min(array) for array in loss_history.values())

opt_results = partial_results[2]
# save final results
output_dict = vars(args)
output_dict.update(
{
"true_ground_energy": target_energy,
"accuracy": args.accuracy,
"feval": list(fun_eval),
"energy": float(vqe.hamiltonian.expectation(zero_state)),
"fluctuations": float(vqe.hamiltonian.energy_fluctuation(zero_state)),
"reached_accuracy": float(np.abs(target_energy - best_loss)),
}
)
# this works only with scipy.optimize.minimize
if args.optimizer not in ["sgd", "cma"]:
output_dict.update(
{
"best_loss": float(opt_results.fun),
"success": bool(opt_results.success),
"message": opt_results.message,
"feval": list(fun_eval),
}
)
np.savez(
path / LOSS_FILE,
**{json.dumps(key): np.array(value) for key, value in loss_history.items()},
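Note: with the new `--decay_rate_lr` flag, the SGD learning rate shrinks geometrically after each DBI boost, lr_b = lr_0 * decay_rate_lr^b. A toy trace of the schedule (values mirror `run.sh`, illustrative only):

```python
# Toy trace of the geometric learning-rate schedule applied between
# boosts in main.py: after each boost, learning_rate *= decay_rate_lr.
opt_options = {"optimizer": "Adam", "learning_rate": 0.1}
decay_rate_lr = 0.05  # DECAY_RATE_LR in run.sh
for b in range(3):
    print(f"boost {b}: lr = {opt_options['learning_rate']:.2e}")
    if "learning_rate" in opt_options:
        opt_options["learning_rate"] *= decay_rate_lr
```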
@@ -243,11 +260,19 @@ def main(args):
parser.add_argument(
"--optimizer", default="Powell", type=str, help="Optimizer used by VQE"
)
parser.add_argument(
"--optimizer_options",
type=str,
help="Options to customize the optimizer training",
)
parser.add_argument(
"--tol", default=TOL, type=float, help="Absolute precision to stop VQE training"
)
parser.add_argument(
"--accuracy", default=0.0, type=float, help="Threshold accuracy"
"--decay_rate_lr",
default=1.0,
type=float,
help="Decay factor of the learning rate if sgd is used",
)
parser.add_argument(
"--nqubits", default=6, type=int, help="Number of qubits for Hamiltonian / VQE"
@@ -303,15 +328,9 @@ def main(args):
default=SEED,
help="Random seed",
)
parser.add_argument(
"--shot_train",
action=argparse.BooleanOptionalAction,
help="If True the Hamiltonian expactation value is evaluate with the shots, otherwise with the state vector",
)
parser.add_argument(
"--nshots",
type=int,
default=10000,
help="number of shots",
)
args = parser.parse_args()
35 changes: 20 additions & 15 deletions run.sh
@@ -1,20 +1,25 @@
#!/bin/bash
#SBATCH --job-name=dbi8q1l
#SBATCH --output=dbi_8q1l.log
#SBATCH --job-name=adamch
#SBATCH --output=bp_regime.log

NQUBITS=8
NQUBITS=5
NLAYERS=1
DBI_STEPS=2
NBOOST=1
NSHOTS=10000
OPTIMIZER="BFGS"
TOL=0.00001
BOOST_FREQUENCY=10
ACC=0.1

python main.py --nqubits $NQUBITS --nlayers $NLAYERS --optimizer $OPTIMIZER \
--output_folder results/pure_vqe --backend numpy --tol $TOL \
--dbi_step $DBI_STEPS --seed 42 \
--boost_frequency $BOOST_FREQUENCY --accuracy $ACC --nboost $NBOOST
DBI_STEPS=0
NBOOST=0
BOOST_FREQUENCY=100

NSHOTS=1000
SEED=42


OPTIMIZER="sgd"
BACKEND="tensorflow"
OPTIMIZER_OPTIONS="{ \"optimizer\": \"Adam\", \"learning_rate\": 0.1, \"nmessage\": 1, \"nepochs\": $BOOST_FREQUENCY }"
DECAY_RATE_LR=0.05

python main.py --nqubits $NQUBITS --nlayers $NLAYERS --optimizer $OPTIMIZER \
--output_folder results/debugging_decay --backend $BACKEND \
--dbi_step $DBI_STEPS --seed $SEED \
--boost_frequency $BOOST_FREQUENCY --nboost $NBOOST \
--optimizer_options "$OPTIMIZER_OPTIONS" \
--decay_rate_lr $DECAY_RATE_LR
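Note: the escaped `OPTIMIZER_OPTIONS` string is passed straight to `--optimizer_options` and decoded in `main.py` via `json.loads`. A quick way to sanity-check the quoting before submitting a job (Python, assuming the same JSON body):

```python
# Verify that the shell-escaped OPTIMIZER_OPTIONS string decodes to the
# dict main.py builds with json.loads(args.optimizer_options).
import json

raw = '{ "optimizer": "Adam", "learning_rate": 0.1, "nmessage": 1, "nepochs": 100 }'
opt_options = json.loads(raw)
print(opt_options["optimizer"], opt_options["learning_rate"])  # Adam 0.1
```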
24 changes: 24 additions & 0 deletions run_sgd.sh
@@ -0,0 +1,24 @@
#!/bin/bash
#SBATCH --job-name=boostvqe
#SBATCH --output=boostvqe.log

NQUBITS=4
NLAYERS=2

DBI_STEPS=2
NBOOST=2
BOOST_FREQUENCY=10

NSHOTS=10000
TOL=1e-8
ACC=0.5

OPTIMIZER="sgd"
BACKEND="tensorflow"
OPTIMIZER_OPTIONS="{ \"optimizer\": \"Adagrad\", \"learning_rate\": 0.1, \"nmessage\": 1, \"nepochs\": $BOOST_FREQUENCY }"

python main.py --nqubits $NQUBITS --nlayers $NLAYERS --optimizer $OPTIMIZER \
--output_folder results/debugging --backend $BACKEND --tol $TOL \
--dbi_step $DBI_STEPS --seed 42 \
--boost_frequency $BOOST_FREQUENCY --nboost $NBOOST \
--optimizer_options "$OPTIMIZER_OPTIONS"
23 changes: 23 additions & 0 deletions run_sgd_exact.sh
@@ -0,0 +1,23 @@
#!/bin/bash
#SBATCH --job-name=boostvqe
#SBATCH --output=boostvqe.log

NQUBITS=7
NLAYERS=3

DBI_STEPS=0
NBOOST=0
BOOST_FREQUENCY=1000

NSHOTS=1000
TOL=1e-8

OPTIMIZER="sgd"
BACKEND="tensorflow"
OPTIMIZER_OPTIONS="{ \"optimizer\": \"Adagrad\", \"learning_rate\": 0.05, \"nmessage\": 1, \"nepochs\": $BOOST_FREQUENCY }"

python main.py --nqubits $NQUBITS --nlayers $NLAYERS --optimizer $OPTIMIZER \
--output_folder results/sgd_exact --backend $BACKEND --tol $TOL \
--dbi_step $DBI_STEPS --seed 42 \
--boost_frequency $BOOST_FREQUENCY --nboost $NBOOST \
--optimizer_options "$OPTIMIZER_OPTIONS"
23 changes: 23 additions & 0 deletions run_sgd_hybrid.sh
@@ -0,0 +1,23 @@
#!/bin/bash
#SBATCH --job-name=boostvqe
#SBATCH --output=boostvqe.log

NQUBITS=7
NLAYERS=3

DBI_STEPS=2
NBOOST=1
BOOST_FREQUENCY=100

NSHOTS=1000
TOL=1e-8

OPTIMIZER="sgd"
BACKEND="tensorflow"
OPTIMIZER_OPTIONS="{ \"optimizer\": \"Adagrad\", \"learning_rate\": 0.05, \"nmessage\": 1, \"nepochs\": $BOOST_FREQUENCY }"

python main.py --nqubits $NQUBITS --nlayers $NLAYERS --optimizer $OPTIMIZER \
--output_folder results/sgd_exact_hybrid --backend $BACKEND --tol $TOL \
--dbi_step $DBI_STEPS --seed 42 \
--boost_frequency $BOOST_FREQUENCY --nboost $NBOOST \
--optimizer_options "$OPTIMIZER_OPTIONS"