Track gradients #15

Merged · 6 commits · Feb 9, 2024
13 changes: 10 additions & 3 deletions main.py
@@ -16,11 +16,12 @@

# boostvqe's
from ansatze import build_circuit
from plotscripts import plot_loss
from plotscripts import plot_gradients, plot_loss
from utils import (
DBI_ENERGIES,
DBI_FLUCTUATIONS,
FLUCTUATION_FILE,
GRADS_FILE,
HAMILTONIAN_FILE,
LOSS_FILE,
SEED,
@@ -65,7 +66,7 @@ def main(args):
initial_parameters = np.random.randn(len(circ.get_parameters()))

# vqe lists
params_history, loss_history, fluctuations = {}, {}, {}
params_history, loss_history, grads_history, fluctuations = {}, {}, {}, {}
# dbi lists
boost_energies, boost_fluctuations_dbi = {}, {}
# hamiltonian history
@@ -82,6 +83,7 @@ def main(args):
partial_results,
partial_params_history,
partial_loss_history,
partial_grads_history,
partial_fluctuations,
partial_hamiltonian_history,
vqe,
@@ -94,10 +96,10 @@
niterations=args.boost_frequency,
nmessage=1,
)
print(hamiltonians_history)
# append results to global lists
params_history[b] = np.array(partial_params_history)
loss_history[b] = np.array(partial_loss_history)
grads_history[b] = np.array(partial_grads_history)
fluctuations[b] = np.array(partial_fluctuations)
hamiltonians_history.extend(partial_hamiltonian_history)
# build new hamiltonian using trained VQE
@@ -149,6 +151,10 @@ def main(args):
path / LOSS_FILE,
**{json.dumps(key): np.array(value) for key, value in loss_history.items()},
)
np.savez(
path / GRADS_FILE,
**{json.dumps(key): np.array(value) for key, value in grads_history.items()},
)
np.savez(
path / FLUCTUATION_FILE,
**{json.dumps(key): np.array(value) for key, value in fluctuations.items()},
@@ -171,6 +177,7 @@ def main(args):
path=path,
title="Energy history",
)
plot_gradients(path=path, title="Grads history")


if __name__ == "__main__":
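
Note: the gradients are saved with the same pattern as the loss and fluctuation archives, i.e. one np.savez file whose keys are the json-encoded boost indices. A minimal sketch of reading the archive back (the "results" folder name is hypothetical; GRADS_FILE is "gradients" as defined in utils.py, and np.savez appends the ".npz" suffix):

# Minimal sketch: load the gradients archive written by main.py above.
import json
import pathlib

import numpy as np

path = pathlib.Path("results")  # hypothetical output folder
grads_history = dict(np.load(path / "gradients.npz"))
for key, grads in grads_history.items():
    boost_index = json.loads(key)    # keys were written with json.dumps
    print(boost_index, grads.shape)  # one row of gradients per iteration
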
74 changes: 68 additions & 6 deletions plotscripts.py
@@ -1,16 +1,14 @@
import json
import os.path
import pathlib

import matplotlib.pyplot as plt
import numpy as np
from qibo.backends import GlobalBackend

from utils import (
DBI_ENERGIES,
DBI_FLUCTUATIONS,
FLUCTUATION_FILE,
FLUCTUATION_FILE2,
GRADS_FILE,
LOSS_FILE,
LOSS_FILE2,
OPTIMIZATION_FILE,
@@ -92,7 +90,7 @@ def plot_loss(
len(dbi_energies[str(i)]) + len(loss_vqe[str(i)]) + start - 1,
),
dbi_energies[str(i)],
color=GREEN,
color=RED,
lw=1.5,
label="DBI",
)
@@ -110,7 +108,7 @@
),
dbi_energies[str(i)] - dbi_fluctuations[str(i)],
dbi_energies[str(i)] + dbi_fluctuations[str(i)],
color=GREEN,
color=RED,
alpha=0.4,
)

@@ -120,11 +118,75 @@
- 2 * config["nboost"]
+ 1
)
plt.hlines(target_energy, 1, max_length, color="red", lw=1, label="Target energy")
plt.hlines(
target_energy,
1,
max_length,
color="black",
lw=1,
label="Target energy",
ls="--",
)
plt.xlabel("Iterations")
plt.ylabel("Loss")
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
if save:
plt.savefig(f"{path}/loss_{title}.pdf", bbox_inches="tight")


def plot_gradients(
path,
title="",
save=True,
width=0.5,
):
"""
Plot gradients magnitude during the training.
Each value is the average over the parameters of the absolute value of the
derivative of the loss function with respect to the parameter.
"""
grads = dict(np.load(path / f"{GRADS_FILE + '.npz'}"))
config = json.loads((path / OPTIMIZATION_FILE).read_text())

ave_grads = []

for epoch in grads:
for grads_list in grads[epoch]:
ave_grads.append(np.mean(np.abs(grads_list)))

plt.figure(figsize=(10 * width, 10 * width * 6 / 8))
plt.title(title)
plt.plot(
np.arange(1, len(ave_grads) + 1, 1),
ave_grads,
color=BLUE,
lw=1.5,
label=r"$\langle |\partial_{\theta_i}\text{L}| \rangle_i$",
)
for b in range(config["nboost"] - 1):
boost_x = config["boost_frequency"] * (b + 1)
if b == 0:
plt.plot(
(boost_x, boost_x + 1),
(ave_grads[boost_x - 1], ave_grads[boost_x]),
color=RED,
lw=1.5,
alpha=1,
label="Step after DBI",
)
else:
plt.plot(
(boost_x, boost_x + 1),
(ave_grads[boost_x - 1], ave_grads[boost_x]),
color=RED,
lw=1.6,
alpha=1,
)
plt.yscale("log")
plt.xlabel("Iterations")
plt.ylabel("Gradients magnitude")
plt.legend()
if save:
plt.savefig(f"{path}/grads_{title}.pdf", bbox_inches="tight")
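
For reference, each point on the new gradients curve is the mean absolute partial derivative over all variational parameters, matching np.mean(np.abs(grads_list)) in plot_gradients above. A minimal sketch on a hypothetical gradient vector:

# Minimal sketch of one plotted point: <|dL/d(theta_i)|>_i for a single iteration.
import numpy as np

grads_list = np.array([0.12, -0.03, 0.45, -0.20])  # hypothetical gradient vector
point = np.mean(np.abs(grads_list))
print(point)  # 0.2
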
2 changes: 1 addition & 1 deletion run.sh
@@ -1,3 +1,3 @@
#!/bin/bash

python main.py --nqubits 6 --nlayers 1 --optimizer Powell --output_folder results --backend numpy --boost_frequency 10 --nboost 2 --dbi_steps 3
python main.py --nqubits 11 --nlayers 1 --optimizer Powell --output_folder results --backend numpy --boost_frequency 10 --nboost 2 --dbi_steps 2
26 changes: 24 additions & 2 deletions utils.py
@@ -5,12 +5,15 @@
import numpy as np
from qibo.models.variational import VQE

from ansatze import compute_gradients

OPTIMIZATION_FILE = "optimization_results.json"
PARAMS_FILE = "parameters_history.npy"
PLOT_FILE = "energy.pdf"
ROOT_FOLDER = "results"
FLUCTUATION_FILE = "fluctuations"
LOSS_FILE = "energies"
GRADS_FILE = "gradients"
HAMILTONIAN_FILE = "hamiltonian_matrix.npz"
FLUCTUATION_FILE2 = "fluctuations2"
LOSS_FILE2 = "energies2"
@@ -59,6 +62,7 @@ def loss(params, circuit, hamiltonian):
circuit.set_parameters(params)
result = hamiltonian.backend.execute_circuit(circuit)
final_state = result.state()

return hamiltonian.expectation(final_state), hamiltonian.energy_fluctuation(
final_state
)
@@ -68,7 +72,13 @@ def train_vqe(
circ, ham, optimizer, initial_parameters, tol, niterations=None, nmessage=1
):
"""Helper function which trains the VQE according to `circ` and `ham`."""
params_history, loss_list, fluctuations, hamiltonian_history = [], [], [], []
params_history, loss_list, fluctuations, hamiltonian_history, grads_history = (
[],
[],
[],
[],
[],
)
circ.set_parameters(initial_parameters)

vqe = VQE(
@@ -83,6 +93,7 @@ def callbacks(
loss_fluctuation=fluctuations,
params_history=params_history,
hamiltonian_history=hamiltonian_history,
grads_history=grads_history,
):
"""
Callback function that updates the energy, the energy fluctuations and
@@ -94,6 +105,9 @@
loss_fluctuation.append(float(energy_fluctuation))
params_history.append(params)
hamiltonian_history.append(rotate_h_with_vqe(vqe.hamiltonian, vqe))
grads_history.append(
compute_gradients(parameters=params, circuit=circ, hamiltonian=ham)
)

iteration_count = len(loss_list)

@@ -118,7 +132,15 @@ def callbacks(
except StopIteration as e:
logging.info(str(e))

return results, params_history, loss_list, fluctuations, hamiltonian_history, vqe
return (
results,
params_history,
loss_list,
grads_history,
fluctuations,
hamiltonian_history,
vqe,
)


def rotate_h_with_vqe(hamiltonian, vqe):
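
compute_gradients is imported from ansatze but its implementation is not part of this diff. A hedged sketch with the same call signature, built only on the circuit/hamiltonian calls used in loss() above and using central finite differences; the function name compute_gradients_fd and the step size eps are assumptions, not the repository's code:

# Hedged sketch only: a finite-difference stand-in for ansatze.compute_gradients.
import numpy as np


def compute_gradients_fd(parameters, circuit, hamiltonian, eps=1e-4):
    """Return d<H>/d(theta_i) for every variational parameter (central differences)."""

    def energy(params):
        circuit.set_parameters(params)
        final_state = hamiltonian.backend.execute_circuit(circuit).state()
        return hamiltonian.expectation(final_state)

    parameters = np.asarray(parameters, dtype=float)
    grads = np.zeros_like(parameters)
    for i in range(len(parameters)):
        shift = np.zeros_like(parameters)
        shift[i] = eps
        grads[i] = (energy(parameters + shift) - energy(parameters - shift)) / (2 * eps)
    return grads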