Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

sync with main #28

Merged
merged 21 commits into from
Apr 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ fusion_blossom.egg-info/
*.DS_Store
.vscode
.metals
.idea

# visualizer files
/*.json
Expand Down
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,3 +15,7 @@ The first version of Python package
- [x] start writing a tutorial using `mdbook`
- [ ] publish `fusion_blossom` package to crate.io
- [ ] add `dangerous_pointer` feature that improve speed by ~20%

## 0.2.9 (2024.4.23)

- add `max_tree_size` option to implement a spectrum of decoders between UF and MWPM
72 changes: 58 additions & 14 deletions benchmark/util.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,18 @@
import json, subprocess, os, sys, tempfile, math, scipy
import json
import subprocess
import os
import sys
import tempfile
import math
import scipy


class Profile:
"""
read profile given filename; if provided `skip_begin_profiles`, then it will skip that number of profiles at the beginning,
by default 20 because usually the first few profiles are not stable yet
"""

def __init__(self, filename, skip_begin_profiles=20):
assert isinstance(filename, str)
with open(filename, "r", encoding="utf8") as f:
Expand All @@ -27,70 +34,88 @@ def __init__(self, filename, skip_begin_profiles=20):
skipped += 1
else:
self.entries.append(value)

def __repr__(self):
return f"Profile {{ partition_config: {self.partition_config}, entries: [...{len(self.entries)}] }}"

def sum_decoding_time(self):
decoding_time = 0
for entry in self.entries:
decoding_time += entry["events"]["decoded"]
return decoding_time

def decoding_time_relative_dev(self):
dev_sum = 0
avr_decoding_time = self.average_decoding_time()
for entry in self.entries:
dev_sum += (entry["events"]["decoded"] - avr_decoding_time) ** 2
return math.sqrt(dev_sum / len(self.entries)) / avr_decoding_time

def average_decoding_time(self):
return self.sum_decoding_time() / len(self.entries)

def sum_defect_num(self):
defect_num = 0
for entry in self.entries:
defect_num += entry["defect_num"]
return defect_num

def average_decoding_time_per_defect(self):
return self.sum_decoding_time() / self.sum_defect_num()

def sum_computation_cpu_seconds(self):
total_computation_cpu_seconds = 0
for entry in self.entries:
computation_cpu_seconds = 0
for event_time in entry["solver_profile"]["primal"]["event_time_vec"]:
computation_cpu_seconds += event_time["end"] - event_time["start"]
computation_cpu_seconds += event_time["end"] - \
event_time["start"]
total_computation_cpu_seconds += computation_cpu_seconds
return total_computation_cpu_seconds

def average_computation_cpu_seconds(self):
return self.sum_computation_cpu_seconds() / len(self.entries)

def sum_job_time(self, unit_index):
total_job_time = 0
for entry in self.entries:
event_time = entry["solver_profile"]["primal"]["event_time_vec"][unit_index]
total_job_time += event_time["end"] - event_time["start"]
return total_job_time

def average_job_time(self, unit_index):
return self.sum_job_time(unit_index) / len(self.entries)


class VertexRange:
def __init__(self, start, end):
self.range = (start, end)

def __repr__(self):
return f"[{self.range[0]}, {self.range[1]}]"

def length(self):
return self.range[1] - self.range[0]


class PartitionConfig:
def __init__(self, vertex_num):
self.vertex_num = vertex_num
self.partitions = [VertexRange(0, vertex_num)]
self.fusions = []
self.parents = [None]

def __repr__(self):
return f"PartitionConfig {{ vertex_num: {self.vertex_num}, partitions: {self.partitions}, fusions: {self.fusions} }}"

@staticmethod
def from_json(value):
vertex_num = value['vertex_num']
config = PartitionConfig(vertex_num)
config.partitions.clear()
for vertex_range in value['partitions']:
config.partitions.append(VertexRange(vertex_range[0], vertex_range[1]))
config.partitions.append(VertexRange(
vertex_range[0], vertex_range[1]))
for pair in value['fusions']:
config.fusions.append((pair[0], pair[1]))
assert len(config.partitions) == len(config.fusions) + 1
Expand All @@ -110,15 +135,17 @@ def from_json(value):
assert parents[unit_count - 1] is None
config.parents = parents
return config

def unit_depth(self, unit_index):
depth = 0
while self.parents[unit_index] is not None:
unit_index = self.parents[unit_index]
depth += 1
return depth

git_root_dir = subprocess.run("git rev-parse --show-toplevel", cwd=os.path.dirname(os.path.abspath(__file__))
, shell=True, check=True, capture_output=True).stdout.decode(sys.stdout.encoding).strip(" \r\n")

git_root_dir = subprocess.run("git rev-parse --show-toplevel", cwd=os.path.dirname(os.path.abspath(
__file__)), shell=True, check=True, capture_output=True).stdout.decode(sys.stdout.encoding).strip(" \r\n")
rust_dir = git_root_dir

FUSION_BLOSSOM_COMPILATION_DONE = False
Expand All @@ -127,24 +154,30 @@ def unit_depth(self, unit_index):
FUSION_BLOSSOM_ENABLE_UNSAFE_POINTER = False
if 'FUSION_BLOSSOM_ENABLE_UNSAFE_POINTER' in os.environ and os.environ["FUSION_BLOSSOM_ENABLE_UNSAFE_POINTER"] == "TRUE":
FUSION_BLOSSOM_ENABLE_UNSAFE_POINTER = True


def compile_code_if_necessary(additional_build_parameters=None):
global FUSION_BLOSSOM_COMPILATION_DONE
if FUSION_BLOSSOM_COMPILATION_DONE is False:
build_parameters = ["cargo", "build", "--release"]
if FUSION_BLOSSOM_ENABLE_UNSAFE_POINTER:
build_parameters += ["--features", "dangerous_pointer,u32_index,i32_weight,qecp_integrate"]
build_parameters += ["--features",
"dangerous_pointer,u32_index,i32_weight,qecp_integrate"]
if additional_build_parameters is not None:
build_parameters += additional_build_parameters
# print(build_parameters)
process = subprocess.Popen(build_parameters, universal_newlines=True, stdout=sys.stdout, stderr=sys.stderr, cwd=rust_dir)
process = subprocess.Popen(build_parameters, universal_newlines=True,
stdout=sys.stdout, stderr=sys.stderr, cwd=rust_dir)
process.wait()
assert process.returncode == 0, "compile has error"
FUSION_BLOSSOM_COMPILATION_DONE = True


def fusion_blossom_command():
fusion_path = os.path.join(rust_dir, "target", "release", "fusion_blossom")
return [fusion_path]


def fusion_blossom_benchmark_command(d=None, p=None, total_rounds=None, r=None, noisy_measurements=None, n=None):
assert d is not None
assert p is not None
Expand All @@ -159,22 +192,30 @@ def fusion_blossom_benchmark_command(d=None, p=None, total_rounds=None, r=None,
command += ["-n", f"{n}"]
return command

def fusion_blossom_qecp_generate_command(d, p, total_rounds, noisy_measurements):
command = fusion_blossom_command() + ["qecp", f"[{d}]", f"[{noisy_measurements}]", f"[{p}]", f"-m{total_rounds}"]

def fusion_blossom_qecp_generate_command(d, p, total_rounds, noisy_measurements, min_failed_cases=None):
if min_failed_cases is None:
min_failed_cases = total_rounds
command = fusion_blossom_command(
) + ["qecp", f"[{d}]", f"[{noisy_measurements}]", f"[{p}]", f"-m{total_rounds}", f"-e{min_failed_cases}"]
return command


def fusion_blossom_bin_command(bin):
fusion_path = os.path.join(rust_dir, "target", "release", bin)
command = [fusion_path]
return command


FUSION_BLOSSOM_ENABLE_HIGH_PRIORITY = False
if 'FUSION_BLOSSOM_ENABLE_HIGH_PRIORITY' in os.environ and os.environ["FUSION_BLOSSOM_ENABLE_HIGH_PRIORITY"] == "TRUE":
FUSION_BLOSSOM_ENABLE_HIGH_PRIORITY = True
"""
Note: usually changing the nice value will require root privilege, but rust toolchain may not be installed for root
In this case, change the default nice value for user: https://bencane.com/2013/09/30/changing-the-default-nice-value-for-a-user-or-group/
"""


def run_command_get_stdout(command, no_stdout=False, use_tmp_out=False, stderr_to_stdout=False):
compile_code_if_necessary()
env = os.environ.copy()
Expand All @@ -186,8 +227,8 @@ def run_command_get_stdout(command, no_stdout=False, use_tmp_out=False, stderr_t
stdout = out_file
if no_stdout:
stdout = sys.stdout
process = subprocess.Popen(command, universal_newlines=True, env=env, stdout=stdout, stderr=(stdout if stderr_to_stdout else sys.stderr)
, bufsize=100000000, preexec_fn=(lambda : os.nice(-10)) if FUSION_BLOSSOM_ENABLE_HIGH_PRIORITY else None)
process = subprocess.Popen(command, universal_newlines=True, env=env, stdout=stdout, stderr=(
stdout if stderr_to_stdout else sys.stderr), bufsize=100000000, preexec_fn=(lambda: os.nice(-10)) if FUSION_BLOSSOM_ENABLE_HIGH_PRIORITY else None)
stdout, _ = process.communicate()
if use_tmp_out:
out_file.flush()
Expand Down Expand Up @@ -215,8 +256,11 @@ def __init__(self, filename):
for line in lines:
line = line.strip("\r\n ")
self.data.append(line.split(" "))
def fit(self, x_column, y_column, x_func=lambda x:float(x), y_func=lambda y:float(y), starting_row=0, ending_row=None):
X = [x_func(line[x_column]) for line in self.data[starting_row:ending_row]]
Y = [y_func(line[y_column]) for line in self.data[starting_row:ending_row]]

def fit(self, x_column, y_column, x_func=lambda x: float(x), y_func=lambda y: float(y), starting_row=0, ending_row=None):
X = [x_func(line[x_column])
for line in self.data[starting_row:ending_row]]
Y = [y_func(line[y_column])
for line in self.data[starting_row:ending_row]]
slope, intercept, r, _, _ = scipy.stats.linregress(X, Y)
return slope, intercept, r
2 changes: 1 addition & 1 deletion rust-toolchain.toml
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly"
channel = "nightly-2023-11-16"
67 changes: 67 additions & 0 deletions scripts/graph_time_partition.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
"""
Partition the graph in time domain

It takes a decoding graph and position as input.

When used with QEC-Playground outputs, it will likely be able to partition without a problem
"""

import os
import fusion_blossom as fb

# example
# generated by `cargo run --release tool benchmark '[5]' '[5]' '[0.01]' -p10 -m10 --code-type rotated-planar-code --noise-model phenomenological --decoder fusion --decoder-config '{"max_tree_size":0}' --debug-print fusion-blossom-syndrome-file`
# then see output file at `tmp/fusion.syndromes`


EXAMPLE_SYNDROME_FILE = os.path.join(os.path.dirname(
os.path.abspath(__file__)), "graph_time_partition.syndromes")


def graph_time_partition(initializer: fb.SolverInitializer, positions: list[fb.VisualizePosition]) -> fb.PartitionConfig:
assert len(positions) > 0
partition = fb.PartitionConfig(initializer.vertex_num)
# first check if the time is monotonically increasing
last_t = positions[0].t
t_list = [last_t]
for position in positions:
assert position.t >= last_t, "t not monotonically increasing, vertex reordering must be performed before calling this"
if position.t != last_t:
t_list.append(position.t)
last_t = position.t
# pick the t value in the middle to split it
t_split = t_list[len(t_list) // 2]
# find the vertices indices
split_start_index = None
split_end_index = None
for vertex_index, position in enumerate(positions):
if split_start_index is None and position.t == t_split:
split_start_index = vertex_index
if position.t == t_split:
split_end_index = vertex_index + 1
assert split_start_index is not None
# partitions are found
partition.partitions = [
fb.VertexRange(0, split_start_index),
fb.VertexRange(split_end_index, len(positions)),
]
partition.fusions = [(0, 1)]
return partition


def read_syndrome_file(syndrome_filepath):
reader = fb.ErrorPatternReader(filename=syndrome_filepath)
return reader.get_initializer(), reader.get_positions()


def main():
# example
print(f"reading example from: {EXAMPLE_SYNDROME_FILE}")
initializer, positions = read_syndrome_file(EXAMPLE_SYNDROME_FILE)
partition = graph_time_partition(initializer, positions)
print(partition)
print(partition.to_json())


if __name__ == "__main__":
main()
Loading
Loading