# config.yaml
TCP:
  host: localhost
  port: 54333
  buffer_size: 8388608  # 8 MB
  timeout: 10  # seconds
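# Note (assumption): the TCP block presumably configures the socket link to the
# acquisition/measurement server; buffer_size is in bytes (8388608 B = 8 * 1024 * 1024 = 8 MiB).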
logging:
  level: INFO
  directory: null
  formatter: '%(asctime)s | %(name)s | %(levelname)-8s | %(message)s'
plots:
  posterior_map_shape: [50, 50]  # full
scanning:
  max_points: 999  # 0 for unlimited
  duration: 7200  # in seconds
  train_at: [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
  train_every: 0  # 0 for no training
  normalize_values: training  # one of: init, training, always, fixed, never
  fixed_normalization: [1.0, 1.0]  # normalization factors for each task
  merge_unique_positions: true
  base_error: 0.01  # 1% error
  initial_points: hexgrid_2D_13  # grid_3D_27
  tasks:  # list of tasks used to reduce the data
    # laplace_filter:
    #   function: laplace_filter
    #   params:
    #     sigma: 5
    #     norm: False
    #     roi: [[145, 190], [10, 140]]
    curvature:
      function: curvature
      params:
        bw: 5
        c1: 0.001
        c2: 0.001
        w: 1
        roi: [[145, 190], [20, 130]]
    # contrast_noise_ratio:
    #   function: contrast_noise_ratio
    #   params:
    #     signal_roi: [[145, 190], [10, 140]]
    #     bg_roi: [[200, -1], [50, 100]]
    mean:  # name of the task, found in gp/tasks.py
      function: mean  # name of the function
      params:
        roi: [[145, 190], [20, 130]]
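# Note (assumption): each task presumably reduces a measured spectrum to a single scalar
# by applying the named function from gp/tasks.py within its roi (index ranges per axis),
# so the GP models one output value per task.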
acquisition_function:
  function: acquisition_function_nd  # name of the function, found in gp/acquisition_functions.py
  params:
    a: 1  # exploration vs. exploitation, i.e. variance weight
    weights: null
    norm: 1  # normalization factor for the acquisition function
    c: 0  # covariance weight
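# Note (assumption): judging by the parameter comments, a weights the posterior variance
# (larger a favours exploration) and c weights a covariance term; the exact functional
# form is defined by acquisition_function_nd in gp/acquisition_functions.py.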
cost_function:
  function: cost_per_axis  # name of the function, found in gp/cost_functions.py
  params:
    speed: [300, 300]  # um/s; 0 means infinite speed
    weight: [100, 100]  # 0.01 # weight of the cost function
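# Note (assumption): with a per-axis speed of 300 um/s, a 300 um move along one axis
# presumably costs 1 s of travel time, which weight then scales when candidate points
# are compared; see cost_per_axis in gp/cost_functions.py for the actual form.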
gp:
  optimizer:
    output_space_dimension: 1  # just leave it at 1
    fvgp:
      init_hyperparameters: [  # initial hyperparameters for the GP; should be 2 + parameter dimensions
        1_000_000,  # overall amplitude
        # 100,
        100,
        100,
        0.5  # lengthscale along tasks
      ]
      compute_device: cpu  # cpu or cuda:0
      gp_kernel_function: null  # default is the Matern kernel
      gp_mean_function: null
      use_inv: false
      ram_economy: true  # shouldn't really do anything without use_inv
    training:
      hyperparameter_bounds: [
        [1_000_000, 1_000_000_000],
        # [1, 50_000],
        [10, 50_000],
        [10, 50_000],
        [0.001, 5000]
      ]
      pop_size: 20  # population size for global optimization
      max_iter: 2  # maximum number of iterations for global optimization
      tolerance: 0.000001  # tolerance for global optimization
    ask:
      n: 1  # number of points to ask per iteration
      bounds: null  # bounds of the input space
      method: global  # global or local
      pop_size: 20  # population size for global optimization
      max_iter: 10  # maximum number of iterations for global optimization
      tol: 0.000001  # tolerance for global optimization
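# Note (assumption): the four hyperparameter_bounds entries appear to correspond
# positionally to init_hyperparameters (overall amplitude, two positional lengthscales,
# task lengthscale); the commented-out bound matches the commented-out initial value above.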
simulator:
  source_file: D:\data\ARPESdatabase\maps\SGM4\MBT_C_62  # Z006_35_0.h5 # Cleave3_52
  save_dir: data
  save_to_file: true
  simulate_times: true
  dwell_time: 1  # seconds
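# Note (assumption): in simulator mode, data are presumably replayed from source_file
# instead of a live measurement, with simulate_times adding a dwell_time wait per point
# to mimic real acquisition timing.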