# defaults.ini (forked from Harmonai-org/sample-generator)
[DEFAULTS]
# Name of the run
name = dd-finetune
# Training data directory
training_dir = ''
# The batch size
batch_size = 8
# Number of GPUs to use for training
num_gpus = 1
# Number of nodes to use for training
num_nodes = 1
# Number of CPU workers for the DataLoader
num_workers = 2
# Number of audio samples for the training input
sample_size = 65536
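# (65536 samples at the 48 kHz sample_rate set below is roughly 1.37 seconds of audio)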
# Number of steps between demos
demo_every = 1000
# Number of denoising steps for the demos
demo_steps = 250
# Number of demos to create
num_demos = 16
# The EMA decay
ema_decay = 0.995
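# (presumably the decay rate for an exponential moving average copy of the model
# weights; values closer to 1.0 average over a longer window of training steps)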
# The random seed
seed = 42
# Batches for gradient accumulation
accum_batches = 4
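# (gradients are accumulated over this many batches before each optimizer step, so
# the effective batch size is presumably batch_size * accum_batches = 8 * 4 = 32 per GPU)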
# The sample rate of the audio
sample_rate = 48000
# Number of steps between checkpoints
checkpoint_every = 10000
# Unused, but required by the model code
latent_dim = 0
# If True, the training data is kept in RAM
cache_training_data = False
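# (caching is only practical when the whole dataset fits in system memory)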
# Whether to randomly crop the input audio (for augmentation)
random_crop = True
# Checkpoint file to (re)start training from
ckpt_path = ''
# Path to output the model checkpoints
save_path = ''
# The multiprocessing start method ('fork', 'forkserver', or 'spawn')
start_method = 'spawn'
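# ('spawn' is generally the safer choice when DataLoader workers touch CUDA,
# since CUDA contexts do not survive a 'fork')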