-
Notifications
You must be signed in to change notification settings - Fork 9
/
defaults.ini
110 lines (71 loc) · 2.04 KB
/
defaults.ini
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
[DEFAULTS]
# name of the run
name = test-dvae
# training data directory
training_dir = /home/ubuntu/datasets/SignalTrain_LA2A_Dataset_1.1
# the batch size
batch_size = 8
# number of GPUs to use for training
num_gpus = 1
# number of nodes to use for training
num_nodes = 1
# number of CPU workers for the DataLoader
num_workers = 8
# Number of audio samples for the training input
sample_size = 65536
# Number of epochs between demos
demo_every = 20
# Number of denoising steps for the demos
demo_steps = 250
# Number of demos to create
num_demos = 16
# the random seed
seed = 42
# Batches for gradient accumulation
accum_batches = 1
# The sample rate of the audio
sample_rate = 48000
# Number of steps between checkpoints
checkpoint_every = 10000
# the EMA decay
ema_decay = 0.995
# dimensionality of the latent space
latent_dim = 32
# number of entries in the quantizer codebook
codebook_size = 1024
# number of quantizers
num_quantizers = 0
# number of residual quantizers (same as above, depending on sfile)
num_residuals = 0
# number of heads for the memcodes
num_heads = 8
# If true training data is kept in RAM
cache_training_data = False
# number of sub-bands for the PQMF filter
pqmf_bands = 1
# randomly crop input audio? (for augmentation)
random_crop = True
# normalize input audio?
norm_inputs = False
# Number of steps before freezing encoder in dVAE
warmup_steps = 100000
# for jukebox embeddings. 0 (high res), 1 (med), or 2 (low res)
jukebox_layer = 0
# If true, use MFCCs instead of spectrograms for training
use_mfcc = True
# checkpoint file to (re)start training from
ckpt_path = ''
# configuration model specifying model hyperparameters
model_config = ''
#the multiprocessing start method ['fork', 'forkserver', 'spawn']
start_method = 'spawn'
global_latent_dim = 128
preprocessed_dir = ''
depth = 8
diffusion_kernel_size = 5
upsample_ratio = 4
patch_size = 1
# directory to save the checkpoints in
save_dir = ''
# Name of the run for automatic restarts
run_name = ''