# investment_euler_defaults.yaml
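#
# Default configuration for the investment Euler equation example. The layout
# follows the PyTorch Lightning CLI convention: top-level trainer / optimizer /
# lr_scheduler / model sections, with objects specified by class_path and
# init_args.
#
# A minimal sketch of how a config like this is typically consumed, as an
# assumption rather than the project's actual entry point (and
# InvestmentEulerModule is a hypothetical module name):
#
#   from pytorch_lightning.cli import LightningCLI
#
#   cli = LightningCLI(
#       InvestmentEulerModule,
#       parser_kwargs={"default_config_files": ["investment_euler_defaults.yaml"]},
#   )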
trainer:
  max_epochs: 100
  min_epochs: 0
  max_time: 00:00:15:00 # DD:HH:MM:SS, i.e., 15 minutes
  precision: 32
  num_sanity_val_steps: 0
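  # Log to Weights & Biases; offline keeps runs on disk rather than uploading.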
  logger:
    class_path: pytorch_lightning.loggers.WandbLogger
    init_args:
      offline: true # true avoids uploading during testing
      log_model: false # set to true to save the model at the end
      name: null # set a name or have one generated automatically
      project: symmetry_examples
      group: null # can group related runs
      tags:
        - basic_example
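  # Three callbacks: record the learning rate at each step, keep the single
  # best checkpoint by val_loss (plus the last one), and stop early when
  # val_loss stops improving for `patience` checks, becomes non-finite,
  # exceeds divergence_threshold, or falls to stopping_threshold.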
  callbacks:
    - class_path: pytorch_lightning.callbacks.LearningRateMonitor
      init_args:
        logging_interval: step
        log_momentum: false
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        filename: best
        monitor: val_loss
        verbose: false
        save_last: true
        save_top_k: 1
        save_weights_only: true
        mode: min
        auto_insert_metric_name: true
    - class_path: pytorch_lightning.callbacks.EarlyStopping
      init_args:
        monitor: val_loss
        min_delta: 0.0
        patience: 50
        mode: min
        check_finite: true
        divergence_threshold: 100000 # stop if val_loss grows past this
        stopping_threshold: 1.0e-6 # typically the binding stopping criterion
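# Adam with a fixed initial learning rate; the StepLR scheduler below decays it.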
optimizer:
  class_path: torch.optim.Adam
  init_args:
    lr: 1.0e-3
# Scheduler currently not tuned
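# With StepLR, the learning rate is 1.0e-3 * 0.8^floor(epoch / 50), so it
# decays once (to 8.0e-4) over the default 100 epochs.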
lr_scheduler:
  class_path: torch.optim.lr_scheduler.StepLR
  init_args:
    step_size: 50 # number of epochs between decays
    gamma: 0.8
# lr_scheduler:
#   class_path: torch.optim.lr_scheduler.ReduceLROnPlateau
#   init_args:
#     factor: 0.1
#     mode: min
#     patience: 5
model:
  # Model parameters
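  # (Assumption: these are the economic primitives of the investment model,
  # e.g., N the number of agents, beta the discount factor, sigma the shock
  # scale, and delta the depreciation rate; the authoritative definitions are
  # in the model code, not here.)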
  N: 128
  alpha_0: 1.0
  alpha_1: 1.0
  beta: 0.95
  gamma: 90.0
  sigma: 0.005
  delta: 0.05
  eta: 0.001
  nu: 1.0
  # Settings for output
  verbose: false
  hpo_objective_name: test_loss
  print_metrics: false
  save_metrics: false
  save_test_results: false
  # Settings for method
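  # (Training data comes from simulated trajectories of length T; the counts
  # below control how many trajectories are simulated for each split.
  # Assumption: omega_quadrature_nodes sets the number of quadrature nodes
  # used to approximate the expectation over shocks.)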
  omega_quadrature_nodes: 7
  normalize_shock_vector: true
  reset_trajectories_frequency: 0 # in epochs; 0 means never reset
  train_trajectories: 16
  val_trajectories: 8
  test_trajectories: 32
  batch_size: 16 # set to 0 to use the full dataset
  shuffle_training: true
  T: 63
  X_0_loc: 0.9
  X_0_scale: 0.05
  # Settings for neural networks
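  # (Assumption from the layer shapes below: phi embeds each scalar state into
  # a 4-dimensional feature vector, the embeddings are averaged across agents,
  # and rho maps that pooled 4-vector to the scalar policy output, i.e., a
  # permutation-invariant, deep-sets style network.)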
  # rho
  rho:
    class_path: econ_layers.layers.FlexibleSequential
    init_args:
      n_in: 4
      n_out: 1
      layers: 4
      hidden_dim: 256
      hidden_bias: true
      last_bias: true
  # phi
  phi:
    class_path: econ_layers.layers.FlexibleSequential
    init_args:
      n_in: 1
      n_out: 4
      layers: 1
      hidden_dim: 128
      hidden_bias: false
      last_bias: true