losses.py (forked from louaaron/Score-Entropy-Discrete-Diffusion)
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np

import graph_lib
from model import utils as mutils


def get_loss_fn(noise, graph, train, sampling_eps=1e-3, lv=False):
    """Builds the score-entropy loss for a given noise schedule and graph."""

    def loss_fn(model, batch, cond=None, t=None, perturbed_batch=None):
        """
        Batch shape: [B, L] int tokens. The state dimension D is given by `graph`.
        """

        if t is None:
            if lv:
                raise NotImplementedError("Low-variance time sampling (lv=True) is not implemented yet.")
            else:
                # Sample t uniformly in [sampling_eps, 1) to avoid the singular t=0 endpoint.
                t = (1 - sampling_eps) * torch.rand(batch.shape[0], device=batch.device) + sampling_eps

        # Total noise sigma(t) and its time derivative dsigma(t), both of shape [B].
        sigma, dsigma = noise(t)

        if perturbed_batch is None:
            # Sample x_t from the forward transition kernel given the clean batch and sigma.
            perturbed_batch = graph.sample_transition(batch, sigma[:, None])

        log_score_fn = mutils.get_score_fn(model, train=train, sampling=False)
        log_score = log_score_fn(perturbed_batch, sigma)

        # Per-position score-entropy loss, weighted by dsigma and summed over the sequence.
        loss = graph.score_entropy(log_score, sigma[:, None], perturbed_batch, batch)
        loss = (dsigma[:, None] * loss).sum(dim=-1)

        return loss

    return loss_fn
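
# Usage sketch (illustrative, not part of this file): `noise` and `graph` are
# assumed to come from this repo's noise_lib / graph_lib and to expose the
# interface used above, i.e. noise(t) -> (sigma, dsigma) plus
# graph.sample_transition and graph.score_entropy.
#
#   loss_fn = get_loss_fn(noise, graph, train=True)
#   batch = torch.randint(0, vocab_size, (B, L), device=device)  # [B, L] int tokens
#   loss = loss_fn(model, batch)   # per-sequence loss, shape [B]
#   loss.mean().backward()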


def get_optimizer(config, params):
    """Instantiates an optimizer for `params` according to `config.optim`."""
    if config.optim.optimizer == 'Adam':
        optimizer = optim.Adam(params, lr=config.optim.lr, betas=(config.optim.beta1, config.optim.beta2),
                               eps=config.optim.eps, weight_decay=config.optim.weight_decay)
    elif config.optim.optimizer == 'AdamW':
        optimizer = optim.AdamW(params, lr=config.optim.lr, betas=(config.optim.beta1, config.optim.beta2),
                                eps=config.optim.eps, weight_decay=config.optim.weight_decay)
    else:
        raise NotImplementedError(
            f'Optimizer {config.optim.optimizer} not supported yet!')

    return optimizer
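
# Example (sketch): a minimal config namespace carrying the fields read by
# get_optimizer and optimization_manager below. The concrete values are
# illustrative placeholders, not the defaults shipped with this repo.
#
#   from types import SimpleNamespace
#   import torch.nn as nn
#
#   config = SimpleNamespace(optim=SimpleNamespace(
#       optimizer='AdamW', lr=3e-4, beta1=0.9, beta2=0.999, eps=1e-8,
#       weight_decay=0.0, warmup=1000, grad_clip=1.0,
#   ))
#   optimizer = get_optimizer(config, nn.Linear(8, 8).parameters())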


def optimization_manager(config):
    """Returns an optimize_fn based on `config`."""

    def optimize_fn(optimizer,
                    scaler,
                    params,
                    step,
                    lr=config.optim.lr,
                    warmup=config.optim.warmup,
                    grad_clip=config.optim.grad_clip):
        """Optimizes with warmup and gradient clipping (disabled if negative)."""
        # Unscale gradients first so the clip threshold applies to the true gradients.
        scaler.unscale_(optimizer)

        if warmup > 0:
            # Linear learning-rate warmup: ramp from lr/warmup up to lr over `warmup` steps.
            for g in optimizer.param_groups:
                g['lr'] = lr * np.minimum(step / warmup, 1.0)
        if grad_clip >= 0:
            torch.nn.utils.clip_grad_norm_(params, max_norm=grad_clip)

        scaler.step(optimizer)
        scaler.update()

    return optimize_fn
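
# Warmup sketch (hypothetical numbers): with lr=3e-4 and warmup=1000, the
# schedule above yields an effective learning rate of
#   step 100     -> 3e-4 * 100/1000 = 3e-5
#   step 500     -> 3e-4 * 500/1000 = 1.5e-4
#   step >= 1000 -> 3e-4 (full rate; gradient clipping is independent of warmup)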


def get_step_fn(noise, graph, train, optimize_fn, accum):
    loss_fn = get_loss_fn(noise, graph, train)

    # Running counters for gradient accumulation across `accum` micro-batches.
    accum_iter = 0
    total_loss = 0

    def step_fn(state, batch, cond=None):
        nonlocal accum_iter
        nonlocal total_loss

        model = state['model']

        if train:
            optimizer = state['optimizer']
            scaler = state['scaler']
            loss = loss_fn(model, batch, cond=cond).mean() / accum

            scaler.scale(loss).backward()

            accum_iter += 1
            total_loss += loss.detach()
            if accum_iter == accum:
                accum_iter = 0

                state['step'] += 1
                optimize_fn(optimizer, scaler, model.parameters(), step=state['step'])
                state['ema'].update(model.parameters())
                optimizer.zero_grad()

                # Report the loss accumulated over the full effective batch.
                loss = total_loss
                total_loss = 0
        else:
            # Evaluate with EMA weights, then restore the online parameters.
            with torch.no_grad():
                ema = state['ema']
                ema.store(model.parameters())
                ema.copy_to(model.parameters())
                loss = loss_fn(model, batch, cond=cond).mean()
                ema.restore(model.parameters())

        return loss

    return step_fn
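
# Wiring sketch (assumptions, not defined in this file): step_fn expects a
# `state` dict with the keys it reads above, roughly
#
#   state = {
#       'model': model,                      # score model
#       'optimizer': get_optimizer(config, model.parameters()),
#       'scaler': torch.cuda.amp.GradScaler(),
#       'ema': ema,                          # EMA helper with update/store/copy_to/restore
#       'step': 0,
#   }
#   step_fn = get_step_fn(noise, graph, train=True,
#                         optimize_fn=optimization_manager(config),
#                         accum=config.training.accum)
#   loss = step_fn(state, batch)
#
# `config.training.accum` and the EMA helper's construction are assumptions
# about the surrounding training script, not guarantees of its exact API.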