-
Notifications
You must be signed in to change notification settings - Fork 0
/
model.py
46 lines (38 loc) · 1.37 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import torch
from torch import nn
from torch.optim import SGD
import torch.nn.functional as F
class Model(nn.Module):
    """Small CNN classifier: two conv/ReLU/pool stages, then a two-layer MLP
    producing softmax probabilities over 3 classes.

    NOTE(review): ``Flatten(start_dim=0)`` flattens EVERY dimension including
    the batch axis, so this network only works on a single unbatched sample
    of shape (1, H, W) — confirm against callers before changing.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.stack = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3),
            nn.Conv2d(in_channels=4, out_channels=4, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Flattens all dims (channel + spatial) into one 26244-long vector.
            nn.Flatten(start_dim=0),
            nn.Linear(26244, 3000),
            nn.ReLU(),
            nn.Linear(3000, 3),
        )
        # Optimizer/criterion are owned by the model so trainn() is self-contained.
        self.optimizer = SGD(self.parameters(), lr=0.001)
        self.criterion = nn.MSELoss()

    def forward(self, x):
        """Return softmax class probabilities for a single input sample.

        Fix: the original ran ``self.stack(x)`` twice — once under
        ``torch.no_grad()`` solely to ``print`` the raw logits, then again
        for the return value — doubling the forward cost on every call.
        The debug print is removed and the stack is evaluated once.
        """
        return F.softmax(self.stack(x), dim=-1)

    def trainn(self, inp, target, verbose=True):
        """Run one SGD optimization step on (inp, target).

        Fix: the original never called ``optimizer.zero_grad()``, so
        gradients from every previous step accumulated into the new one,
        corrupting training after the first call.

        :param inp: single input sample for :meth:`forward`.
        :param target: target tensor for the MSE criterion.
        :param verbose: when True, print the step's loss tensor.
        """
        self.optimizer.zero_grad()
        output = self.forward(inp)
        loss = self.criterion(output, target)
        loss.backward()
        self.optimizer.step()
        if verbose:
            print(loss)

    def save(self, file_name='models/model.pth'):
        """Serialize the model parameters to ``file_name`` (directory must exist)."""
        torch.save(self.state_dict(), file_name)

    def load(self, file_name='models/model.pth'):
        """Restore parameters previously written by :meth:`save`."""
        self.load_state_dict(torch.load(file_name))