verify_pytorch.py
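"""Quick check that the local PyTorch install works.

Verifies CUDA availability, then trains a small MLP twice (GPU first when
available, then CPU) so the two training times can be compared.
Expects plain-text data files at ./test/input and ./test/output.
"""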
# %%
# verification
print('--------')
print('pytorch-gpu testing...')
import torch
print('torch.cuda.is_available(): ' + str(torch.cuda.is_available()))
print('--------')
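# A minimal optional extension (not in the original script): also report the
# installed torch version and the first visible GPU, which helps diagnose the
# case where the CUDA check above prints False.
print('torch.__version__: ' + str(torch.__version__))
if torch.cuda.is_available():
    print('torch.cuda.get_device_name(0): ' + torch.cuda.get_device_name(0))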
if __name__ == '__main__':
    # https://zhuanlan.zhihu.com/p/35434175
    import torch
    import torch.nn as nn
    import numpy as np
    import matplotlib.pyplot as plt
    import datetime

    # Read the toy dataset: one sample per row, whitespace-separated.
    inp = np.loadtxt("./test/input", dtype=np.float32)
    oup = np.loadtxt("./test/output", dtype=np.float32)
    #inp = inp*[4,100,1,4,0.04,1]
    oup = oup * 500  # scale the targets; undone again when plotting

    # Hyper parameters
    input_size = inp.shape[1]
    hidden_size = 1000
    output_size = 1
    num_epochs = 1000
    learning_rate = 0.001

    # Toy dataset. MSELoss expects the target to have the same shape as the
    # model output, so reshape the targets to (N, 1).
    x_train = inp
    y_train = oup.reshape(-1, 1)
    # Regression model: a two-hidden-layer MLP (the unused Sigmoid/ELU/
    # Hardshrink layers from the original are dropped; forward() never
    # called them).
    class Net(nn.Module):
        def __init__(self, input_size, hidden_size, output_size):
            super().__init__()
            self.fc1 = nn.Linear(input_size, hidden_size)
            self.tanh = nn.Tanh()
            self.ln = nn.Linear(hidden_size, hidden_size)
            self.relu = nn.ReLU()
            self.fc2 = nn.Linear(hidden_size, output_size)

        def forward(self, x):
            # fc1 -> Tanh -> hidden linear -> ReLU -> fc2
            out = self.fc1(x)
            out = self.tanh(out)
            out = self.ln(out)
            out = self.relu(out)
            out = self.fc2(out)
            return out
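    # Optional shape sanity check (not in the original script): a batch of
    # two samples should map to a (2, 1) output.
    # net = Net(input_size, hidden_size, output_size)
    # print(net(torch.zeros(2, input_size)).shape)  # expect torch.Size([2, 1])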
    # Train the same model twice: first on the GPU (when available), then on
    # the CPU, so the wall-clock times can be compared.
    GPU_used = False
    for i in range(2):
        model = Net(input_size, hidden_size, output_size)

        # Loss and optimizer
        criterion = nn.MSELoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

        use_gpu = torch.cuda.is_available() and not GPU_used
        start = datetime.datetime.now()
        print(f"Start time = {start.strftime('%Y-%m-%d %H:%M:%S')}")
        if use_gpu:
            print("We are using GPU now!!!")
            model = model.cuda()
        else:
            print("We are using CPU now!!!")

        # Train the model
        for epoch in range(num_epochs):
            # Convert the numpy arrays to torch tensors on the right device
            # (plain tensors; torch.autograd.Variable has been deprecated
            # since PyTorch 0.4).
            if use_gpu:
                inputs = torch.from_numpy(x_train).cuda()
                targets = torch.from_numpy(y_train).cuda()
            else:
                inputs = torch.from_numpy(x_train)
                targets = torch.from_numpy(y_train)

            # Forward + backward + optimize
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            if (epoch + 1) % 5 == 0:
                print('Epoch [%d/%d], Loss: %.8f'
                      % (epoch + 1, num_epochs, loss.item()))

        end = datetime.datetime.now()
        print(f"End time = {end.strftime('%Y-%m-%d %H:%M:%S')}")
        cost = end - start
        # total_seconds() also counts fractions of a second; timedelta.seconds
        # would silently drop days and microseconds.
        print(f"Cost time = {cost.total_seconds():.1f} seconds")

        # Plot the graph (undo the *500 target scaling for display)
        with torch.no_grad():
            if use_gpu:
                predicted = model(torch.from_numpy(x_train).cuda()).cpu().numpy()
            else:
                predicted = model(torch.from_numpy(x_train)).numpy()
        plt.plot(y_train / 500, 'r-', label='Original data')
        plt.plot(predicted / 500, '-', label='Fitted line')
        #plt.plot(y_train/500, predicted/500, '.', label='Fitted line')
        # plt.legend()
        # plt.show()

        # Save the model
        # torch.save(model.state_dict(), 'model.pkl')
        GPU_used = True
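    # When run as a plain script the figure is never displayed; to see it,
    # uncomment plt.legend()/plt.show() above, or write it to disk
    # (the filename below is just an example):
    # plt.savefig('verify_pytorch.png')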