toolbox.py
import numpy as np
import torch
import random
import os
from torch import Tensor
from snntorch import utils
from typing import List, Tuple, Callable
from dataclasses import dataclass


@dataclass
class Config:
    batch_size: int
    beta: float
    threshold: float
    adam_betas: Tuple[float, float]
    rates: Tuple[float, float]
    epochs: int
    timesteps: int
    learning_rate: float
    data_seed: int
    surrogate_gradient: Callable
    model_seeds: List[int]
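
# Illustrative example of constructing a Config (the values below are
# placeholders for demonstration, not taken from any experiment in this
# repository; `surrogate` refers to snntorch.surrogate):
#
#     from snntorch import surrogate
#
#     config = Config(
#         batch_size=32,
#         beta=0.9,
#         threshold=1.0,
#         adam_betas=(0.9, 0.999),
#         rates=(0.0, 1.0),
#         epochs=10,
#         timesteps=25,
#         learning_rate=1e-3,
#         data_seed=42,
#         surrogate_gradient=surrogate.fast_sigmoid(),
#         model_seeds=[0, 1, 2],
#     )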


def and_generator(size: int) -> List[Tuple[Tensor, Tensor]]:
    # Random binary input pairs; the label is the logical AND of the two inputs.
    x = Tensor(np.random.choice([0, 1], (size, 2)))
    y = Tensor([1 if i[0] and i[1] else 0 for i in x]).reshape(size, 1)
    return list(zip(x, y))


def or_generator(size: int) -> List[Tuple[Tensor, Tensor]]:
    # Random binary input pairs; the label is the logical OR of the two inputs.
    x = Tensor(np.random.choice([0, 1], (size, 2)))
    y = Tensor([1 if i[0] or i[1] else 0 for i in x]).reshape(size, 1)
    return list(zip(x, y))


def xor_generator(size: int) -> List[Tuple[Tensor, Tensor]]:
    # Random binary input pairs; the label is the logical XOR of the two inputs.
    # XOR is computed on the integer array first, since bitwise ^ is not
    # defined for float tensors.
    x = np.random.choice([0, 1], (size, 2))
    y = Tensor([1 if i[0] ^ i[1] else 0 for i in x]).reshape(size, 1)
    x = Tensor(x)
    return list(zip(x, y))


def continous_and_generator(size: int) -> List[Tuple[Tensor, Tensor]]:
    # Continuous inputs in [0, 1); the label is 1 only when both inputs exceed 0.5.
    # Note that, unlike the binary generators above, the labels here are float16
    # and are not reshaped to (size, 1).
    x = torch.from_numpy(np.random.random(size=(size, 2)).round(2)).to(torch.float32)
    y = torch.from_numpy(
        np.apply_along_axis(lambda t: t[0] > 0.5 and t[1] > 0.5, 1, x).astype(int)
    ).to(torch.float16)
    return list(zip(x, y))


def continous_or_generator(size: int) -> List[Tuple[Tensor, Tensor]]:
    # Continuous inputs in [0, 1); the label is 0 only when both inputs are below 0.5.
    x = torch.from_numpy(np.random.random(size=(size, 2)).round(2)).to(torch.float32)
    y = torch.from_numpy(
        (~np.apply_along_axis(lambda t: t[0] < 0.5 and t[1] < 0.5, 1, x)).astype(int)
    ).to(torch.float16)
    return list(zip(x, y))


def continous_xor_generator(size: int) -> List[Tuple[Tensor, Tensor]]:
    # Continuous inputs in [0, 1); the label is 0 only when both inputs are below
    # 0.5 or both are above 0.5.
    x = torch.from_numpy(np.random.random(size=(size, 2)).round(2)).to(torch.float32)
    y = torch.from_numpy(
        (
            ~np.apply_along_axis(
                lambda t: (t[0] < 0.5 and t[1] < 0.5) or (t[0] > 0.5 and t[1] > 0.5),
                1,
                x,
            )
        ).astype(int)
    ).to(torch.float16)
    return list(zip(x, y))


def set_seed(seed: int = 42) -> None:
    seed = int(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # When running on the CuDNN backend, two further options must be set
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Set a fixed value for the hash seed
    os.environ["PYTHONHASHSEED"] = str(seed)
    print(f"Random seed set as {seed}")


def forward_pass(net, data, num_steps: int) -> Tuple[Tensor, np.ndarray]:
    # Present the same input to the network for num_steps timesteps, recording
    # the output spikes and membrane potentials at every step.
    spk_rec = []
    mem_hist = []
    utils.reset(net)  # reset hidden states of the snnTorch layers
    for step in range(num_steps):
        spk_out, mem_out = net(data)
        mem_hist.append(mem_out.cpu().detach().numpy())
        spk_rec.append(spk_out)
    return torch.stack(spk_rec), np.stack(mem_hist)


def clear_print():
    # Clear the terminal ("cls" on Windows, "clear" elsewhere).
    if os.name == "nt":
        os.system("cls")
    else:
        os.system("clear")


def get_git_revision_hash() -> str:
    # Return the commit hash of the repository the script is run from.
    import subprocess

    return subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
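

# --- Usage sketch (illustrative only, not part of the original toolbox) ---
# A minimal example of how these helpers might be combined, assuming a tiny
# snnTorch network; the architecture and hyperparameters below are placeholders.
if __name__ == "__main__":
    import snntorch as snn
    from torch import nn

    set_seed(42)

    # Hypothetical one-layer network: a linear projection into a Leaky
    # integrate-and-fire neuron; output=True makes it return (spikes, membrane).
    net = nn.Sequential(
        nn.Linear(2, 1),
        snn.Leaky(beta=0.9, threshold=1.0, init_hidden=True, output=True),
    )

    # Draw a small XOR dataset and run a single sample through the network.
    sample_x, sample_y = xor_generator(4)[0]
    spk_rec, mem_hist = forward_pass(net, sample_x, num_steps=25)
    print(spk_rec.shape, mem_hist.shape, sample_y)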