model.py
import tensorflow as tf


class UnitNormLayer(tf.keras.layers.Layer):
    '''Normalize vectors (euclidean norm) in batch to unit hypersphere.
    '''

    def __init__(self):
        super(UnitNormLayer, self).__init__()

    def call(self, input_tensor):
        norm = tf.norm(input_tensor, axis=1)
        return input_tensor / tf.reshape(norm, [-1, 1])

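
# Note (illustrative only, not used elsewhere in this module): for rows with
# non-zero norm, UnitNormLayer should match tf.math.l2_normalize(x, axis=1),
# which additionally guards against division by zero. A quick sanity check:
#
#   x = tf.random.normal([4, 8])
#   tf.debugging.assert_near(UnitNormLayer()(x), tf.math.l2_normalize(x, axis=1))
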

class DenseLeakyReluLayer(tf.keras.layers.Layer):
    '''A dense layer followed by a LeakyReLU layer.
    '''

    def __init__(self, n, alpha=0.3):
        super(DenseLeakyReluLayer, self).__init__()
        self.dense = tf.keras.layers.Dense(n, activation=None)
        self.lrelu = tf.keras.layers.LeakyReLU(alpha=alpha)

    def call(self, input_tensor):
        x = self.dense(input_tensor)
        return self.lrelu(x)


class Encoder(tf.keras.Model):
    '''An encoder network, E(·), which maps an augmented image x to a representation vector, r = E(x) ∈ R^{DE}.
    '''

    def __init__(self, normalize=True, activation='relu'):
        super(Encoder, self).__init__(name='')
        if activation == 'leaky_relu':
            self.hidden1 = DenseLeakyReluLayer(256)
            self.hidden2 = DenseLeakyReluLayer(256)
        else:
            self.hidden1 = tf.keras.layers.Dense(256, activation=activation)
            self.hidden2 = tf.keras.layers.Dense(256, activation=activation)
        self.normalize = normalize
        if self.normalize:
            self.norm = UnitNormLayer()

    def call(self, input_tensor, training=False):
        x = self.hidden1(input_tensor, training=training)
        x = self.hidden2(x, training=training)
        if self.normalize:
            x = self.norm(x)
        return x


class Projector(tf.keras.Model):
    '''A projection network, P(·), which maps the normalized representation vector r into a vector z = P(r) ∈ R^{DP}
    suitable for computation of the contrastive loss.
    '''

    def __init__(self, n, normalize=True, activation='relu'):
        super(Projector, self).__init__(name='')
        # NOTE: `n` is accepted but not used below; both layers are fixed at
        # 256 units, so the projection dimension is always 256 as written.
        if activation == 'leaky_relu':
            self.dense = DenseLeakyReluLayer(256)
            self.dense2 = DenseLeakyReluLayer(256)
        else:
            self.dense = tf.keras.layers.Dense(256, activation=activation)
            self.dense2 = tf.keras.layers.Dense(256, activation=activation)
        self.normalize = normalize
        if self.normalize:
            self.norm = UnitNormLayer()

    def call(self, input_tensor, training=False):
        x = self.dense(input_tensor, training=training)
        x = self.dense2(x, training=training)
        if self.normalize:
            x = self.norm(x)
        return x


class SoftmaxPred(tf.keras.Model):
    '''For stage 2, simply a softmax head on top of the Encoder.
    '''

    def __init__(self, num_classes=10):
        super(SoftmaxPred, self).__init__(name='')
        self.dense = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, input_tensor, training=False):
        return self.dense(input_tensor, training=training)


class MLP(tf.keras.Model):
    '''A simple baseline MLP with the same architecture as Encoder, plus a softmax/regression output layer.
    '''

    def __init__(self, num_classes=10, normalize=True, regress=False, activation='relu'):
        super(MLP, self).__init__(name='')
        if activation == 'leaky_relu':
            self.hidden1 = DenseLeakyReluLayer(256)
            self.hidden2 = DenseLeakyReluLayer(256)
        else:
            self.hidden1 = tf.keras.layers.Dense(256, activation=activation)
            self.hidden2 = tf.keras.layers.Dense(256, activation=activation)
        self.normalize = normalize
        if self.normalize:
            self.norm = UnitNormLayer()
        if not regress:
            self.output_layer = tf.keras.layers.Dense(
                num_classes, activation='softmax')
        else:
            self.output_layer = tf.keras.layers.Dense(1)

    def call(self, input_tensor, training=False):
        x = self.hidden1(input_tensor, training=training)
        x = self.hidden2(x, training=training)
        if self.normalize:
            x = self.norm(x)
        preds = self.output_layer(x, training=training)
        return preds

    def get_last_hidden(self, input_tensor):
        '''Get the output of the last hidden layer before the prediction layer.
        '''
        x = self.hidden1(input_tensor, training=False)
        x = self.hidden2(x, training=False)
        if self.normalize:
            x = self.norm(x)
        return x
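

if __name__ == '__main__':
    # Minimal usage sketch, assuming flattened 784-dim inputs (e.g. MNIST) and
    # random tensors in place of real augmented images; a shape check of how
    # the pieces compose, not a training loop.
    dummy = tf.random.normal([32, 784])

    # Stage 1: contrastive pre-training operates on z = P(E(x)).
    encoder = Encoder(normalize=True, activation='relu')
    projector = Projector(128, normalize=True, activation='relu')  # `n`=128 is ignored as written
    r = encoder(dummy, training=False)   # unit-normalized representations, shape (32, 256)
    z = projector(r, training=False)     # projections fed to the contrastive loss, shape (32, 256)

    # Stage 2: a softmax head on top of the (typically frozen) encoder representations.
    head = SoftmaxPred(num_classes=10)
    probs = head(r, training=False)      # class probabilities, shape (32, 10)

    # Baseline: the plain MLP classifier trained end-to-end on the same inputs.
    mlp = MLP(num_classes=10)
    preds = mlp(dummy, training=False)   # shape (32, 10)
    print(r.shape, z.shape, probs.shape, preds.shape)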