GlobalAvgPool
ChanLumerico committed May 26, 2024
1 parent 966247a commit bc45f7c
Showing 3 changed files with 91 additions and 5 deletions.
6 changes: 3 additions & 3 deletions docs_pending.json
@@ -1,7 +1,7 @@
{
"class": [
{
"name": "ConvBlock",
"name": "ConvBlockND",
"script": "luma.neural.block",
"inheriting": "Sequential"
},
@@ -11,7 +11,7 @@
"inheriting": "Sequential"
},
{
"name": "BatchNorm",
"name": "BatchNormND",
"script": "luma.neural.layer",
"inheriting": "Layer"
},
@@ -26,7 +26,7 @@
"inheriting": "Layer"
},
{
"name": "Dropout1D, 2D & 3D",
"name": "DropoutND",
"script": "luma.neural.layer",
"inheriting": "Layer"
},
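The entries above track classes whose documentation is still pending, each keyed by "name", "script", and "inheriting". A minimal sketch of consuming such a file, assuming the full docs_pending.json keeps the structure visible in this diff (the loop below is illustrative, not part of the commit):

import json

# List classes still awaiting documentation, assuming docs_pending.json
# follows the {"class": [{"name", "script", "inheriting"}, ...]} layout above.
with open("docs_pending.json") as f:
    pending = json.load(f)

for entry in pending["class"]:
    print(f"{entry['script']}.{entry['name']} (inherits {entry['inheriting']})")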
89 changes: 87 additions & 2 deletions luma/neural/_layers/_pool.py
@@ -6,7 +6,14 @@
from luma.neural.base import Layer


__all__ = ("_Pool1D", "_Pool2D", "_Pool3D")
__all__ = (
    "_Pool1D",
    "_Pool2D",
    "_Pool3D",
    "_GlobalAvgPool1D",
    "_GlobalAvgPool2D",
    "_GlobalAvgPool3D",
)


class _Pool1D(Layer):
@@ -412,4 +419,82 @@ def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
        return (batch_size, channels, out_depth, out_height, out_width)


# TODO: Implement _GlobalAvgPool
class _GlobalAvgPool1D(Layer):
    def __init__(self) -> None:
        super().__init__()

    @Tensor.force_dim(3)
    def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
        _ = is_train
        self.input_ = X
        # Average over the spatial axis, keeping it as a size-1 dimension.
        out = np.mean(X, axis=2, keepdims=True)

        return out

    @Tensor.force_dim(3)
    def backward(self, d_out: Tensor) -> Tensor:
        X = self.input_
        _, _, width = X.shape

        # The mean spreads the upstream gradient equally over all positions.
        dX = np.zeros_like(X)
        dX += d_out / width
        self.dX = dX
        return self.dX

    def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
        batch_size, channels, _ = in_shape
        return (batch_size, channels, 1)


class _GlobalAvgPool2D(Layer):
    def __init__(self) -> None:
        super().__init__()

    @Tensor.force_dim(4)
    def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
        _ = is_train
        self.input_ = X
        # Average over both spatial axes, keeping them as size-1 dimensions.
        out = np.mean(X, axis=(2, 3), keepdims=True)

        return out

    @Tensor.force_dim(4)
    def backward(self, d_out: Tensor) -> Tensor:
        X = self.input_
        _, _, height, width = X.shape

        # Each position receives an equal 1 / (H * W) share of the gradient.
        dX = np.zeros_like(X)
        dX += d_out / (height * width)
        self.dX = dX
        return self.dX

    def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
        batch_size, channels, _, _ = in_shape
        return (batch_size, channels, 1, 1)


class _GlobalAvgPool3D(Layer):
    def __init__(self) -> None:
        super().__init__()

    @Tensor.force_dim(5)
    def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
        _ = is_train
        self.input_ = X
        # Average over all three spatial axes, keeping them as size-1 dimensions.
        out = np.mean(X, axis=(2, 3, 4), keepdims=True)

        return out

    @Tensor.force_dim(5)
    def backward(self, d_out: Tensor) -> Tensor:
        X = self.input_
        _, _, depth, height, width = X.shape

        # Each position receives an equal 1 / (D * H * W) share of the gradient.
        dX = np.zeros_like(X)
        dX += d_out / (depth * height * width)
        self.dX = dX
        return self.dX

    def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
        batch_size, channels, _, _, _ = in_shape
        return (batch_size, channels, 1, 1, 1)
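The three classes above follow the same pattern: forward averages over every spatial axis while keeping each as a size-1 dimension, and backward spreads the upstream gradient uniformly, dividing by the number of pooled positions. A minimal usage sketch for the 2D variant, assuming Tensor.force_dim accepts plain NumPy arrays; the shapes and checks below are illustrative, not part of the commit:

import numpy as np

# Hypothetical check of _GlobalAvgPool2D: forward collapses the spatial axes,
# backward hands each position an equal 1 / (H * W) share of the gradient.
pool = _GlobalAvgPool2D()

X = np.random.randn(8, 64, 7, 7)  # (batch, channels, height, width)
out = pool.forward(X)             # -> (8, 64, 1, 1)
assert out.shape == pool.out_shape(X.shape)

d_out = np.ones_like(out)
dX = pool.backward(d_out)
assert np.allclose(dX, 1.0 / (7 * 7))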
1 change: 1 addition & 0 deletions luma/neural/_models/_inception.py
@@ -199,6 +199,7 @@ def score(
        return super(_Inception_V1, self).score_nn(X, y, metric, argmax)


# Test code
from luma.neural.optimizer import AdamOptimizer

model = _Inception_V1(optimizer=AdamOptimizer())
