From bc45f7cee064cabd5773f3620fdfe3ebd5bfa5f6 Mon Sep 17 00:00:00 2001
From: Chan Lee
Date: Sun, 26 May 2024 20:04:48 +0900
Subject: [PATCH] GlobalAvgPool

---
 docs_pending.json                 |  6 +--
 luma/neural/_layers/_pool.py      | 89 ++++++++++++++++++++++++++++++-
 luma/neural/_models/_inception.py |  1 +
 3 files changed, 91 insertions(+), 5 deletions(-)

diff --git a/docs_pending.json b/docs_pending.json
index f50d777..3a4cc6d 100644
--- a/docs_pending.json
+++ b/docs_pending.json
@@ -1,7 +1,7 @@
 {
     "class": [
         {
-            "name": "ConvBlock",
+            "name": "ConvBlockND",
             "script": "luma.neural.block",
             "inheriting": "Sequential"
         },
@@ -11,7 +11,7 @@
             "inheriting": "Sequential"
         },
         {
-            "name": "BatchNorm",
+            "name": "BatchNormND",
             "script": "luma.neural.layer",
             "inheriting": "Layer"
         },
@@ -26,7 +26,7 @@
             "inheriting": "Layer"
         },
         {
-            "name": "Dropout1D, 2D & 3D",
+            "name": "DropoutND",
            "script": "luma.neural.layer",
             "inheriting": "Layer"
         },
diff --git a/luma/neural/_layers/_pool.py b/luma/neural/_layers/_pool.py
index 487e166..c76ceaa 100644
--- a/luma/neural/_layers/_pool.py
+++ b/luma/neural/_layers/_pool.py
@@ -6,7 +6,14 @@
 from luma.neural.base import Layer
 
 
-__all__ = ("_Pool1D", "_Pool2D", "_Pool3D")
+__all__ = (
+    "_Pool1D",
+    "_Pool2D",
+    "_Pool3D",
+    "_GlobalAvgPool1D",
+    "_GlobalAvgPool2D",
+    "_GlobalAvgPool3D",
+)
 
 
 class _Pool1D(Layer):
@@ -412,4 +419,82 @@ def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
         return (batch_size, channels, out_depth, out_height, out_width)
 
 
-# TODO: Implement _GlobalAvgPool
+class _GlobalAvgPool1D(Layer):
+    def __init__(self) -> None:
+        super().__init__()
+
+    @Tensor.force_dim(3)
+    def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
+        _ = is_train
+        self.input_ = X
+        out = np.mean(X, axis=2, keepdims=True)
+
+        return out
+
+    @Tensor.force_dim(3)
+    def backward(self, d_out: Tensor) -> Tensor:
+        X = self.input_
+        _, _, width = X.shape
+
+        dX = np.zeros_like(X)
+        dX += d_out / width
+        self.dX = dX
+        return self.dX
+
+    def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
+        batch_size, channels, _ = in_shape
+        return (batch_size, channels, 1)
+
+
+class _GlobalAvgPool2D(Layer):
+    def __init__(self) -> None:
+        super().__init__()
+
+    @Tensor.force_dim(4)
+    def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
+        _ = is_train
+        self.input_ = X
+        out = np.mean(X, axis=(2, 3), keepdims=True)
+
+        return out
+
+    @Tensor.force_dim(4)
+    def backward(self, d_out: Tensor) -> Tensor:
+        X = self.input_
+        _, _, height, width = X.shape
+
+        dX = np.zeros_like(X)
+        dX += d_out / (height * width)
+        self.dX = dX
+        return self.dX
+
+    def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
+        batch_size, channels, _, _ = in_shape
+        return (batch_size, channels, 1, 1)
+
+
+class _GlobalAvgPool3D(Layer):
+    def __init__(self) -> None:
+        super().__init__()
+
+    @Tensor.force_dim(5)
+    def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
+        _ = is_train
+        self.input_ = X
+        out = np.mean(X, axis=(2, 3, 4), keepdims=True)
+
+        return out
+
+    @Tensor.force_dim(5)
+    def backward(self, d_out: Tensor) -> Tensor:
+        X = self.input_
+        _, _, depth, height, width = X.shape
+
+        dX = np.zeros_like(X)
+        dX += d_out / (depth * height * width)
+        self.dX = dX
+        return self.dX
+
+    def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
+        batch_size, channels, _, _, _ = in_shape
+        return (batch_size, channels, 1, 1, 1)
diff --git a/luma/neural/_models/_inception.py b/luma/neural/_models/_inception.py
index 7fce522..b42f094 100644
--- a/luma/neural/_models/_inception.py
+++ b/luma/neural/_models/_inception.py
@@ -199,6 +199,7 @@ def score(
         return super(_Inception_V1, self).score_nn(X, y, metric, argmax)
 
 
+# Test code
 from luma.neural.optimizer import AdamOptimizer
 
 model = _Inception_V1(optimizer=AdamOptimizer())
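
A minimal sanity-check sketch for the new layers, kept outside the patch itself; it assumes _GlobalAvgPool2D is importable from luma.neural._layers._pool and that forward/backward accept plain NumPy arrays as Tensor values:

    import numpy as np

    from luma.neural._layers._pool import _GlobalAvgPool2D

    # NCHW input: 2 samples, 3 channels, 8x8 spatial grid
    X = np.random.randn(2, 3, 8, 8)

    pool = _GlobalAvgPool2D()
    out = pool.forward(X)
    assert out.shape == (2, 3, 1, 1)  # spatial dims collapse to 1x1

    # Upstream gradient with the same shape as the output
    d_out = np.ones_like(out)
    dX = pool.backward(d_out)
    assert dX.shape == X.shape

    # Each spatial position receives d_out / (H * W)
    assert np.allclose(dX, 1.0 / (8 * 8))

Because the forward pass averages over all spatial positions, the backward pass is simply the upstream gradient divided by the number of pooled elements, broadcast back to the input shape.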