_InvertedRes wip
ChanLumerico committed Aug 24, 2024
1 parent 71f4b25 commit 8617bca
Showing 4 changed files with 155 additions and 3 deletions.
4 changes: 4 additions & 0 deletions luma/neural/block/__init__.py
@@ -23,6 +23,7 @@
incep_v4,
incep_res_v1,
incep_res_v2,
mobile,
resnet,
standard,
xception,
@@ -865,3 +866,6 @@ class Exit(xception._Exit):
Output: Tensor[-1, 1024, 9, 9]
```
"""


class InvertedResBlock(mobile._InvertedRes): ...
132 changes: 132 additions & 0 deletions luma/neural/block/mobile.py
@@ -0,0 +1,132 @@
from typing import Tuple, override

from luma.core.super import Optimizer
from luma.interface.typing import Tensor, TensorLike
from luma.interface.util import InitUtil

from luma.neural.layer import *
from luma.neural.autoprop import LayerNode, LayerGraph


class _InvertedRes(LayerGraph):
def __init__(
self,
in_channels: int,
out_channels: int,
stride: int = 1,
expand: int = 1,
activation: Activation.FuncType = Activation.ReLU6,
optimizer: Optimizer | None = None,
initializer: InitUtil.InitStr = None,
lambda_: float = 0.0,
do_batch_norm: bool = True,
momentum: float = 0.9,
random_state: int | None = None,
) -> None:
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
self.expand = expand
self.activation = activation
self.optimizer = optimizer
self.initializer = initializer
self.lambda_ = lambda_
self.do_batch_norm = do_batch_norm
self.momentum = momentum

self.basic_args = {
"initializer": initializer,
"lambda_": lambda_,
"random_state": random_state,
}

assert self.stride in [1, 2]
self.do_shortcut = stride == 1 and in_channels == out_channels
self.hid_channels = int(round(in_channels * expand))

self.init_nodes()
super(_InvertedRes, self).__init__(
graph={
self.rt_: [self.dw_pw_lin],
self.dw_pw_lin: [self.tm_],
},
root=self.rt_,
term=self.tm_,
)

if self.expand != 1:
self[self.rt_].clear()
self[self.rt_].append(self.pw_)
            self.graph[self.pw_] = [self.dw_pw_lin]

if self.do_shortcut:
self[self.rt_].append(self.tm_)

self.build()
if optimizer is not None:
self.set_optimizer(optimizer)

def init_nodes(self) -> None:
self.rt_ = LayerNode(Identity(), name="rt_")
self.pw_ = LayerNode(
Sequential(
Conv2D(
self.in_channels,
self.hid_channels,
1,
padding="valid",
**self.basic_args,
),
(
BatchNorm2D(self.hid_channels, self.momentum)
if self.do_batch_norm
else None
),
self.activation(),
),
name="pw_",
)
self.dw_pw_lin = LayerNode(
Sequential(
DepthConv2D(
self.hid_channels,
3,
self.stride,
padding="valid" if self.stride == 2 else "same",
**self.basic_args,
),
(
BatchNorm2D(self.hid_channels, self.momentum)
if self.do_batch_norm
else None
),
self.activation(),
Conv2D(
self.hid_channels,
self.out_channels,
1,
padding="valid",
**self.basic_args,
),
),
name="dw_pw_lin",
)
self.tm_ = LayerNode(Identity(), merge_mode="sum", name="tm_")

@Tensor.force_dim(4)
def forward(self, X: TensorLike, is_train: bool = False) -> TensorLike:
return super().forward(X, is_train)

@Tensor.force_dim(4)
def backward(self, d_out: TensorLike) -> TensorLike:
return super().backward(d_out)

@override
def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
batch_size, _, height, width = in_shape
return (
batch_size,
self.out_channels,
height // self.stride,
width // self.stride,
)
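
For reference, a minimal usage sketch of the new block exposed as `InvertedResBlock` in `luma/neural/block/__init__.py` above. The channel counts, input size, and expansion factor are illustrative, and it is assumed that this wip graph builds as written; only the constructor signature and `out_shape` from this diff are exercised.

```python
# Illustrative sketch only: 32 -> 64 channels, stride 2, t=6 expansion.
# Assumes luma with this commit applied is importable; values are not from the diff.
from luma.neural.block import InvertedResBlock

block = InvertedResBlock(
    in_channels=32,
    out_channels=64,
    stride=2,   # stride must be 1 or 2; 2 disables the identity shortcut
    expand=6,   # hidden width becomes 32 * 6 = 192
)

# out_shape follows (N, C_out, H // stride, W // stride) as defined above
print(block.out_shape((1, 32, 32, 32)))  # expected: (1, 64, 16, 16)
```

With `expand=1` the pointwise expansion node is skipped and the graph reduces to root -> dw_pw_lin -> term; with `stride=1` and matching channel counts, the root is additionally wired to the terminal node, which sums the two paths (the inverted-residual shortcut).
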
19 changes: 18 additions & 1 deletion luma/neural/layer/act.py
@@ -1,7 +1,7 @@
from typing import Tuple, Type
import numpy as np

from luma.interface.typing import ClassType, Tensor
from luma.interface.typing import ClassType, Tensor, TensorLike
from luma.neural.base import Layer


@@ -48,6 +48,23 @@ def backward(self, d_out: Tensor) -> Tensor:
def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
return _Activation._out_shape(in_shape)

class ReLU6(Layer):
def __init__(self) -> None:
super().__init__()

def forward(self, X: TensorLike, is_train: bool = False) -> TensorLike:
_ = is_train
self.input_ = X
return np.minimum(np.maximum(0, X), 6)

def backward(self, d_out: TensorLike) -> TensorLike:
self.dX = d_out.copy()
self.dX[(self.input_ <= 0) | (self.input_ > 6)] = 0
return self.dX

def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
return _Activation._out_shape(in_shape)

class Sigmoid(Layer):
def __init__(self) -> None:
super().__init__()
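
As a quick sanity check of the ReLU6 layer added above, here is a standalone NumPy sketch of the same forward clamp and gradient mask (illustrative values, no luma dependency):

```python
import numpy as np

x = np.array([-2.0, 0.0, 3.0, 6.0, 9.0])

# forward: clamp activations to the range [0, 6]
y = np.minimum(np.maximum(0, x), 6)
print(y)  # [0. 0. 3. 6. 6.]

# backward: gradient passes only where 0 < x <= 6
d_out = np.ones_like(x)
dx = d_out.copy()
dx[(x <= 0) | (x > 6)] = 0
print(dx)  # [0. 0. 1. 1. 0.]
```
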
3 changes: 1 addition & 2 deletions luma/neural/model/mobile.py
@@ -1,7 +1,6 @@
from typing import Any, Self, override, ClassVar
from dataclasses import asdict

from luma.core.super import Estimator, Evaluator, Optimizer, Supervised
from luma.core.super import Estimator, Evaluator, Supervised
from luma.interface.typing import Matrix, Tensor, Vector
from luma.interface.util import InitUtil
from luma.metric.classification import Accuracy