Revert "ResNet_34"
ChanLumerico authored Aug 11, 2024
1 parent daccdcc commit 642581b
Showing 3 changed files with 26 additions and 262 deletions.
luma/__import__.py (5 changes: 1 addition & 4 deletions)
@@ -151,8 +151,6 @@
     Inception_V4,
     InceptionResNet_V1,
     InceptionResNet_V2,
-    ResNet_18,
-    ResNet_34,
 )

 from luma.neural.autoprop import LayerNode, LayerGraph
@@ -335,8 +333,7 @@
 AlexNet, ZFNet,
 VGGNet_11, VGGNet_13, VGGNet_16, VGGNet_19,
 Inception_V1, Inception_V2, Inception_V3, Inception_V4,
-InceptionResNet_V1, InceptionResNet_V2,
-ResNet_18, ResNet_34,
+InceptionResNet_V1, InceptionResNet_V2

 # ------------------- [ luma.metric ] ----------------------
 Accuracy, Precision, Recall, F1Score, Specificity
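With this hunk, ResNet_18 and ResNet_34 disappear from the module's export listing. A minimal sketch of the downstream effect (the import path below is illustrative only; the diff does not show where these names were ultimately re-exported):

    # Hypothetical check; assumes ResNet_34 had no export path other than
    # the listing removed above.
    try:
        from luma import ResNet_34  # illustrative path, not shown in this diff
    except ImportError:
        print("ResNet_34 was reverted and is no longer exported")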
luma/neural/_models/resnet.py (145 changes: 6 additions & 139 deletions)
@@ -1,8 +1,8 @@
 from typing import Any, Self, override, List, Optional
 from dataclasses import asdict, dataclass

-from luma.core.super import Estimator, Evaluator, Supervised
-from luma.interface.typing import Matrix, Tensor, Vector
+from luma.core.super import Estimator, Evaluator, Optimizer, Supervised
+from luma.interface.typing import Matrix, Tensor, TensorLike, Vector
 from luma.interface.util import InitUtil
 from luma.metric.classification import Accuracy
@@ -107,10 +107,10 @@ def __init__(

         self.feature_sizes_ = [
             [3, 64],
-            [64, 64] * 2,
-            [128, 128] * 2,
-            [256, 256] * 2,
-            [512, 512] * 2,
+            [64, 64, 64, 64],
+            [128, 128, 128, 128],
+            [256, 256, 256, 256],
+            [512, 512, 512, 512],
         ]
         self.feature_shapes_ = [
             self._get_feature_shapes(sizes) for sizes in self.feature_sizes_
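The two forms in this hunk build identical lists; Python's sequence repetition expands [64, 64] * 2 to [64, 64, 64, 64], so restoring the explicit literals changes style only, not behavior. A quick standalone check:

    # Sequence repetition vs. explicit literals: both spell the same list.
    assert [64, 64] * 2 == [64, 64, 64, 64]
    assert [512, 512] * 2 == [512, 512, 512, 512]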
@@ -197,136 +197,3 @@ def score(
         argmax: bool = True,
     ) -> float:
         return super(_ResNet_18, self).score_nn(X, y, metric, argmax)
-
-
-class _ResNet_34(Estimator, Supervised, NeuralModel):
-    def __init__(
-        self,
-        activation: Activation.FuncType = Activation.ReLU,
-        initializer: InitUtil.InitStr = None,
-        out_features: int = 1000,
-        batch_size: int = 128,
-        n_epochs: int = 100,
-        valid_size: float = 0.1,
-        lambda_: float = 0.0,
-        momentum: float = 0.9,
-        early_stopping: bool = False,
-        patience: int = 10,
-        shuffle: bool = True,
-        random_state: int | None = None,
-        deep_verbose: bool = False,
-    ) -> None:
-        self.activation = activation
-        self.initializer = initializer
-        self.out_features = out_features
-        self.lambda_ = lambda_
-        self.momentum = momentum
-        self.shuffle = shuffle
-        self.random_state = random_state
-        self._fitted = False
-
-        super().__init__(
-            batch_size,
-            n_epochs,
-            valid_size,
-            early_stopping,
-            patience,
-            shuffle,
-            random_state,
-            deep_verbose,
-        )
-        super().init_model()
-        self.model = Sequential()
-
-        self.feature_sizes_ = [
-            [3, 64],
-            [64, 64] * 3,
-            [128, 128] * 4,
-            [256, 256] * 6,
-            [512, 512] * 3,
-        ]
-        self.feature_shapes_ = [
-            self._get_feature_shapes(sizes) for sizes in self.feature_sizes_
-        ]
-
-        self.set_param_ranges(
-            {
-                "out_features": ("0<,+inf", int),
-                "batch_size": ("0<,+inf", int),
-                "n_epochs": ("0<,+inf", int),
-                "valid_size": ("0<,<1", None),
-                "momentum": ("0,1", None),
-                "dropout_rate": ("0,1", None),
-                "lambda_": ("0,+inf", None),
-                "patience": ("0<,+inf", int),
-            }
-        )
-        self.check_param_ranges()
-        self.build_model()
-
-    def build_model(self) -> None:
-        base_args = {
-            "initializer": self.initializer,
-            "lambda_": self.lambda_,
-            "random_state": self.random_state,
-        }
-        res_args = BaseBlockArgs(
-            activation=self.activation,
-            do_batch_norm=True,
-            momentum=self.momentum,
-            **base_args,
-        )
-
-        self.model.extend(
-            Convolution2D(3, 64, 7, 2, 3, **base_args),
-            BatchNorm2D(64, self.momentum),
-            self.activation(),
-            Pooling2D(3, 2, "max", "same"),
-        )
-        self.layer_2, in_channels = _make_layer(
-            64, 64, BasicBlock, 3, 2, base_args, res_args
-        )
-        self.layer_3, in_channels = _make_layer(
-            in_channels, 128, BasicBlock, 4, 3, base_args, res_args, stride=2
-        )
-        self.layer_4, in_channels = _make_layer(
-            in_channels, 256, BasicBlock, 6, 4, base_args, res_args, stride=2
-        )
-        self.layer_5, in_channels = _make_layer(
-            in_channels, 512, BasicBlock, 3, 5, base_args, res_args, stride=2
-        )
-
-        self.model.extend(
-            self.layer_2,
-            self.layer_3,
-            self.layer_4,
-            self.layer_5,
-            deep_add=True,
-        )
-        self.model.extend(
-            AdaptiveAvgPooling2D((1, 1)),
-            Flatten(),
-            Dense(512 * BasicBlock.expansion, self.out_features, **base_args),
-        )
-
-    input_shape: tuple = (-1, 3, 224, 224)
-
-    @Tensor.force_shape(input_shape)
-    def fit(self, X: Tensor, y: Matrix) -> Self:
-        return super(_ResNet_34, self).fit_nn(X, y)
-
-    @override
-    @Tensor.force_shape(input_shape)
-    def predict(self, X: Tensor, argmax: bool = True) -> Matrix | Vector:
-        return super(_ResNet_34, self).predict_nn(X, argmax)
-
-    @override
-    @Tensor.force_shape(input_shape)
-    def score(
-        self,
-        X: Tensor,
-        y: Matrix,
-        metric: Evaluator = Accuracy,
-        argmax: bool = True,
-    ) -> float:
-        return super(_ResNet_34, self).score_nn(X, y, metric, argmax)
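For context, the deleted class follows the standard ResNet-34 stage layout: 3, 4, 6, and 3 BasicBlocks for layer_2 through layer_5, as passed to _make_layer above. A back-of-the-envelope depth check (a sketch; the two-convolutions-per-block figure is the standard BasicBlock design and is assumed here, since this diff does not show BasicBlock itself):

    # Why the reverted model is called ResNet-34 (illustrative arithmetic).
    blocks = [3, 4, 6, 3]    # block counts passed to _make_layer above
    convs_per_block = 2      # assumed: a standard BasicBlock has two 3x3 convs
    depth = 1 + sum(blocks) * convs_per_block + 1  # stem conv + residual convs + Dense
    assert depth == 34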
