Merge pull request #6 from ChanLumerico:resnet-18
Resnet-18
ChanLumerico authored Aug 11, 2024
2 parents 4244e83 + b5692bf commit 3b9bb7c
Showing 12 changed files with 585 additions and 58 deletions.
4 changes: 4 additions & 0 deletions luma/__import__.py
@@ -83,6 +83,9 @@
GlobalAvgPooling1D,
GlobalAvgPooling2D,
GlobalAvgPooling3D,
AdaptiveAvgPooling1D,
AdaptiveAvgPooling2D,
AdaptiveAvgPooling3D,
LpPooling1D,
LpPooling2D,
LpPooling3D,
@@ -304,6 +307,7 @@
Convolution1D, Convolution2D, Convolution3D,
Pooling1D, Pooling2D, Pooling3D,
GlobalAvgPooling1D, GlobalAvgPooling2D, GlobalAvgPooling3D,
AdaptiveAvgPooling1D, AdaptiveAvgPooling2D, AdaptiveAvgPooling3D,
LpPooling1D, LpPooling2D, LpPooling3D,
Dropout, Dropout1D, Dropout2D, Dropout3D,
BatchNorm1D, BatchNorm2D, BatchNorm3D,
173 changes: 173 additions & 0 deletions luma/neural/_layers/pool.py
@@ -13,6 +13,9 @@
"_GlobalAvgPool1D",
"_GlobalAvgPool2D",
"_GlobalAvgPool3D",
"_AdaptiveAvgPool1D",
"_AdaptiveAvgPool2D",
"_AdaptiveAvgPool3D",
"_LpPool1D",
"_LpPool2D",
"_LpPool3D",
@@ -502,6 +505,176 @@ def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
return (batch_size, channels, 1, 1, 1)


class _AdaptiveAvgPool1D(Layer):
def __init__(self, out_size: int | Tuple[int]) -> None:
super().__init__()
self.out_size = out_size

@Tensor.force_dim(3)
def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
_ = is_train
self.input_ = X
batch_size, channels, width = X.shape
target_width = self.out_size

out = np.zeros((batch_size, channels, target_width))

for i in range(target_width):
start = int(np.floor(i * width / target_width))
end = int(np.ceil((i + 1) * width / target_width))

out[:, :, i] = np.mean(X[:, :, start:end], axis=2)

return out

@Tensor.force_dim(3)
def backward(self, d_out: Tensor) -> Tensor:
X = self.input_
_, _, width = X.shape
target_width = self.out_size

dX = np.zeros_like(X)
for i in range(target_width):
start = int(np.floor(i * width / target_width))
end = int(np.ceil((i + 1) * width / target_width))

dX[:, :, start:end] += d_out[:, :, i][:, :, None] / (end - start)

self.dX = dX
return self.dX

def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
batch_size, channels, _ = in_shape
return (batch_size, channels, self.out_size)


class _AdaptiveAvgPool2D(Layer):
def __init__(self, out_size: Tuple[int, int]) -> None:
super().__init__()
self.out_size = out_size

@Tensor.force_dim(4)
def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
_ = is_train
self.input_ = X
batch_size, channels, height, width = X.shape
target_height, target_width = self.out_size

out = np.zeros((batch_size, channels, target_height, target_width))

for i in range(target_height):
for j in range(target_width):
h_start = int(np.floor(i * height / target_height))
h_end = int(np.ceil((i + 1) * height / target_height))
w_start = int(np.floor(j * width / target_width))
w_end = int(np.ceil((j + 1) * width / target_width))

out[:, :, i, j] = np.mean(
X[:, :, h_start:h_end, w_start:w_end], axis=(2, 3)
)

return out

@Tensor.force_dim(4)
def backward(self, d_out: Tensor) -> Tensor:
X = self.input_
_, _, height, width = X.shape
target_height, target_width = self.out_size

dX = np.zeros_like(X)
for i in range(target_height):
for j in range(target_width):
h_start = int(np.floor(i * height / target_height))
h_end = int(np.ceil((i + 1) * height / target_height))
w_start = int(np.floor(j * width / target_width))
w_end = int(np.ceil((j + 1) * width / target_width))

dX[:, :, h_start:h_end, w_start:w_end] += d_out[:, :, i, j][
:, :, None, None
] / ((h_end - h_start) * (w_end - w_start))

self.dX = dX
return self.dX

def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
batch_size, channels, _, _ = in_shape
return (batch_size, channels, *self.out_size)


class _AdaptiveAvgPool3D(Layer):
def __init__(self, out_size: Tuple[int, int, int]) -> None:
super().__init__()
self.out_size = out_size

@Tensor.force_dim(5)
def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
_ = is_train
self.input_ = X
batch_size, channels, depth, height, width = X.shape
target_depth, target_height, target_width = self.out_size

out = np.zeros(
(
batch_size,
channels,
target_depth,
target_height,
target_width,
)
)
for d in range(target_depth):
d_start = int(np.floor(d * depth / target_depth))
d_end = int(np.ceil((d + 1) * depth / target_depth))

for i in range(target_height):
h_start = int(np.floor(i * height / target_height))
h_end = int(np.ceil((i + 1) * height / target_height))

for j in range(target_width):
w_start = int(np.floor(j * width / target_width))
w_end = int(np.ceil((j + 1) * width / target_width))

out[:, :, d, i, j] = np.mean(
X[:, :, d_start:d_end, h_start:h_end, w_start:w_end],
axis=(2, 3, 4),
)

return out

@Tensor.force_dim(5)
def backward(self, d_out: Tensor) -> Tensor:
X = self.input_
_, _, depth, height, width = X.shape
target_depth, target_height, target_width = self.out_size

dX = np.zeros_like(X)

for d in range(target_depth):
d_start = int(np.floor(d * depth / target_depth))
d_end = int(np.ceil((d + 1) * depth / target_depth))

for i in range(target_height):
h_start = int(np.floor(i * height / target_height))
h_end = int(np.ceil((i + 1) * height / target_height))

for j in range(target_width):
w_start = int(np.floor(j * width / target_width))
w_end = int(np.ceil((j + 1) * width / target_width))

dX[:, :, d_start:d_end, h_start:h_end, w_start:w_end] += d_out[
:, :, d, i, j
][:, :, None, None, None] / (
(d_end - d_start) * (h_end - h_start) * (w_end - w_start)
)

self.dX = dX
return self.dX

def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
batch_size, channels, _, _, _ = in_shape
return (batch_size, channels, *self.out_size)


class _LpPool1D(Layer):
def __init__(
self,
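
The adaptive pooling layers added above split each spatial axis into out_size bins whose boundaries come from floor/ceil index arithmetic, then average within each bin. As a rough standalone sketch (not part of this commit; the helper name adaptive_avg_pool2d and the test values are made up), the same logic can be exercised in plain NumPy, including a check that an output size of (1, 1) reduces to a global average, which is how ResNet-style models typically collapse the final feature map:

import numpy as np

def adaptive_avg_pool2d(X: np.ndarray, out_size: tuple) -> np.ndarray:
    # X has shape (batch, channels, height, width); the bin boundaries
    # mirror the floor/ceil arithmetic in _AdaptiveAvgPool2D.forward.
    batch, channels, height, width = X.shape
    th, tw = out_size
    out = np.zeros((batch, channels, th, tw))
    for i in range(th):
        h0 = int(np.floor(i * height / th))
        h1 = int(np.ceil((i + 1) * height / th))
        for j in range(tw):
            w0 = int(np.floor(j * width / tw))
            w1 = int(np.ceil((j + 1) * width / tw))
            out[:, :, i, j] = X[:, :, h0:h1, w0:w1].mean(axis=(2, 3))
    return out

X = np.random.rand(2, 3, 7, 7)
assert np.allclose(
    adaptive_avg_pool2d(X, (1, 1))[..., 0, 0], X.mean(axis=(2, 3))
)
print(adaptive_avg_pool2d(X, (2, 2)).shape)  # (2, 3, 2, 2)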
1 change: 1 addition & 0 deletions luma/neural/_models/__init__.py
@@ -29,3 +29,4 @@
import luma.neural._models.alex as alex
import luma.neural._models.vgg as vgg
import luma.neural._models.incep as incep
import luma.neural._models.resnet as resnet
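
The new luma/neural/_models/resnet.py is registered here but not expanded in this view. For orientation only, below is a rough NumPy sketch of the identity-shortcut idea a ResNet-18 basic block is built around; the basic_block helper is hypothetical and is not the repository's implementation (real blocks use 3x3 convolutions with batch normalization rather than plain matrix products):

import numpy as np

def relu(x: np.ndarray) -> np.ndarray:
    return np.maximum(x, 0.0)

def basic_block(x: np.ndarray, w1: np.ndarray, w2: np.ndarray) -> np.ndarray:
    # Residual learning: the block outputs ReLU(F(x) + x), so the two
    # weight layers only have to model the residual F(x) = H(x) - x.
    out = relu(x @ w1)
    out = out @ w2
    return relu(out + x)  # identity shortcut added before the final ReLU

x = np.random.rand(4, 64)
w1 = np.random.randn(64, 64) * 0.05
w2 = np.random.randn(64, 64) * 0.05
print(basic_block(x, w1, w2).shape)  # (4, 64)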
4 changes: 2 additions & 2 deletions luma/neural/_models/alex.py
@@ -204,7 +204,7 @@ def build_model(self) -> None:
lambda_=self.lambda_,
random_state=self.random_state,
)

input_shape: tuple = (-1, 3, 227, 227)

@Tensor.force_shape(input_shape)
@@ -412,7 +412,7 @@ def build_model(self) -> None:
lambda_=self.lambda_,
random_state=self.random_state,
)

input_shape: tuple = (-1, 3, 227, 227)

@Tensor.force_shape(input_shape)
26 changes: 13 additions & 13 deletions luma/neural/_models/incep.py
@@ -9,7 +9,7 @@

from luma.neural.base import NeuralModel
from luma.neural.block import (
IncepBlockArgs,
BaseBlockArgs,
IncepBlock,
IncepResBlock,
)
@@ -105,7 +105,7 @@ def build_model(self) -> None:
"lambda_": self.lambda_,
"random_state": self.random_state,
}
incep_args = IncepBlockArgs(
incep_args = BaseBlockArgs(
activation=self.activation,
do_batch_norm=False,
**base_args,
@@ -179,7 +179,7 @@ def build_model(self) -> None:

self.model += Flatten()
self.model += Dense(1024, self.out_features, **base_args)

input_shape: tuple = (-1, 3, 224, 224)

@Tensor.force_shape(input_shape)
@@ -273,7 +273,7 @@ def build_model(self) -> None:
"lambda_": self.lambda_,
"random_state": self.random_state,
}
incep_args = IncepBlockArgs(
incep_args = BaseBlockArgs(
activation=self.activation,
do_batch_norm=False,
**base_args,
@@ -385,7 +385,7 @@ def build_model(self) -> None:
Dropout(self.dropout_rate, self.random_state),
Dense(2048, self.out_features, **base_args),
)

input_shape: tuple = (-1, 3, 299, 299)

@Tensor.force_shape(input_shape)
@@ -482,7 +482,7 @@ def build_model(self) -> None:
"lambda_": self.lambda_,
"random_state": self.random_state,
}
incep_args = IncepBlockArgs(
incep_args = BaseBlockArgs(
activation=self.activation,
do_batch_norm=True,
**base_args,
@@ -600,7 +600,7 @@ def build_model(self) -> None:
Dropout(self.dropout_rate, self.random_state),
Dense(2048, self.out_features, **base_args),
)

input_shape: tuple = (-1, 3, 299, 299)

@Tensor.force_shape(input_shape)
@@ -688,7 +688,7 @@ def __init__(
self.build_model()

def build_model(self) -> None:
incep_args = IncepBlockArgs(
incep_args = BaseBlockArgs(
activation=self.activation,
initializer=self.initializer,
lambda_=self.lambda_,
@@ -726,7 +726,7 @@ def build_model(self) -> None:
Dropout(self.dropout_rate, self.random_state),
Dense(1536, self.out_features),
)

input_shape: tuple = (-1, 3, 299, 299)

@Tensor.force_shape(input_shape)
@@ -814,7 +814,7 @@ def __init__(
self.build_model()

def build_model(self) -> None:
incep_args = IncepBlockArgs(
incep_args = BaseBlockArgs(
activation=self.activation,
initializer=self.initializer,
lambda_=self.lambda_,
@@ -851,7 +851,7 @@ def build_model(self) -> None:
Dropout(self.dropout_rate, self.random_state),
Dense(1792, self.out_features),
)

input_shape: tuple = (-1, 3, 299, 299)

@Tensor.force_shape(input_shape)
@@ -939,7 +939,7 @@ def __init__(
self.build_model()

def build_model(self) -> None:
incep_args = IncepBlockArgs(
incep_args = BaseBlockArgs(
activation=self.activation,
initializer=self.initializer,
lambda_=self.lambda_,
@@ -979,7 +979,7 @@ def build_model(self) -> None:
Dropout(self.dropout_rate, self.random_state),
Dense(2272, self.out_features),
)

input_shape: tuple = (-1, 3, 299, 299)

@Tensor.force_shape(input_shape)
4 changes: 2 additions & 2 deletions luma/neural/_models/lenet.py
@@ -112,7 +112,7 @@ def build_model(self) -> None:
lambda_=self.lambda_,
random_state=self.random_state,
)

input_shape: tuple = (-1, 1, 28, 28)

@Tensor.force_shape(input_shape)
@@ -246,7 +246,7 @@ def build_model(self) -> None:
lambda_=self.lambda_,
random_state=self.random_state,
)

input_shape: tuple = (-1, 1, 32, 32)

@Tensor.force_shape(input_shape)