Commit dropoutNd
ChanLumerico committed May 16, 2024
1 parent 1484a8a commit 990c31d

Showing 7 changed files with 210 additions and 20 deletions.
10 changes: 10 additions & 0 deletions docs_pending.json
@@ -40,10 +40,20 @@
"script": "luma.neural.layer",
"inheriting": "Layer"
},
{
"name": "Dropout2D & 3D",
"script": "luma.neural.layer",
"inheriting": "Layer"
},
{
"name": "AlexNet",
"script": "luma.neural.network",
"inheriting": ["Estimator", "Supervised", "NeuralModel"]
},
{
"name": "ZFNet",
"script": "luma.neural.network",
"inheriting": ["Estimator", "Supervised", "NeuralModel"]
}
]
}
3 changes: 0 additions & 3 deletions luma/interface/typing.py
@@ -92,9 +92,6 @@ class Tensor(TensorLike):
refers to an array with its dimensionality higher than 2.
"""

type Tensor_3D = Self
type Tensor_4D = Self

def __new__(cls, array_like: Any) -> Self:
if isinstance(array_like, list):
obj = Matrix(array_like)
12 changes: 6 additions & 6 deletions luma/neural/_layers/_conv.py
@@ -20,9 +20,9 @@ def __init__(
stride: int = 1,
padding: Literal["valid", "same"] = "same",
initializer: InitUtil.InitStr = None,
optimizer: Optimizer = None,
optimizer: Optimizer | None = None,
lambda_: float = 0.0,
random_state: int = None,
random_state: int | None = None,
) -> None:
super().__init__()
self.in_channels = in_channels
@@ -158,9 +158,9 @@ def __init__(
stride: int = 1,
padding: Literal["valid", "same"] = "same",
initializer: InitUtil.InitStr = None,
optimizer: Optimizer = None,
optimizer: Optimizer | None = None,
lambda_: float = 0.0,
random_state: int = None,
random_state: int | None = None,
) -> None:
super().__init__()
self.in_channels = in_channels
@@ -323,9 +323,9 @@ def __init__(
stride: int = 1,
padding: Literal["valid", "same"] = "same",
initializer: InitUtil.InitStr = None,
optimizer: Optimizer = None,
optimizer: Optimizer | None = None,
lambda_: float = 0.0,
random_state: int = None,
random_state: int | None = None,
) -> None:
super().__init__()
self.in_channels = in_channels
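The convolution layers themselves are not changed in this commit; only the annotations of parameters that default to `None` are tightened to explicit unions (PEP 604 `X | None`, Python 3.10+). A minimal sketch of the pattern, using a hypothetical helper name:

```py
import numpy as np

def make_rng(random_state: int | None = None) -> np.random.RandomState:
    # None leaves the generator unseeded; an int makes sampling reproducible.
    return np.random.RandomState(random_state)

rng = make_rng(42)
```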
97 changes: 93 additions & 4 deletions luma/neural/_layers/_drop.py
@@ -5,14 +5,14 @@
from luma.neural.base import Layer


__all__ = "_Dropout"
__all__ = ("_Dropout", "_Dropout1D", "_Dropout2D", "_Dropout3D")


class _Dropout(Layer):
def __init__(
self,
dropout_rate: float = 0.5,
random_state: int = None,
random_state: int | None = None,
) -> None:
super(_Dropout, self).__init__()
self.dropout_rate = dropout_rate
@@ -27,8 +27,97 @@ def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
if is_train:
self.mask_ = self.rs_.rand(*X.shape) < (1 - self.dropout_rate)
return X * self.mask_ / (1 - self.dropout_rate)
else:
return X
return X

def backward(self, d_out: Tensor) -> Tensor:
if self.mask_ is not None:
return d_out * self.mask_ / (1 - self.dropout_rate)
return d_out

def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
return in_shape


class _Dropout1D(Layer):
def __init__(
self,
dropout_rate: float = 0.5,
random_state: int | None = None,
) -> None:
super().__init__()
self.dropout_rate = dropout_rate
self.random_state = random_state
self.rs_ = np.random.RandomState(self.random_state)
self.mask_ = None

self.set_param_ranges({"dropout_rate": ("0,1", None)})
self.check_param_ranges()

def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
if is_train:
self.mask_ = self.rs_.rand(*X.shape[:2], 1) < (1 - self.dropout_rate)
return X * self.mask_ / (1 - self.dropout_rate)
return X

def backward(self, d_out: Tensor) -> Tensor:
if self.mask_ is not None:
return d_out * self.mask_ / (1 - self.dropout_rate)
return d_out

def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
return in_shape


class _Dropout2D(Layer):
def __init__(
self,
dropout_rate: float = 0.5,
random_state: int | None = None,
) -> None:
super().__init__()
self.dropout_rate = dropout_rate
self.random_state = random_state
self.rs_ = np.random.RandomState(self.random_state)
self.mask_ = None

self.set_param_ranges({"dropout_rate": ("0,1", None)})
self.check_param_ranges()

def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
if is_train:
self.mask_ = self.rs_.rand(*X.shape[:2], 1, 1) < (1 - self.dropout_rate)
return X * self.mask_ / (1 - self.dropout_rate)
return X

def backward(self, d_out: Tensor) -> Tensor:
if self.mask_ is not None:
return d_out * self.mask_ / (1 - self.dropout_rate)
return d_out

def out_shape(self, in_shape: Tuple[int]) -> Tuple[int]:
return in_shape


class _Dropout3D(Layer):
def __init__(
self,
dropout_rate: float = 0.5,
random_state: int | None = None,
) -> None:
super().__init__()
self.dropout_rate = dropout_rate
self.random_state = random_state
self.rs_ = np.random.RandomState(self.random_state)
self.mask_ = None

self.set_param_ranges({"dropout_rate": ("0,1", None)})
self.check_param_ranges()

def forward(self, X: Tensor, is_train: bool = False) -> Tensor:
if is_train:
self.mask_ = self.rs_.rand(*X.shape[:2], 1, 1, 1) < (1 - self.dropout_rate)
return X * self.mask_ / (1 - self.dropout_rate)
return X

def backward(self, d_out: Tensor) -> Tensor:
if self.mask_ is not None:
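The new `_Dropout1D/2D/3D` classes differ from the element-wise `_Dropout` only in the shape of the mask: it is drawn once per `(sample, channel)` pair and broadcast over the remaining axes, so an entire channel is either kept or zeroed. A standalone NumPy sketch of the same idea for the 2D case (illustration only, not the library's API):

```py
import numpy as np

rng = np.random.RandomState(42)
p = 0.5                              # dropout rate
X = rng.rand(8, 3, 16, 16)           # (batch, channels, height, width)

# One keep/drop decision per (sample, channel), broadcast over height and width,
# mirroring `self.rs_.rand(*X.shape[:2], 1, 1) < (1 - p)` above.
mask = rng.rand(8, 3, 1, 1) < (1 - p)
out = X * mask / (1 - p)             # inverted scaling keeps the expected activation unchanged

assert out.shape == X.shape          # out_shape is the identity, as in the layer
```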
8 changes: 4 additions & 4 deletions luma/neural/block.py
@@ -92,7 +92,7 @@ def __init__(
pool_filter_size: int = 2,
pool_stride: int = 2,
pool_mode: Literal["max", "avg"] = "max",
random_state: int = None,
random_state: int | None = None,
) -> None:
super(ConvBlock1D, self).__init__(
Convolution1D(
@@ -217,7 +217,7 @@ def __init__(
pool_filter_size: int = 2,
pool_stride: int = 2,
pool_mode: Literal["max", "avg"] = "max",
random_state: int = None,
random_state: int | None = None,
) -> None:
super(ConvBlock2D, self).__init__(
Convolution2D(
@@ -342,7 +342,7 @@ def __init__(
pool_filter_size: int = 2,
pool_stride: int = 2,
pool_mode: Literal["max", "avg"] = "max",
random_state: int = None,
random_state: int | None = None,
) -> None:
super(ConvBlock3D, self).__init__(
Convolution3D(
@@ -445,7 +445,7 @@ def __init__(
momentum: float = 0.9,
do_dropout: bool = True,
dropout_rate: float = 0.5,
random_state: int = None,
random_state: int | None = None,
) -> None:
super(DenseBlock, self).__init__(
Dense(
98 changes: 96 additions & 2 deletions luma/neural/layer.py
@@ -24,6 +24,9 @@
"Pooling3D",
"Dense",
"Dropout",
"Dropout1D",
"Dropout2D",
"Dropout3D",
"Flatten",
"Activation",
"BatchNorm1D",
@@ -416,9 +419,100 @@ class Dropout(_drop._Dropout):
Notes
-----
- During inference, dropout is typically turned off, and the layer behaves
as the identity function.
- This class applies dropout element-wise, so the input may have any shape.
"""

def __init__(
self, dropout_rate: float = 0.5, random_state: int | None = None
) -> None:
super().__init__(dropout_rate, random_state)
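
A minimal usage sketch for the element-wise layer, assuming `Dropout` is importable from `luma.neural.layer` as listed in `__all__` above; integration into a full model may differ:

```py
import numpy as np
from luma.neural.layer import Dropout

X = np.random.rand(4, 10)
drop = Dropout(dropout_rate=0.5, random_state=42)

out_train = drop.forward(X, is_train=True)   # ~half the entries zeroed, the rest scaled by 2
out_infer = drop.forward(X, is_train=False)  # identity: the input is returned unchanged
```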


class Dropout1D(_drop._Dropout1D):
"""
Dropout layer for 1-dimensional data.
Dropout is a regularization technique used during training to prevent
overfitting by randomly setting a fraction of input units to zero during
the forward pass. This helps in reducing co-adaptation of neurons and
encourages the network to learn more robust features.
Parameters
----------
`dropout_rate` : float, default=0.5
The fraction of input units to drop during training
`random_state` : int, optional, default=None
Seed for various random sampling processes
Notes
-----
- The input `X` must be a 3D array (`Tensor`).
```py
X.shape = (batch_size, channels, width)
```
"""

def __init__(
self, dropout_rate: float = 0.5, random_state: int | None = None
) -> None:
super().__init__(dropout_rate, random_state)


class Dropout2D(_drop._Dropout2D):
"""
Dropout layer for 2-dimensional data.
Dropout is a regularization technique used during training to prevent
overfitting by randomly setting a fraction of input units to zero during
the forward pass. This helps in reducing co-adaptation of neurons and
encourages the network to learn more robust features.
Parameters
----------
`dropout_rate` : float, default=0.5
The fraction of input units to drop during training
`random_state` : int, optional, default=None
Seed for various random sampling processes
Notes
-----
- The input `X` must be a 4D array (`Tensor`).
```py
X.shape = (batch_size, channels, height, width)
```
"""

def __init__(
self, dropout_rate: float = 0.5, random_state: int | None = None
) -> None:
super().__init__(dropout_rate, random_state)


class Dropout3D(_drop._Dropout3D):
"""
Dropout layer for 3-dimensional data.
Dropout is a regularization technique used during training to prevent
overfitting by randomly setting a fraction of input units to zero during
the forward pass. This helps in reducing co-adaptation of neurons and
encourages the network to learn more robust features.
Parameters
----------
`dropout_rate` : float, default=0.5
The fraction of input units to drop during training
`random_state` : int, optional, default=None
Seed for various random sampling processes
Notes
-----
- The input `X` must be a 5D array (`Tensor`).
```py
X.shape = (batch_size, channels, depth, height, width)
```
"""

def __init__(
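The `DropoutNd` variants expect channel-first inputs of the dimensionalities stated in their docstrings. A short sketch under the same import-path assumption as above:

```py
import numpy as np
from luma.neural.layer import Dropout1D, Dropout2D, Dropout3D

x1 = np.random.rand(4, 8, 32)          # (batch, channels, width)                -> Dropout1D
x2 = np.random.rand(4, 8, 32, 32)      # (batch, channels, height, width)        -> Dropout2D
x3 = np.random.rand(4, 8, 16, 32, 32)  # (batch, channels, depth, height, width) -> Dropout3D

for layer, x in [(Dropout1D(), x1), (Dropout2D(), x2), (Dropout3D(), x3)]:
    y = layer.forward(x, is_train=True)
    assert y.shape == x.shape          # a dropped channel is zeroed across all its positions
```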
2 changes: 1 addition & 1 deletion setup.py
@@ -6,7 +6,7 @@

setuptools.setup(
name="luma-ml",
version="0.7.5",
version="0.8.0",
author="ChanLumerico",
author_email="greensox284@gmail.com",
description="A Comprehensive Python Module for Machine Learning and Data Science",
