diff --git a/luma/__import__.py b/luma/__import__.py
index 94ec0da..bd658c4 100644
--- a/luma/__import__.py
+++ b/luma/__import__.py
@@ -75,7 +75,7 @@
 )
 from luma.neural.single import PerceptronClassifier, PerceptronRegressor
 from luma.neural.network import MLPClassifier, MLPRegressor
-from luma.neural.layer import Convolution, Pooling, Dense
+from luma.neural.layer import Convolution, Pooling, Dense, Dropout, Flatten
 
 from luma.metric.classification import Accuracy, Precision, Recall, F1Score, Specificity
 from luma.metric.regression import (
@@ -223,7 +223,7 @@
     AdamOptimizer, AdaGradOptimizer, AdaDeltaOptimizer,
     AdaMaxOptimizer, AdamWOptimizer, NAdamOptimizer
-    Convolution, Pooling, Dense
+    Convolution, Pooling, Dense, Dropout, Flatten
 
 # ------------------- [ luma.metric ] ----------------------
 
     Accuracy, Precision, Recall, F1Score, Specificity
diff --git a/luma/neural/layer.py b/luma/neural/layer.py
index 5580dc7..dafb848 100644
--- a/luma/neural/layer.py
+++ b/luma/neural/layer.py
@@ -27,6 +27,7 @@ class Convolution(Layer):
     (`valid` for no padding, `same` for typical 0-padding)
     `activation` : Type of activation function
     `optimizer` : Optimizer for weight update (default `SGDOptimizer`)
+    `lambda_` : L2-regularization strength
     `random_state` : Seed for various random sampling processes
 
     Notes
@@ -46,6 +47,7 @@ def __init__(
         padding: Literal["valid", "same"] = "same",
         activation: ActivationUtil.FuncType = "relu",
         optimizer: Optimizer = SGDOptimizer(),
+        lambda_: float = 0.1,
         random_state: int = None,
     ) -> None:
         super().__init__()
@@ -55,6 +57,7 @@ def __init__(
         self.padding = padding
         self.activation = activation
         self.optimizer = optimizer
+        self.lambda_ = lambda_
 
         act = ActivationUtil(self.activation)
         self.act_ = act.activation_type()
@@ -125,6 +128,8 @@ def backward(self, d_out: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
             filter_d_out_fft, s=(padded_height, padded_width)
         )[pad_h : pad_h + self.size, pad_w : pad_w + self.size]
 
+        self.dW += 2 * self.lambda_ * self.weights_
+
         for i in range(batch_size):
             for c in range(channels):
                 temp = np.zeros(
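
For readers skimming the patch: the new `lambda_` parameter implements standard (coupled) L2 regularization. The penalty `lambda_ * sum(W ** 2)` added to the loss contributes `2 * lambda_ * W` to the weight gradient, which is exactly the `self.dW += 2 * self.lambda_ * self.weights_` line inserted into `backward()`. Below is a minimal standalone sketch of that idea; the names `weights`, `d_weights`, and `lr` are illustrative stand-ins, not part of the luma API.

```python
import numpy as np

rng = np.random.default_rng(0)
weights = rng.standard_normal((6, 3, 3))    # stand-in for a bank of conv filters
d_weights = rng.standard_normal((6, 3, 3))  # gradient of the data loss w.r.t. the weights
lambda_, lr = 0.1, 0.01                     # L2 strength and learning rate (hypothetical values)

# The L2 penalty lambda_ * sum(W**2) has gradient 2 * lambda_ * W,
# mirroring `self.dW += 2 * self.lambda_ * self.weights_` in this patch.
d_weights += 2 * lambda_ * weights

# A plain SGD step then shrinks the weights toward zero ("weight decay").
weights -= lr * d_weights
```

One design note: because the penalty is folded into `dW` before the optimizer step, whichever optimizer consumes that gradient (presumably including `AdamWOptimizer` and `NAdamOptimizer` from the import list) sees the regularized gradient, i.e. classic coupled L2 rather than AdamW-style decoupled weight decay.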