add l2 to Conv.
ChanLumerico committed Apr 17, 2024
1 parent 439c765 commit dc9e831
Showing 2 changed files with 7 additions and 2 deletions.
4 changes: 2 additions & 2 deletions luma/__import__.py
@@ -75,7 +75,7 @@
)
from luma.neural.single import PerceptronClassifier, PerceptronRegressor
from luma.neural.network import MLPClassifier, MLPRegressor
-from luma.neural.layer import Convolution, Pooling, Dense
+from luma.neural.layer import Convolution, Pooling, Dense, Dropout, Flatten

from luma.metric.classification import Accuracy, Precision, Recall, F1Score, Specificity
from luma.metric.regression import (
@@ -223,7 +223,7 @@
AdamOptimizer, AdaGradOptimizer, AdaDeltaOptimizer,
AdaMaxOptimizer, AdamWOptimizer, NAdamOptimizer

-Convolution, Pooling, Dense
+Convolution, Pooling, Dense, Dropout, Flatten

# ------------------- [ luma.metric ] ----------------------
Accuracy, Precision, Recall, F1Score, Specificity
5 changes: 5 additions & 0 deletions luma/neural/layer.py
@@ -27,6 +27,7 @@ class Convolution(Layer):
(`valid` for no padding, `same` for typical 0-padding)
`activation` : Type of activation function
`optimizer` : Optimizer for weight update (default `SGDOptimizer`)
+`lambda_` : L2-regularization strength
`random_state` : Seed for various random sampling processes
Notes
@@ -46,6 +47,7 @@ def __init__(
padding: Literal["valid", "same"] = "same",
activation: ActivationUtil.FuncType = "relu",
optimizer: Optimizer = SGDOptimizer(),
+lambda_: float = 0.1,
random_state: int = None,
) -> None:
super().__init__()
@@ -55,6 +57,7 @@ def __init__(
self.padding = padding
self.activation = activation
self.optimizer = optimizer
+self.lambda_ = lambda_

act = ActivationUtil(self.activation)
self.act_ = act.activation_type()
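
The constructor now stores an L2-regularization strength alongside the other hyperparameters. A minimal usage sketch under the signature visible in this diff; only keyword arguments shown in these hunks are used, and any required arguments not visible here (such as the filter configuration) are omitted and may need to be supplied:

from luma.neural.layer import Convolution

# lambda_ is the new L2-regularization strength (0.0 would disable the penalty)
conv = Convolution(
    padding="same",
    activation="relu",
    lambda_=0.01,
    random_state=42,
)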
@@ -125,6 +128,8 @@ def backward(self, d_out: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
filter_d_out_fft, s=(padded_height, padded_width)
)[pad_h : pad_h + self.size, pad_w : pad_w + self.size]

+self.dW += 2 * self.lambda_ * self.weights_

for i in range(batch_size):
for c in range(channels):
temp = np.zeros(
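
The new line in `backward` accumulates the gradient of the L2 penalty: for a regularization term lambda * ||W||^2, the derivative with respect to the weights is 2 * lambda * W, which is added to `dW` on top of the data gradient before the optimizer update. A NumPy-only sketch of the same idea, with illustrative shapes rather than Luma's internals:

import numpy as np

# Illustrative filter weights and data-loss gradient (same shape)
weights = np.random.randn(8, 3, 3, 3)
dW = np.random.randn(*weights.shape)

lambda_ = 0.1  # L2-regularization strength, matching the new constructor default

# d/dW [lambda * ||W||^2] = 2 * lambda * W, added on top of the data gradient
dW += 2 * lambda_ * weights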
