From d7689167727a1cbc8e2405835b243bd0235b9fd5 Mon Sep 17 00:00:00 2001 From: Chan Lee <150145948+ChanLumerico@users.noreply.github.com> Date: Fri, 9 Aug 2024 11:44:43 +0000 Subject: [PATCH] NeuralModel param changes --- luma/neural/_models/alex.py | 27 +---- luma/neural/_models/incep.py | 69 +++--------- luma/neural/_models/lenet.py | 37 ++---- luma/neural/_models/simple.py | 26 +---- luma/neural/_models/vgg.py | 49 ++------ luma/neural/base.py | 29 +++-- luma/neural/model.py | 207 ---------------------------------- 7 files changed, 65 insertions(+), 379 deletions(-) diff --git a/luma/neural/_models/alex.py b/luma/neural/_models/alex.py index 6268c53..b01722e 100644 --- a/luma/neural/_models/alex.py +++ b/luma/neural/_models/alex.py @@ -1,13 +1,12 @@ from typing import Self, override from dataclasses import asdict -from luma.core.super import Estimator, Evaluator, Optimizer, Supervised +from luma.core.super import Estimator, Evaluator, Supervised from luma.interface.typing import Matrix, Tensor, Vector from luma.interface.util import InitUtil from luma.metric.classification import Accuracy -from luma.neural import loss -from luma.neural.base import Loss, NeuralModel +from luma.neural.base import NeuralModel from luma.neural.block import ConvBlock2D, DenseBlock, ConvBlockArgs, DenseBlockArgs from luma.neural.layer import ( Activation, @@ -24,14 +23,11 @@ class _AlexNet(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.5, @@ -42,8 +38,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -55,16 +49,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 96, 256, 384, 384, 256], @@ -80,7 +73,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -237,14 +229,11 @@ def score( class _ZFNet(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.5, @@ -255,8 +244,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -268,16 +255,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - 
self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 96, 256, 384, 384, 256], @@ -293,7 +279,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), diff --git a/luma/neural/_models/incep.py b/luma/neural/_models/incep.py index 914f7b2..a2ba3e8 100644 --- a/luma/neural/_models/incep.py +++ b/luma/neural/_models/incep.py @@ -7,8 +7,7 @@ from luma.metric.classification import Accuracy from luma.preprocessing.encoder import LabelSmoothing -from luma.neural import loss -from luma.neural.base import Loss, NeuralModel +from luma.neural.base import NeuralModel from luma.neural.block import ( IncepBlockArgs, IncepBlock, @@ -40,14 +39,11 @@ class _Inception_V1(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -58,8 +54,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -71,16 +65,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 64, 64, 192], @@ -97,7 +90,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -212,14 +204,11 @@ def score( class _Inception_V2(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -230,8 +219,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -243,16 +230,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 32, 32, 64, 64, 80, 192, 288], @@ -270,7 +256,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -424,13 +409,10 @@ class 
_Inception_V3(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -441,9 +423,7 @@ def __init__( random_state: int | None = None, deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -456,16 +436,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 32, 32, 64, 64, 80, 192, 288], @@ -483,7 +462,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -645,14 +623,11 @@ def score( class _Inception_V4(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.8, @@ -664,9 +639,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -678,16 +651,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [] self.feature_shapes_ = [ @@ -699,7 +671,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -776,14 +747,11 @@ def score( class _InceptionRes_V1(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.8, @@ -795,8 +763,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -809,16 +775,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - 
self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [] self.feature_shapes_ = [ @@ -830,7 +795,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -906,14 +870,11 @@ def score( class _InceptionRes_V2(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.8, @@ -925,8 +886,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -939,16 +898,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [] self.feature_shapes_ = [ @@ -960,7 +918,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), diff --git a/luma/neural/_models/lenet.py b/luma/neural/_models/lenet.py index c424e98..adaca1c 100644 --- a/luma/neural/_models/lenet.py +++ b/luma/neural/_models/lenet.py @@ -1,12 +1,11 @@ from typing import Self, override -from luma.core.super import Estimator, Evaluator, Optimizer, Supervised +from luma.core.super import Estimator, Evaluator, Supervised from luma.interface.typing import Matrix, Tensor, Vector from luma.interface.util import InitUtil from luma.metric.classification import Accuracy -from luma.neural import loss -from luma.neural.base import Loss, NeuralModel +from luma.neural.base import NeuralModel from luma.neural.block import ConvBlock2D, DenseBlock from luma.neural.layer import Activation, Dense, Flatten, Sequential @@ -17,14 +16,11 @@ class _LeNet_1(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.Tanh, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 10, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, early_stopping: bool = False, @@ -34,8 +30,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -46,16 +40,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [1, 4, 8], @@ -71,7 +64,6 @@ def __init__( 
"out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -145,14 +137,11 @@ def score( class _LeNet_4(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.Tanh, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 10, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.5, @@ -163,8 +152,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -176,16 +163,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [1, 4, 16], @@ -201,7 +187,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -284,14 +269,11 @@ def score( class _LeNet_5(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.Tanh, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 10, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.5, @@ -302,8 +284,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -315,16 +295,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [1, 6, 16], diff --git a/luma/neural/_models/simple.py b/luma/neural/_models/simple.py index 180b5e6..8819157 100644 --- a/luma/neural/_models/simple.py +++ b/luma/neural/_models/simple.py @@ -1,10 +1,10 @@ from typing import Literal, Self, override -from luma.core.super import Estimator, Evaluator, Optimizer, Supervised +from luma.core.super import Estimator, Evaluator, Supervised from luma.interface.typing import Matrix, Tensor, Vector from luma.interface.util import InitUtil -from luma.neural.base import Loss, NeuralModel +from luma.neural.base import NeuralModel from luma.neural.block import ConvBlock2D, DenseBlock from luma.neural.layer import Activation, Dense, Dropout, Flatten, Sequential @@ -20,12 +20,9 @@ def __init__( hidden_layers: list[int] | int, *, activation: Activation.FuncType, - optimizer: Optimizer, - loss: Loss, initializer: InitUtil.InitStr = None, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.001, valid_size: float = 0.1, dropout_rate: float = 0.5, 
lambda_: float = 0.0, @@ -40,8 +37,6 @@ def __init__( self.hidden_layers = hidden_layers self.initializer = initializer self.activation = activation - self.optimizer = optimizer - self.loss = loss self.dropout_rate = dropout_rate self.lambda_ = lambda_ self.shuffle = shuffle @@ -51,16 +46,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) if isinstance(self.hidden_layers, int): self.hidden_layers = [self.hidden_layers] @@ -78,7 +72,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -128,8 +121,6 @@ def __init__( filter_size: int, *, activation: Activation.FuncType, - optimizer: Optimizer, - loss: Loss, initializer: InitUtil.InitStr = None, padding: Literal["same", "valid"] = "same", stride: int = 1, @@ -143,7 +134,6 @@ def __init__( dropout_rate: float = 0.5, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.001, valid_size: float = 0.1, lambda_: float = 0.0, early_stopping: bool = False, @@ -158,8 +148,6 @@ def __init__( self.out_features = out_features self.filter_size = filter_size self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.padding = padding self.stride = stride @@ -179,16 +167,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) if isinstance(self.in_channels_list, int): self.in_channels_list = [self.in_channels_list] @@ -216,7 +203,6 @@ def __init__( "dropout_rate": ("0,1", None), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), diff --git a/luma/neural/_models/vgg.py b/luma/neural/_models/vgg.py index 14b47d2..e03f4a6 100644 --- a/luma/neural/_models/vgg.py +++ b/luma/neural/_models/vgg.py @@ -1,13 +1,12 @@ from typing import Self, override from dataclasses import asdict -from luma.core.super import Estimator, Evaluator, Optimizer, Supervised +from luma.core.super import Estimator, Evaluator, Supervised from luma.interface.typing import Matrix, Tensor, Vector from luma.interface.util import InitUtil from luma.metric.classification import Accuracy -from luma.neural import loss -from luma.neural.base import Loss, NeuralModel +from luma.neural.base import NeuralModel from luma.neural.block import ConvBlock2D, DenseBlock, ConvBlockArgs, DenseBlockArgs from luma.neural.layer import ( Activation, @@ -28,14 +27,11 @@ class _VGGNet_11(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.5, @@ -46,8 +42,6 @@ def __init__( 
deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -59,16 +53,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 64, 128, 256, 256, *[512] * 4], @@ -84,7 +77,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -200,14 +192,11 @@ def score( class _VGGNet_13(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.5, @@ -218,8 +207,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -231,16 +218,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 64, 64, 128, 128, 256, 256, *[512] * 4], @@ -256,7 +242,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -384,14 +369,11 @@ def score( class _VGGNet_16(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.5, @@ -402,8 +384,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -415,16 +395,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 64, 64, 128, 128, *[256] * 3, *[512] * 6], @@ -440,7 +419,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), @@ -580,14 +558,11 @@ def score( 
class _VGGNet_19(Estimator, Supervised, NeuralModel): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.5, @@ -598,8 +573,6 @@ def __init__( deep_verbose: bool = False, ) -> None: self.activation = activation - self.optimizer = optimizer - self.loss = loss self.initializer = initializer self.out_features = out_features self.lambda_ = lambda_ @@ -611,16 +584,15 @@ def __init__( super().__init__( batch_size, n_epochs, - learning_rate, valid_size, early_stopping, patience, + shuffle, + random_state, deep_verbose, ) super().init_model() self.model = Sequential() - self.optimizer.set_params(learning_rate=self.learning_rate) - self.model.set_optimizer(optimizer=self.optimizer) self.feature_sizes_ = [ [3, 64, 64, 128, 128, *[256] * 4, *[512] * 8], @@ -636,7 +608,6 @@ def __init__( "out_features": ("0<,+inf", int), "batch_size": ("0<,+inf", int), "n_epochs": ("0<,+inf", int), - "learning_rate": ("0<,+inf", None), "valid_size": ("0<,<1", None), "dropout_rate": ("0,1", None), "lambda_": ("0,+inf", None), diff --git a/luma/neural/base.py b/luma/neural/base.py index 8e5db2d..914a65b 100644 --- a/luma/neural/base.py +++ b/luma/neural/base.py @@ -270,25 +270,22 @@ def __init__( self, batch_size: int, n_epochs: int, - learning_rate: float, valid_size: float, early_stopping: bool, patience: int, + shuffle: bool, + random_state: int | None, deep_verbose: bool, ) -> None: self.batch_size = batch_size self.n_epochs = n_epochs - self.learning_rate = learning_rate self.valid_size = valid_size self.early_stopping = early_stopping self.patience = patience + self.shuffle = shuffle + self.random_state = random_state self.deep_verbose = deep_verbose - if not hasattr(self, "shuffle"): - self.shuffle = True - if not hasattr(self, "random_state"): - self.random_state = None - def init_model(self) -> None: self.feature_sizes_: list = [] self.feature_shapes_: list = [] @@ -298,6 +295,7 @@ def init_model(self) -> None: self.valid_loss_: list[float] = [] self.model: object + self.loss: Optional[Loss] = None self.lr_scheduler: Optional[Scheduler] = None @abstractmethod @@ -306,6 +304,19 @@ def build_model(self) -> None: ... def _get_feature_shapes(self, sizes: list) -> list[tuple]: return [(i, j) for i, j in zip(sizes[:-1], sizes[1:])] + def check_necessaries_(self) -> None: + if self.model.optimizer is None: + raise RuntimeError( + f"'{str(self)}' does not have an optimizer!" + + f" Call 'set_optimizer()' to assign an optimizer.", + ) + + if self.loss is None: + raise RuntimeError( + f"'{str(self)}' does not have a loss function!" 
+ + f" Call 'set_loss()' to assign a loss function.", + ) + def fit_nn(self, X: TensorLike, y: TensorLike) -> Self: X_train, X_val, y_train, y_val = TrainTestSplit( X, @@ -369,6 +380,8 @@ def train(self, X: TensorLike, y: TensorLike, epoch: int) -> list[float]: train_batch = BatchGenerator( X, y, batch_size=self.batch_size, shuffle=self.shuffle ) + self.check_necessaries_() + for i, (X_batch, y_batch) in enumerate(train_batch, start=1): t_start = time.time_ns() out = self.model(X_batch, is_train=True) @@ -394,6 +407,8 @@ def train(self, X: TensorLike, y: TensorLike, epoch: int) -> list[float]: def eval(self, X: TensorLike, y: TensorLike) -> list[float]: valid_loss = [] + self.check_necessaries_() + for X_batch, y_batch in BatchGenerator( X, y, batch_size=self.batch_size, shuffle=self.shuffle ): diff --git a/luma/neural/model.py b/luma/neural/model.py index c086fd7..47f9e03 100644 --- a/luma/neural/model.py +++ b/luma/neural/model.py @@ -1,12 +1,8 @@ from typing import Literal -from luma.core.super import Optimizer from luma.interface.util import InitUtil -from luma.neural import loss -from luma.neural.base import Loss from luma.neural.layer import Activation - from luma.neural import _models @@ -60,18 +56,12 @@ class SimpleMLP(_models.simple._SimpleMLP): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.001 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `initializer` : InitStr, default=None Type of weight initializer `activation` : FuncType Type of activation function - `optimizer` : Optimizer - An optimizer used in weight update process - `loss` : Loss - Type of loss function `dropout_rate` : float, default=0.5 Dropout rate `lambda_` : float, default=0.0 @@ -100,12 +90,9 @@ def __init__( hidden_layers: list[int] | int, *, activation: Activation.FuncType, - optimizer: Optimizer, - loss: Loss, initializer: InitUtil.InitStr = None, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.001, valid_size: float = 0.1, dropout_rate: float = 0.5, lambda_: float = 0, @@ -120,12 +107,9 @@ def __init__( out_features, hidden_layers, activation, - optimizer, - loss, initializer, batch_size, n_epochs, - learning_rate, valid_size, dropout_rate, lambda_, @@ -167,10 +151,6 @@ class SimpleCNN(_models.simple._SimpleCNN): Size of filters for convolution layers `activation` : FuncType Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss - Type of loss function `initializer` : InitStr, default=None Type of weight initializer (None for dense layers) `padding` : {"same", "valid"}, default="same" @@ -197,8 +177,6 @@ class SimpleCNN(_models.simple._SimpleCNN): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.001 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -259,8 +237,6 @@ def __init__( *, filter_size: int, activation: Activation.FuncType, - optimizer: Optimizer, - loss: Loss, initializer: InitUtil.InitStr = None, padding: Literal["same", "valid"] = "same", stride: int = 1, @@ -274,7 +250,6 @@ def __init__( dropout_rate: float = 0.5, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.001, valid_size: float = 0.1, lambda_: float = 0, early_stopping: bool = False, @@ -290,8 +265,6 @@ def __init__( out_features, filter_size, 
activation, - optimizer, - loss, initializer, padding, stride, @@ -305,7 +278,6 @@ def __init__( dropout_rate, batch_size, n_epochs, - learning_rate, valid_size, lambda_, early_stopping, @@ -354,10 +326,6 @@ class LeNet_1(_models.lenet._LeNet_1): ---------- `activation` : FuncType, default=Activation.Tanh Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=10 @@ -366,8 +334,6 @@ class LeNet_1(_models.lenet._LeNet_1): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.001 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -388,14 +354,11 @@ class LeNet_1(_models.lenet._LeNet_1): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.Tanh, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 10, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, early_stopping: bool = False, @@ -405,14 +368,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, early_stopping, @@ -456,10 +416,6 @@ class LeNet_4(_models.lenet._LeNet_4): ---------- `activation` : FuncType, default=Activation.Tanh Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=10 @@ -468,8 +424,6 @@ class LeNet_4(_models.lenet._LeNet_4): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.001 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -489,14 +443,11 @@ class LeNet_4(_models.lenet._LeNet_4): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.Tanh, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 10, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, dropout_rate: float = 0.5, @@ -507,14 +458,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -560,9 +508,6 @@ class LeNet_5(_models.lenet._LeNet_5): ---------- `activation` : FuncType, default=Activation.Tanh Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() Type of loss function `initializer` : InitStr, default=None Type of weight initializer @@ -572,8 +517,6 @@ class LeNet_5(_models.lenet._LeNet_5): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.001 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -593,14 +536,11 @@ class 
LeNet_5(_models.lenet._LeNet_5): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.Tanh, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 10, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, dropout_rate: float = 0.5, @@ -611,14 +551,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -672,10 +609,6 @@ class AlexNet(_models.alex._AlexNet): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -684,8 +617,6 @@ class AlexNet(_models.alex._AlexNet): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -707,14 +638,11 @@ class AlexNet(_models.alex._AlexNet): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, dropout_rate: float = 0.5, @@ -725,14 +653,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -787,10 +712,6 @@ class ZFNet(_models.alex._ZFNet): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -799,8 +720,6 @@ class ZFNet(_models.alex._ZFNet): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -821,14 +740,11 @@ class ZFNet(_models.alex._ZFNet): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, dropout_rate: float = 0.5, @@ -839,14 +755,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -904,10 +817,6 @@ class VGGNet_11(_models.vgg._VGGNet_11): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, 
default=None Type of weight initializer `out_features` : int, default=1000 @@ -916,8 +825,6 @@ class VGGNet_11(_models.vgg._VGGNet_11): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -937,14 +844,11 @@ class VGGNet_11(_models.vgg._VGGNet_11): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, dropout_rate: float = 0.5, @@ -955,14 +859,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1023,10 +924,6 @@ class VGGNet_13(_models.vgg._VGGNet_13): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -1035,8 +932,6 @@ class VGGNet_13(_models.vgg._VGGNet_13): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -1056,14 +951,11 @@ class VGGNet_13(_models.vgg._VGGNet_13): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, dropout_rate: float = 0.5, @@ -1074,14 +966,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1145,10 +1034,6 @@ class VGGNet_16(_models.vgg._VGGNet_16): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -1157,8 +1042,6 @@ class VGGNet_16(_models.vgg._VGGNet_16): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -1178,14 +1061,11 @@ class VGGNet_16(_models.vgg._VGGNet_16): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, dropout_rate: float = 0.5, @@ -1196,14 +1076,11 @@ def __init__( deep_verbose: bool = 
False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1270,10 +1147,6 @@ class VGGNet_19(_models.vgg._VGGNet_19): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -1282,8 +1155,6 @@ class VGGNet_19(_models.vgg._VGGNet_19): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -1303,14 +1174,11 @@ class VGGNet_19(_models.vgg._VGGNet_19): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 100, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0, dropout_rate: float = 0.5, @@ -1321,14 +1189,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1395,10 +1260,6 @@ class Inception_V1(_models.incep._Inception_V1): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -1407,8 +1268,6 @@ class Inception_V1(_models.incep._Inception_V1): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -1429,14 +1288,11 @@ class Inception_V1(_models.incep._Inception_V1): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -1447,14 +1303,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1519,10 +1372,6 @@ class Inception_V2(_models.incep._Inception_V2): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -1531,8 +1380,6 @@ class Inception_V2(_models.incep._Inception_V2): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 
Fractional size of validation set `lambda_` : float, default=0.0 @@ -1553,14 +1400,11 @@ class Inception_V2(_models.incep._Inception_V2): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -1571,14 +1415,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1644,10 +1485,6 @@ class Inception_V3(_models.incep._Inception_V3): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -1656,8 +1493,6 @@ class Inception_V3(_models.incep._Inception_V3): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -1680,14 +1515,11 @@ class Inception_V3(_models.incep._Inception_V3): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -1699,14 +1531,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1763,10 +1592,6 @@ class Inception_V4(_models.incep._Inception_V4): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -1775,8 +1600,6 @@ class Inception_V4(_models.incep._Inception_V4): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -1799,14 +1622,11 @@ class Inception_V4(_models.incep._Inception_V4): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -1818,14 +1638,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1882,10 +1699,6 @@ class InceptionResNet_V1(_models.incep._InceptionRes_V1): ---------- 
`activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -1894,8 +1707,6 @@ class InceptionResNet_V1(_models.incep._InceptionRes_V1): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -1913,14 +1724,11 @@ class InceptionResNet_V1(_models.incep._InceptionRes_V1): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -1932,14 +1740,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate, @@ -1995,10 +1800,6 @@ class InceptionResNet_V2(_models.incep._InceptionRes_V2): ---------- `activation` : FuncType, default=Activation.ReLU Type of activation function - `optimizer` : Optimizer - Type of optimizer for weight update - `loss` : Loss, default=CrossEntropy() - Type of loss function `initializer` : InitStr, default=None Type of weight initializer `out_features` : int, default=1000 @@ -2007,8 +1808,6 @@ class InceptionResNet_V2(_models.incep._InceptionRes_V2): Size of a single mini-batch `n_epochs` : int, default=100 Number of epochs for training - `learning_rate` : float, default=0.01 - Step size during optimization process `valid_size` : float, default=0.1 Fractional size of validation set `lambda_` : float, default=0.0 @@ -2026,14 +1825,11 @@ class InceptionResNet_V2(_models.incep._InceptionRes_V2): def __init__( self, - optimizer: Optimizer, activation: Activation.FuncType = Activation.ReLU, - loss: Loss = loss.CrossEntropy(), initializer: InitUtil.InitStr = None, out_features: int = 1000, batch_size: int = 128, n_epochs: int = 100, - learning_rate: float = 0.01, valid_size: float = 0.1, lambda_: float = 0.0, dropout_rate: float = 0.4, @@ -2045,14 +1841,11 @@ def __init__( deep_verbose: bool = False, ) -> None: super().__init__( - optimizer, activation, - loss, initializer, out_features, batch_size, n_epochs, - learning_rate, valid_size, lambda_, dropout_rate,
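
Usage after this change: optimizer, loss, and learning_rate are no longer constructor parameters on any model. Training components are attached after construction, and check_necessaries_() raises a RuntimeError from train()/eval() when either is missing. Below is a minimal sketch of the resulting workflow. `AdamOptimizer` and its import path are illustrative assumptions, and the sketch assumes the estimator forwards set_optimizer()/set_loss() as the new error messages suggest; this patch does not show where those setters are defined.

    import numpy as np

    from luma.neural.loss import CrossEntropy
    from luma.neural.model import LeNet_5
    from luma.neural.optimizer import AdamOptimizer  # assumed name and import path

    # Constructors no longer accept `optimizer`, `loss`, or `learning_rate`.
    model = LeNet_5(batch_size=100, n_epochs=10, valid_size=0.1)

    # The learning rate now belongs to the optimizer itself; `set_params()`
    # is the same call the removed constructor code used.
    opt = AdamOptimizer()
    opt.set_params(learning_rate=0.01)

    model.set_optimizer(optimizer=opt)  # assumed to forward to the inner Sequential
    model.set_loss(CrossEntropy())      # named in the new RuntimeError message

    # Dummy data; a 1 x 32 x 32 input shape and integer class labels are assumed.
    X = np.random.randn(64, 1, 32, 32)
    y = np.random.randint(0, 10, size=64)
    model.fit_nn(X, y)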