Commit

tmp-release-0.6.7

ChanLumerico committed Apr 18, 2024
1 parent dc9e831 commit a6f1799
Showing 9 changed files with 267 additions and 62 deletions.
15 changes: 9 additions & 6 deletions luma/__import__.py
@@ -18,7 +18,7 @@
NearestNeighbors,
)
from luma.interface.util import SilhouetteUtil, DBUtil, KernelUtil, ActivationUtil
-from luma.interface.util import Clone, ParamRange, Layer
+from luma.interface.util import Clone, ParamRange, Layer, Loss

from luma.classifier.discriminant import (
LDAClassifier,
@@ -75,7 +75,8 @@
)
from luma.neural.single import PerceptronClassifier, PerceptronRegressor
from luma.neural.network import MLPClassifier, MLPRegressor
-from luma.neural.layer import Convolution, Pooling, Dense, Dropout, Flatten
+from luma.neural.layer import Convolution, Pooling, Dense, Dropout, Flatten, Sequential
+from luma.neural.loss import CategoricalCrossEntropy

from luma.metric.classification import Accuracy, Precision, Recall, F1Score, Specificity
from luma.metric.regression import (
@@ -90,7 +91,7 @@
from luma.metric.distance import Euclidean, Manhattan, Chebyshev, Minkowski
from luma.metric.distance import CosineSimilarity, Correlation, Mahalanobis

-from luma.model_selection.split import TrainTestSplit
+from luma.model_selection.split import TrainTestSplit, BatchGenerator
from luma.model_selection.search import GridSearchCV, RandomizedSearchCV
from luma.model_selection.cv import CrossValidator
from luma.model_selection.fold import KFold, StratifiedKFold
@@ -168,7 +169,7 @@
Matrix, Vector, Tensor, Scalar,
DecisionTreeNode, NearestNeighbors,
SilhouetteUtil, DBUtil, KernelUtil, ActivationUtil,
-Clone, ParamRange, Layer
+Clone, ParamRange, Layer, Loss

# ----------------- [ luma.classifier ] --------------------
LDAClassifier, QDAClassifier, RDAClassifier, KDAClassifier
@@ -223,7 +224,9 @@
AdamOptimizer, AdaGradOptimizer, AdaDeltaOptimizer,
AdaMaxOptimizer, AdamWOptimizer, NAdamOptimizer

-Convolution, Pooling, Dense, Dropout, Flatten
+Convolution, Pooling, Dense, Dropout, Flatten, Sequential
+
+CategoricalCrossEntropy

# ------------------- [ luma.metric ] ----------------------
Accuracy, Precision, Recall, F1Score, Specificity
@@ -238,7 +241,7 @@
CosineSimilarity, Correlation, Mahalanobis

# --------------- [ luma.module_selection ] ----------------
-TrainTestSplit
+TrainTestSplit, BatchGenerator

GridSearchCV, RandomizedSearchCV

4 changes: 2 additions & 2 deletions luma/core/main.py
@@ -18,5 +18,5 @@ def __dealloc__(self) -> None:
def __doc__(self) -> str:
return luma.__doc__

-if sys.version_info < (3, 10):
-    print("Luma requires Python 3.10 or more", file=sys.stderr)
+if sys.version_info < (3, 12):
+    print("Luma requires Python 3.12 or more", file=sys.stderr)
4 changes: 2 additions & 2 deletions luma/core/super.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Self
+from typing import Any, Dict, NoReturn, Self
from abc import ABCMeta, abstractmethod

from luma.core.base import *
@@ -266,7 +266,7 @@ def __init__(self) -> None:
self.updated_weights = None
self.updated_biases = None

-def update(self, weights, biases, grad_weights, grad_biases) -> None:
+def update(self, weights, biases, grad_weights, grad_biases) -> NoReturn:
if weights is not None:
self.updated_weights = self._update_weights(weights, grad_weights)
if biases is not None:
27 changes: 27 additions & 0 deletions luma/interface/util.py
@@ -19,9 +19,13 @@
"Clone",
"ParamRange",
"Layer",
"Loss",
)


type TensorLike = Matrix | Tensor | Vector


class Matrix(np.ndarray):
"""
Internal class for matrices(2D-array) that extends `numpy.ndarray`.
@@ -556,6 +560,7 @@ class Layer:
- `dW` : Gradient w.r.t. the weights
- `dB` : Gradient w.r.t. the biases
- `optimizer` : Optimizer for certain layer
- `out_shape` : Shape of the output when forwarding
"""

@@ -569,6 +574,7 @@ def __init__(self) -> None:
self.dB: Tensor = None

self.optimizer: object = None
self.out_shape: tuple = None

def forward(self) -> Tensor: ...

@@ -582,3 +588,24 @@ def update(self) -> None:
)
self.weights_ = Tensor(weights_)
self.biases_ = Tensor(biases_)

def __str__(self) -> str:
return type(self).__name__
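
As a quick illustration of the interface added above, here is a minimal sketch of a hypothetical `Layer` subclass that records `out_shape` and relies on the new `__str__`. The `forward(X)` argument convention is an assumption, and the real luma layers (`Dense`, `Convolution`, ...) are considerably more involved.

```py
from luma.interface.util import Layer, Tensor


class IdentityLayer(Layer):  # hypothetical layer, for illustration only
    def forward(self, X: Tensor) -> Tensor:
        self.out_shape = X.shape  # record the output shape, per the new attribute
        return X


layer = IdentityLayer()
print(layer)  # -> "IdentityLayer", courtesy of the new __str__
```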


class Loss:
"""
An internal class for loss functions used in neural networks.
Loss functions, integral to the training process of machine
learning models, serve as crucial metrics assessing the disparity
between predicted outcomes and ground truth labels. They play a
pivotal role in optimization algorithms, guiding parameter updates
towards minimizing the discrepancy between predictions and true values.
"""

def __init__(self) -> None: ...

def loss(self) -> float: ...

def grad(self) -> Matrix: ...
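
The `Loss` base above fixes a two-method contract: `loss` returns a scalar and `grad` returns a `Matrix`. Below is a rough sketch of what a subclass could look like; the `(y_true, y_pred)` argument convention and the clipping constant are illustrative assumptions and need not match the `CategoricalCrossEntropy` actually shipped in `luma.neural.loss`.

```py
import numpy as np

from luma.interface.util import Loss, Matrix


class NaiveCrossEntropy(Loss):  # hypothetical subclass, for illustration only
    EPS: float = 1e-12  # assumed clipping constant to avoid log(0)

    def loss(self, y_true: Matrix, y_pred: Matrix) -> float:
        # Mean categorical cross-entropy over a batch of one-hot targets
        y_pred = np.clip(y_pred, self.EPS, 1.0)
        return float(-np.sum(y_true * np.log(y_pred)) / y_true.shape[0])

    def grad(self, y_true: Matrix, y_pred: Matrix) -> Matrix:
        # Gradient of the mean loss w.r.t. the predicted probabilities
        y_pred = np.clip(y_pred, self.EPS, 1.0)
        return -y_true / y_pred / y_true.shape[0]
```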
77 changes: 71 additions & 6 deletions luma/model_selection/split.py
@@ -1,10 +1,10 @@
-from typing import Tuple
+from typing import Iterator, Tuple
import numpy as np

-from luma.interface.util import Matrix, Vector
+from luma.interface.util import Matrix, Vector, TensorLike


__all__ = "TrainTestSplit"
__all__ = ("TrainTestSplit", "BatchGenerator")


class TrainTestSplit:
@@ -58,11 +58,11 @@ def get(self) -> Tuple[Matrix, Matrix, Vector, Vector]:
return self._split()

def _split(self) -> Tuple[Matrix, Matrix, Vector, Vector]:
-num_samples = self.X.shape[0]
-indices = np.arange(num_samples)
+n_samples = self.X.shape[0]
+indices = np.arange(n_samples)

if isinstance(self.test_size, float):
-num_test_samples = int(self.test_size * num_samples)
+num_test_samples = int(self.test_size * n_samples)
else:
num_test_samples = self.test_size

@@ -113,3 +113,68 @@ def _stratified_split(self) -> Tuple[Matrix, Matrix, Vector, Vector]:
y_test = self.y[test_indices]

return X_train, X_test, y_train, y_test


class BatchGenerator:
"""
A class for generating mini-batches of data for training machine
learning models including neural networks.
Parameters
----------
`X` : Input features
`y` : Targets or labels
`batch_size` : Size of a mini-batch
`shuffle` : Whether to shuffle the data for every batch generation
Examples
--------
An instance of `BatchGenerator` can be used as an iterator.
- With instantiation:
```py
batch_gen = BatchGenerator(X, y, batch_size=100)
for X_batch, y_batch in batch_gen:
pass
```
- Without instantiation:
```py
for X_batch, y_batch in BatchGenerator(X, y, batch_size=100):
pass
```
"""

def __init__(
self,
X: TensorLike,
y: TensorLike,
batch_size: int = 100,
shuffle: bool = True,
) -> None:
self.X = X
self.y = y
self.batch_size = batch_size
self.shuffle = shuffle

self.n_samples = X.shape[0]
self.n_batches = self.n_samples // batch_size

if self.n_samples % batch_size != 0:
self.n_batches += 1

self.indices = np.arange(self.n_samples)
if self.shuffle:
np.random.shuffle(self.indices)

def __iter__(self) -> Iterator[Tuple[TensorLike, TensorLike]]:
for i in range(self.n_batches):
start_idx = i * self.batch_size
end_idx = min((i + 1) * self.batch_size, self.n_samples)

batch_indices = self.indices[start_idx:end_idx]
X_batch = self.X[batch_indices]
y_batch = self.y[batch_indices]

yield X_batch, y_batch
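
A short usage sketch with synthetic data: since the index array is shuffled once in `__init__`, re-instantiating the generator each epoch is one way to obtain a fresh shuffle per epoch.

```py
import numpy as np

from luma.model_selection.split import BatchGenerator

X = np.random.randn(1050, 20)          # synthetic features
y = np.random.randint(0, 3, 1050)      # synthetic labels

for epoch in range(5):
    # A fresh instance per epoch means a fresh shuffle of the indices
    for X_batch, y_batch in BatchGenerator(X, y, batch_size=100):
        pass  # e.g. forward pass, loss, parameter update

# 1050 samples with batch_size=100 -> 11 batches; the last batch holds 50 samples
```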
29 changes: 8 additions & 21 deletions luma/neural/activation.py
@@ -4,14 +4,14 @@
__all__ = ("ReLU", "LeakyReLU", "ELU", "Tanh", "Sigmoid", "Softmax")


-type Matrix = np.ndarray
+type Matrix = Matrix


class ReLU:
def func(self, X: Matrix) -> Matrix:
return np.maximum(0, X)

-def derivative(self, X: Matrix) -> Matrix:
+def grad(self, X: Matrix) -> Matrix:
return (X > 0).astype(float)


@@ -22,7 +22,7 @@ def __init__(self, alpha=0.01):
def func(self, X: Matrix) -> Matrix:
return np.where(X > 0, X, X * self.alpha)

-def derivative(self, X: Matrix) -> Matrix:
+def grad(self, X: Matrix) -> Matrix:
return np.where(X > 0, 1, self.alpha)


@@ -33,23 +33,23 @@ def __init__(self, alpha=1.0):
def func(self, X: Matrix) -> Matrix:
return np.where(X > 0, X, self.alpha * (np.exp(X) - 1))

-def derivative(self, X: Matrix) -> Matrix:
+def grad(self, X: Matrix) -> Matrix:
return np.where(X > 0, 1, self.func(X) + self.alpha)


class Tanh:
def func(self, X: Matrix) -> Matrix:
return np.tanh(X)

-def derivative(self, X: Matrix) -> Matrix:
+def grad(self, X: Matrix) -> Matrix:
return 1 - np.tanh(X) ** 2


class Sigmoid:
def func(self, X: Matrix) -> Matrix:
return 1 / (1 + np.exp(-X))

-def derivative(self, X: Matrix) -> Matrix:
+def grad(self, X: Matrix) -> Matrix:
return X * (1 - X)


@@ -58,18 +58,5 @@ def func(self, X: Matrix) -> Matrix:
exps = np.exp(X - np.max(X, axis=1, keepdims=True))
return exps / np.sum(exps, axis=1, keepdims=True)

-def derivative(self, X: Matrix) -> Matrix:
-    m, n = X.shape
-    soft_out = self.func(X)
-    jacobian = np.zeros((m, n, n))
-
-    for i in range(len(soft_out)):
-        for j in range(len(soft_out[i])):
-            for k in range(len(soft_out[i])):
-                if j == k:
-                    val = soft_out[i, j] * (1 - soft_out[i, j])
-                else:
-                    val = -soft_out[i, j] * soft_out[i, k]
-                jacobian[i, j, k] = val
-
-    return jacobian
+def grad(self, _: Matrix) -> Matrix:
+    NotImplemented
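
`Softmax.grad` is left unimplemented above. One standard reason a softmax layer can skip the full Jacobian that the removed `derivative` built is that, when softmax is paired with a categorical cross-entropy loss, the gradient with respect to the pre-softmax logits collapses to `softmax(Z) - Y`. A standalone NumPy check of that identity follows; it is illustrative only and says nothing about how `luma.neural.loss.CategoricalCrossEntropy` is actually implemented.

```py
import numpy as np


def softmax(Z: np.ndarray) -> np.ndarray:
    exps = np.exp(Z - np.max(Z, axis=1, keepdims=True))
    return exps / np.sum(exps, axis=1, keepdims=True)


Z = np.random.randn(4, 3)                  # logits: 4 samples, 3 classes
Y = np.eye(3)[np.random.randint(0, 3, 4)]  # one-hot targets
P = softmax(Z)

# Fused identity: d(mean cross-entropy)/dZ
grad_fused = (P - Y) / Z.shape[0]

# Chain rule with the explicit per-sample Jacobian (what the removed
# Softmax.derivative computed): J[i, j, k] = P[i, j] * (delta_jk - P[i, k])
J = np.einsum("ij,jk->ijk", P, np.eye(3)) - np.einsum("ij,ik->ijk", P, P)
dL_dP = -Y / P / Z.shape[0]                # gradient of mean CE w.r.t. probabilities
grad_chain = np.einsum("ijk,ik->ij", J, dL_dP)

assert np.allclose(grad_fused, grad_chain)
```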