Merge pull request #23 from ChanLumerico/autoprop-enum
AutoProp Enum
ChanLumerico authored Aug 26, 2024
2 parents 1d3ba07 + 755b1ec commit 1c17892
Showing 4 changed files with 102 additions and 22 deletions.
8 changes: 8 additions & 0 deletions luma/neural/autoprop/__init__.py
@@ -0,0 +1,8 @@
"""
`AutoProp`
----------
Auto propagation system for complex neural networks of the Luma Python library.
"""

from .graph import LayerNode, LayerGraph
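
Since autoprop is promoted from a single module to a package in this commit, the new __init__.py re-exports the graph classes, so the previous import path keeps resolving. A minimal sanity check, not part of the commit, assuming luma is installed:

# Both import paths should point at the same classes after this change.
from luma.neural.autoprop import LayerNode, LayerGraph
from luma.neural.autoprop.graph import LayerNode as GraphLayerNode

assert LayerNode is GraphLayerNode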
30 changes: 9 additions & 21 deletions luma/neural/autoprop.py → luma/neural/autoprop/graph.py
@@ -1,12 +1,14 @@
from typing import List, Literal, Dict, Self, Any, Tuple
from typing import List, Dict, Self, Any, Tuple
from collections import deque
import numpy as np

from luma.interface.typing import TensorLike, LayerLike
from luma.interface.exception import NotFittedError
from luma.interface.util import Clone
from luma.interface.exception import NotFittedError
from luma.core.super import Optimizer

from .merge import MergeMode


__all__ = ("LayerNode", "LayerGraph")

@@ -15,7 +17,7 @@ class LayerNode:
    def __init__(
        self,
        layer: LayerLike,
        merge_mode: Literal["chcat", "sum"] = "sum",
        merge_mode: MergeMode = MergeMode.SUM,
        name: str | None = None,
    ) -> None:
        self.layer: LayerLike = layer
@@ -42,13 +44,8 @@ def back_enqueue(self, d_out: TensorLike) -> None:
        self.b_queue.append(d_out)

    def forward(self, is_train: bool = False) -> TensorLike:
        match self.merge_mode:
            case "chcat":
                X = np.concatenate(self.f_queue, axis=1)
            case "sum":
                X = np.sum(self.f_queue, axis=0)
        out = self.layer(X, is_train)
        return out
        X = self.merge_mode.forward(self.f_queue)
        return self.layer(X, is_train)

    def backward(self) -> List[TensorLike]:
        d_cum = np.sum(self.b_queue, axis=0)
@@ -57,17 +54,8 @@ def backward(self) -> List[TensorLike]:
            return [d_out]

        d_out_arr = []
        for i in range(self.n_backward):
            if self.merge_mode == "chcat":
                d_out_arr.append(
                    d_out[
                        :,
                        self.cum_ch[i] : self.cum_ch[i + 1],
                        ...,
                    ]
                )
            elif self.merge_mode == "sum":
                d_out_arr.append(d_out)
        for i in range(self.n_forward):
            d_out_arr.append(self.merge_mode.backward(self.f_queue, d_out, i))

        return d_out_arr

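As an aside (not part of the commit), a small self-contained check that the delegated MergeMode calls reproduce the inline chcat/sum logic they replace; the (N, C, H, W) tensor shapes are assumed for illustration:

import numpy as np

from luma.neural.autoprop.merge import MergeMode

# Three hypothetical branch outputs feeding one node, shaped (N, C, H, W).
f_queue = [np.random.rand(2, 4, 8, 8) for _ in range(3)]

# The enum calls agree with the former inline expressions.
assert np.array_equal(MergeMode.SUM.forward(f_queue), np.sum(f_queue, axis=0))
assert np.array_equal(MergeMode.CHCAT.forward(f_queue), np.concatenate(f_queue, axis=1))

# backward() now yields one gradient slice per incoming branch (n_forward of them).
d_out = np.random.rand(2, 12, 8, 8)
grads = [MergeMode.CHCAT.backward(f_queue, d_out, i) for i in range(len(f_queue))]
assert all(g.shape == x.shape for g, x in zip(grads, f_queue))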
84 changes: 84 additions & 0 deletions luma/neural/autoprop/merge.py
@@ -0,0 +1,84 @@
from enum import Enum
import numpy as np

from luma.interface.typing import TensorLike


class MergeMode(Enum):
    CHCAT = "chcat"
    SUM = "sum"
    HADAMARD = "hadamard"
    AVERAGE = "average"
    MAX = "max"
    MIN = "min"
    DOT = "dot"
    SUBTRACT = "subtract"

    def forward(self, f_queue: list[TensorLike]) -> TensorLike:
        match self:
            case MergeMode.CHCAT:
                return np.concatenate(f_queue, axis=1)

            case MergeMode.SUM:
                return np.sum(f_queue, axis=0)

            case MergeMode.HADAMARD:
                X = np.ones_like(f_queue[0])
                for tensor in f_queue:
                    X *= tensor
                return X

            case MergeMode.AVERAGE:
                return np.mean(f_queue, axis=0)

            case MergeMode.MAX:
                return np.maximum.reduce(f_queue)

            case MergeMode.MIN:
                return np.minimum.reduce(f_queue)

            case MergeMode.DOT:
                return np.dot(f_queue[0], f_queue[1])

            case MergeMode.SUBTRACT:
                # Copy so the first queued tensor is not mutated in place.
                result = f_queue[0].copy()
                for tensor in f_queue[1:]:
                    result -= tensor
                return result

    def backward(
        self, f_queue: list[TensorLike], d_out: TensorLike, i: int
    ) -> TensorLike:
        match self:
            case MergeMode.CHCAT:
                cum_ch = [0]
                for tensor in f_queue:
                    cum_ch.append(cum_ch[-1] + tensor.shape[1])
                return d_out[:, cum_ch[i] : cum_ch[i + 1], ...]

            case MergeMode.SUM:
                return d_out

            case MergeMode.HADAMARD:
                prod_except_current = np.ones_like(f_queue[0])
                for j in range(len(f_queue)):
                    if j != i:
                        prod_except_current *= f_queue[j]
                return d_out * prod_except_current

            case MergeMode.AVERAGE:
                return d_out / len(f_queue)

            case MergeMode.MAX | MergeMode.MIN:
                reduce_fn = np.maximum if self is MergeMode.MAX else np.minimum
                return (
                    d_out * (f_queue[i] == reduce_fn.reduce(f_queue))
                ).astype(d_out.dtype)

            case MergeMode.DOT:
                if i == 0:
                    return np.dot(d_out, f_queue[1].T)
                elif i == 1:
                    return np.dot(f_queue[0].T, d_out)

            case MergeMode.SUBTRACT:
                return d_out if i == 0 else -d_out
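
A brief usage sketch of the new enum in isolation (not part of the commit; plain NumPy arrays stand in for TensorLike, and the shapes are assumptions):

import numpy as np

from luma.neural.autoprop.merge import MergeMode

# Two branch outputs with different channel counts, shaped (N, C, H, W).
a, b = np.random.rand(2, 3, 4, 4), np.random.rand(2, 5, 4, 4)

# CHCAT concatenates along the channel axis: (2, 8, 4, 4).
merged = MergeMode.CHCAT.forward([a, b])
assert merged.shape == (2, 8, 4, 4)

# Its backward slices the upstream gradient back out per branch.
d_out = np.ones_like(merged)
d_a = MergeMode.CHCAT.backward([a, b], d_out, 0)
d_b = MergeMode.CHCAT.backward([a, b], d_out, 1)
assert d_a.shape == a.shape and d_b.shape == b.shape

# HADAMARD multiplies element-wise; the gradient w.r.t. one input is the
# upstream gradient times the product of all the other inputs.
x, y = np.random.rand(2, 3, 4, 4), np.random.rand(2, 3, 4, 4)
h = MergeMode.HADAMARD.forward([x, y])
assert np.allclose(MergeMode.HADAMARD.backward([x, y], np.ones_like(h), 0), y)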
2 changes: 1 addition & 1 deletion luma/neural/model/__init__.py
@@ -2989,7 +2989,7 @@ def __init__(
        random_state: int | None = None,
        deep_verbose: bool = False,
    ) -> None:
        super(MobileNet_V1, self).__init__(
        super(MobileNet_V2, self).__init__(
            activation,
            initializer,
            out_features,
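The hunk above corrects the class passed to the two-argument super() inside MobileNet_V2.__init__. Naming the wrong class there either raises a TypeError (when that class is not in the instance's MRO) or starts the lookup past the intended parent; a toy illustration with hypothetical classes, not taken from Luma:

class Base:
    def __init__(self, tag: str) -> None:
        self.tag = tag


class ModelA(Base):
    pass


class ModelB(Base):
    def __init__(self) -> None:
        # Correct: start the MRO lookup just past ModelB.
        super(ModelB, self).__init__("B")
        # super(ModelA, self).__init__("B") would raise
        # "TypeError: super(type, obj): obj must be an instance or subtype of type"
        # because ModelA is not in ModelB's MRO.


print(ModelB().tag)  # prints "B"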
