Edit perturbation type annotation and add algorithm enum #287

Merged · 2 commits · Aug 8, 2023
Changes from 1 commit
35 changes: 23 additions & 12 deletions compiler_opt/es/blackbox_optimizers.py
@@ -54,6 +54,7 @@

import abc
import enum
+import gin
import math

import numpy as np
@@ -64,6 +65,8 @@

from compiler_opt.es import gradient_ascent_optimization_algorithms

+ArrayOfFloatArrays = Sequence[npt.NDArray[np.float32]]
Collaborator review comment:

Nit - might as well also have a `FloatArray = npt.NDArray[np.float32]` and then `ArrayOfFloatArrays = Sequence[FloatArray]`, just for consistency; right now there are various `npt.NDArray[np.float32]` around.
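A sketch of the aliases the reviewer is suggesting (hypothetical until adopted):

```python
import numpy as np
import numpy.typing as npt
from typing import Sequence

# One canonical element alias, reused by the container alias.
FloatArray = npt.NDArray[np.float32]
ArrayOfFloatArrays = Sequence[FloatArray]
```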


SequenceOfFloats = Union[Sequence[float], npt.NDArray[np.float32]]

LinearModel = Union[linear_model.Ridge, linear_model.Lasso,
@@ -75,6 +78,14 @@ class CurrentPointEstimate(enum.Enum):
  AVERAGE = 2


+@gin.constants_from_enum(module='blackbox_optimizers')
+class Algorithm(enum.Enum):
+  MONTE_CARLO = 1
+  TRUST_REGION = 2
+  SKLEARN_REGRESSION = 3


+@gin.constants_from_enum(module='blackbox_optimizers')
Collaborator review comment:

Why not also @gin-mark EstimatorType, GradientType, and RegressionType? Might as well...
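For context, `gin.constants_from_enum` registers every member of the decorated enum as a gin constant under the given module name, so a config can bind it with the `%` syntax. A minimal sketch of how the new Algorithm enum could then be selected from a config (the `es_trainer` binding here is hypothetical, not part of this PR):

```python
import enum
import gin


@gin.constants_from_enum(module='blackbox_optimizers')
class Algorithm(enum.Enum):
  MONTE_CARLO = 1
  TRUST_REGION = 2
  SKLEARN_REGRESSION = 3


@gin.configurable
def es_trainer(algorithm: Algorithm = Algorithm.MONTE_CARLO):
  return algorithm


# A .gin file can now reference enum members as constants:
gin.parse_config(
    'es_trainer.algorithm = %blackbox_optimizers.Algorithm.TRUST_REGION')
assert es_trainer() is Algorithm.TRUST_REGION
```

The `module=` argument controls the prefix used in configs, which is why the decorator passes 'blackbox_optimizers' explicitly.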

class EstimatorType(enum.Enum):
  FORWARD_FD = 1
  ANTITHETIC = 2
@@ -91,6 +102,7 @@ class RegressionType(enum.Enum):
  LINEAR = 3


+@gin.constants_from_enum(module='blackbox_optimizers')
class UpdateMethod(enum.Enum):
  STATE_NORMALIZATION = 1
  NO_METHOD = 2
@@ -100,9 +112,8 @@ class UpdateMethod(enum.Enum):


def filter_top_directions(
-    perturbations: npt.NDArray[np.float32],
-    function_values: npt.NDArray[np.float32], est_type: EstimatorType,
-    num_top_directions: int
+    perturbations: ArrayOfFloatArrays, function_values: npt.NDArray[np.float32],
+    est_type: EstimatorType, num_top_directions: int
) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]:
"""Select the subset of top-performing perturbations.

@@ -151,7 +162,7 @@ class BlackboxOptimizer(metaclass=abc.ABCMeta):
"""

  @abc.abstractmethod
-  def run_step(self, perturbations: npt.NDArray[np.float32],
+  def run_step(self, perturbations: ArrayOfFloatArrays,
               function_values: npt.NDArray[np.float32],
               current_input: npt.NDArray[np.float32],
               current_value: float) -> npt.NDArray[np.float32]:
@@ -332,7 +343,8 @@ def __init__(self,
    super().__init__(est_type, normalize_fvalues, hyperparameters_update_method,
                     extra_params)

-  def run_step(self, perturbations: npt.NDArray[np.float32],
+  # TODO: Issue #285
+  def run_step(self, perturbations: ArrayOfFloatArrays,
               function_values: npt.NDArray[np.float32],
               current_input: npt.NDArray[np.float32],
               current_value: float) -> npt.NDArray[np.float32]:
@@ -413,7 +425,7 @@ def __init__(self,
    super().__init__(est_type, normalize_fvalues, hyperparameters_update_method,
                     extra_params)

-  def run_step(self, perturbations: npt.NDArray[np.float32],
+  def run_step(self, perturbations: ArrayOfFloatArrays,
               function_values: npt.NDArray[np.float32],
               current_input: npt.NDArray[np.float32],
               current_value: float) -> npt.NDArray[np.float32]:
@@ -487,7 +499,7 @@ def normalize_function_values(
def monte_carlo_gradient(
    precision_parameter: float,
    est_type: EstimatorType,
-    perturbations: npt.NDArray[np.float32],
+    perturbations: ArrayOfFloatArrays,
    function_values: npt.NDArray[np.float32],
    current_value: float,
    energy: Optional[float] = 0) -> npt.NDArray[np.float32]:
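As background for this signature, the Monte Carlo estimator in question is the standard ES gradient: average the perturbation directions weighted by function differences, then divide by the precision parameter. A textbook sketch (variable names, value layout, and the exact normalization are assumptions, not the file's implementation):

```python
import numpy as np


def monte_carlo_gradient_sketch(precision_parameter, antithetic,
                                perturbations, function_values, current_value):
  """Textbook ES gradient estimate; scaling conventions may differ."""
  n = len(perturbations)
  if antithetic:
    # Assumed layout: f(x + p_i) in the first half, f(x - p_i) in the second.
    diffs = (function_values[:n] - function_values[n:]) / 2.0
  else:
    diffs = function_values - current_value  # forward finite differences
  grad = sum(d * p for d, p in zip(diffs, perturbations))
  return grad / (precision_parameter * n)
```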
@@ -532,8 +544,7 @@ def monte_carlo_gradient(

def sklearn_regression_gradient(
    clf: LinearModel, est_type: EstimatorType,
-    perturbations: npt.NDArray[np.float32],
-    function_values: npt.NDArray[np.float32],
+    perturbations: ArrayOfFloatArrays, function_values: npt.NDArray[np.float32],
    current_value: float) -> npt.NDArray[np.float32]:
"""Calculates gradient by function difference regression.

@@ -981,7 +992,7 @@ def trust_region_test(self, current_input: npt.NDArray[np.float32],
print('Unchanged: ' + str(self.radius) + log_message)
return True

-  def update_hessian_part(self, perturbations: npt.NDArray[np.float32],
+  def update_hessian_part(self, perturbations: ArrayOfFloatArrays,
                          function_values: npt.NDArray[np.float32],
                          current_value: float, is_update: bool) -> None:
    """Updates the internal state which stores Hessian information.
@@ -1095,7 +1106,7 @@ def hessv_func(x: npt.NDArray[np.float32]) -> npt.NDArray[np.float32]:

return hessv_func

-  def update_quadratic_model(self, perturbations: npt.NDArray[np.float32],
+  def update_quadratic_model(self, perturbations: ArrayOfFloatArrays,
                             function_values: npt.NDArray[np.float32],
                             current_value: float,
                             is_update: bool) -> QuadraticModel:
@@ -1145,7 +1156,7 @@ def update_quadratic_model(self, perturbations: npt.NDArray[np.float32],
is_update)
return QuadraticModel(self.create_hessv_function(), self.saved_gradient)

-  def run_step(self, perturbations: npt.NDArray[np.float32],
+  def run_step(self, perturbations: ArrayOfFloatArrays,
               function_values: npt.NDArray[np.float32],
               current_input: npt.NDArray[np.float32],
               current_value: float) -> npt.NDArray[np.float32]:
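Closing note on the QuadraticModel plumbing touched above: it pairs a Hessian-vector-product closure with the saved gradient, giving the trust-region machinery a local model q(p) = g.p + 0.5 p.(Hp) to minimize within the radius. A minimal sketch of that pairing (names hypothetical, not the file's API):

```python
import numpy as np


def make_quadratic_model_sketch(hessv_func, gradient):
  """Returns q(p) = g.p + 0.5 * p.(Hp), the local trust-region model."""
  def q(p):
    return float(gradient @ p + 0.5 * (p @ hessv_func(p)))
  return q


# Example with an explicit Hessian standing in for the matrix-free closure:
H = np.array([[2.0, 0.0], [0.0, 4.0]])
q = make_quadratic_model_sketch(lambda p: H @ p, np.array([1.0, -1.0]))
print(q(np.array([0.5, 0.5])))  # 0.0 + 0.5 * (0.5 + 1.0) = 0.75
```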