Commit

work at ATF on Oct 12
AmbarCRoAl committed Oct 12, 2023
1 parent 8c09e87 commit 6fffe33
Showing 5 changed files with 58 additions and 34 deletions.
2 changes: 1 addition & 1 deletion bloptools/bayesian/acquisition/__init__.py
@@ -30,7 +30,7 @@ def parse_acq_func(acq_func_identifier):
return acq_func_name


def get_acquisition_function(agent, acq_func_identifier="qei", return_metadata=True, **acq_func_kwargs):
def get_acquisition_function(agent, acq_func_identifier="qei", return_metadata=True, verbose=False, **acq_func_kwargs):
"""
Generates an acquisition function from a supplied identifier.
"""
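A minimal sketch of how the updated signature could be called — only the new verbose keyword comes from this diff; the agent object and the returned values are assumptions:

    acq_func, acq_func_meta = get_acquisition_function(
        agent,                        # an already-configured bloptools Bayesian agent (assumed)
        acq_func_identifier="qei",    # any identifier registered in config.yml
        return_metadata=True,
        verbose=False,                # keyword added in this commit
    )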
11 changes: 10 additions & 1 deletion bloptools/bayesian/acquisition/config.yml
@@ -64,6 +64,14 @@ monte_carlo_probability_of_improvement:
pretty_name: Monte Carlo probability of improvement
type: monte_carlo

random:
description: A uniform random sample of the parameters.
identifiers:
- r
multitask_only: false
pretty_name: Random
type: none

quasi-random:
description: Sobol-sampled quasi-random points.
identifiers:
@@ -75,7 +83,8 @@ quasi-random:
grid:
description: A grid scan over the parameters.
identifiers:
- grid_scan
- g
- gs
multitask_only: false
pretty_name: Grid scan
type: none
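A minimal sketch of requesting points via the new random entry — the "r" identifier comes from the config above, but the exact return value of ask() is an assumption:

    candidates = agent.ask(acq_func_identifier="r", n=4)   # "r" resolves to the new "random" entry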
59 changes: 37 additions & 22 deletions bloptools/bayesian/agent.py
@@ -104,6 +104,8 @@ def __init__(
def tell(self, new_table=None, append=True, train=True, **kwargs):
"""
Inform the agent about new inputs and targets for the model.
If run with no arguments, it will just reconstruct all the models.
"""

new_table = pd.DataFrame() if new_table is None else new_table
@@ -208,7 +210,7 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, **acq
NUM_RESTARTS = 8
RAW_SAMPLES = 1024

candidates, _ = botorch.optim.optimize_acqf(
candidates, acqf_objective = botorch.optim.optimize_acqf(
acq_function=acq_func,
bounds=self.acquisition_function_bounds,
q=n,
@@ -226,6 +228,13 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, **acq
acq_func_meta["read_only_values"] = read_only_X

else:

acqf_objective = None

if acq_func_name == "random":
    # torch.rand() with no size is a scalar; sample n points within the writeable DOF limits instead (assumed intent)
    dofs = self.dofs.subset(active=True, read_only=False)
    limits = np.array([dof.limits for dof in dofs], dtype=float)
    acquisition_X = limits[:, 0] + np.ptp(limits, axis=1) * np.random.uniform(size=(n, len(dofs)))
    acq_func_meta = {"name": "random", "args": {}}

if acq_func_name == "quasi-random":
acquisition_X = self._subset_inputs_sampler(n=n, active=True, read_only=False).squeeze(1).numpy()
acq_func_meta = {"name": "quasi-random", "args": {}}
@@ -241,7 +250,7 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, **acq
acq_func_meta["duration"] = duration = ttime.monotonic() - start_time

if self.verbose:
print(f"found points {acquisition_X} in {duration:.01f} seconds")
print(f"found points {acquisition_X} with acqf {acq_func_meta['name']} in {duration:.01f} seconds (obj = {acqf_objective})")

if route and n > 1:
routing_index = utils.route(self.dofs.subset(active=True, read_only=False).readback, acquisition_X)
@@ -257,7 +266,7 @@ def acquire(self, acquisition_inputs):
"""
try:
acquisition_devices = self.dofs.subset(active=True, read_only=False).devices
read_only_devices = self.dofs.subset(active=True, read_only=True).devices
#read_only_devices = self.dofs.subset(active=True, read_only=True).devices

# the acquisition plan always takes as arguments:
# (things to move, where to move them, things to trigger once you get there)
@@ -266,7 +275,7 @@ def acquire(self, acquisition_inputs):
uid = yield from self.acquisition_plan(
acquisition_devices,
acquisition_inputs.astype(float),
[*self.dets, *read_only_devices],
[*self.dets, *self.dofs.devices],
delay=self.trigger_delay,
)

@@ -341,11 +350,12 @@ def reset(self):
def benchmark(
self, output_dir="./", runs=16, n_init=64, learning_kwargs_list=[{"acq_func": "qei", "n": 4, "iterations": 16}]
):
cache_limits = {dof["name"]: dof["limits"] for dof in self.dofs}
cache_limits = {dof.name: dof.limits for dof in self.dofs}

for run in range(runs):
for dof in self.dofs:
dof["limits"] = cache_limits[dof["name"]] + 0.25 * np.ptp(dof["limits"]) * np.random.uniform(low=-1, high=1)
offset = 0.25 * np.ptp(dof.limits) * np.random.uniform(low=-1, high=1)
dof.limits = (dof.limits[0] + offset, dof.limits[1] + offset)

self.reset()

@@ -367,7 +377,7 @@ def model(self):

@property
def objective_weights_torch(self):
return torch.tensor(self.objectives.weights, dtype=torch.float)
return torch.tensor(self.objectives.weights, dtype=torch.double)

def _get_objective_targets(self, i):
"""
@@ -454,8 +464,10 @@ def acquisition_function_bounds(self):
"""
Returns a (2, n_active_dof) array of bounds for the acquisition function
"""
acq_func_lower_bounds = [dof.lower_limit if not dof.read_only else dof.readback for dof in self.dofs]
acq_func_upper_bounds = [dof.upper_limit if not dof.read_only else dof.readback for dof in self.dofs]
active_dofs = self.dofs.subset(active=True)

acq_func_lower_bounds = [dof.lower_limit if not dof.read_only else dof.readback for dof in active_dofs]
acq_func_upper_bounds = [dof.upper_limit if not dof.read_only else dof.readback for dof in active_dofs]

return torch.tensor(np.vstack([acq_func_lower_bounds, acq_func_upper_bounds]), dtype=torch.double)

@@ -598,20 +610,23 @@ def inputs(self):
def active_inputs(self):
return self.table.loc[:, self.dofs.subset(active=True).device_names].astype(float)

# @property
# def acquisition_inputs(self):
# return self.table.loc[:, self.dofs.subset(active=True, read_only=False).names].astype(float)
@property
def acquisition_inputs(self):
return self.table.loc[:, self.dofs.subset(active=True, read_only=False).names].astype(float)

# @property
# def best_inputs(self):
# return self.acquisition_inputs.values[np.nanargmax(self.scalarized_objectives)]

# def go_to(self, acquisition_inputs):
# args = []
# for dof, value in zip(self.dofs.subset(read_only=False), np.atleast_1d(acquisition_inputs).T):
# args.append(dof.device)
# args.append(value)
# yield from bps.mv(*args)
@property
def best_inputs(self):
"""
Returns a value for each currently active and non-read-only degree of freedom
"""
return self.table.loc[np.nanargmax(self.scalarized_objectives), self.dofs.subset(active=True, read_only=False).names]

def go_to(self, positions):
args = []
for dof, value in zip(self.dofs.subset(active=True, read_only=False), np.atleast_1d(positions)):
args.append(dof.device)
args.append(value)
yield from bps.mv(*args)

def go_to_best(self):
yield from self.go_to(self.best_inputs)
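The best_inputs and go_to helpers above are now live code rather than comments. A minimal sketch of using them from a Bluesky plan — the RunEngine RE and a populated agent.table are assumptions:

    RE(agent.go_to_best())               # move the active, writeable DOFs to the best observed inputs
    RE(agent.go_to(agent.best_inputs))   # equivalent, passing the positions explicitly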
6 changes: 3 additions & 3 deletions bloptools/bayesian/objective.py
@@ -7,7 +7,7 @@
numeric = Union[float, int]

DEFAULT_MINIMUM_SNR = 2e1
OBJ_FIELDS = ["name", "key", "limits", "weight", "minimize", "log"]
OBJ_FIELDS = ["name", "key", "limits", "weight", "minimize", "log", "noise"]


class DuplicateKeyError(ValueError):
@@ -62,8 +62,8 @@ def __repr__(self):
return self.summary.__repr__()

@property
def has_model(self):
return hasattr(self, "model")
def noise(self):
return self.model.likelihood.noise.item() if hasattr(self, "model") else None


class ObjectiveList(Sequence):
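The has_model flag is replaced by a noise property, which is also added to OBJ_FIELDS so it shows up in the objective summary. A small sketch of what it reports — the loop over agent.objectives is an assumption, the attribute names come from the diff:

    for obj in agent.objectives:
        print(obj.name, obj.noise)   # the fitted likelihood noise, or None before a model exists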
14 changes: 7 additions & 7 deletions bloptools/bayesian/plotting.py
@@ -23,7 +23,7 @@ def _plot_objs_one_dof(agent, size=16, lw=1e0):

agent.obj_axes = np.atleast_1d(agent.obj_axes)

x_dof = agent.dofs[0]
x_dof = agent.dofs.subset(active=True)[0]
x_values = agent.table.loc[:, x_dof.device.name].values

for obj_index, obj in enumerate(agent.objectives):
@@ -68,8 +68,8 @@ def _plot_objs_many_dofs(agent, axes=[0, 1], shading="nearest", cmap=DEFAULT_COL
)

agent.obj_axes = np.atleast_2d(agent.obj_axes)

x_dof, y_dof = agent.dofs[axes[0]], agent.dofs[axes[1]]
x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]]
x_values = agent.table.loc[:, x_dof.device.name].values
y_values = agent.table.loc[:, y_dof.device.name].values

@@ -185,7 +185,7 @@ def _plot_acq_one_dof(agent, acq_funcs, lw=1e0, **kwargs):
)

agent.acq_axes = np.atleast_1d(agent.acq_axes)
x_dof = agent.dofs[0]
x_dof = agent.dofs.subset(active=True)[0]

test_inputs = agent.test_inputs_grid()

@@ -219,7 +219,7 @@ def _plot_acq_many_dofs(

agent.acq_axes = np.atleast_1d(agent.acq_axes)

x_dof, y_dof = agent.dofs[axes[0]], agent.dofs[axes[1]]
x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]]

# test_inputs has shape (..., 1, n_active_dofs)
test_inputs = agent.test_inputs_grid() if gridded else agent.test_inputs(n=1024)
@@ -263,7 +263,7 @@ def _plot_acq_many_dofs(
def _plot_valid_one_dof(agent, size=16, lw=1e0):
agent.valid_fig, agent.valid_ax = plt.subplots(1, 1, figsize=(6, 4 * agent.n_objs), constrained_layout=True)

x_dof = agent.dofs[0]
x_dof = agent.dofs.subset(active=True)[0]
x_values = agent.table.loc[:, x_dof.device.name].values

test_inputs = agent.test_inputs_grid()
@@ -280,7 +280,7 @@ def _plot_valid_many_dofs(agent, axes=[0, 1], shading="nearest", cmap=DEFAULT_CO
if gridded is None:
gridded = len(agent.dofs.subset(active=True, read_only=False)) == 2

x_dof, y_dof = agent.dofs[axes[0]], agent.dofs[axes[1]]
x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]]

# test_inputs has shape (..., 1, n_active_dofs)
test_inputs = agent.test_inputs_grid() if gridded else agent.test_inputs(n=1024)
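The plotting helpers now index into agent.dofs.subset(active=True) rather than the raw DOF list, so the plotted axes track only the currently active degrees of freedom. A minimal sketch of the lookup they rely on (attribute names taken from the diff):

    x_dof = agent.dofs.subset(active=True)[0]   # first active DOF, used for the x-axis
    x_values = agent.table.loc[:, x_dof.device.name].values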
