From 6fffe33f93fb45b615048ec81ac8cb1fc172965b Mon Sep 17 00:00:00 2001
From: Ambar
Date: Thu, 12 Oct 2023 15:47:52 -0400
Subject: [PATCH] work at ATF on Oct 12

---
 bloptools/bayesian/acquisition/__init__.py |  2 +-
 bloptools/bayesian/acquisition/config.yml  | 11 +++-
 bloptools/bayesian/agent.py                | 59 ++++++++++++++--------
 bloptools/bayesian/objective.py            |  6 +--
 bloptools/bayesian/plotting.py             | 14 ++---
 5 files changed, 58 insertions(+), 34 deletions(-)

diff --git a/bloptools/bayesian/acquisition/__init__.py b/bloptools/bayesian/acquisition/__init__.py
index e71fb6f..c0d8f39 100644
--- a/bloptools/bayesian/acquisition/__init__.py
+++ b/bloptools/bayesian/acquisition/__init__.py
@@ -30,7 +30,7 @@ def parse_acq_func(acq_func_identifier):
     return acq_func_name
 
 
-def get_acquisition_function(agent, acq_func_identifier="qei", return_metadata=True, **acq_func_kwargs):
+def get_acquisition_function(agent, acq_func_identifier="qei", return_metadata=True, verbose=False, **acq_func_kwargs):
     """
     Generates an acquisition function from a supplied identifier.
     """
diff --git a/bloptools/bayesian/acquisition/config.yml b/bloptools/bayesian/acquisition/config.yml
index da19b3a..b62398f 100644
--- a/bloptools/bayesian/acquisition/config.yml
+++ b/bloptools/bayesian/acquisition/config.yml
@@ -64,6 +64,14 @@ monte_carlo_probability_of_improvement:
   pretty_name: Monte Carlo probability of improvement
   type: monte_carlo
 
+random:
+  description: A uniform random sample of the parameters.
+  identifiers:
+  - r
+  multitask_only: false
+  pretty_name: Random
+  type: none
+
 quasi-random:
   description: Sobol-sampled quasi-random points.
   identifiers:
@@ -75,7 +83,8 @@ quasi-random:
 grid:
   description: A grid scan over the parameters.
   identifiers:
-  - grid_scan
+  - g
+  - gs
   multitask_only: false
   pretty_name: Grid scan
   type: none
diff --git a/bloptools/bayesian/agent.py b/bloptools/bayesian/agent.py
index e94cb6f..954f6ae 100644
--- a/bloptools/bayesian/agent.py
+++ b/bloptools/bayesian/agent.py
@@ -104,6 +104,8 @@ def __init__(
     def tell(self, new_table=None, append=True, train=True, **kwargs):
         """
         Inform the agent about new inputs and targets for the model.
+
+        If run with no arguments, it will just reconstruct all the models.
""" new_table = pd.DataFrame() if new_table is None else new_table @@ -208,7 +210,7 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, **acq NUM_RESTARTS = 8 RAW_SAMPLES = 1024 - candidates, _ = botorch.optim.optimize_acqf( + candidates, acqf_objective = botorch.optim.optimize_acqf( acq_function=acq_func, bounds=self.acquisition_function_bounds, q=n, @@ -226,6 +228,13 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, **acq acq_func_meta["read_only_values"] = read_only_X else: + + acqf_objective = None + + if acq_func_name == "random": + acquisition_X = torch.rand() + acq_func_meta = {"name": "random", "args": {}} + if acq_func_name == "quasi-random": acquisition_X = self._subset_inputs_sampler(n=n, active=True, read_only=False).squeeze(1).numpy() acq_func_meta = {"name": "quasi-random", "args": {}} @@ -241,7 +250,7 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, **acq acq_func_meta["duration"] = duration = ttime.monotonic() - start_time if self.verbose: - print(f"found points {acquisition_X} in {duration:.01f} seconds") + print(f"found points {acquisition_X} with acqf {acq_func_meta['name']} in {duration:.01f} seconds (obj = {acqf_objective})") if route and n > 1: routing_index = utils.route(self.dofs.subset(active=True, read_only=False).readback, acquisition_X) @@ -257,7 +266,7 @@ def acquire(self, acquisition_inputs): """ try: acquisition_devices = self.dofs.subset(active=True, read_only=False).devices - read_only_devices = self.dofs.subset(active=True, read_only=True).devices + #read_only_devices = self.dofs.subset(active=True, read_only=True).devices # the acquisition plan always takes as arguments: # (things to move, where to move them, things to trigger once you get there) @@ -266,7 +275,7 @@ def acquire(self, acquisition_inputs): uid = yield from self.acquisition_plan( acquisition_devices, acquisition_inputs.astype(float), - [*self.dets, *read_only_devices], + [*self.dets, *self.dofs.devices], delay=self.trigger_delay, ) @@ -341,11 +350,12 @@ def reset(self): def benchmark( self, output_dir="./", runs=16, n_init=64, learning_kwargs_list=[{"acq_func": "qei", "n": 4, "iterations": 16}] ): - cache_limits = {dof["name"]: dof["limits"] for dof in self.dofs} + cache_limits = {dof.name:dof.limits for dof in self.dofs} for run in range(runs): for dof in self.dofs: - dof["limits"] = cache_limits[dof["name"]] + 0.25 * np.ptp(dof["limits"]) * np.random.uniform(low=-1, high=1) + offset = 0.25 * np.ptp(dof.limits) * np.random.uniform(low=-1, high=1) + dof.limits = (dof.limits[0] + offset, dof.limits[1] + offset) self.reset() @@ -367,7 +377,7 @@ def model(self): @property def objective_weights_torch(self): - return torch.tensor(self.objectives.weights, dtype=torch.float) + return torch.tensor(self.objectives.weights, dtype=torch.double) def _get_objective_targets(self, i): """ @@ -454,8 +464,10 @@ def acquisition_function_bounds(self): """ Returns a (2, n_active_dof) array of bounds for the acquisition function """ - acq_func_lower_bounds = [dof.lower_limit if not dof.read_only else dof.readback for dof in self.dofs] - acq_func_upper_bounds = [dof.upper_limit if not dof.read_only else dof.readback for dof in self.dofs] + active_dofs = self.dofs.subset(active=True) + + acq_func_lower_bounds = [dof.lower_limit if not dof.read_only else dof.readback for dof in active_dofs] + acq_func_upper_bounds = [dof.upper_limit if not dof.read_only else dof.readback for dof in active_dofs] return 
@@ -598,20 +610,23 @@ def inputs(self):
     def active_inputs(self):
         return self.table.loc[:, self.dofs.subset(active=True).device_names].astype(float)
 
-    # @property
-    # def acquisition_inputs(self):
-    #     return self.table.loc[:, self.dofs.subset(active=True, read_only=False).names].astype(float)
+    @property
+    def acquisition_inputs(self):
+        return self.table.loc[:, self.dofs.subset(active=True, read_only=False).names].astype(float)
 
-    # @property
-    # def best_inputs(self):
-    #     return self.acquisition_inputs.values[np.nanargmax(self.scalarized_objectives)]
-
-    # def go_to(self, acquisition_inputs):
-    #     args = []
-    #     for dof, value in zip(self.dofs.subset(read_only=False), np.atleast_1d(acquisition_inputs).T):
-    #         args.append(dof.device)
-    #         args.append(value)
-    #     yield from bps.mv(*args)
+    @property
+    def best_inputs(self):
+        """
+        Returns the best-observed value for each currently active and non-read-only degree of freedom.
+        """
+        return self.table.loc[np.nanargmax(self.scalarized_objectives), self.dofs.subset(active=True, read_only=False).names]
+
+    def go_to(self, positions):
+        args = []
+        for dof, value in zip(self.dofs.subset(active=True, read_only=False), np.atleast_1d(positions)):
+            args.append(dof.device)
+            args.append(value)
+        yield from bps.mv(*args)
 
     def go_to_best(self):
         yield from self.go_to(self.best_inputs)
diff --git a/bloptools/bayesian/objective.py b/bloptools/bayesian/objective.py
index 70a0bf9..a0f344f 100644
--- a/bloptools/bayesian/objective.py
+++ b/bloptools/bayesian/objective.py
@@ -7,7 +7,7 @@
 numeric = Union[float, int]
 
 DEFAULT_MINIMUM_SNR = 2e1
-OBJ_FIELDS = ["name", "key", "limits", "weight", "minimize", "log"]
+OBJ_FIELDS = ["name", "key", "limits", "weight", "minimize", "log", "noise"]
 
 
 class DuplicateKeyError(ValueError):
@@ -62,8 +62,8 @@ def __repr__(self):
         return self.summary.__repr__()
 
     @property
-    def has_model(self):
-        return hasattr(self, "model")
+    def noise(self):
+        return self.model.likelihood.noise.item() if hasattr(self, "model") else None
 
 
 class ObjectiveList(Sequence):
diff --git a/bloptools/bayesian/plotting.py b/bloptools/bayesian/plotting.py
index cd35bc0..523ff0f 100644
--- a/bloptools/bayesian/plotting.py
+++ b/bloptools/bayesian/plotting.py
@@ -23,7 +23,7 @@ def _plot_objs_one_dof(agent, size=16, lw=1e0):
 
     agent.obj_axes = np.atleast_1d(agent.obj_axes)
 
-    x_dof = agent.dofs[0]
+    x_dof = agent.dofs.subset(active=True)[0]
     x_values = agent.table.loc[:, x_dof.device.name].values
 
     for obj_index, obj in enumerate(agent.objectives):
@@ -68,8 +68,8 @@ def _plot_objs_many_dofs(agent, axes=[0, 1], shading="nearest", cmap=DEFAULT_COL
     )
 
     agent.obj_axes = np.atleast_2d(agent.obj_axes)
-
-    x_dof, y_dof = agent.dofs[axes[0]], agent.dofs[axes[1]]
+
+    x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]]
 
     x_values = agent.table.loc[:, x_dof.device.name].values
     y_values = agent.table.loc[:, y_dof.device.name].values
@@ -185,7 +185,7 @@ def _plot_acq_one_dof(agent, acq_funcs, lw=1e0, **kwargs):
     )
 
     agent.acq_axes = np.atleast_1d(agent.acq_axes)
-    x_dof = agent.dofs[0]
+    x_dof = agent.dofs.subset(active=True)[0]
 
     test_inputs = agent.test_inputs_grid()
 
@@ -219,7 +219,7 @@ def _plot_acq_many_dofs(
 
     agent.acq_axes = np.atleast_1d(agent.acq_axes)
 
-    x_dof, y_dof = agent.dofs[axes[0]], agent.dofs[axes[1]]
+    x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]]
 
     # test_inputs has shape (..., 1, n_active_dofs)
     test_inputs = agent.test_inputs_grid() if gridded else agent.test_inputs(n=1024)
@@ -263,7 +263,7 @@ def _plot_acq_many_dofs(
 def _plot_valid_one_dof(agent, size=16, lw=1e0):
     agent.valid_fig, agent.valid_ax = plt.subplots(1, 1, figsize=(6, 4 * agent.n_objs), constrained_layout=True)
 
-    x_dof = agent.dofs[0]
+    x_dof = agent.dofs.subset(active=True)[0]
     x_values = agent.table.loc[:, x_dof.device.name].values
 
     test_inputs = agent.test_inputs_grid()
@@ -280,7 +280,7 @@ def _plot_valid_many_dofs(agent, axes=[0, 1], shading="nearest", cmap=DEFAULT_CO
     if gridded is None:
         gridded = len(agent.dofs.subset(active=True, read_only=False)) == 2
 
-    x_dof, y_dof = agent.dofs[axes[0]], agent.dofs[axes[1]]
+    x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]]
 
     # test_inputs has shape (..., 1, n_active_dofs)
     test_inputs = agent.test_inputs_grid() if gridded else agent.test_inputs(n=1024)
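
Usage sketch for reviewers (illustrative only, not part of the diff): the snippet below assumes an already-constructed `agent` and a Bluesky RunEngine named `RE`; neither name is defined by this patch, and it only exercises the pieces the patch adds.

    # sample points with the new uniform-random sampler via its "r" identifier
    agent.ask(acq_func_identifier="r", n=4)

    # the grid scan now also answers to the short identifiers "g" and "gs"
    agent.ask(acq_func_identifier="gs", n=16)

    # each objective now reports its fitted noise level (None until a model exists)
    print([obj.noise for obj in agent.objectives])

    # best_inputs exposes the best observed inputs for the active, writable DOFs;
    # go_to_best() is a Bluesky plan that moves those DOFs there
    print(agent.best_inputs)
    RE(agent.go_to_best())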