diff --git a/bloptools/bayesian/plotting.py b/bloptools/bayesian/plotting.py index 523ff0f..cef4226 100644 --- a/bloptools/bayesian/plotting.py +++ b/bloptools/bayesian/plotting.py @@ -56,8 +56,14 @@ def _plot_objs_one_dof(agent, size=16, lw=1e0): def _plot_objs_many_dofs(agent, axes=[0, 1], shading="nearest", cmap=DEFAULT_COLORMAP, gridded=None, size=32, grid_zoom=1): + """ + `axes` selects which active, non-read-only DOFs to plot against. + """ + + plottable_dofs = agent.dofs.subset(active=True, read_only=False) + if gridded is None: - gridded = len(agent.dofs.subset(active=True, read_only=False)) == 2 + gridded = len(plottable_dofs) == 2 agent.obj_fig, agent.obj_axes = plt.subplots( len(agent.objectives), @@ -68,15 +74,16 @@ def _plot_objs_many_dofs(agent, axes=[0, 1], shading="nearest", cmap=DEFAULT_COL ) agent.obj_axes = np.atleast_2d(agent.obj_axes) - - x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]] + + x_dof, y_dof = plottable_dofs[axes[0]], plottable_dofs[axes[1]] x_values = agent.table.loc[:, x_dof.device.name].values y_values = agent.table.loc[:, y_dof.device.name].values # test_inputs has shape (*input_shape, 1, n_active_dofs) + # test_x and test_y should be squeezeable test_inputs = agent.test_inputs_grid() if gridded else agent.test_inputs(n=1024) - test_x = test_inputs[..., 0, axes[0]].detach().numpy() - test_y = test_inputs[..., 0, axes[1]].detach().numpy() + test_x = test_inputs[..., 0, axes[0]].detach().squeeze().numpy() + test_y = test_inputs[..., 0, axes[1]].detach().squeeze().numpy() for obj_index, obj in enumerate(agent.objectives): obj_values = agent._get_objective_targets(obj_index) @@ -88,8 +95,8 @@ def _plot_objs_many_dofs(agent, axes=[0, 1], shading="nearest", cmap=DEFAULT_COL # mean and sigma will have shape (*input_shape,) test_posterior = obj.model.posterior(test_inputs) - test_mean = test_posterior.mean[..., 0, 0].detach().numpy() - test_sigma = test_posterior.variance.sqrt()[..., 0, 0].detach().numpy() + test_mean = test_posterior.mean[..., 0, 0].detach().squeeze().numpy() + test_sigma = test_posterior.variance.sqrt()[..., 0, 0].detach().squeeze().numpy() if gridded: _ = agent.obj_axes[obj_index, 1].pcolormesh( @@ -214,22 +221,24 @@ def _plot_acq_many_dofs( constrained_layout=True, ) + plottable_dofs = agent.dofs.subset(active=True, read_only=False) + if gridded is None: - gridded = len(agent.dofs.subset(active=True, read_only=False)) == 2 + gridded = len(plottable_dofs) == 2 agent.acq_axes = np.atleast_1d(agent.acq_axes) - x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]] + x_dof, y_dof = plottable_dofs[axes[0]], plottable_dofs[axes[1]] # test_inputs has shape (..., 1, n_active_dofs) test_inputs = agent.test_inputs_grid() if gridded else agent.test_inputs(n=1024) - test_x = test_inputs[..., 0, axes[0]].detach().numpy() - test_y = test_inputs[..., 0, axes[1]].detach().numpy() + test_x = test_inputs[..., 0, axes[0]].detach().squeeze().numpy() + test_y = test_inputs[..., 0, axes[1]].detach().squeeze().numpy() for iacq_func, acq_func_identifier in enumerate(acq_funcs): acq_func, acq_func_meta = acquisition.get_acquisition_function(agent, acq_func_identifier) - test_acqf = acq_func(test_inputs).detach().numpy() + test_acqf = acq_func(test_inputs).detach().squeeze().numpy() if gridded: agent.acq_axes[iacq_func].set_title(acq_func_meta["name"]) @@ -277,17 +286,19 @@ def _plot_valid_one_dof(agent, size=16, lw=1e0): def _plot_valid_many_dofs(agent, axes=[0, 1],
shading="nearest", cmap=DEFAULT_COLORMAP, size=16, gridded=None): agent.valid_fig, agent.valid_axes = plt.subplots(1, 2, figsize=(8, 4 * agent.n_objs), constrained_layout=True) + plottable_dofs = agent.dofs.subset(active=True, read_only=False) + if gridded is None: - gridded = len(agent.dofs.subset(active=True, read_only=False)) == 2 + gridded = len(plottable_dofs) == 2 - x_dof, y_dof = agent.dofs.subset(active=True)[axes[0]], agent.dofs.subset(active=True)[axes[1]] + x_dof, y_dof = plottable_dofs[axes[0]], plottable_dofs[axes[1]] # test_inputs has shape (..., 1, n_active_dofs) test_inputs = agent.test_inputs_grid() if gridded else agent.test_inputs(n=1024) - test_x = test_inputs[..., 0, axes[0]].detach().numpy() - test_y = test_inputs[..., 0, axes[1]].detach().numpy() + test_x = test_inputs[..., 0, axes[0]].detach().squeeze().numpy() + test_y = test_inputs[..., 0, axes[1]].detach().squeeze().numpy() - constraint = agent.constraint(test_inputs)[..., 0] + constraint = agent.constraint(test_inputs)[..., 0].squeeze().numpy() if gridded: _ = agent.valid_axes[1].pcolormesh( diff --git a/bloptools/functions.py b/bloptools/functions.py deleted file mode 100644 index 2d7eff3..0000000 --- a/bloptools/functions.py +++ /dev/null @@ -1,5 +0,0 @@ -import numpy as np - - -def sigmoid(x): - return 1 / (1 + np.exp(-x)) diff --git a/bloptools/tests/conftest.py b/bloptools/tests/conftest.py index 33b3fbc..a345227 100644 --- a/bloptools/tests/conftest.py +++ b/bloptools/tests/conftest.py @@ -8,8 +8,8 @@ from databroker import Broker from ophyd.utils import make_dir_tree -from bloptools import test_functions from bloptools.bayesian import DOF, Agent, Objective +from bloptools.utils import functions @pytest.fixture(scope="function") @@ -59,7 +59,7 @@ def agent(db): agent = Agent( dofs=dofs, objectives=objectives, - digestion=test_functions.constrained_himmelblau_digestion, + digestion=functions.constrained_himmelblau_digestion, db=db, verbose=True, tolerate_acquisition_errors=False, @@ -78,8 +78,8 @@ def digestion(db, uid): products = db[uid].table() for index, entry in products.iterrows(): - products.loc[index, "ST1"] = test_functions.styblinski_tang(entry.x1, entry.x2) - products.loc[index, "ST2"] = test_functions.styblinski_tang(entry.x1, -entry.x2) + products.loc[index, "ST1"] = functions.styblinski_tang(entry.x1, entry.x2) + products.loc[index, "ST2"] = functions.styblinski_tang(entry.x1, -entry.x2) return products diff --git a/bloptools/tests/test_passive_dofs.py b/bloptools/tests/test_passive_dofs.py index 732caec..3db1c06 100644 --- a/bloptools/tests/test_passive_dofs.py +++ b/bloptools/tests/test_passive_dofs.py @@ -1,7 +1,7 @@ import pytest -from bloptools import test_functions from bloptools.bayesian import DOF, Agent, BrownianMotion, Objective +from bloptools.utils import functions @pytest.mark.test_func @@ -9,8 +9,9 @@ def test_passive_dofs(RE, db): dofs = [ DOF(name="x1", limits=(-5.0, 5.0)), DOF(name="x2", limits=(-5.0, 5.0)), + DOF(name="x3", limits=(-5.0, 5.0), active=False), DOF(BrownianMotion(name="brownian1"), read_only=True), - DOF(BrownianMotion(name="brownian2"), read_only=True), + DOF(BrownianMotion(name="brownian2"), read_only=True, active=False), ] objectives = [ @@ -20,7 +21,7 @@ def test_passive_dofs(RE, db): agent = Agent( dofs=dofs, objectives=objectives, - digestion=test_functions.constrained_himmelblau_digestion, + digestion=functions.constrained_himmelblau_digestion, db=db, verbose=True, tolerate_acquisition_errors=False, diff --git a/bloptools/utils/__init__.py 
b/bloptools/utils/__init__.py index ca9e043..98e5c40 100644 --- a/bloptools/utils/__init__.py +++ b/bloptools/utils/__init__.py @@ -1,229 +1,2 @@ -import botorch -import numpy as np -import scipy as sp -import torch -from ortools.constraint_solver import pywrapcp, routing_enums_pb2 - from .functions import * # noqa F401 - - -def sobol_sampler(bounds, n, q=1): - """ - Returns $n$ quasi-randomly sampled points within the bounds (a 2 by d tensor) - and batch size $q$ - """ - return botorch.utils.sampling.draw_sobol_samples(bounds, n=n, q=q) - - -def normalized_sobol_sampler(n, d): - """ - Returns $n$ quasi-randomly sampled points in the [0,1]^d hypercube - """ - normalized_bounds = torch.outer(torch.tensor([0, 1]), torch.ones(d)) - return sobol_sampler(normalized_bounds, n=n, q=1) - - -def estimate_root_indices(x): - # or, indices_before_sign_changes - i_whole = np.where(np.sign(x[1:]) != np.sign(x[:-1]))[0] - i_part = 1 - x[i_whole + 1] / (x[i_whole + 1] - x[i_whole]) - return i_whole + i_part - - -def _fast_psd_inverse(M): - """ - About twice as fast as np.linalg.inv for large, PSD matrices. - """ - cholesky, dpotrf_info = sp.linalg.lapack.dpotrf(M) - invM, dpotri_info = sp.linalg.lapack.dpotri(cholesky) - return np.where(invM, invM, invM.T) - - -def mprod(*M): - res = M[0] - for m in M[1:]: - res = np.matmul(res, m) - return res - - -def route(start_point, points): - """ - Returns the indices of the most efficient way to visit `points`, starting from `start_point`. - """ - - total_points = np.r_[np.atleast_2d(start_point), points] - points_scale = total_points.ptp(axis=0) - dim_mask = points_scale > 0 - - if dim_mask.sum() == 0: - return np.arange(len(points)) - - normalized_points = (total_points - total_points.min(axis=0))[:, dim_mask] / points_scale[dim_mask] - - delay_matrix = np.sqrt(np.square(normalized_points[:, None, :] - normalized_points[None, :, :]).sum(axis=-1)) - delay_matrix = (1e4 * delay_matrix).astype(int) # it likes integers idk - - manager = pywrapcp.RoutingIndexManager(len(total_points), 1, 0) # number of depots, number of salesmen, starting index - routing = pywrapcp.RoutingModel(manager) - - def delay_callback(from_index, to_index): - to_node = manager.IndexToNode(to_index) - if to_node == 0: - return 0 # it is free to return to the depot from anywhere; we just won't do it - from_node = manager.IndexToNode(from_index) - return delay_matrix[from_node][to_node] - - transit_callback_index = routing.RegisterTransitCallback(delay_callback) - routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index) - search_parameters = pywrapcp.DefaultRoutingSearchParameters() - search_parameters.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC - - solution = routing.SolveWithParameters(search_parameters) - - index = routing.Start(0) - route_indices, route_delays = [0], [] - while not routing.IsEnd(index): - previous_index = index - index = solution.Value(routing.NextVar(index)) - route_delays.append(routing.GetArcCostForVehicle(previous_index, index, 0)) - route_indices.append(index) - - # omit the first and last indices, which correspond to the start - return np.array(route_indices)[1:-1] - 1 - - -def get_movement_time(x, v_max, a): - """ - How long does it take an object to go distance $x$ with acceleration $a$ and maximum velocity $v_max$? - That's what this function answers. 
- """ - return 2 * np.sqrt(np.abs(x) / a) * (np.abs(x) < v_max**2 / a) + (np.abs(x) / v_max + v_max / a) * ( - np.abs(x) > v_max**2 / a - ) - - -def get_principal_component_bounds(image, beam_prop=0.5): - """ - Returns the bounding box in pixel units of an image, along with a goodness of fit parameter. - This should go off without a hitch as long as beam_prop is less than 1. - """ - - if image.sum() == 0: - return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan - - # print(f'{extent = }') - - u, s, v = np.linalg.svd(image - image.mean()) - separability = np.square(s[0]) / np.square(s).sum() - - # q refers to "quantile" - q_min, q_max = 0.5 * (1 - beam_prop), 0.5 * (1 + beam_prop) - - # these represent the cumulative proportion of the beam, as captured by the SVD. - cs_beam_x = np.cumsum(v[0]) / np.sum(v[0]) - cs_beam_y = np.cumsum(u[:, 0]) / np.sum(u[:, 0]) - cs_beam_x[0], cs_beam_y[0] = 0, 0 # change this lol - - # the first coordinate where the cumulative beam is greater than the minimum - i_q_min_x = np.where((cs_beam_x[1:] > q_min) & (cs_beam_x[:-1] < q_min))[0][0] - i_q_min_y = np.where((cs_beam_y[1:] > q_min) & (cs_beam_y[:-1] < q_min))[0][0] - - # the last coordinate where the cumulative beam is less than the maximum - i_q_max_x = np.where((cs_beam_x[1:] > q_max) & (cs_beam_x[:-1] < q_max))[0][-1] - i_q_max_y = np.where((cs_beam_y[1:] > q_max) & (cs_beam_y[:-1] < q_max))[0][-1] - - # interpolate, so that we can go finer than one pixel. this quartet is the "bounding box", from 0 to 1. - # (let's make this more efficient later) - x_min = np.interp(q_min, cs_beam_x[[i_q_min_x, i_q_min_x + 1]], [i_q_min_x, i_q_min_x + 1]) - x_max = np.interp(q_max, cs_beam_x[[i_q_max_x, i_q_max_x + 1]], [i_q_max_x, i_q_max_x + 1]) - y_min = np.interp(q_min, cs_beam_y[[i_q_min_y, i_q_min_y + 1]], [i_q_min_y, i_q_min_y + 1]) - y_max = np.interp(q_max, cs_beam_y[[i_q_max_y, i_q_max_y + 1]], [i_q_max_y, i_q_max_y + 1]) - - return ( - x_min, - x_max, - y_min, - y_max, - separability, - ) - - -def get_beam_bounding_box(image, thresh=0.5): - """ - Returns the bounding box in pixel units of an image, along with a goodness of fit parameter. - This should go off without a hitch as long as beam_prop is less than 1. 
- """ - - n_y, n_x = image.shape - - if image.sum() == 0: - return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan - - # filter the image - zim = sp.ndimage.median_filter(image.astype(float), size=3) - zim -= np.median(zim, axis=0) - zim -= np.median(zim, axis=1)[:, None] - - x_sum = zim.sum(axis=0) - y_sum = zim.sum(axis=1) - - x_sum_min_val = thresh * x_sum.max() - y_sum_min_val = thresh * y_sum.max() - - gtt_x = x_sum > x_sum_min_val - gtt_y = y_sum > y_sum_min_val - - i_x_min_start = np.where(~gtt_x[:-1] & gtt_x[1:])[0][0] - i_x_max_start = np.where(gtt_x[:-1] & ~gtt_x[1:])[0][-1] - i_y_min_start = np.where(~gtt_y[:-1] & gtt_y[1:])[0][0] - i_y_max_start = np.where(gtt_y[:-1] & ~gtt_y[1:])[0][-1] - - x_min = ( - 0 - if gtt_x[0] - else np.interp(x_sum_min_val, x_sum[[i_x_min_start, i_x_min_start + 1]], [i_x_min_start, i_x_min_start + 1]) - ) - y_min = ( - 0 - if gtt_y[0] - else np.interp(y_sum_min_val, y_sum[[i_y_min_start, i_y_min_start + 1]], [i_y_min_start, i_y_min_start + 1]) - ) - x_max = ( - n_x - 2 - if gtt_x[-1] - else np.interp(x_sum_min_val, x_sum[[i_x_max_start + 1, i_x_max_start]], [i_x_max_start + 1, i_x_max_start]) - ) - y_max = ( - n_y - 2 - if gtt_y[-1] - else np.interp(y_sum_min_val, y_sum[[i_y_max_start + 1, i_y_max_start]], [i_y_max_start + 1, i_y_max_start]) - ) - - return ( - x_min, - x_max, - y_min, - y_max, - ) - - -def best_image_feedback(image): - n_y, n_x = image.shape - - fim = sp.ndimage.median_filter(image, size=3) - - masked_image = fim * (fim - fim.mean() > 0.5 * fim.ptp()) - - x_weight = masked_image.sum(axis=0) - y_weight = masked_image.sum(axis=1) - - x = np.arange(n_x) - y = np.arange(n_y) - - x0 = np.sum(x_weight * x) / np.sum(x_weight) - y0 = np.sum(y_weight * y) / np.sum(y_weight) - - xw = 2 * np.sqrt((np.sum(x_weight * x**2) / np.sum(x_weight) - x0**2)) - yw = 2 * np.sqrt((np.sum(y_weight * y**2) / np.sum(y_weight) - y0**2)) - - return x0, xw, y0, yw +from .misc import * # noqa F401 diff --git a/bloptools/utils/functions.py b/bloptools/utils/functions.py index 377cf16..b46426b 100644 --- a/bloptools/utils/functions.py +++ b/bloptools/utils/functions.py @@ -1,6 +1,10 @@ import numpy as np +def sigmoid(x): + return 1 / (1 + np.exp(-x)) + + def booth(x1, x2): """ The Booth function (https://en.wikipedia.org/wiki/Test_functions_for_optimization) diff --git a/bloptools/utils/misc.py b/bloptools/utils/misc.py new file mode 100644 index 0000000..6f3bd85 --- /dev/null +++ b/bloptools/utils/misc.py @@ -0,0 +1,227 @@ +import botorch +import numpy as np +import scipy as sp +import torch +from ortools.constraint_solver import pywrapcp, routing_enums_pb2 + + +def sobol_sampler(bounds, n, q=1): + """ + Returns $n$ quasi-randomly sampled points within the bounds (a 2 by d tensor) + and batch size $q$ + """ + return botorch.utils.sampling.draw_sobol_samples(bounds, n=n, q=q) + + +def normalized_sobol_sampler(n, d): + """ + Returns $n$ quasi-randomly sampled points in the [0,1]^d hypercube + """ + normalized_bounds = torch.outer(torch.tensor([0, 1]), torch.ones(d)) + return sobol_sampler(normalized_bounds, n=n, q=1) + + +def estimate_root_indices(x): + # or, indices_before_sign_changes + i_whole = np.where(np.sign(x[1:]) != np.sign(x[:-1]))[0] + i_part = 1 - x[i_whole + 1] / (x[i_whole + 1] - x[i_whole]) + return i_whole + i_part + + +def _fast_psd_inverse(M): + """ + About twice as fast as np.linalg.inv for large, PSD matrices. 
+ """ + cholesky, dpotrf_info = sp.linalg.lapack.dpotrf(M) + invM, dpotri_info = sp.linalg.lapack.dpotri(cholesky) + return np.where(invM, invM, invM.T) + + + def mprod(*M): + res = M[0] + for m in M[1:]: + res = np.matmul(res, m) + return res + + + def route(start_point, points): + """ + Returns the indices of the most efficient way to visit `points`, starting from `start_point`. + """ + + total_points = np.r_[np.atleast_2d(start_point), points] + points_scale = total_points.ptp(axis=0) + dim_mask = points_scale > 0 + + if dim_mask.sum() == 0: + return np.arange(len(points)) + + normalized_points = (total_points - total_points.min(axis=0))[:, dim_mask] / points_scale[dim_mask] + + delay_matrix = np.sqrt(np.square(normalized_points[:, None, :] - normalized_points[None, :, :]).sum(axis=-1)) + delay_matrix = (1e4 * delay_matrix).astype(int) # the OR-Tools solver expects integer costs + + manager = pywrapcp.RoutingIndexManager(len(total_points), 1, 0) # number of locations, number of vehicles, index of the depot + routing = pywrapcp.RoutingModel(manager) + + def delay_callback(from_index, to_index): + to_node = manager.IndexToNode(to_index) + if to_node == 0: + return 0 # it is free to return to the depot from anywhere; we just won't do it + from_node = manager.IndexToNode(from_index) + return delay_matrix[from_node][to_node] + + transit_callback_index = routing.RegisterTransitCallback(delay_callback) + routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index) + search_parameters = pywrapcp.DefaultRoutingSearchParameters() + search_parameters.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC + + solution = routing.SolveWithParameters(search_parameters) + + index = routing.Start(0) + route_indices, route_delays = [0], [] + while not routing.IsEnd(index): + previous_index = index + index = solution.Value(routing.NextVar(index)) + route_delays.append(routing.GetArcCostForVehicle(previous_index, index, 0)) + route_indices.append(index) + + # omit the first and last indices, which correspond to the start + return np.array(route_indices)[1:-1] - 1 + + + def get_movement_time(x, v_max, a): + """ + Returns the time it takes to travel a distance $x$, starting and ending at rest, + with acceleration $a$ and maximum velocity $v_max$. + """ + return 2 * np.sqrt(np.abs(x) / a) * (np.abs(x) < v_max**2 / a) + (np.abs(x) / v_max + v_max / a) * ( + np.abs(x) > v_max**2 / a + ) + + + def get_principal_component_bounds(image, beam_prop=0.5): + """ + Returns the bounding box in pixel units of an image, along with a goodness of fit parameter. + This should go off without a hitch as long as beam_prop is less than 1. + """ + + if image.sum() == 0: + return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan + + # print(f'{extent = }') + + u, s, v = np.linalg.svd(image - image.mean()) + separability = np.square(s[0]) / np.square(s).sum() + + # q refers to "quantile" + q_min, q_max = 0.5 * (1 - beam_prop), 0.5 * (1 + beam_prop) + + # these represent the cumulative proportion of the beam, as captured by the SVD.
+ cs_beam_x = np.cumsum(v[0]) / np.sum(v[0]) + cs_beam_y = np.cumsum(u[:, 0]) / np.sum(u[:, 0]) + cs_beam_x[0], cs_beam_y[0] = 0, 0 # anchor the cumulative sums at zero + + # the first coordinate where the cumulative beam is greater than the minimum + i_q_min_x = np.where((cs_beam_x[1:] > q_min) & (cs_beam_x[:-1] < q_min))[0][0] + i_q_min_y = np.where((cs_beam_y[1:] > q_min) & (cs_beam_y[:-1] < q_min))[0][0] + + # the last coordinate where the cumulative beam is less than the maximum + i_q_max_x = np.where((cs_beam_x[1:] > q_max) & (cs_beam_x[:-1] < q_max))[0][-1] + i_q_max_y = np.where((cs_beam_y[1:] > q_max) & (cs_beam_y[:-1] < q_max))[0][-1] + + # interpolate, so that we can go finer than one pixel. this quartet is the "bounding box", from 0 to 1. + # (let's make this more efficient later) + x_min = np.interp(q_min, cs_beam_x[[i_q_min_x, i_q_min_x + 1]], [i_q_min_x, i_q_min_x + 1]) + x_max = np.interp(q_max, cs_beam_x[[i_q_max_x, i_q_max_x + 1]], [i_q_max_x, i_q_max_x + 1]) + y_min = np.interp(q_min, cs_beam_y[[i_q_min_y, i_q_min_y + 1]], [i_q_min_y, i_q_min_y + 1]) + y_max = np.interp(q_max, cs_beam_y[[i_q_max_y, i_q_max_y + 1]], [i_q_max_y, i_q_max_y + 1]) + + return ( + x_min, + x_max, + y_min, + y_max, + separability, + ) + + + def get_beam_bounding_box(image, thresh=0.5): + """ + Returns the bounding box in pixel units of an image, + thresholded at `thresh` times the peak of the background-subtracted row and column sums. + """ + + n_y, n_x = image.shape + + if image.sum() == 0: + return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan + + # filter the image + zim = sp.ndimage.median_filter(image.astype(float), size=3) + zim -= np.median(zim, axis=0) + zim -= np.median(zim, axis=1)[:, None] + + x_sum = zim.sum(axis=0) + y_sum = zim.sum(axis=1) + + x_sum_min_val = thresh * x_sum.max() + y_sum_min_val = thresh * y_sum.max() + + gtt_x = x_sum > x_sum_min_val + gtt_y = y_sum > y_sum_min_val + + i_x_min_start = np.where(~gtt_x[:-1] & gtt_x[1:])[0][0] + i_x_max_start = np.where(gtt_x[:-1] & ~gtt_x[1:])[0][-1] + i_y_min_start = np.where(~gtt_y[:-1] & gtt_y[1:])[0][0] + i_y_max_start = np.where(gtt_y[:-1] & ~gtt_y[1:])[0][-1] + + x_min = ( + 0 + if gtt_x[0] + else np.interp(x_sum_min_val, x_sum[[i_x_min_start, i_x_min_start + 1]], [i_x_min_start, i_x_min_start + 1]) + ) + y_min = ( + 0 + if gtt_y[0] + else np.interp(y_sum_min_val, y_sum[[i_y_min_start, i_y_min_start + 1]], [i_y_min_start, i_y_min_start + 1]) + ) + x_max = ( + n_x - 2 + if gtt_x[-1] + else np.interp(x_sum_min_val, x_sum[[i_x_max_start + 1, i_x_max_start]], [i_x_max_start + 1, i_x_max_start]) + ) + y_max = ( + n_y - 2 + if gtt_y[-1] + else np.interp(y_sum_min_val, y_sum[[i_y_max_start + 1, i_y_max_start]], [i_y_max_start + 1, i_y_max_start]) + ) + + return ( + x_min, + x_max, + y_min, + y_max, + ) + + + def best_image_feedback(image): + n_y, n_x = image.shape + + fim = sp.ndimage.median_filter(image, size=3) + + masked_image = fim * (fim - fim.mean() > 0.5 * fim.ptp()) + + x_weight = masked_image.sum(axis=0) + y_weight = masked_image.sum(axis=1) + + x = np.arange(n_x) + y = np.arange(n_y) + + x0 = np.sum(x_weight * x) / np.sum(x_weight) + y0 = np.sum(y_weight * y) / np.sum(y_weight) + + xw = 2 * np.sqrt((np.sum(x_weight * x**2) / np.sum(x_weight) - x0**2)) + yw = 2 * np.sqrt((np.sum(y_weight * y**2) / np.sum(y_weight) - y0**2)) + + return x0, xw, y0, yw diff --git a/docs/source/tutorials/Untitled.ipynb b/docs/source/tutorials/Untitled.ipynb new file mode 100644 index 0000000..d833089 --- /dev/null +++ 
b/docs/source/tutorials/Untitled.ipynb @@ -0,0 +1,72 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "9b72e238-f9c5-44ff-b519-63cdbfc8db3b", + "metadata": {}, + "outputs": [], + "source": [ + "from bloptools.utils import functions\n", + "from bloptools.bayesian import DOF, Agent, BrownianMotion, Objective\n", + "\n", + "\n", + "dofs = [\n", + " DOF(name=\"x1\", limits=(-5.0, 5.0)),\n", + " DOF(name=\"x2\", limits=(-5.0, 5.0)),\n", + " DOF(name=\"x3\", limits=(-5.0, 5.0), active=False),\n", + " DOF(BrownianMotion(name=\"brownian1\"), read_only=True),\n", + " DOF(BrownianMotion(name=\"brownian2\"), read_only=True, active=False),\n", + "]\n", + "\n", + "objectives = [\n", + " Objective(key=\"himmelblau\", minimize=True),\n", + "]\n", + "\n", + "agent = Agent(\n", + " dofs=dofs,\n", + " objectives=objectives,\n", + " digestion=functions.constrained_himmelblau_digestion,\n", + " db=db,\n", + " verbose=True,\n", + " tolerate_acquisition_errors=False,\n", + ")\n", + "\n", + "RE(agent.learn(\"qr\", n=32))\n", + "\n", + "agent.plot_objectives()\n", + "agent.plot_acquisition()\n", + "agent.plot_validity()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b54b7c0-f1b2-4544-84fb-b581fb93f5d9", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/source/tutorials/Untitled1.ipynb b/docs/source/tutorials/Untitled1.ipynb new file mode 100644 index 0000000..60a6f5b --- /dev/null +++ b/docs/source/tutorials/Untitled1.ipynb @@ -0,0 +1,92 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "bb7b2eff-7c38-4a50-9a0f-9c27bd7cfc15", + "metadata": {}, + "outputs": [], + "source": [ + "from bloptools.utils import prepare_re_env\n", + "\n", + "%run -i $prepare_re_env.__file__ --db-type=temp\n", + "from bloptools.utils import functions\n", + "from bloptools.bayesian import DOF, Agent, BrownianMotion, Objective" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5e55ada4-bbab-48d0-8ba0-5c9b4880b363", + "metadata": {}, + "outputs": [], + "source": [ + "dofs = [\n", + " DOF(name=\"x1\", limits=(-5.0, 5.0)),\n", + " DOF(name=\"x2\", limits=(-5.0, 5.0)),\n", + " DOF(name=\"x3\", limits=(-5.0, 5.0), active=False),\n", + " DOF(BrownianMotion(name=\"brownian1\"), read_only=True),\n", + " DOF(BrownianMotion(name=\"brownian2\"), read_only=True, active=False),\n", + "]\n", + "\n", + "objectives = [\n", + " Objective(key=\"himmelblau\", minimize=True),\n", + "]\n", + "\n", + "agent = Agent(\n", + " dofs=dofs,\n", + " objectives=objectives,\n", + " digestion=functions.constrained_himmelblau_digestion,\n", + " db=db,\n", + " verbose=True,\n", + " tolerate_acquisition_errors=False,\n", + ")\n", + "\n", + "RE(agent.learn(\"qr\", n=32))\n", + "\n", + "agent.plot_objectives()\n", + "agent.plot_acquisition()\n", + "agent.plot_validity()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6793103c-684d-4e22-9ccd-ef0306cd80bf", + "metadata": {}, + "outputs": [], + "source": [ + "%debug" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"63d9e7b5-03a3-4893-80ac-0611a4ab26e5", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/source/tutorials/himmelblau.ipynb b/docs/source/tutorials/himmelblau.ipynb index d42c32a..54fa840 100644 --- a/docs/source/tutorials/himmelblau.ipynb +++ b/docs/source/tutorials/himmelblau.ipynb @@ -41,12 +41,12 @@ "import numpy as np\n", "import matplotlib as mpl\n", "from matplotlib import pyplot as plt\n", - "from bloptools import test_functions\n", + "from bloptools.utils import functions\n", "\n", "x1 = x2 = np.linspace(-6, 6, 1024)\n", "X1, X2 = np.meshgrid(x1, x2)\n", "\n", - "F = test_functions.himmelblau(X1, X2)\n", + "F = functions.himmelblau(X1, X2)\n", "\n", "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(vmin=1e-1, vmax=1e3), cmap=\"magma_r\")\n", "plt.colorbar()\n", @@ -119,7 +119,7 @@ " products = db[uid].table()\n", "\n", " for index, entry in products.iterrows():\n", - " products.loc[index, \"himmelblau\"] = test_functions.himmelblau(entry.x1, entry.x2)\n", + " products.loc[index, \"himmelblau\"] = functions.himmelblau(entry.x1, entry.x2)\n", "\n", " return products" ] diff --git a/docs/source/tutorials/hyperparameters.ipynb b/docs/source/tutorials/hyperparameters.ipynb index 29bda11..e8c42b9 100644 --- a/docs/source/tutorials/hyperparameters.ipynb +++ b/docs/source/tutorials/hyperparameters.ipynb @@ -21,12 +21,12 @@ "import numpy as np\n", "import matplotlib as mpl\n", "from matplotlib import pyplot as plt\n", - "from bloptools import test_functions\n", + "from bloptools.utils import functions\n", "\n", "x1 = x2 = np.linspace(-10, 10, 256)\n", "X1, X2 = np.meshgrid(x1, x2)\n", "\n", - "F = test_functions.booth(X1, X2)\n", + "F = functions.booth(X1, X2)\n", "\n", "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(), shading=\"auto\")\n", "plt.colorbar()\n", @@ -54,7 +54,7 @@ " products = db[uid].table()\n", "\n", " for index, entry in products.iterrows():\n", - " products.loc[index, \"booth\"] = test_functions.booth(entry.x1, entry.x2)\n", + " products.loc[index, \"booth\"] = functions.booth(entry.x1, entry.x2)\n", "\n", " return products" ] diff --git a/docs/source/tutorials/passive-dofs.ipynb b/docs/source/tutorials/passive-dofs.ipynb index 1de0900..3f88734 100644 --- a/docs/source/tutorials/passive-dofs.ipynb +++ b/docs/source/tutorials/passive-dofs.ipynb @@ -38,24 +38,40 @@ "metadata": {}, "outputs": [], "source": [ - "from bloptools.utils import prepare_re_env\n", - "\n", - "%run -i $prepare_re_env.__file__ --db-type=temp\n", + "import pytest\n", "\n", - "from bloptools import devices, test_functions\n", - "from bloptools.bayesian import Agent\n", + "from bloptools.utils import functions\n", + "from bloptools.bayesian import DOF, Agent, BrownianMotion, Objective\n", "\n", "\n", - "def digestion(db, uid):\n", - " products = db[uid].table()\n", + "@pytest.mark.test_func\n", + "def test_passive_dofs(RE, db):\n", + " dofs = [\n", + " DOF(name=\"x1\", limits=(-5.0, 5.0)),\n", + " DOF(name=\"x2\", limits=(-5.0, 5.0)),\n", + " DOF(name=\"x3\", limits=(-5.0, 5.0), active=False),\n", + " DOF(BrownianMotion(name=\"brownian1\"), 
read_only=True),\n", + " DOF(BrownianMotion(name=\"brownian2\"), read_only=True, active=False),\n", + " ]\n", "\n", - " for index, entry in products.iterrows():\n", - " products.loc[index, \"styblinksi-tang\"] = test_functions.styblinski_tang(entry.x1 - 1e-1 * entry.brownian)\n", + " objectives = [\n", + " Objective(key=\"himmelblau\", minimize=True),\n", + " ]\n", "\n", - " return products\n", + " agent = Agent(\n", + " dofs=dofs,\n", + " objectives=objectives,\n", + " digestion=functions.constrained_himmelblau_digestion,\n", + " db=db,\n", + " verbose=True,\n", + " tolerate_acquisition_errors=False,\n", + " )\n", "\n", + " RE(agent.learn(\"qr\", n=32))\n", "\n", - "\n" + " agent.plot_objectives()\n", + " agent.plot_acquisition()\n", + " agent.plot_validity()" ] }, { diff --git a/docs/wip/constrained-himmelblau copy.ipynb b/docs/wip/constrained-himmelblau copy.ipynb index c566dda..0c8a7b3 100644 --- a/docs/wip/constrained-himmelblau copy.ipynb +++ b/docs/wip/constrained-himmelblau copy.ipynb @@ -29,14 +29,14 @@ "import numpy as np\n", "import matplotlib as mpl\n", "from matplotlib import pyplot as plt\n", - "from bloptools import test_functions\n", + "from bloptools.utils import functions\n", "\n", "x1 = x2 = np.linspace(-8, 8, 256)\n", "X1, X2 = np.meshgrid(x1, x2)\n", "from bloptools.tasks import Task\n", "\n", "task = Task(key=\"himmelblau\", kind=\"min\")\n", - "F = test_functions.constrained_himmelblau(X1, X2)\n", + "F = functions.constrained_himmelblau(X1, X2)\n", "\n", "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(), cmap=\"gnuplot\")\n", "plt.colorbar()\n", @@ -107,7 +107,7 @@ " products = db[uid].table()\n", "\n", " for index, entry in products.iterrows():\n", - " products.loc[index, \"himmelblau\"] = test_functions.constrained_himmelblau(entry.x1, entry.x2)\n", + " products.loc[index, \"himmelblau\"] = functions.constrained_himmelblau(entry.x1, entry.x2)\n", "\n", " return products" ] diff --git a/docs/wip/introduction.ipynb b/docs/wip/introduction.ipynb index 709af97..aede30e 100644 --- a/docs/wip/introduction.ipynb +++ b/docs/wip/introduction.ipynb @@ -31,11 +31,11 @@ "import matplotlib as mpl\n", "from matplotlib import pyplot as plt\n", "\n", - "from bloptools import test_functions\n", + "from bloptools.utils import functions\n", "\n", "x = np.linspace(-5, 5, 256)\n", "\n", - "plt.plot(x, test_functions.styblinski_tang(x), c=\"b\")\n", + "plt.plot(x, functions.styblinski_tang(x), c=\"b\")\n", "plt.xlim(-5, 5)" ] }, @@ -95,7 +95,7 @@ " products = db[uid].table()\n", "\n", " for index, entry in products.iterrows():\n", - " products.loc[index, \"styblinski-tang\"] = test_functions.styblinski_tang(entry.x)\n", + " products.loc[index, \"styblinski-tang\"] = functions.styblinski_tang(entry.x)\n", "\n", " return products" ]
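
The plotting changes above exist because `x_dof` and `y_dof` were previously indexed from `agent.dofs.subset(active=True)` while the `gridded` default counted `subset(active=True, read_only=False)`, so a read-only DOF listed ahead of the writable ones could shift which DOFs get plotted. A minimal stand-in sketch of that failure mode (plain Python; the `DOF` class here is a mock for illustration, not the bloptools one):

```python
from dataclasses import dataclass


@dataclass
class DOF:  # mock of bloptools.bayesian.DOF, for illustration only
    name: str
    active: bool = True
    read_only: bool = False


dofs = [DOF("brownian1", read_only=True), DOF("x1"), DOF("x2")]

active = [dof for dof in dofs if dof.active]
plottable = [dof for dof in active if not dof.read_only]

print(active[0].name)     # brownian1 -- what indexing the "active" subset picks
print(plottable[0].name)  # x1        -- what indexing the plottable subset picks
```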
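The new `.squeeze()` calls guard the conversion to numpy: if, say, a read-only DOF contributes a length-1 axis to the test grid, indexing with `[..., 0, k]` leaves a stray singleton dimension that `pcolormesh` and `scatter` would reject. A standalone shape sketch (the tensor is a stand-in for `agent.test_inputs_grid()`; its dimensions are assumed for illustration):

```python
import torch

# hypothetical grid over two writable DOFs plus one read-only DOF:
# shape (*grid_shape, 1, n_active_dofs), with a singleton grid axis for the read-only DOF
test_inputs = torch.zeros(64, 64, 1, 1, 3)

test_x = test_inputs[..., 0, 0]
print(test_x.shape)            # torch.Size([64, 64, 1]) -- stray singleton axis
print(test_x.squeeze().shape)  # torch.Size([64, 64])    -- ready for pcolormesh
```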
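`get_movement_time`, moved into `utils/misc.py`, models a rest-to-rest move: a triangular velocity profile when the distance is too short to reach `v_max`, and a trapezoidal one otherwise. A quick sanity check of the formula (the numbers are made up for illustration):

```python
import numpy as np


def get_movement_time(x, v_max, a):  # mirrors the function moved in this diff
    return 2 * np.sqrt(np.abs(x) / a) * (np.abs(x) < v_max**2 / a) + (np.abs(x) / v_max + v_max / a) * (
        np.abs(x) > v_max**2 / a
    )


v_max, a = 2.0, 1.0
print(get_movement_time(1.0, v_max, a))  # triangular profile: 2 * sqrt(1 / 1) = 2.0
print(get_movement_time(8.0, v_max, a))  # trapezoidal profile: 8 / 2 + 2 / 1 = 6.0

# note: both inequalities are strict, so a move of exactly v_max**2 / a (= 4.0) returns 0.0
print(get_movement_time(4.0, v_max, a))
```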
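`route` also moves unchanged; it orders candidate points with OR-Tools' routing solver and treats the return to the start as free. A hedged usage sketch, assuming `ortools` is installed and relying on the new `utils/__init__.py` re-exporting `misc`:

```python
import numpy as np

from bloptools.utils import route  # re-exported via `from .misc import *`

start_point = np.array([0.0, 0.0])
points = np.array([[1.0, 1.0], [0.1, 0.1], [0.5, 0.5]])

# indices into `points`, ordered to minimize total travel from start_point;
# here the nearest-first ordering would be [1, 2, 0]
print(route(start_point, points))
```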
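The image-statistics helpers move verbatim as well. For reference, `get_beam_bounding_box` thresholds the background-subtracted row and column sums; a synthetic-image sketch (the Gaussian spot and array sizes are arbitrary test inputs, not from this diff):

```python
import numpy as np

from bloptools.utils import get_beam_bounding_box

# synthetic Gaussian spot centered near (x, y) = (40, 24)
y, x = np.mgrid[0:64, 0:64]
image = np.exp(-0.5 * (((x - 40) / 4) ** 2 + ((y - 24) / 6) ** 2))

x_min, x_max, y_min, y_max = get_beam_bounding_box(image, thresh=0.5)
print(x_min, x_max, y_min, y_max)  # should roughly bracket the spot
```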