From bbdf4ab32efed70ff753a567ba6df6e467812fa9 Mon Sep 17 00:00:00 2001 From: Thomas Morris Date: Tue, 24 Oct 2023 13:57:41 -0400 Subject: [PATCH] fixed tutorial notebooks --- docs/source/tutorials/himmelblau.ipynb | 144 ++++++++++++++++---- docs/source/tutorials/hyperparameters.ipynb | 26 ++-- docs/source/tutorials/passive-dofs.ipynb | 75 ++++++---- 3 files changed, 176 insertions(+), 69 deletions(-) diff --git a/docs/source/tutorials/himmelblau.ipynb b/docs/source/tutorials/himmelblau.ipynb index 1a93aa7..54fa840 100644 --- a/docs/source/tutorials/himmelblau.ipynb +++ b/docs/source/tutorials/himmelblau.ipynb @@ -19,6 +19,18 @@ "Let's use ``bloptools`` to minimize Himmelblau's function, which has four global minima:" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf27fc9e-d11c-40f4-a200-98e7814f506b", + "metadata": {}, + "outputs": [], + "source": [ + "from bloptools.utils import prepare_re_env\n", + "\n", + "%run -i $prepare_re_env.__file__ --db-type=temp" + ] + }, { "cell_type": "code", "execution_count": null, @@ -29,16 +41,14 @@ "import numpy as np\n", "import matplotlib as mpl\n", "from matplotlib import pyplot as plt\n", - "from bloptools import test_functions\n", + "from bloptools.utils import functions\n", "\n", "x1 = x2 = np.linspace(-6, 6, 1024)\n", "X1, X2 = np.meshgrid(x1, x2)\n", - "from bloptools.tasks import Task\n", "\n", - "task = Task(key=\"himmelblau\", kind=\"min\")\n", - "F = test_functions.himmelblau(X1, X2)\n", + "F = functions.himmelblau(X1, X2)\n", "\n", - "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm())\n", + "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(vmin=1e-1, vmax=1e3), cmap=\"magma_r\")\n", "plt.colorbar()\n", "plt.xlabel(\"x1\")\n", "plt.ylabel(\"x2\")" @@ -59,11 +69,13 @@ "metadata": {}, "outputs": [], "source": [ - "from bloptools import devices\n", + "from bloptools.bayesian import DOF, BrownianMotion\n", "\n", "dofs = [\n", - " {\"device\": devices.DOF(name=\"x1\"), \"limits\": (-6, 6), \"kind\": \"active\"},\n", - " {\"device\": devices.DOF(name=\"x2\"), \"limits\": (-6, 6), \"kind\": \"active\"},\n", + " DOF(name=\"x1\", limits=(-6, 6)),\n", + " DOF(name=\"x2\", limits=(-6, 6)),\n", + " DOF(BrownianMotion(name=\"brownian1\"), read_only=True),\n", + " DOF(BrownianMotion(name=\"brownian2\"), read_only=True),\n", "]" ] }, @@ -82,9 +94,9 @@ "metadata": {}, "outputs": [], "source": [ - "tasks = [\n", - " {\"key\": \"himmelblau\", \"kind\": \"minimize\"},\n", - "]" + "from bloptools.bayesian import Objective\n", + "\n", + "objectives = [Objective(key=\"himmelblau\", minimize=True)]" ] }, { @@ -107,7 +119,7 @@ " products = db[uid].table()\n", "\n", " for index, entry in products.iterrows():\n", - " products.loc[index, \"himmelblau\"] = test_functions.himmelblau(entry.x1, entry.x2)\n", + " products.loc[index, \"himmelblau\"] = functions.himmelblau(entry.x1, entry.x2)\n", "\n", " return products" ] @@ -130,54 +142,62 @@ }, "outputs": [], "source": [ - "from bloptools.utils import prepare_re_env\n", - "\n", - "%run -i $prepare_re_env.__file__ --db-type=temp\n", "from bloptools.bayesian import Agent\n", "\n", + "\n", "agent = Agent(\n", " dofs=dofs,\n", - " tasks=tasks,\n", + " objectives=objectives,\n", " digestion=digestion,\n", " db=db,\n", ")" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "e964d5a5-2a4a-4403-8c06-4ad17b00cecf", + "metadata": {}, + "outputs": [], + "source": [ + "agent.test_inputs_grid().shape" + ] + }, { "cell_type": "markdown", - "id": "b7a608d6", + "id": "27685849", "metadata": {}, 
"source": [ - "To decide which points to sample, the agent needs an acquisition function. The available acquisition function are here:" + "Without any data, we can't make any inferences about what the function looks like, and so we can't use any non-trivial acquisition functions. Let's start by quasi-randomly sampling the parameter space, and plotting our model of the function:" ] }, { "cell_type": "code", "execution_count": null, - "id": "fb06739b", + "id": "996da937", "metadata": {}, "outputs": [], "source": [ - "agent.acq_func_info" + "RE(agent.learn(\"quasi-random\", n=32))\n", + "agent.plot_objectives()" ] }, { "cell_type": "markdown", - "id": "27685849", + "id": "dc264346-10fb-4c88-9925-4bfcf0dd3b07", "metadata": {}, "source": [ - "Without any data, we can't make any inferences about what the function looks like, and so we can't use any non-trivial acquisition functions. Let's start by quasi-randomly sampling the parameter space, and plotting our model of the function:" + "To decide which points to sample, the agent needs an acquisition function. The available acquisition function are here:" ] }, { "cell_type": "code", "execution_count": null, - "id": "996da937", + "id": "fb06739b", "metadata": {}, "outputs": [], "source": [ - "RE(agent.learn(\"quasi-random\", n=32))\n", - "agent.plot_tasks()" + "agent.acq_func_info" ] }, { @@ -196,9 +216,43 @@ "metadata": {}, "outputs": [], "source": [ - "agent.plot_acquisition(acq_funcs=[\"qei\", \"qpi\", \"ucb\"])" + "agent.plot_acquisition(acq_funcs=[\"qei\", \"pi\", \"qucb\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9c0b42dc-2df3-4ba6-b02f-569dab48db80", + "metadata": {}, + "outputs": [], + "source": [ + "agent.dofs.limits" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "62b24d9b-740f-45a6-9617-47796c260273", + "metadata": {}, + "outputs": [], + "source": [ + "self = agent\n", + "import torch\n", + "\n", + "acq_func_lower_bounds = [dof.lower_limit if not dof.read_only else dof.readback for dof in self.dofs]\n", + "acq_func_upper_bounds = [dof.upper_limit if not dof.read_only else dof.readback for dof in self.dofs]\n", + "\n", + "torch.tensor(np.vstack([acq_func_lower_bounds, acq_func_upper_bounds]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "217158c0-aa65-409c-a7ab-63bb924723ad", + "metadata": {}, + "outputs": [], + "source": [] + }, { "attachments": {}, "cell_type": "markdown", @@ -208,6 +262,24 @@ "To decide where to go, the agent will find the inputs that maximize a given acquisition function:" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "16ec3c97-211b-49df-9e45-fcdd61ae98eb", + "metadata": {}, + "outputs": [], + "source": [ + "agent.acquisition_function_bounds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a066da53-0cdc-429b-a588-ce22b4a599b5", + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": null, @@ -238,9 +310,13 @@ }, "outputs": [], "source": [ - "X, _ = agent.ask(\"qei\", n=8)\n", + "X, _ = agent.ask(\"qei\", n=8, route=True)\n", "agent.plot_acquisition(acq_funcs=[\"qei\"])\n", - "plt.plot(*X.T, lw=5e-1, c=\"r\", marker=\"x\")" + "plt.scatter(*X.T, marker=\"d\", facecolor=\"w\", edgecolor=\"k\")\n", + "plt.plot(\n", + " *X.T,\n", + " color=\"r\",\n", + ")" ] }, { @@ -276,9 +352,17 @@ "metadata": {}, "outputs": [], "source": [ - "agent.plot_tasks()\n", - "print(agent.best_inputs)" + "agent.plot_objectives()\n", + "# print(agent.best_inputs)" ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "b6453b87-f864-40af-ba70-9a42960f54b9", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/docs/source/tutorials/hyperparameters.ipynb b/docs/source/tutorials/hyperparameters.ipynb index e4026e1..e47c05b 100644 --- a/docs/source/tutorials/hyperparameters.ipynb +++ b/docs/source/tutorials/hyperparameters.ipynb @@ -21,12 +21,12 @@ "import numpy as np\n", "import matplotlib as mpl\n", "from matplotlib import pyplot as plt\n", - "from bloptools import test_functions\n", + "from bloptools.utils import functions\n", "\n", "x1 = x2 = np.linspace(-10, 10, 256)\n", "X1, X2 = np.meshgrid(x1, x2)\n", "\n", - "F = test_functions.booth(X1, X2)\n", + "F = functions.booth(X1, X2)\n", "\n", "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(), shading=\"auto\")\n", "plt.colorbar()\n", @@ -54,7 +54,7 @@ " products = db[uid].table()\n", "\n", " for index, entry in products.iterrows():\n", - " products.loc[index, \"booth\"] = test_functions.booth(entry.x1, entry.x2)\n", + " products.loc[index, \"booth\"] = functions.booth(entry.x1, entry.x2)\n", "\n", " return products" ] @@ -72,28 +72,28 @@ "\n", "%run -i $prepare_re_env.__file__ --db-type=temp\n", "\n", - "from bloptools import devices\n", - "from bloptools.bayesian import Agent\n", + "from bloptools.bayesian import DOF, Objective, Agent\n", "\n", "dofs = [\n", - " {\"device\": devices.DOF(name=\"x1\"), \"limits\": (-5, 5), \"kind\": \"active\"},\n", - " {\"device\": devices.DOF(name=\"x2\"), \"limits\": (-5, 5), \"kind\": \"active\"},\n", + " DOF(name=\"x1\", limits=(-6, 6)),\n", + " DOF(name=\"x2\", limits=(-6, 6)),\n", "]\n", "\n", - "tasks = [\n", - " {\"key\": \"booth\", \"kind\": \"minimize\"},\n", + "objectives = [\n", + " Objective(key=\"booth\", minimize=True),\n", "]\n", "\n", + "\n", "agent = Agent(\n", " dofs=dofs,\n", - " tasks=tasks,\n", + " objectives=objectives,\n", " digestion=digestion,\n", " db=db,\n", ")\n", "\n", "RE(agent.learn(acq_func=\"qr\", n=16))\n", "\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] }, { @@ -125,13 +125,13 @@ "outputs": [], "source": [ "RE(agent.learn(\"qei\", n=4, iterations=4))\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3.11.4 64-bit", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/docs/source/tutorials/passive-dofs.ipynb b/docs/source/tutorials/passive-dofs.ipynb index 363a2a0..aa257f2 100644 --- a/docs/source/tutorials/passive-dofs.ipynb +++ b/docs/source/tutorials/passive-dofs.ipynb @@ -28,40 +28,63 @@ "source": [ "from bloptools.utils import prepare_re_env\n", "\n", - "%run -i $prepare_re_env.__file__ --db-type=temp\n", - "\n", - "from bloptools import devices, test_functions\n", - "from bloptools.bayesian import Agent\n", - "\n", - "\n", - "def digestion(db, uid):\n", - " products = db[uid].table()\n", + "%run -i $prepare_re_env.__file__ --db-type=temp" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4cf5dbd1-e404-4504-b822-3956ca61ef74", + "metadata": {}, + "outputs": [], + "source": [ + "from bloptools.utils import prepare_re_env\n", "\n", - " for index, entry in products.iterrows():\n", - " products.loc[index, \"styblinksi-tang\"] = test_functions.styblinski_tang(entry.x - 1e-1 * entry.brownian)\n", + "%run -i $prepare_re_env.__file__ --db-type=temp\n", + "import pytest\n", "\n", - " return products\n", + "from bloptools.utils import functions\n", + "from 
bloptools.bayesian import DOF, Agent, BrownianMotion, Objective\n",
"\n",
"\n",
- "dofs = [\n",
- " {\"device\": devices.DOF(name=\"x\"), \"limits\": (-5, 5), \"kind\": \"active\"},\n",
- " {\"device\": devices.BrownianMotion(name=\"brownian\"), \"limits\": (-2, 2), \"kind\": \"passive\"},\n",
- "]\n",
+ "dofs = [\n",
+ "    DOF(name=\"x1\", limits=(-5.0, 5.0)),\n",
+ "    DOF(name=\"x2\", limits=(-5.0, 5.0)),\n",
+ "    DOF(name=\"x3\", limits=(-5.0, 5.0), active=False),\n",
+ "    DOF(BrownianMotion(name=\"brownian1\"), read_only=True),\n",
+ "    DOF(BrownianMotion(name=\"brownian2\"), read_only=True, active=False),\n",
+ "]\n",
"\n",
- "tasks = [\n",
- " {\"key\": \"styblinksi-tang\", \"kind\": \"minimize\"},\n",
- "]\n",
+ "objectives = [\n",
+ "    Objective(key=\"himmelblau\", minimize=True),\n",
+ "]\n",
"\n",
- "agent = Agent(\n",
- " dofs=dofs,\n",
- " tasks=tasks,\n",
- " digestion=digestion,\n",
- " db=db,\n",
- ")\n",
+ "agent = Agent(\n",
+ "    dofs=dofs,\n",
+ "    objectives=objectives,\n",
+ "    digestion=functions.constrained_himmelblau_digestion,\n",
+ "    db=db,\n",
+ "    verbose=True,\n",
+ "    tolerate_acquisition_errors=False,\n",
+ ")\n",
"\n",
"RE(agent.learn(\"qr\", n=32))\n",
"\n",
- "agent.plot_tasks()"
+ "agent.plot_objectives()\n",
+ "agent.plot_acquisition()\n",
+ "agent.plot_validity()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "990a877e-f533-419c-bf5d-569ad7e72c6b",
"metadata": {},
"outputs": [],
"source": [
"agent.plot_objectives()"
]
}
],