ennbo 0.1.1__tar.gz → 0.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {ennbo-0.1.1 → ennbo-0.1.2}/PKG-INFO +4 -3
  2. {ennbo-0.1.1 → ennbo-0.1.2}/README.md +2 -1
  3. {ennbo-0.1.1 → ennbo-0.1.2}/examples/demo_enn.ipynb +8 -0
  4. ennbo-0.1.2/examples/demo_morbo_enn.ipynb +253 -0
  5. ennbo-0.1.2/examples/demo_turbo_enn.ipynb +246 -0
  6. {ennbo-0.1.1 → ennbo-0.1.2}/pyproject.toml +2 -2
  7. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/enn/enn.py +71 -31
  8. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/enn/enn_fit.py +26 -24
  9. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/enn/enn_normal.py +3 -2
  10. ennbo-0.1.2/src/enn/enn/enn_params.py +23 -0
  11. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/enn/enn_util.py +40 -12
  12. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/base_turbo_impl.py +53 -7
  13. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/lhd_only_impl.py +7 -0
  14. ennbo-0.1.2/src/enn/turbo/morbo_trust_region.py +189 -0
  15. ennbo-0.1.2/src/enn/turbo/no_trust_region.py +65 -0
  16. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/proposal.py +11 -2
  17. ennbo-0.1.2/src/enn/turbo/turbo_config.py +72 -0
  18. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_enn_impl.py +46 -21
  19. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_gp.py +9 -1
  20. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_mode_impl.py +11 -2
  21. ennbo-0.1.2/src/enn/turbo/turbo_one_impl.py +302 -0
  22. ennbo-0.1.2/src/enn/turbo/turbo_optimizer.py +525 -0
  23. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_trust_region.py +8 -10
  24. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_utils.py +116 -26
  25. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_zero_impl.py +5 -0
  26. {ennbo-0.1.1 → ennbo-0.1.2}/tests/test_enn_core.py +81 -18
  27. {ennbo-0.1.1 → ennbo-0.1.2}/tests/test_enn_fit.py +62 -6
  28. {ennbo-0.1.1 → ennbo-0.1.2}/tests/test_enn_util.py +30 -4
  29. {ennbo-0.1.1 → ennbo-0.1.2}/tests/test_turbo.py +417 -60
  30. ennbo-0.1.1/examples/demo_morbo.ipynb +0 -320
  31. ennbo-0.1.1/examples/demo_turbo_enn.ipynb +0 -299
  32. ennbo-0.1.1/src/enn/enn/enn_params.py +0 -10
  33. ennbo-0.1.1/src/enn/turbo/turbo_config.py +0 -28
  34. ennbo-0.1.1/src/enn/turbo/turbo_one_impl.py +0 -163
  35. ennbo-0.1.1/src/enn/turbo/turbo_optimizer.py +0 -337
  36. {ennbo-0.1.1 → ennbo-0.1.2}/.gitignore +0 -0
  37. {ennbo-0.1.1 → ennbo-0.1.2}/.pre-commit-config.yaml +0 -0
  38. {ennbo-0.1.1 → ennbo-0.1.2}/LICENSE +0 -0
  39. {ennbo-0.1.1 → ennbo-0.1.2}/admin/conda-macos.yml +0 -0
  40. {ennbo-0.1.1 → ennbo-0.1.2}/admin/find_forgotten_py.sh +0 -0
  41. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/__init__.py +0 -0
  42. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/enn/__init__.py +0 -0
  43. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/__init__.py +0 -0
  44. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_gp_base.py +0 -0
  45. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_gp_noisy.py +0 -0
  46. {ennbo-0.1.1 → ennbo-0.1.2}/src/enn/turbo/turbo_mode.py +0 -0
  47. {ennbo-0.1.1 → ennbo-0.1.2}/tests/conftest.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ennbo
- Version: 0.1.1
+ Version: 0.1.2
  Summary: Epistemic Nearest Neighbors
  Project-URL: Homepage, https://github.com/yubo-research/enn
  Project-URL: Source, https://github.com/yubo-research/enn
@@ -39,7 +39,7 @@ Requires-Python: >=3.11
  Requires-Dist: faiss-cpu>=1.9.0
  Requires-Dist: gpytorch==1.13
  Requires-Dist: nds==0.4.3
- Requires-Dist: numpy==1.26.4
+ Requires-Dist: numpy<2.0.0,>=1.26.4
  Requires-Dist: scipy==1.15.3
  Requires-Dist: torch==2.5.1
  Description-Content-Type: text/markdown
@@ -80,9 +80,10 @@ On my MacBook I can run into problems with dependencies and compatibilities.
 
  On MacOS try:
  ```
- micromamba env create -n ennbo -f conda-macos.yml
+ micromamba env create -n ennbo -f admin/conda-macos.yml
  micromamba activate ennbo
  pip install --no-deps ennbo
+ pytest -sv tests
  ```
 
  You may replace `micromamba` with `conda` and this will probably still work.
@@ -34,9 +34,10 @@ On my MacBook I can run into problems with dependencies and compatibilities.
 
  On MacOS try:
  ```
- micromamba env create -n ennbo -f conda-macos.yml
+ micromamba env create -n ennbo -f admin/conda-macos.yml
  micromamba activate ennbo
  pip install --no-deps ennbo
+ pytest -sv tests
  ```
 
  You may replace `micromamba` with `conda` and this will probably still work.
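After installation, a minimal ask/tell loop with the ENN surrogate looks roughly like the sketch below. It only reuses names exercised in the demo notebooks later in this diff (`Turbo`, `TurboMode`, `TurboENNConfig`, `ask`, `tell`); the toy objective, bounds, batch size, and budget are illustrative assumptions, not documented defaults.

```python
# Minimal usage sketch, adapted from examples/demo_turbo_enn.ipynb in this release.
# The objective, bounds, and budget here are illustrative only.
import numpy as np

from enn import Turbo, TurboMode
from enn.turbo.turbo_config import TurboENNConfig


def objective(x: np.ndarray) -> np.ndarray:
    # Toy objective (maximize): negative squared distance from the origin.
    return -(x**2).sum(axis=1)


bounds = np.array([[-5.0, 5.0]] * 10)  # 10-dimensional box
rng = np.random.default_rng(0)
config = TurboENNConfig(k=10, num_fit_samples=100, acq_type="ucb")
optimizer = Turbo(bounds=bounds, mode=TurboMode.TURBO_ENN, rng=rng, config=config)

for _ in range(20):
    x_arms = optimizer.ask(num_arms=10)  # propose a batch of candidate points
    y_obs = objective(x_arms)            # evaluate them
    optimizer.tell(x_arms, y_obs)        # report the observations back
```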
@@ -114,6 +114,14 @@
  "ax.set_ylabel(\"y\")\n",
  "fig.tight_layout()"
  ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7abb3c1a",
+ "metadata": {},
+ "outputs": [],
+ "source": []
  }
  ],
  "metadata": {
@@ -0,0 +1,253 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# MORBO-ENN: Multi-Objective Optimization\n",
+ "\n",
+ "This notebook demonstrates multi-objective Bayesian optimization using the MORBO trust region with an ENN surrogate."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "---"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "\n",
+ "\n",
+ "def ackley_core(\n",
+ "    x: np.ndarray, a: float = 20.0, b: float = 0.2, c: float = 2 * np.pi\n",
+ ") -> np.ndarray:\n",
+ "    if x.ndim == 1:\n",
+ "        x = x[None, :]\n",
+ "    x = x - 1\n",
+ "    term1 = -a * np.exp(-b * np.sqrt((x**2).mean(axis=1)))\n",
+ "    term2 = -np.exp(np.cos(c * x).mean(axis=1))\n",
+ "    return term1 + term2 + a + np.e\n",
+ "\n",
+ "\n",
+ "class DoubleAckley:\n",
+ "    def __init__(self, noise: float, rng):\n",
+ "        self.noise = noise\n",
+ "        self.rng = rng\n",
+ "        self.bounds = [-32.768, 32.768]\n",
+ "\n",
+ "    def __call__(self, x: np.ndarray) -> np.ndarray:\n",
+ "        x = np.asarray(x, dtype=float)\n",
+ "        if x.ndim == 1:\n",
+ "            x = x[None, :]\n",
+ "        n, d = x.shape\n",
+ "        assert d % 2 == 0, \"num_dim must be even for DoubleAckley\"\n",
+ "\n",
+ "        mid = d // 2\n",
+ "        x1 = x[:, :mid]\n",
+ "        x2 = x[:, mid:]\n",
+ "\n",
+ "        y1 = -ackley_core(x1) + self.noise * self.rng.normal(size=n)\n",
+ "        y2 = -ackley_core(x2) + self.noise * self.rng.normal(size=n)\n",
+ "\n",
+ "        return np.stack([y1, y2], axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "from botorch.utils.multi_objective.hypervolume import Hypervolume\n",
+ "from nds import ndomsort\n",
+ "\n",
+ "\n",
+ "def get_pareto_front_indices(y: np.ndarray) -> np.ndarray:\n",
+ "    y = np.asarray(y, dtype=float)\n",
+ "    if y.ndim != 2:\n",
+ "        raise ValueError(y.shape)\n",
+ "    front_ids = ndomsort.non_domin_sort(-y, only_front_indices=True)\n",
+ "    return np.array([i for i, f in enumerate(front_ids) if f == 0], dtype=int)\n",
+ "\n",
+ "\n",
+ "def compute_hypervolume(y: np.ndarray, ref_point: np.ndarray) -> float:\n",
+ "    if len(y) == 0:\n",
+ "        return 0.0\n",
+ "\n",
+ "    pareto_idx = get_pareto_front_indices(y)\n",
+ "    if len(pareto_idx) == 0:\n",
+ "        return 0.0\n",
+ "\n",
+ "    pareto_y = y[pareto_idx]\n",
+ "    pareto_y_tensor = torch.tensor(pareto_y, dtype=torch.float64)\n",
+ "    ref_point_tensor = torch.tensor(ref_point, dtype=torch.float64)\n",
+ "\n",
+ "    hv = Hypervolume(ref_point=ref_point_tensor)\n",
+ "    return hv.compute(pareto_y_tensor)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import time\n",
+ "\n",
+ "from enn import Turbo, TurboMode\n",
+ "from enn.turbo.turbo_config import (\n",
+ "    LHDOnlyConfig,\n",
+ "    TurboENNConfig,\n",
+ "    TurboOneConfig,\n",
+ "    TurboZeroConfig,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "def run_optimization(turbo_mode: TurboMode):\n",
+ "    num_dim = 30\n",
+ "    num_iterations = 100\n",
+ "    num_arms = 10\n",
+ "    noise = 0.1\n",
+ "    ref_point = np.array([-25.0, -25.0])\n",
+ "\n",
+ "    rng = np.random.default_rng(42)\n",
+ "    objective = DoubleAckley(noise=noise, rng=rng)\n",
+ "    bounds = np.array([objective.bounds] * num_dim, dtype=float)\n",
+ "\n",
+ "    common_cfg = dict(k=10, tr_type=\"morbo\", num_metrics=2)\n",
+ "    if turbo_mode == TurboMode.TURBO_ENN:\n",
+ "        config = TurboENNConfig(**common_cfg, num_fit_samples=100, acq_type=\"ucb\")\n",
+ "    elif turbo_mode == TurboMode.TURBO_ONE:\n",
+ "        config = TurboOneConfig(**common_cfg)\n",
+ "    elif turbo_mode == TurboMode.TURBO_ZERO:\n",
+ "        config = TurboZeroConfig(**common_cfg)\n",
+ "    elif turbo_mode == TurboMode.LHD_ONLY:\n",
+ "        config = LHDOnlyConfig(**common_cfg)\n",
+ "    else:\n",
+ "        raise ValueError(turbo_mode)\n",
+ "\n",
+ "    optimizer = Turbo(bounds=bounds, mode=turbo_mode, rng=rng, config=config)\n",
+ "\n",
+ "    all_y = []\n",
+ "    hypervolume_history = []\n",
+ "    proposal_times = []\n",
+ "\n",
+ "    for iteration in range(num_iterations):\n",
+ "        t0 = time.time()\n",
+ "        x_arms = optimizer.ask(num_arms=num_arms)\n",
+ "        proposal_times.append(time.time() - t0)\n",
+ "\n",
+ "        y_obs = objective(x_arms)\n",
+ "        optimizer.tell(x_arms, y_obs)\n",
+ "\n",
+ "        all_y.append(y_obs)\n",
+ "        hv = compute_hypervolume(np.vstack(all_y), ref_point)\n",
+ "        hypervolume_history.append(hv)\n",
+ "\n",
+ "        if iteration % 10 == 0:\n",
+ "            print(f\"{turbo_mode.name} iter {iteration:3d}: HV = {hv:.2f}\")\n",
+ "\n",
+ "    evals = num_arms * np.arange(1, len(hypervolume_history) + 1)\n",
+ "    return hypervolume_history, proposal_times, evals"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "RUN_TURBO_ONE = True\n",
+ "\n",
+ "modes = [TurboMode.TURBO_ENN, TurboMode.LHD_ONLY, TurboMode.TURBO_ZERO]\n",
+ "if RUN_TURBO_ONE:\n",
+ "    modes.append(TurboMode.TURBO_ONE)\n",
+ "\n",
+ "results = {}\n",
+ "for mode in modes:\n",
+ "    hv, times, evals = run_optimization(mode)\n",
+ "    results[mode] = {\"hv\": hv, \"times\": times, \"evals\": evals}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Hypervolume vs Function Evaluations"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "plt.figure(figsize=(10, 6))\n",
+ "for mode, data in results.items():\n",
+ "    plt.plot(data[\"evals\"], data[\"hv\"], linewidth=2, label=mode.name)\n",
+ "plt.xlabel(\"Function Evaluations\")\n",
+ "plt.ylabel(\"Hypervolume\")\n",
+ "plt.legend()\n",
+ "plt.grid(True, alpha=0.3)\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Proposal Time vs Function Evaluations"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.figure(figsize=(10, 6))\n",
+ "for mode, data in results.items():\n",
+ "    plt.plot(data[\"evals\"], data[\"times\"], linewidth=2, label=mode.name)\n",
+ "plt.xlabel(\"Function Evaluations\")\n",
+ "plt.ylabel(\"Proposal Time (seconds)\")\n",
+ "plt.legend()\n",
+ "plt.grid(True, alpha=0.3)\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python",
+ "pygments_lexer": "ipython3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
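The MORBO notebook above tracks progress by hypervolume: the volume of objective space dominated by the observed Pareto front and bounded below by `ref_point`. As a tiny worked example (made-up points, but the same BoTorch `Hypervolume` helper the notebook imports): under maximization, front points (1, 3), (2, 2), (3, 1) with reference point (0, 0) dominate a region of area 3 + 2 + 1 = 6.

```python
# Sanity check of the hypervolume definition used above, with the same BoTorch
# helper the notebook imports. The points and reference are illustrative only.
import torch
from botorch.utils.multi_objective.hypervolume import Hypervolume

pareto_y = torch.tensor([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]], dtype=torch.float64)
ref_point = torch.tensor([0.0, 0.0], dtype=torch.float64)

hv = Hypervolume(ref_point=ref_point)
print(hv.compute(pareto_y))  # expected: 6.0, the area of the dominated region
```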
@@ -0,0 +1,246 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# TuRBO-ENN\n",
+ "\n",
+ "This code implements TuRBO [1], a state-of-the-art (SOTA) Bayesian optimization algorithm.\n",
+ "\n",
+ "The optimization class, `Turbo`, supports four modes of operation.\n",
+ "\n",
+ "**LHD_ONLY** \n",
+ "Generate a Latin Hypercube Design (LHD) for every batch of arms. This is included as a simple baseline.\n",
+ "\n",
+ "**TURBO_ZERO** \n",
+ "Initialize with an LHD. Afterward, sample near the best-so-far x value, x_best. Samples are \"near\" x_best in two senses: (i) they lie in a trust region, an adaptively sized box around x_best, and (ii) they perturb only a small number of dimensions using RAASP sampling [2]. The other dimensions take the same values as in x_best. The num_arms proposals are chosen randomly from RAASP candidates inside the trust region.\n",
+ "\n",
+ "This is included to help differentiate the impact of the trust region from the impact of the surrogate. Notice (below) that the trust region has a large impact.\n",
+ "\n",
+ "**TURBO_ONE** \n",
+ "This adds a GP surrogate to TURBO_ZERO. The num_arms proposals are chosen via Thompson sampling from RAASP candidates inside the trust region. Occasionally, the trust region adapter resets: it (i) discards all observations and (ii) begins anew with an LHD design.\n",
+ "\n",
+ "This is the standard SOTA method. It should match the TuRBO reference [implementation](https://github.com/uber-research/TuRBO).\n",
+ "\n",
+ "**TURBO_ENN** \n",
+ "This replaces the GP surrogate with a simpler, more scalable surrogate called Epistemic Nearest Neighbors (ENN). ENN's proposal time scales as $O(N)$ rather than the $O(N^2)$ of a GP surrogate [3].\n",
+ "\n",
+ "\n",
+ "## References\n",
+ "\n",
+ "1. **Eriksson, D., Pearce, M., Gardner, J. R., Turner, R., & Poloczek, M. (2019).** Scalable Global Optimization via Local Bayesian Optimization. *Advances in Neural Information Processing Systems, 32*. \n",
+ "   https://arxiv.org/abs/1910.01739\n",
+ "\n",
+ "2. **Rashidi, B., Johnstonbaugh, K., & Gao, C. (2024).** Cylindrical Thompson Sampling for High-Dimensional Bayesian Optimization. *Proceedings of The 27th International Conference on Artificial Intelligence and Statistics* (pp. 3502–3510). PMLR. \n",
+ "   https://proceedings.mlr.press/v238/rashidi24a.html\n",
+ "\n",
+ "3. **Sweet, D., & Jadhav, S. A. (2025).** Taking the GP Out of the Loop. *arXiv preprint arXiv:2506.12818*. \n",
+ "   https://arxiv.org/abs/2506.12818\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "---"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "\n",
+ "\n",
+ "class Ackley:\n",
+ "    def __init__(self, noise: float, rng):\n",
+ "        self.noise = noise\n",
+ "        self.rng = rng\n",
+ "        self.a = 20.0\n",
+ "        self.b = 0.2\n",
+ "        self.c = 2 * np.pi\n",
+ "        self.bounds = [-32.768, 32.768]\n",
+ "\n",
+ "    def __call__(self, x):\n",
+ "        x = np.asarray(x, dtype=float)\n",
+ "        if x.ndim == 1:\n",
+ "            x = x[None, :]\n",
+ "        x = x - 1\n",
+ "        y = (\n",
+ "            -self.a * np.exp(-self.b * np.sqrt((x**2).mean(axis=1)))\n",
+ "            - np.exp(np.cos(self.c * x).mean(axis=1))\n",
+ "            + self.a\n",
+ "            + np.e\n",
+ "        )\n",
+ "        result = -y + self.noise * self.rng.normal(size=(x.shape[0],))\n",
+ "        return result if result.ndim > 0 else float(result)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import time\n",
+ "import torch\n",
+ "\n",
+ "from enn import Turbo, TurboMode\n",
+ "from enn.turbo.turbo_config import (\n",
+ "    LHDOnlyConfig,\n",
+ "    TurboENNConfig,\n",
+ "    TurboOneConfig,\n",
+ "    TurboZeroConfig,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "def run_optimization(turbo_mode: TurboMode):\n",
+ "    num_dim = 30\n",
+ "    num_iterations = 100\n",
+ "    num_arms = 10\n",
+ "    noise = 0.1\n",
+ "    use_y_hat = False\n",
+ "\n",
+ "    rng = np.random.default_rng(17)\n",
+ "    torch.manual_seed(19)\n",
+ "    objective = Ackley(noise=noise, rng=rng)\n",
+ "    bounds = np.array([objective.bounds] * num_dim, dtype=float)\n",
+ "\n",
+ "    common_cfg = dict(k=10)\n",
+ "    if turbo_mode == TurboMode.TURBO_ENN:\n",
+ "        config = TurboENNConfig(**common_cfg, num_fit_samples=100, acq_type=\"ucb\")\n",
+ "    elif turbo_mode == TurboMode.TURBO_ONE:\n",
+ "        config = TurboOneConfig(**common_cfg)\n",
+ "    elif turbo_mode == TurboMode.TURBO_ZERO:\n",
+ "        config = TurboZeroConfig(**common_cfg)\n",
+ "    elif turbo_mode == TurboMode.LHD_ONLY:\n",
+ "        config = LHDOnlyConfig(**common_cfg)\n",
+ "    else:\n",
+ "        raise ValueError(turbo_mode)\n",
+ "\n",
+ "    optimizer = Turbo(\n",
+ "        bounds=bounds,\n",
+ "        mode=turbo_mode,\n",
+ "        rng=rng,\n",
+ "        config=config,\n",
+ "    )\n",
+ "\n",
+ "    best_values = []\n",
+ "    proposal_times = []\n",
+ "    y_hat_best = -np.inf\n",
+ "    y_best = -np.inf\n",
+ "\n",
+ "    for iteration in range(num_iterations):\n",
+ "        t_0 = time.time()\n",
+ "        x_arms = optimizer.ask(num_arms=num_arms)\n",
+ "        t_1 = time.time()\n",
+ "        proposal_times.append(t_1 - t_0)\n",
+ "\n",
+ "        y_obs = objective(x_arms)\n",
+ "\n",
+ "        y_hat = optimizer.tell(x_arms, y_obs, y_var=noise**2 * np.ones_like(y_obs))\n",
+ "        if use_y_hat:\n",
+ "            y_decide = y_hat\n",
+ "        else:\n",
+ "            y_decide = y_obs\n",
+ "        i = np.where(y_decide == np.max(y_decide))[0][0]\n",
+ "        y_hat_batch_best = y_decide[i]\n",
+ "\n",
+ "        if y_hat_batch_best > y_hat_best:\n",
+ "            y_hat_best = y_hat_batch_best\n",
+ "            y_best = objective(x_arms[i])[0]\n",
+ "\n",
+ "        best_values.append(y_best)\n",
+ "        if iteration % 10 == 0:\n",
+ "            print(\n",
+ "                f\"{turbo_mode} iteration = {iteration} y_best = {y_best} y_hat_best = {y_hat_best}\"\n",
+ "            )\n",
+ "\n",
+ "    evals = num_arms * np.arange(len(best_values))\n",
+ "    return best_values, proposal_times, evals"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "\n",
+ "RUN_TURBO_ONE = True\n",
+ "\n",
+ "best_values_zero, proposal_times_zero, evals_zero = run_optimization(\n",
+ "    TurboMode.TURBO_ZERO\n",
+ ")\n",
+ "if RUN_TURBO_ONE:\n",
+ "    best_values_one, proposal_times_one, evals_one = run_optimization(\n",
+ "        TurboMode.TURBO_ONE\n",
+ "    )\n",
+ "best_values_enn, proposal_times_enn, evals_enn = run_optimization(TurboMode.TURBO_ENN)\n",
+ "best_values_lhd, proposal_times_lhd, evals_lhd = run_optimization(TurboMode.LHD_ONLY)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.figure(figsize=(10, 6))\n",
+ "plt.plot(evals_zero, best_values_zero, linewidth=2, label=\"TURBO_ZERO\")\n",
+ "plt.plot(evals_enn, best_values_enn, linewidth=2, label=\"TURBO_ENN\")\n",
+ "plt.plot(evals_lhd, best_values_lhd, linewidth=2, label=\"LHD_ONLY\")\n",
+ "if RUN_TURBO_ONE:\n",
+ "    plt.plot(evals_one, best_values_one, linewidth=2, label=\"TURBO_ONE\")\n",
+ "plt.xlabel(\"Function Evaluations\")\n",
+ "plt.ylabel(\"Best Function Value\")\n",
+ "plt.legend()\n",
+ "plt.grid(True, alpha=0.3)\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.figure(figsize=(10, 6))\n",
+ "plt.plot(evals_zero, proposal_times_zero, linewidth=2, label=\"TURBO_ZERO\")\n",
+ "plt.plot(evals_enn, proposal_times_enn, linewidth=2, label=\"TURBO_ENN\")\n",
+ "plt.plot(evals_lhd, proposal_times_lhd, linewidth=2, label=\"LHD_ONLY\")\n",
+ "if RUN_TURBO_ONE:\n",
+ "    plt.plot(evals_one, proposal_times_one, linewidth=2, label=\"TURBO_ONE\")\n",
+ "plt.xlabel(\"Function Evaluations\")\n",
+ "plt.ylabel(\"Proposal Time (seconds)\")\n",
+ "# c = plt.axis()\n",
+ "# plt.axis([c[0], c[1], 0, 5])\n",
+ "plt.legend()\n",
+ "plt.grid(True, alpha=0.3)\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python",
+ "pygments_lexer": "ipython3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
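The TURBO_ZERO description in the notebook above (a trust-region box around x_best plus RAASP-style perturbation of only a few dimensions) can be summarized in a short sketch. This is an illustrative reading of that description, not the package's actual sampler: the helper name `raasp_candidates`, the uniform draw inside the box, and the 20/d perturbation probability (a common choice in TuRBO-style implementations) are all assumptions.

```python
# Illustrative sketch of trust-region + RAASP-style candidate generation, as
# described in the notebook text above. Not the enn package's implementation;
# names and the 20/d perturbation probability are assumptions.
import numpy as np


def raasp_candidates(x_best, length, bounds, num_candidates, rng):
    """Draw candidates inside a box of edge `length` around `x_best`, perturbing
    only a random subset of dimensions; the rest keep x_best's values."""
    d = x_best.shape[0]
    lo = np.maximum(bounds[:, 0], x_best - length / 2.0)
    hi = np.minimum(bounds[:, 1], x_best + length / 2.0)

    p_perturb = min(1.0, 20.0 / d)  # perturb ~20 dimensions on average
    mask = rng.random((num_candidates, d)) < p_perturb
    # Ensure every candidate perturbs at least one dimension.
    empty = ~mask.any(axis=1)
    mask[empty, rng.integers(0, d, size=empty.sum())] = True

    candidates = np.tile(x_best, (num_candidates, 1))
    uniform = lo + (hi - lo) * rng.random((num_candidates, d))
    candidates[mask] = uniform[mask]
    return candidates


# Example: a 30-dimensional box with a trust region covering 10% of each edge.
rng = np.random.default_rng(0)
bounds = np.array([[-32.768, 32.768]] * 30)
x_best = np.zeros(30)
cands = raasp_candidates(x_best, length=6.5536, bounds=bounds, num_candidates=5, rng=rng)
print(cands.shape)  # (5, 30)
```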
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
  [project]
  name = "ennbo"
- version = "0.1.1"
+ version = "0.1.2"
  description = "Epistemic Nearest Neighbors"
  readme = "README.md"
  requires-python = ">=3.11"
@@ -13,7 +13,7 @@ authors = [
  { name = "YUBO Lab", email = "david.sweet@yu.edu" },
  ]
  dependencies = [
- "numpy==1.26.4",
+ "numpy>=1.26.4,<2.0.0",
  "torch==2.5.1",
  "gpytorch==1.13",
  "faiss-cpu>=1.9.0",