sgptools 1.2.0__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sgptools/__init__.py +3 -4
- sgptools/core/__init__.py +1 -0
- sgptools/{models/core → core}/augmented_gpr.py +11 -17
- sgptools/{models/core → core}/augmented_sgpr.py +27 -34
- sgptools/core/osgpr.py +417 -0
- sgptools/core/transformations.py +699 -0
- sgptools/kernels/__init__.py +0 -8
- sgptools/kernels/attentive_kernel.py +214 -69
- sgptools/kernels/neural_kernel.py +268 -92
- sgptools/kernels/neural_network.py +127 -28
- sgptools/methods.py +1047 -0
- sgptools/objectives.py +275 -0
- sgptools/utils/__init__.py +0 -9
- sgptools/utils/data.py +452 -149
- sgptools/utils/gpflow.py +335 -174
- sgptools/utils/metrics.py +375 -102
- sgptools/utils/misc.py +145 -111
- sgptools/utils/tsp.py +224 -84
- sgptools-2.0.0.dist-info/METADATA +216 -0
- sgptools-2.0.0.dist-info/RECORD +23 -0
- {sgptools-1.2.0.dist-info → sgptools-2.0.0.dist-info}/WHEEL +1 -1
- sgptools/models/__init__.py +0 -10
- sgptools/models/bo.py +0 -118
- sgptools/models/cma_es.py +0 -121
- sgptools/models/continuous_sgp.py +0 -68
- sgptools/models/core/__init__.py +0 -9
- sgptools/models/core/osgpr.py +0 -291
- sgptools/models/core/transformations.py +0 -434
- sgptools/models/greedy_mi.py +0 -115
- sgptools/models/greedy_sgp.py +0 -97
- sgptools-1.2.0.dist-info/METADATA +0 -39
- sgptools-1.2.0.dist-info/RECORD +0 -27
- {sgptools-1.2.0.dist-info → sgptools-2.0.0.dist-info/licenses}/LICENSE.txt +0 -0
- {sgptools-1.2.0.dist-info → sgptools-2.0.0.dist-info}/top_level.txt +0 -0
sgptools/methods.py
ADDED
@@ -0,0 +1,1047 @@
import numpy as np
import cma
from copy import deepcopy
from shapely import geometry
from apricot import CustomSelection
from bayes_opt import BayesianOptimization
import gpflow
import tensorflow as tf
from typing import Optional, List, Tuple, Union, Dict, Any, Type

from sgptools.utils.misc import cont2disc, get_inducing_pts
from sgptools.objectives import get_objective
from sgptools.utils.gpflow import optimize_model
from sgptools.core.augmented_sgpr import AugmentedSGPR
from sgptools.core.transformations import Transform  # Import Transform for type hinting


class Method:
    """
    Method class for optimization methods.

    Attributes:
        num_sensing (int): Number of sensing locations to optimize.
        num_robots (int): Number of robots/agents.
        X_objective (np.ndarray): (n, d); Data points used to define the objective function.
        kernel (gpflow.kernels.Kernel): GPflow kernel function.
        noise_variance (float): Data noise variance.
        transform (Optional[Transform]): Transform object to apply to inducing points.
        X_candidates (Optional[np.ndarray]): (c, d); Discrete set of candidate locations for sensor placement.
        num_dim (int): Dimensionality of the sensing locations.
    """

    def __init__(self,
                 num_sensing: int,
                 X_objective: np.ndarray,
                 kernel: gpflow.kernels.Kernel,
                 noise_variance: float,
                 transform: Optional[Transform] = None,
                 num_robots: int = 1,
                 X_candidates: Optional[np.ndarray] = None,
                 num_dim: Optional[int] = None,
                 **kwargs: Any):
        """
        Initializes the Method class.

        Args:
            num_sensing (int): Number of sensing locations to optimize.
            X_objective (np.ndarray): (n, d); Data points used to define the objective function.
            kernel (gpflow.kernels.Kernel): GPflow kernel function.
            noise_variance (float): Data noise variance.
            transform (Optional[Transform]): Transform object to apply to inducing points. Defaults to None.
            num_robots (int): Number of robots/agents. Defaults to 1.
            X_candidates (Optional[np.ndarray]): (c, d); Discrete set of candidate locations for sensor placement.
                                                 Defaults to None.
            num_dim (Optional[int]): Dimensionality of the sensing locations. Defaults to the dimensionality of X_objective.
            **kwargs: Additional keyword arguments.
        """
        self.num_sensing = num_sensing
        self.num_robots = num_robots
        self.X_candidates = X_candidates
        if num_dim is None:
            self.num_dim = X_objective.shape[-1]
        else:
            self.num_dim = num_dim

    def optimize(self) -> np.ndarray:
        """
        Optimizes the sensor placements/path(s).

        Raises:
            NotImplementedError: This method must be implemented by subclasses.

        Returns:
            np.ndarray: (num_robots, num_sensing, num_dim); Optimized sensing locations.
        """
        raise NotImplementedError

    def update(self, kernel: gpflow.kernels.Kernel,
               noise_variance: float) -> None:
        """
        Updates the kernel and noise variance parameters of the underlying model/objective.

        Args:
            kernel (gpflow.kernels.Kernel): Updated GPflow kernel function.
            noise_variance (float): Updated data noise variance.

        Raises:
            NotImplementedError: This method must be implemented by subclasses.
        """
        raise NotImplementedError

    def get_hyperparameters(self) -> Tuple[gpflow.kernels.Kernel, float]:
        """
        Retrieves the current kernel and noise variance hyperparameters.

        Raises:
            NotImplementedError: This method must be implemented by subclasses.

        Returns:
            Tuple[gpflow.kernels.Kernel, float]: A tuple containing the kernel and noise variance.
        """
        raise NotImplementedError


class BayesianOpt(Method):
    """
    Implements informative sensor placement/path optimization using Bayesian Optimization.

    This method optimizes a given objective function (e.g., Mutual Information)
    by sampling and evaluating points in the search space, building a surrogate
    model, and using an acquisition function to guide further sampling.

    Refer to the following papers for more details:
    - UAV route planning for active disease classification [Vivaldini et al., 2019]
    - Occupancy map building through Bayesian exploration [Francis et al., 2019]

    Attributes:
        objective (object): The objective function to be optimized.
        transform (Optional[Transform]): Transform object applied to inducing points.
        pbounds (Dict[str, Tuple[float, float]]): Dictionary defining the search space bounds.
    """

    def __init__(self,
                 num_sensing: int,
                 X_objective: np.ndarray,
                 kernel: gpflow.kernels.Kernel,
                 noise_variance: float,
                 transform: Optional[Transform] = None,
                 num_robots: int = 1,
                 X_candidates: Optional[np.ndarray] = None,
                 num_dim: Optional[int] = None,
                 objective: Union[str, Any] = 'SLogMI',
                 **kwargs: Any):
        """
        Initializes the BayesianOpt optimizer.

        Args:
            num_sensing (int): Number of sensing locations to optimize.
            X_objective (np.ndarray): (n, d); Data points used to define the objective function.
            kernel (gpflow.kernels.Kernel): GPflow kernel function.
            noise_variance (float): Data noise variance.
            transform (Optional[Transform]): Transform object to apply to inducing points. Defaults to None.
            num_robots (int): Number of robots/agents. Defaults to 1.
            X_candidates (Optional[np.ndarray]): (c, d); Discrete set of candidate locations for sensor placement.
                                                 Defaults to None.
            num_dim (Optional[int]): Dimensionality of the sensing locations. Defaults to the dimensionality of X_objective.
            objective (Union[str, Any]): The objective function to use. Can be a string ('SLogMI', 'MI')
                                         or an instance of an objective class. Defaults to 'SLogMI'.
            **kwargs: Additional keyword arguments passed to the objective function.
        """
        super().__init__(num_sensing, X_objective, kernel, noise_variance,
                         transform, num_robots, X_candidates, num_dim)
        self.transform = transform

        if isinstance(objective, str):
            self.objective = get_objective(objective)(X_objective, kernel,
                                                      noise_variance, **kwargs)
        else:
            self.objective = objective

        # Use the boundaries of the X_objective area as the search space limits
        pbounds_dims: List[Tuple[float, float]] = []
        for i in range(self.num_dim):
            pbounds_dims.append(
                (np.min(X_objective[:, i]), np.max(X_objective[:, i])))
        self.pbounds: Dict[str, Tuple[float, float]] = {}
        for i in range(self.num_dim * self.num_sensing * self.num_robots):
            self.pbounds[f'x{i}'] = pbounds_dims[i % self.num_dim]

    def update(self, kernel: gpflow.kernels.Kernel,
               noise_variance: float) -> None:
        """
        Updates the kernel and noise variance parameters of the objective function.

        Args:
            kernel (gpflow.kernels.Kernel): Updated GPflow kernel function.
            noise_variance (float): Updated data noise variance.
        """
        self.objective.update(kernel, noise_variance)

    def get_hyperparameters(self) -> Tuple[gpflow.kernels.Kernel, float]:
        """
        Retrieves the current kernel and noise variance hyperparameters from the objective.

        Returns:
            Tuple[gpflow.kernels.Kernel, float]: A tuple containing a deep copy of the kernel and the noise variance.
        """
        return deepcopy(self.objective.kernel), \
               self.objective.noise_variance

    def optimize(self,
                 max_steps: int = 50,
                 init_points: int = 10,
                 verbose: bool = False,
                 seed: Optional[int] = None,
                 **kwargs: Any) -> np.ndarray:
        """
        Optimizes the sensor placement/path using Bayesian Optimization.

        Args:
            max_steps (int): Maximum number of optimization steps (iterations). Defaults to 50.
            init_points (int): Number of random exploration steps before Bayesian Optimization starts. Defaults to 10.
            verbose (bool): Verbosity, if True additional details will be reported. Defaults to False.
            seed (Optional[int]): Random seed for reproducibility. Defaults to None.
            **kwargs: Additional keyword arguments for the optimizer.

        Returns:
            np.ndarray: (num_robots, num_sensing, num_dim); Optimized sensing locations.

        Usage:
            ```python
            # Assuming X_train, candidates, kernel_opt, noise_variance_opt are defined
            bo_method = BayesianOpt(
                num_sensing=10,
                X_objective=X_train,
                kernel=kernel_opt,
                noise_variance=noise_variance_opt,
                transform=IPPTransform(num_robots=1),  # Example transform
                X_candidates=candidates
            )
            optimized_solution = bo_method.optimize(max_steps=50, init_points=10)
            ```
        """
        verbose = 1 if verbose else 0
        optimizer = BayesianOptimization(f=self._objective,
                                         pbounds=self.pbounds,
                                         verbose=verbose,
                                         random_state=seed,
                                         allow_duplicate_points=True)
        optimizer.maximize(init_points=init_points, n_iter=max_steps)

        sol: List[float] = []
        for i in range(self.num_dim * self.num_sensing * self.num_robots):
            sol.append(optimizer.max['params'][f'x{i}'])

        sol_np = np.array(sol).reshape(-1, self.num_dim)
        if self.transform is not None:
            try:
                sol_np = self.transform.expand(sol_np,
                                               expand_sensor_model=False)
            except TypeError:
                pass

        if not isinstance(sol_np, np.ndarray):
            sol_np = sol_np.numpy()

        # Map solution locations to candidates set locations if X_candidates is provided
        if self.X_candidates is not None:
            sol_np = cont2disc(sol_np, self.X_candidates)

        sol_np = sol_np.reshape(self.num_robots, -1, self.num_dim)
        return sol_np

    def _objective(self, **kwargs: float) -> float:
        """
        Internal objective function to be maximized by the Bayesian Optimization.

        This function reshapes the input parameters from the optimizer, applies
        any specified transformations, calculates the objective value, and
        applies a penalty for constraint violations.

        Args:
            **kwargs: Keyword arguments where keys are 'x0', 'x1', ..., representing
                      the flattened sensor placement coordinates.

        Returns:
            float: The objective value (reward - constraint penalty) to be maximized.
        """
        X_list: List[float] = []
        for i in range(len(kwargs)):
            X_list.append(kwargs[f'x{i}'])
        X = np.array(X_list).reshape(-1, self.num_dim)

        constraint_penality: float = 0.0
        if self.transform is not None:
            X_expanded = self.transform.expand(X)
            constraint_penality = self.transform.constraints(X)
            reward = self.objective(X_expanded)  # maximize
        else:
            reward = self.objective(X)  # maximize

        reward += constraint_penality  # minimize (large negative value when constraint is unsatisfied)
        return reward.numpy()


class CMA(Method):
    """
    Implements informative sensor placement/path optimization using CMA-ES (Covariance Matrix Adaptation Evolution Strategy).

    CMA-ES is a powerful black-box optimization algorithm for non-convex problems.

    Refer to the following paper for more details:
    - Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]

    Attributes:
        objective (object): The objective function to be minimized/maximized.
        transform (Optional[Transform]): Transform object applied to inducing points.
        X_init (np.ndarray): Initial solution guess for the optimization.
        pbounds (geometry.MultiPoint): The convex hull of the objective area, used implicitly for bounds.
    """

    def __init__(self,
                 num_sensing: int,
                 X_objective: np.ndarray,
                 kernel: gpflow.kernels.Kernel,
                 noise_variance: float,
                 transform: Optional[Transform] = None,
                 num_robots: int = 1,
                 X_candidates: Optional[np.ndarray] = None,
                 num_dim: Optional[int] = None,
                 objective: Union[str, Any] = 'SLogMI',
                 X_init: Optional[np.ndarray] = None,
                 **kwargs: Any):
        """
        Initializes the CMA-ES optimizer.

        Args:
            num_sensing (int): Number of sensing locations to optimize.
            X_objective (np.ndarray): (n, d); Data points used to define the objective function.
            kernel (gpflow.kernels.Kernel): GPflow kernel function.
            noise_variance (float): Data noise variance.
            transform (Optional[Transform]): Transform object to apply to inducing points. Defaults to None.
            num_robots (int): Number of robots/agents. Defaults to 1.
            X_candidates (Optional[np.ndarray]): (c, d); Discrete set of candidate locations for sensor placement.
                                                 Defaults to None.
            num_dim (Optional[int]): Dimensionality of the sensing locations. Defaults to the dimensionality of X_objective.
            objective (Union[str, Any]): The objective function to use. Can be a string ('SLogMI', 'MI')
                                         or an instance of an objective class. Defaults to 'SLogMI'.
            X_init (Optional[np.ndarray]): (num_sensing * num_robots, num_dim); Initial guess for sensing locations.
                                           If None, initial points are randomly selected from X_objective.
            **kwargs: Additional keyword arguments passed to the objective function.
        """
        super().__init__(num_sensing, X_objective, kernel, noise_variance,
                         transform, num_robots, X_candidates, num_dim)
        self.transform = transform
        if X_init is None:
            X_init = get_inducing_pts(X_objective,
                                      num_sensing * self.num_robots)
        else:
            # override num_dim with initial inducing points dim, in case it differs from X_objective dim
            self.num_dim = X_init.shape[-1]

        self.X_init: np.ndarray = X_init.reshape(-1)  # Flattened initial guess

        if isinstance(objective, str):
            self.objective = get_objective(objective)(X_objective, kernel,
                                                      noise_variance, **kwargs)
        else:
            self.objective = objective

        # Use the boundaries of the X_objective area as the search space limits
        self.pbounds = geometry.MultiPoint([[p[0], p[1]]
                                            for p in X_objective]).convex_hull

    def update(self, kernel: gpflow.kernels.Kernel,
               noise_variance: float) -> None:
        """
        Updates the kernel and noise variance parameters of the objective function.

        Args:
            kernel (gpflow.kernels.Kernel): Updated GPflow kernel function.
            noise_variance (float): Updated data noise variance.
        """
        self.objective.update(kernel, noise_variance)

    def get_hyperparameters(self) -> Tuple[gpflow.kernels.Kernel, float]:
        """
        Retrieves the current kernel and noise variance hyperparameters from the objective.

        Returns:
            Tuple[gpflow.kernels.Kernel, float]: A tuple containing a deep copy of the kernel and the noise variance.
        """
        return deepcopy(self.objective.kernel), \
               self.objective.noise_variance

    def optimize(self,
                 max_steps: int = 500,
                 tol: float = 1e-6,
                 verbose: bool = False,
                 seed: Optional[int] = None,
                 restarts: int = 5,
                 **kwargs: Any) -> np.ndarray:
        """
        Optimizes the sensor placement/path using CMA-ES.

        Args:
            max_steps (int): Maximum number of optimization steps (function evaluations). Defaults to 500.
            tol (float): Tolerance for termination. Defaults to 1e-6.
            verbose (bool): Verbosity, if True additional details will be reported. Defaults to False.
            seed (Optional[int]): Random seed for reproducibility. Defaults to None.
            restarts (int): Number of restarts for CMA-ES. Defaults to 5.
            **kwargs: Additional keyword arguments for CMA-ES.

        Returns:
            np.ndarray: (num_robots, num_sensing, num_dim); Optimized sensing locations.

        Usage:
            ```python
            # Assuming X_train, candidates, kernel_opt, noise_variance_opt are defined
            cma_method = CMA(
                num_sensing=10,
                X_objective=X_train,
                kernel=kernel_opt,
                noise_variance=noise_variance_opt,
                transform=IPPTransform(num_robots=1),  # Example transform
                X_candidates=candidates
            )
            optimized_solution = cma_method.optimize(max_steps=1000)
            ```
        """
        sigma0 = 1.0
        verbose = 1 if verbose else 0
        sol, _ = cma.fmin2(self._objective,
                           self.X_init,
                           sigma0,
                           options={
                               'maxfevals': max_steps,
                               'verb_disp': verbose,
                               'tolfun': tol,
                               'seed': seed
                           },
                           restarts=restarts)

        sol_np = np.array(sol).reshape(-1, self.num_dim)
        if self.transform is not None:
            try:
                sol_np = self.transform.expand(sol_np,
                                               expand_sensor_model=False)
            except TypeError:
                pass
        if not isinstance(sol_np, np.ndarray):
            sol_np = sol_np.numpy()

        # Map solution locations to candidates set locations if X_candidates is provided
        if self.X_candidates is not None:
            sol_np = cont2disc(sol_np, self.X_candidates)

        sol_np = sol_np.reshape(self.num_robots, -1, self.num_dim)
        return sol_np

    def _objective(self, X: np.ndarray) -> float:
        """
        Internal objective function to be minimized by CMA-ES.

        This function reshapes the input array, applies any specified transformations,
        calculates the objective value, and applies a penalty for constraint violations.
        Note: CMA-ES minimizes, so the reward (which is to be maximized) is returned as negative.

        Args:
            X (np.ndarray): (num_sensing * num_robots * num_dim); Flattened array of
                            current solution sensor placement locations.

        Returns:
            float: The negative objective value (-reward + constraint penalty) to be minimized.
        """
        X_reshaped = np.array(X).reshape(-1, self.num_dim)
        constraint_penality: float = 0.0
        if self.transform is not None:
            X_expanded = self.transform.expand(X_reshaped)
            constraint_penality = self.transform.constraints(X_reshaped)
            reward = self.objective(X_expanded)  # maximize
        else:
            reward = self.objective(X_reshaped)  # maximize

        reward += constraint_penality  # minimize (large negative value when constraint is unsatisfied)
        return -reward.numpy()  # Return negative as CMA-ES minimizes

    def update_transform(self, transform: Transform) -> None:
        """
        Updates the transform object used by the CMA-ES optimizer.

        Args:
            transform (Transform): The new transform object.
        """
        self.transform = transform

    def get_transform(self) -> Transform:
        """
        Retrieves a deep copy of the transform object.

        Returns:
            Transform: A deep copy of the transform object.
        """
        return deepcopy(self.transform)


class ContinuousSGP(Method):
    """
    Implements informative sensor placement/path optimization using a Sparse Gaussian Process (SGP).

    This method optimizes the inducing points of an SGP model to maximize the ELBO or other SGP-related objectives.

    Refer to the following papers for more details:
    - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]
    - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [[Jakkala and Akella, 2024](https://www.itskalvik.com/publication/sgp-ipp/)]

    Attributes:
        sgpr (AugmentedSGPR): The Augmented Sparse Gaussian Process Regression model.
    """

    def __init__(self,
                 num_sensing: int,
                 X_objective: np.ndarray,
                 kernel: gpflow.kernels.Kernel,
                 noise_variance: float,
                 transform: Optional[Transform] = None,
                 num_robots: int = 1,
                 X_candidates: Optional[np.ndarray] = None,
                 num_dim: Optional[int] = None,
                 X_init: Optional[np.ndarray] = None,
                 X_time: Optional[np.ndarray] = None,
                 orientation: bool = False,
                 **kwargs: Any):
        """
        Initializes the ContinuousSGP optimizer.

        Args:
            num_sensing (int): Number of sensing locations (inducing points) to optimize.
            X_objective (np.ndarray): (n, d); Data points used to approximate the bounds of the environment.
            kernel (gpflow.kernels.Kernel): GPflow kernel function.
            noise_variance (float): Data noise variance.
            transform (Optional[Transform]): Transform object to apply to inducing points. Defaults to None.
            num_robots (int): Number of robots/agents. Defaults to 1.
            X_candidates (Optional[np.ndarray]): (c, d); Discrete set of candidate locations for sensor placement.
                                                 Defaults to None.
            num_dim (Optional[int]): Dimensionality of the sensing locations. Defaults to the dimensionality of X_objective.
            X_init (Optional[np.ndarray]): (num_sensing * num_robots, d); Initial inducing points.
                                           If None, initial points are randomly selected from X_objective.
            X_time (Optional[np.ndarray]): (m, d); Temporal dimensions of the inducing points, used when
                                           modeling spatio-temporal IPP. Defaults to None.
            orientation (bool): If True, adds an additional dimension to model sensor FoV rotation angle
                                when selecting initial inducing points. Defaults to False.
            **kwargs: Additional keyword arguments.
        """
        super().__init__(num_sensing, X_objective, kernel, noise_variance,
                         transform, num_robots, X_candidates, num_dim)
        if X_init is None:
            X_init = get_inducing_pts(X_objective,
                                      num_sensing * self.num_robots,
                                      orientation=orientation)
        else:
            # override num_dim with initial inducing points dim, in case it differs from X_objective dim
            self.num_dim = X_init.shape[-1]

        # Fit the SGP
        dtype = X_objective.dtype
        train_set: Tuple[tf.Tensor, tf.Tensor] = (tf.constant(X_objective,
                                                              dtype=dtype),
                                                  tf.zeros(
                                                      (len(X_objective), 1),
                                                      dtype=dtype))
        self.sgpr = AugmentedSGPR(train_set,
                                  noise_variance=noise_variance,
                                  kernel=kernel,
                                  inducing_variable=X_init,
                                  inducing_variable_time=X_time,
                                  transform=transform)

    def update(self, kernel: gpflow.kernels.Kernel,
               noise_variance: float) -> None:
        """
        Updates the kernel and noise variance parameters of the SGP model.

        Args:
            kernel (gpflow.kernels.Kernel): Updated GPflow kernel function.
            noise_variance (float): Updated data noise variance.
        """
        self.sgpr.update(kernel, noise_variance)

    def get_hyperparameters(self) -> Tuple[gpflow.kernels.Kernel, float]:
        """
        Retrieves the current kernel and noise variance hyperparameters from the SGP model.

        Returns:
            Tuple[gpflow.kernels.Kernel, float]: A tuple containing a deep copy of the kernel and the noise variance.
        """
        return deepcopy(self.sgpr.kernel), \
               self.sgpr.likelihood.variance.numpy()

    def optimize(self,
                 max_steps: int = 500,
                 optimizer: str = 'scipy.L-BFGS-B',
                 verbose: bool = False,
                 **kwargs: Any) -> np.ndarray:
        """
        Optimizes the inducing points of the SGP model.

        Args:
            max_steps (int): Maximum number of optimization steps. Defaults to 500.
            optimizer (str): Optimizer "<backend>.<method>" to use for training (e.g., 'scipy.L-BFGS-B', 'tf.adam').
                             Defaults to 'scipy.L-BFGS-B'.
            verbose (bool): Verbosity, if True additional details will be reported. Defaults to False.
            **kwargs: Additional keyword arguments for the optimizer.

        Returns:
            np.ndarray: (num_robots, num_sensing, num_dim); Optimized inducing points (sensing locations).

        Usage:
            ```python
            # Assuming X_train, candidates, kernel_opt, noise_variance_opt are defined
            csgp_method = ContinuousSGP(
                num_sensing=10,
                X_objective=dataset.X_train,
                kernel=kernel_opt,
                noise_variance=noise_variance_opt,
                transform=IPPTransform(num_robots=1),  # Example transform
                X_candidates=candidates  # Only if the solution needs to be mapped to candidates
            )
            optimized_solution = csgp_method.optimize(max_steps=500, optimizer='scipy.L-BFGS-B')
            ```
        """
        _ = optimize_model(
            self.sgpr,
            max_steps=max_steps,
            optimize_hparams=False,  # Inducing points are optimized, not kernel hyperparameters
            optimizer=optimizer,
            verbose=verbose,
            **kwargs)

        sol: tf.Tensor = self.sgpr.inducing_variable.Z
        try:
            sol_expanded = self.transform.expand(sol,
                                                 expand_sensor_model=False)
        except TypeError:
            sol_expanded = sol
        if not isinstance(sol_expanded, np.ndarray):
            sol_np = sol_expanded.numpy()
        else:
            sol_np = sol_expanded

        # Map solution locations to candidates set locations if X_candidates is provided
        if self.X_candidates is not None:
            sol_np = cont2disc(sol_np, self.X_candidates)

        sol_np = sol_np.reshape(self.num_robots, -1, self.num_dim)
        return sol_np

    @property
    def transform(self) -> Transform:
        """
        Gets the transform object associated with the SGP model.

        Returns:
            Transform: The transform object.
        """
        return self.sgpr.transform


class GreedyObjective(Method):
    """
    Implements informative sensor placement/path optimization using a greedy approach based on a specified objective function.

    This method iteratively selects the best sensing location from a set of candidates
    that maximizes the objective function. It currently supports only single-robot scenarios.

    Refer to the following papers for more details:
    - Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
    - Data-driven learning and planning for environmental sampling [Ma et al., 2018]

    Attributes:
        objective (object): The objective function to be maximized (e.g., Mutual Information).
        transform (Optional[Transform]): Transform object applied to selected locations.
    """

    def __init__(self,
                 num_sensing: int,
                 X_objective: np.ndarray,
                 kernel: gpflow.kernels.Kernel,
                 noise_variance: float,
                 transform: Optional[Transform] = None,
                 num_robots: int = 1,
                 X_candidates: Optional[np.ndarray] = None,
                 num_dim: Optional[int] = None,
                 objective: Union[str, Any] = 'SLogMI',
                 **kwargs: Any):
        """
        Initializes the GreedyObjective optimizer.

        Args:
            num_sensing (int): Number of sensing locations to select.
            X_objective (np.ndarray): (n, d); Data points used to define the objective function.
            kernel (gpflow.kernels.Kernel): GPflow kernel function.
            noise_variance (float): Data noise variance.
            transform (Optional[Transform]): Transform object to apply to inducing points. Defaults to None.
            num_robots (int): Number of robots/agents. Defaults to 1.
            X_candidates (Optional[np.ndarray]): (c, d); Discrete set of candidate locations for sensor placement.
                                                 If None, X_objective is used as candidates.
            num_dim (Optional[int]): Dimensionality of the sensing locations. Defaults to the dimensionality of X_objective.
            objective (Union[str, Any]): The objective function to use. Can be a string ('SLogMI', 'MI')
                                         or an instance of an objective class. Defaults to 'SLogMI'.
            **kwargs: Additional keyword arguments passed to the objective function.
        """
        super().__init__(num_sensing, X_objective, kernel, noise_variance,
                         transform, num_robots, X_candidates, num_dim)
        self.X_objective = X_objective
        if X_candidates is None:
            self.X_candidates = X_objective  # Default candidates to objective points

        if transform is not None:
            try:
                num_robots_transform = transform.num_robots
            except AttributeError:
                num_robots_transform = 1  # Assume single robot if num_robots not defined in transform
            error = f"num_robots is not equal in transform: {num_robots_transform} and GreedyObjective: {self.num_robots}"
            assert self.num_robots == num_robots_transform, error

        error = f"num_robots={self.num_robots}; GreedyObjective only supports num_robots=1"
        assert self.num_robots == 1, error

        self.transform = transform

        if isinstance(objective, str):
            self.objective = get_objective(objective)(X_objective, kernel,
                                                      noise_variance, **kwargs)
        else:
            self.objective = objective

    def update(self, kernel: gpflow.kernels.Kernel,
               noise_variance: float) -> None:
        """
        Updates the kernel and noise variance parameters of the objective function.

        Args:
            kernel (gpflow.kernels.Kernel): Updated GPflow kernel function.
            noise_variance (float): Updated data noise variance.
        """
        self.objective.update(kernel, noise_variance)

    def get_hyperparameters(self) -> Tuple[gpflow.kernels.Kernel, float]:
        """
        Retrieves the current kernel and noise variance hyperparameters from the objective.

        Returns:
            Tuple[gpflow.kernels.Kernel, float]: A tuple containing a deep copy of the kernel and the noise variance.
        """
        return deepcopy(self.objective.kernel), \
               self.objective.noise_variance

    def optimize(self,
                 optimizer: str = 'naive',
                 verbose: bool = False,
                 **kwargs: Any) -> np.ndarray:
        """
        Optimizes sensor placement using a greedy approach.

        Args:
            optimizer (str): The greedy optimizer strategy (e.g., 'naive', 'lazy'). Defaults to 'naive'.
            verbose (bool): Verbosity, if True additional details will be reported. Defaults to False.
            **kwargs: Additional keyword arguments.

        Returns:
            np.ndarray: (num_robots, num_sensing, num_dim); Optimized sensing locations.

        Usage:
            ```python
            # Assuming X_train, candidates, kernel_opt, noise_variance_opt are defined
            greedy_obj_method = GreedyObjective(
                num_sensing=5,
                X_objective=X_train,
                kernel=kernel_opt,
                noise_variance=noise_variance_opt,
                X_candidates=candidates
            )
            optimized_solution = greedy_obj_method.optimize(optimizer='naive')
            ```
        """
        model = CustomSelection(self.num_sensing,
                                self._objective,
                                optimizer=optimizer,
                                verbose=verbose)

        # apricot's CustomSelection expects indices, so pass a dummy array of indices
        sol_indices = model.fit_transform(
            np.arange(len(self.X_candidates)).reshape(-1, 1))
        sol_indices = np.array(sol_indices).reshape(-1).astype(int)
        sol_locations = self.X_candidates[sol_indices]

        sol_locations = np.array(sol_locations).reshape(-1, self.num_dim)
        if self.transform is not None:
            try:
                sol_locations = self.transform.expand(
                    sol_locations, expand_sensor_model=False)
            except TypeError:
                pass
        if not isinstance(sol_locations, np.ndarray):
            sol_locations = sol_locations.numpy()
        sol_locations = sol_locations.reshape(self.num_robots, -1,
                                              self.num_dim)
        return sol_locations

    def _objective(self, X_indices: np.ndarray) -> float:
        """
        Internal objective function for the greedy selection.

        This function maps the input indices to actual locations, applies any
        transformations, calculates the objective value, and applies a penalty
        for constraint violations.

        Args:
            X_indices (np.ndarray): (n, 1); Array of indices corresponding to candidate locations.

        Returns:
            float: The objective value (reward - constraint penalty) for the given selection.
        """
        # Map solution location indices to locations
        X_indices_flat = np.array(X_indices).reshape(-1).astype(int)
        X_locations = self.X_objective[X_indices_flat].reshape(
            -1, self.num_dim)

        constraint_penality: float = 0.0
        if self.transform is not None:
            X_expanded = self.transform.expand(X_locations)
            constraint_penality = self.transform.constraints(X_locations)
            reward = self.objective(X_expanded)  # maximize
        else:
            reward = self.objective(X_locations)  # maximize

        reward -= constraint_penality  # minimize
        return reward.numpy()


class GreedySGP(Method):
    """
    Implements informative sensor placement/path optimization using a greedy approach combined with a Sparse Gaussian Process (SGP) ELBO objective.

    This method iteratively selects inducing points to maximize the SGP's ELBO.
    It currently supports only single-robot scenarios.

    Refer to the following papers for more details:
    - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]

    Attributes:
        sgpr (AugmentedSGPR): The Augmented Sparse Gaussian Process Regression model.
    """

    def __init__(self,
                 num_sensing: int,
                 X_objective: np.ndarray,
                 kernel: gpflow.kernels.Kernel,
                 noise_variance: float,
                 transform: Optional[Transform] = None,
                 num_robots: int = 1,
                 X_candidates: Optional[np.ndarray] = None,
                 num_dim: Optional[int] = None,
                 **kwargs: Any):
        """
        Initializes the GreedySGP optimizer.

        Args:
            num_sensing (int): Number of sensing locations (inducing points) to select.
            X_objective (np.ndarray): (n, d); Data points used to train the SGP model.
            kernel (gpflow.kernels.Kernel): GPflow kernel function.
            noise_variance (float): Data noise variance.
            transform (Optional[Transform]): Transform object to apply to inducing points. Defaults to None.
            num_robots (int): Number of robots/agents. Defaults to 1.
            X_candidates (Optional[np.ndarray]): (c, d); Discrete set of candidate locations for sensor placement.
                                                 If None, X_objective is used as candidates.
            num_dim (Optional[int]): Dimensionality of the sensing locations. Defaults to the dimensionality of X_objective.
            **kwargs: Additional keyword arguments.
        """
        super().__init__(num_sensing, X_objective, kernel, noise_variance,
                         transform, num_robots, X_candidates, num_dim)
        self.X_objective = X_objective
        if X_candidates is None:
            self.X_candidates = X_objective  # Default candidates to objective points

        if transform is not None:
            try:
                num_robots_transform = transform.num_robots
            except AttributeError:
                num_robots_transform = 1  # Assume single robot if num_robots not defined in transform
            error = f"num_robots is not equal in transform: {num_robots_transform} and GreedySGP: {self.num_robots}"
            assert self.num_robots == num_robots_transform, error

        error = f"num_robots={self.num_robots}; GreedySGP only supports num_robots=1"
        assert self.num_robots == 1, error

        # Fit the SGP
        dtype = X_objective.dtype
        train_set: Tuple[tf.Tensor, tf.Tensor] = (tf.constant(X_objective,
                                                              dtype=dtype),
                                                  tf.zeros(
                                                      (len(X_objective), 1),
                                                      dtype=dtype))

        X_init = get_inducing_pts(X_objective, num_sensing)
        self.sgpr = AugmentedSGPR(train_set,
                                  noise_variance=noise_variance,
                                  kernel=kernel,
                                  inducing_variable=X_init,
                                  transform=transform)

    def update(self, kernel: gpflow.kernels.Kernel,
               noise_variance: float) -> None:
        """
        Updates the kernel and noise variance parameters of the SGP model.

        Args:
            kernel (gpflow.kernels.Kernel): Updated GPflow kernel function.
            noise_variance (float): Updated data noise variance.
        """
        self.sgpr.update(kernel, noise_variance)

    def get_hyperparameters(self) -> Tuple[gpflow.kernels.Kernel, float]:
        """
        Retrieves the current kernel and noise variance hyperparameters from the SGP model.

        Returns:
            Tuple[gpflow.kernels.Kernel, float]: A tuple containing a deep copy of the kernel and the noise variance.
        """
        return deepcopy(self.sgpr.kernel), \
               self.sgpr.likelihood.variance.numpy()

    def optimize(self,
                 optimizer: str = 'naive',
                 verbose: bool = False,
                 **kwargs: Any) -> np.ndarray:
        """
        Optimizes sensor placement using a greedy SGP approach.

        Args:
            optimizer (str): The greedy optimizer strategy (e.g., 'naive', 'lazy'). Defaults to 'naive'.
            verbose (bool): Verbosity, if True additional details will be reported. Defaults to False.
            **kwargs: Additional keyword arguments.

        Returns:
            np.ndarray: (num_robots, num_sensing, num_dim); Optimized sensing locations.

        Usage:
            ```python
            # Assuming X_train, candidates, kernel_opt, noise_variance_opt are defined
            greedy_sgp_method = GreedySGP(
                num_sensing=5,
                X_objective=X_train,
                kernel=kernel_opt,
                noise_variance=noise_variance_opt,
                X_candidates=candidates
            )
            optimized_solution = greedy_sgp_method.optimize(optimizer='naive')
            ```
        """
        model = CustomSelection(self.num_sensing,
                                self._objective,
                                optimizer=optimizer,
                                verbose=verbose)

        # apricot's CustomSelection expects indices, so pass a dummy array of indices
        sol_indices = model.fit_transform(
            np.arange(len(self.X_candidates)).reshape(-1, 1))
        sol_indices = np.array(sol_indices).reshape(-1).astype(int)
        sol_locations = self.X_candidates[sol_indices]

        sol_locations = np.array(sol_locations).reshape(-1, self.num_dim)
        try:
            sol_expanded = self.transform.expand(sol_locations,
                                                 expand_sensor_model=False)
        except AttributeError:
            sol_expanded = sol_locations
        if not isinstance(sol_expanded, np.ndarray):
            sol_np = sol_expanded.numpy()
        else:
            sol_np = sol_expanded

        sol_np = sol_np.reshape(self.num_robots, -1, self.num_dim)
        return sol_np

    def _objective(self, X_indices: np.ndarray) -> float:
        """
        Internal objective function for the greedy SGP selection.

        This function maps the input indices to actual locations and updates
        the SGP model's inducing points to calculate the ELBO. The ELBO is
        then used as the objective for greedy maximization.

        Args:
            X_indices (np.ndarray): (n, 1); Array of indices corresponding to candidate locations.

        Returns:
            float: The ELBO of the SGP model for the given inducing points.
        """
        # Map solution location indices to locations
        # Since SGP requires num_sensing points,
        # pad the current greedy solution with the
        # first location in the solution (or zeros if no points selected yet)
        X_indices_flat = np.array(X_indices).reshape(-1).astype(int)
        num_pad = self.num_sensing - len(X_indices_flat)

        # Ensure that if X_indices_flat is empty, we still create a valid padding array
        if len(X_indices_flat) == 0 and num_pad > 0:
            X_pad = np.zeros(num_pad, dtype=int)
        elif len(X_indices_flat) > 0 and num_pad > 0:
            X_pad = np.full(num_pad, X_indices_flat[0], dtype=int)
        else:  # num_pad is 0 or negative
            X_pad = np.array([], dtype=int)

        X_combined_indices = np.concatenate([X_indices_flat, X_pad])
        X_locations = self.X_objective[X_combined_indices].reshape(
            -1, self.num_dim)

        # Update the SGP inducing points
        self.sgpr.inducing_variable.Z.assign(X_locations)
        return self.sgpr.elbo().numpy()

    @property
    def transform(self) -> Transform:
        """
        Gets the transform object associated with the SGP model.

        Returns:
            Transform: The transform object.
        """
        return self.sgpr.transform


METHODS: Dict[str, Type[Method]] = {
    'BayesianOpt': BayesianOpt,
    'CMA': CMA,
    'ContinuousSGP': ContinuousSGP,
    'GreedyObjective': GreedyObjective,
    'GreedySGP': GreedySGP,
}


def get_method(method: str) -> Type[Method]:
    """
    Retrieves an optimization method class by its string name.

    Args:
        method (str): The name of the optimization method (e.g., 'ContinuousSGP', 'CMA').

    Returns:
        Type[Method]: The class of the requested optimization method.

    Raises:
        KeyError: If the method name is not found.

    Usage:
        ```python
        # To get the ContinuousSGP class
        ContinuousSGPClass = get_method('ContinuousSGP')
        # You can then instantiate it:
        # CSGP_instance = ContinuousSGPClass(...)
        ```
    """
    return METHODS[method]