sgptools 1.0.6__tar.gz → 1.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sgptools-1.0.6 → sgptools-1.1.0}/PKG-INFO +1 -1
- {sgptools-1.0.6 → sgptools-1.1.0}/README.md +5 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/setup.py +1 -1
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/bo.py +48 -25
- sgptools-1.1.0/sgptools/models/cma_es.py +121 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/greedy_mi.py +24 -14
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/greedy_sgp.py +15 -15
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/utils/misc.py +1 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools.egg-info/PKG-INFO +1 -1
- sgptools-1.0.6/sgptools/models/cma_es.py +0 -189
- {sgptools-1.0.6 → sgptools-1.1.0}/LICENSE.txt +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/setup.cfg +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/__init__.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/kernels/__init__.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/kernels/neural_kernel.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/__init__.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/continuous_sgp.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/core/__init__.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/core/augmented_gpr.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/core/augmented_sgpr.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/core/osgpr.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/core/transformations.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/utils/__init__.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/utils/data.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/utils/gpflow.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/utils/metrics.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools/utils/tsp.py +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools.egg-info/SOURCES.txt +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools.egg-info/dependency_links.txt +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools.egg-info/requires.txt +0 -0
- {sgptools-1.0.6 → sgptools-1.1.0}/sgptools.egg-info/top_level.txt +0 -0
{sgptools-1.0.6 → sgptools-1.1.0}/README.md

@@ -18,6 +18,11 @@ The library includes python code for the following:
 
 <img alt="Dark" src="docs/assets/AIPP-non-point_sensing.gif" width="45%"></p>
 
+## Related Packages
+
+- The [ros_sgp_tools](https://github.com/itskalvik/ros_sgp_tools) package provides a ROS2 companion package for SGP-Tools that can be deployed on ArduPilot-based vehicles.
+- The [docker-sgp-tools](https://github.com/itskalvik/docker-sgp-tools) package provides docker containers for running SGP-Tools in simulation and on ArduPilot-based vehicles.
+
 ## Installation
 The library is available as a ```pip``` package. To install the package, run the following command:
 
{sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/bo.py

@@ -29,67 +29,90 @@ class BayesianOpt:
         X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
         noise_variance (float): data variance
         kernel (gpflow.kernels.Kernel): gpflow kernel function
+        transform (Transform): Transform object
     """
-    def __init__(self, X_train, noise_variance, kernel):
+    def __init__(self, X_train, noise_variance, kernel,
+                 transform=None):
         self.X_train = X_train
         self.noise_variance = noise_variance
         self.kernel = kernel
         self.num_dim = X_train.shape[-1]
-
+        self.transform = transform
+
         # use the boundaries of the region as the search space
         self.pbounds_dim = []
         for i in range(self.num_dim):
             self.pbounds_dim.append((np.min(X_train[:, i]), np.max(X_train[:, i])))
-
+
     def objective(self, **kwargs):
-        """
+        """Objective function (GP-based Mutual Information)
+
+        Args:
+            x<i> (ndarray): (1, d); Current solution sensor placement location i
         """
+        # MI does not depend on waypoint order (reshape to -1, num_dim)
         X = []
         for i in range(len(kwargs)):
            X.append(kwargs['x{}'.format(i)])
         X = np.array(X).reshape(-1, self.num_dim)
-
-
+        if self.transform is not None:
+            X = self.transform.expand(X)
+            constraints_loss = self.transform.constraints(X)
+
+        try:
+            mi = get_mi(X, self.X_train, self.noise_variance, self.kernel)
+            mi += constraints_loss
+            mi = mi.numpy()
+        except:
+            mi = -1e4 # if the cholskey decomposition fails
+        return mi
+
     def optimize(self,
                  num_sensors=10,
                  max_steps=100,
                  X_init=None,
-                 init_points=10):
+                 init_points=10,
+                 verbose=0,
+                 seed=1234):
         """Optimizes the sensor placements using Bayesian Optimization without any constraints
 
         Args:
-            num_sensors (int): Number of sensor locations to optimize
-            max_steps (int): Maximum number of optimization steps
-            X_init (ndarray): (m, d); Initial inducing points
-            init_points (int):
+            num_sensors (int): Number of sensor locations to optimize.
+            max_steps (int): Maximum number of optimization steps.
+            X_init (ndarray): (m, d); Initial inducing points.
+            init_points (int): Number of random solutions used for initial exploration.
                                Random exploration can help by diversifying the exploration space.
+            verbose (int): The level of verbosity.
+            seed (int): The algorithm will use it to seed the randomnumber generator, ensuring replicability.
 
         Returns:
             Xu (ndarray): (m, d); Solution sensor placement locations
         """
         if X_init is None:
             X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
+        else:
+            num_sensors = len(X_init.reshape(-1, self.num_dim))
         X_init = X_init.reshape(-1)
 
         pbounds = {}
         for i in range(self.num_dim*num_sensors):
             pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]
 
-        optimizer = BayesianOptimization(
-
-
-
-
-
-
-
-        optimizer.maximize(
-            init_points=init_points,
-            n_iter=max_steps,
-        )
+        optimizer = BayesianOptimization(f=self.objective,
+                                         pbounds=pbounds,
+                                         verbose=verbose,
+                                         random_state=seed,
+                                         allow_duplicate_points=True)
+        optimizer.maximize(init_points=init_points,
+                           n_iter=max_steps)
 
         sol = []
         for i in range(self.num_dim*num_sensors):
             sol.append(optimizer.max['params']['x{}'.format(i)])
-
-
+        sol = np.array(sol).reshape(-1, self.num_dim)
+        if self.transform is not None:
+            sol = self.transform.expand(sol,
+                                        expand_sensor_model=False)
+        if not isinstance(sol, np.ndarray):
+            sol = sol.numpy()
+        return sol.reshape(-1, self.num_dim)
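For orientation, here is a minimal usage sketch of the updated class. It is not taken from the package docs: it assumes sgptools 1.1.0 with its gpflow and bayesian-optimization dependencies installed and uses made-up placeholder data; the constructor and `optimize()` arguments mirror the hunk above.

```python
import numpy as np
import gpflow
from sgptools.models.bo import BayesianOpt

# Placeholder 2D candidate locations approximating the monitoring region
X_train = np.random.RandomState(0).uniform(0, 10, size=(500, 2))

# Illustrative (untuned) GP hyperparameters
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=1.0)
bo = BayesianOpt(X_train, noise_variance=1e-2, kernel=kernel)

# verbose and seed are the arguments added in 1.1.0
placements = bo.optimize(num_sensors=5, max_steps=50,
                         init_points=10, verbose=0, seed=1234)
print(placements.shape)  # (5, 2); one row per sensor placement
```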
sgptools-1.1.0/sgptools/models/cma_es.py

@@ -0,0 +1,121 @@
+# Copyright 2024 The SGP-Tools Contributors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cma
+import numpy as np
+from shapely import geometry
+from ..utils.metrics import get_mi
+from ..utils.data import get_inducing_pts
+
+
+class CMA_ES:
+    """Class for optimizing sensor placements using CMA-ES (a genetic algorithm)
+
+    Refer to the following paper for more details:
+        - Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]
+
+    Args:
+        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        distance_budget (float): Distance budget for when treating the inducing points
+                                 as waypoints of a path
+        num_robots (int): Number of robots, used when modeling
+                          multi-robot IPP with a distance budget
+        transform (Transform): Transform object
+    """
+    def __init__(self, X_train, noise_variance, kernel,
+                 distance_budget=None,
+                 num_robots=1,
+                 transform=None):
+        self.boundaries = geometry.MultiPoint([[p[0], p[1]] for p in X_train]).convex_hull
+        self.X_train = X_train
+        self.noise_variance = noise_variance
+        self.kernel = kernel
+        self.num_dim = X_train.shape[-1]
+        self.distance_budget = distance_budget
+        self.num_robots = num_robots
+        self.transform = transform
+
+    def update(self, noise_variance, kernel):
+        """Update GP noise variance and kernel function parameters
+
+        Args:
+            noise_variance (float): data variance
+            kernel (gpflow.kernels.Kernel): gpflow kernel function
+        """
+        self.noise_variance = noise_variance
+        self.kernel = kernel
+
+    def objective(self, X):
+        """Objective function (GP-based Mutual Information)
+
+        Args:
+            X (ndarray): (n, d); Current solution sensor placement locations
+        """
+        # MI does not depend on waypoint order (reshape to -1, num_dim)
+        X = np.array(X).reshape(-1, self.num_dim)
+        constraints_loss = 0.0
+        if self.transform is not None:
+            X = self.transform.expand(X)
+            constraints_loss = self.transform.constraints(X)
+
+        try:
+            mi = -get_mi(X, self.X_train, self.noise_variance, self.kernel)
+            mi -= constraints_loss
+            mi = mi.numpy()
+        except:
+            mi = 0.0 # if the cholskey decomposition fails
+        return mi
+
+    def optimize(self,
+                 num_sensors=10,
+                 max_steps=5000,
+                 tol=1e-6,
+                 X_init=None,
+                 verbose=0,
+                 seed=1234):
+        """Optimizes the sensor placements using CMA-ES without any constraints
+
+        Args:
+            num_sensors (int): Number of sensor locations to optimize
+            max_steps (int): Maximum number of optimization steps
+            tol (float): Convergence tolerance to decide when to stop optimization
+            X_init (ndarray): (m, d); Initial inducing points
+            verbose (int): The level of verbosity.
+            seed (int): The algorithm will use it to seed the randomnumber generator, ensuring replicability.
+
+        Returns:
+            Xu (ndarray): (m, d); Solution sensor placement locations
+        """
+        sigma0 = 1.0
+
+        if X_init is None:
+            X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
+        X_init = X_init.reshape(-1)
+
+        xopt, _ = cma.fmin2(self.objective, X_init, sigma0,
+                            options={'maxfevals': max_steps,
+                                     'verb_disp': verbose,
+                                     'tolfun': tol,
+                                     'seed': seed},
+                            restarts=5)
+
+        xopt = np.array(xopt).reshape(-1, self.num_dim)
+        if self.transform is not None:
+            xopt = self.transform.expand(xopt,
+                                         expand_sensor_model=False)
+        if not isinstance(xopt, np.ndarray):
+            xopt = xopt.numpy()
+        return xopt.reshape(-1, self.num_dim)
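As with BayesianOpt, a minimal, hedged usage sketch of the new class follows (placeholder data, untuned hyperparameters; assumes sgptools 1.1.0 with the `cma` package installed). The calls mirror the constructor, `optimize()`, and `update()` signatures in the file above.

```python
import numpy as np
import gpflow
from sgptools.models.cma_es import CMA_ES

# Placeholder 2D candidate locations approximating the monitoring region
X_train = np.random.RandomState(0).uniform(0, 10, size=(500, 2))

kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=1.0)
cma_es = CMA_ES(X_train, noise_variance=1e-2, kernel=kernel)

# verbose, seed, and the looser default tol=1e-6 are the 1.1.0 changes
placements = cma_es.optimize(num_sensors=5, max_steps=5000,
                             tol=1e-6, verbose=0, seed=1234)
print(placements.shape)  # (5, 2)

# GP hyperparameters can be refreshed later without rebuilding the object
cma_es.update(noise_variance=2e-2, kernel=kernel)
```

Note that `objective()` returns the negated mutual information (minus any transform constraint penalty) because `cma.fmin2` minimizes its objective.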
{sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/greedy_mi.py

@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .core.augmented_gpr import AugmentedGPR
 from apricot import CustomSelection
+from gpflow.models.gpr import GPR
 import numpy as np
 
 
@@ -32,15 +32,24 @@ class GreedyMI:
         kernel (gpflow.kernels.Kernel): gpflow kernel function
         transform (Transform): Transform object
     """
-    def __init__(self, S, V, noise_variance, kernel, transform=None):
+    def __init__(self, S, V, noise_variance, kernel,
+                 transform=None):
         self.S = S
         self.V = V
         self.kernel = kernel
         self.input_dim = S.shape[1]
         self.noise_variance = noise_variance
         self.transform = transform
-
+
     def mutual_info(self, x):
+        """Computes mutual information using the points `x`
+
+        Args:
+            x (ndarray): (n); Indices of the solution placement locations
+
+        Returns:
+            MI (float): Mutual information between the placement x and candidate locations
+        """
         x = np.array(x).reshape(-1).astype(int)
         A = self.S[x[:-1]].reshape(-1, self.input_dim)
         y = self.S[x[-1]].reshape(-1, self.input_dim)
@@ -50,26 +59,27 @@ class GreedyMI:
         else:
             if self.transform is not None:
                 A = self.transform.expand(A)
-            a_gp =
-
-
-
-            _, sigma_a = a_gp.predict_f(y, aggregate_train=True)
+            a_gp = GPR(data=(A, np.zeros((len(A), 1))),
+                       kernel=self.kernel,
+                       noise_variance=self.noise_variance)
+            _, sigma_a = a_gp.predict_f(y)
 
-        # Remove locations in A to build A bar
+        # Remove locations in A∪y from V to build A bar (Refer to Krause et al., 2008)
         V_ = self.V.copy()
         V_rows = V_.view([('', V_.dtype)] * V_.shape[1])
+
         if self.transform is not None:
-
+            solution = self.S[x].reshape(-1, self.input_dim)
+            A_ = self.transform.expand(solution)
         else:
             A_ = self.S[x]
         A_rows = A_.view([('', V_.dtype)] * A_.shape[1])
+
         V_ = np.setdiff1d(V_rows, A_rows).view(V_.dtype).reshape(-1, V_.shape[1])
 
-        self.v_gp =
-
-
-                          transform=self.transform)
+        self.v_gp = GPR(data=(V_, np.zeros((len(V_), 1))),
+                        kernel=self.kernel,
+                        noise_variance=self.noise_variance)
         _, sigma_v = self.v_gp.predict_f(y)
 
         return (sigma_a/sigma_v).numpy().squeeze()
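For context on what the rewritten `mutual_info` returns: the ratio `sigma_a/sigma_v` computed with the two GPR models appears to correspond to the greedy mutual-information criterion of Krause et al. (2008) cited in the added comment, which at each step picks the candidate y maximizing

$$\frac{\sigma^2_{y\mid A}}{\sigma^2_{y\mid \bar{A}}} = \exp\!\bigl(2\,[H(y\mid A) - H(y\mid \bar{A})]\bigr),$$

where A is the current set of selected placements and Ā = V \ (A ∪ {y}) is the set of remaining candidate locations, so maximizing the variance ratio is equivalent to maximizing the marginal mutual-information gain H(y|A) − H(y|Ā).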
{sgptools-1.0.6 → sgptools-1.1.0}/sgptools/models/greedy_sgp.py

@@ -30,20 +30,16 @@ class GreedySGP:
         V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
         noise_variance (float): Data noise variance
         kernel (gpflow.kernels.Kernel): gpflow kernel function
-        Xu_fixed (ndarray): (m, d); Inducing points that are not optimized and are always
-                            added to the inducing points set during loss function computation
         transform (Transform): Transform object
     """
     def __init__(self, num_inducing, S, V, noise_variance, kernel,
-                 Xu_fixed=None,
                  transform=None):
-        self.
-
-
-
-
+        self.sgp = AugmentedSGPR((V, np.zeros((len(V), 1))),
+                                 noise_variance=noise_variance,
+                                 kernel=kernel,
+                                 inducing_variable=S[:num_inducing],
+                                 transform=transform)
         self.locs = S
-        self.Xu_fixed = Xu_fixed
         self.num_inducing = num_inducing
         self.inducing_dim = S.shape[1]
 
@@ -51,21 +47,25 @@ class GreedySGP:
         """Computes the SGP's optimization bound using the inducing points `x`
 
         Args:
-            x (ndarray): (n
+            x (ndarray): (n); Indices of the solution placement locations
 
         Returns:
            elbo (float): Evidence lower bound/SGP's optimization bound value
         """
         x = np.array(x).reshape(-1).astype(int)
         Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)
+
+        # Initialize all inducing points at the first solution placement location.
+        # Ensures that the number of inducing points is always fixed and no additional
+        # information is passed to the SGP
         Xu *= self.locs[x][0]
-        Xu[-len(x):] = self.locs[x]
 
-
-
+        # Copy all given solution placements to the inducing points set
+        Xu[-len(x):] = self.locs[x]
 
-
-
+        # Update the SGP inducing points
+        self.sgp.inducing_variable.Z.assign(Xu)
+        return self.sgp.elbo().numpy() # return the ELBO
 
 
 def get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel,
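The comments added to the bound computation describe a padding trick: every slot of the fixed-size inducing array is first filled with the first selected location, and the actual (possibly partial) greedy solution is then copied into the last `len(x)` slots, so the SGP always sees exactly `num_inducing` inducing points. A tiny standalone numpy illustration with hypothetical `locs` and `x` values:

```python
import numpy as np

num_inducing, inducing_dim = 5, 2                      # fixed inducing set size
locs = np.arange(20, dtype=np.float32).reshape(10, 2)  # hypothetical candidate locations
x = np.array([3, 7])                                   # indices selected so far (partial solution)

# Same construction as in the hunk above: pad with the first chosen location...
Xu = np.ones((num_inducing, inducing_dim), dtype=np.float32)
Xu *= locs[x][0]
# ...then overwrite the tail with the actual solution placements
Xu[-len(x):] = locs[x]

print(Xu)
# [[ 6.  7.]
#  [ 6.  7.]
#  [ 6.  7.]
#  [ 6.  7.]
#  [14. 15.]]
```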
sgptools-1.0.6/sgptools/models/cma_es.py

@@ -1,189 +0,0 @@
-# Copyright 2024 The SGP-Tools Contributors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import cma
-import numpy as np
-from shapely import geometry
-from ..utils.metrics import get_mi
-from ..utils.data import get_inducing_pts
-
-
-class CMA_ES:
-    """Class for optimizing sensor placements using CMA-ES (a genetic algorithm)
-
-    Refer to the following paper for more details:
-        - Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]
-
-    Args:
-        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
-        noise_variance (float): data variance
-        kernel (gpflow.kernels.Kernel): gpflow kernel function
-        distance_budget (float): Distance budget for when treating the inducing points
-                                 as waypoints of a path
-        num_robots (int): Number of robots, used when modeling
-                          multi-robot IPP with a distance budget
-        transform (Transform): Transform object
-    """
-    def __init__(self, X_train, noise_variance, kernel,
-                 distance_budget=None,
-                 num_robots=1,
-                 transform=None):
-        self.boundaries = geometry.MultiPoint([[p[0], p[1]] for p in X_train]).convex_hull
-        self.X_train = X_train
-        self.noise_variance = noise_variance
-        self.kernel = kernel
-        self.num_dim = X_train.shape[-1]
-        self.distance_budget = distance_budget
-        self.num_robots = num_robots
-        self.transform = transform
-
-    def update(self, noise_variance, kernel):
-        """Update GP noise variance and kernel function parameters
-
-        Args:
-            noise_variance (float): data variance
-            kernel (gpflow.kernels.Kernel): gpflow kernel function
-        """
-        self.noise_variance = noise_variance
-        self.kernel = kernel
-
-    def constraint(self, X):
-        """Constraint function for the optimization problem (constraint to limit the boundary of the region)
-        Does not work well with CMA-ES as it is a step function and is not continuous
-
-        Args:
-            X (ndarray): (n, d); Current sensor placement locations
-        """
-        X = np.array(X).reshape(-1, self.num_dim)
-        lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]
-        lagrangian = np.logical_not(lagrangian).astype(float)
-        return lagrangian
-
-    def distance_constraint(self, X):
-        """Constraint function for the optimization problem (constraint to limit the total travel distance)
-        Does not work well with CMA-ES as it is a step function and is not continuous
-
-        Args:
-            X (ndarray): (n, d); Current sensor placement locations
-        """
-        X = np.array(X).reshape(self.num_robots, -1, self.num_dim)
-        dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)
-        lagrangian = dists - self.distance_budget
-        lagrangian_mask = np.logical_not(lagrangian <= 0)
-        lagrangian[lagrangian_mask] = 0
-        lagrangian = np.sum(lagrangian)
-        return lagrangian
-
-    def objective(self, X):
-        """Objective function (GP-based Mutual Information)
-
-        Args:
-            X (ndarray): (n, d); Initial sensor placement locations
-        """
-        # MI does not depend on waypoint order (reshape to -1, num_dim)
-        X = np.array(X).reshape(-1, self.num_dim)
-        if self.transform is not None:
-            X = self.transform.expand(X,
-                                      expand_sensor_model=False).numpy()
-
-        try:
-            mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)
-        except:
-            mi = 0.0 # if the cholskey decomposition fails
-        return mi
-
-    def optimize(self,
-                 num_sensors=10,
-                 max_steps=5000,
-                 tol=1e-11,
-                 X_init=None):
-        """Optimizes the SP objective function using CMA-ES without any constraints
-
-        Args:
-            num_sensors (int): Number of sensor locations to optimize
-            max_steps (int): Maximum number of optimization steps
-            tol (float): Convergence tolerance to decide when to stop optimization
-            X_init (ndarray): (m, d); Initial inducing points
-
-        Returns:
-            Xu (ndarray): (m, d); Solution sensor placement locations
-        """
-        sigma0 = 1.0
-
-        if X_init is None:
-            X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
-        X_init = X_init.reshape(-1)
-
-        xopt, _ = cma.fmin2(self.objective, X_init, sigma0,
-                            options={'maxfevals': max_steps,
-                                     'verb_disp': 0,
-                                     'tolfun': tol,
-                                     'seed': 1234},
-                            restarts=5)
-
-        xopt = np.array(xopt).reshape(-1, self.num_dim)
-        if self.transform is not None:
-            xopt = self.transform.expand(xopt,
-                                         expand_sensor_model=False).numpy()
-
-        return xopt.reshape(-1, self.num_dim)
-
-    def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
-        """Optimizes the SP objective function using CMA-ES with a distance budget constraint
-
-        Args:
-            num_sensors (int): Number of sensor locations to optimize
-            max_steps (int): Maximum number of optimization steps
-            tol (float): Convergence tolerance to decide when to stop optimization
-
-        Returns:
-            Xu (ndarray): (m, d); Solution sensor placement locations
-        """
-        sigma0 = 1.0
-        idx = np.random.randint(len(self.X_train), size=num_sensors)
-        x_init = self.X_train[idx].reshape(-1)
-        cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)
-        xopt, _ = cma.fmin2(cfun, x_init, sigma0,
-                            options={'maxfevals': max_steps,
-                                     'verb_disp': 0,
-                                     'tolfun': tol,
-                                     'seed': 1234},
-                            callback=cfun.update,
-                            restarts=5)
-        return xopt.reshape(-1, self.num_dim)
-
-    def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
-        """Optimizes the SP objective function using CMA-ES with the constraints
-        to ensure that the sensors are placed within the boundaries of the region
-
-        Args:
-            num_sensors (int): Number of sensor locations to optimize
-            max_steps (int): Maximum number of optimization steps
-            tol (float): Convergence tolerance to decide when to stop optimization
-
-        Returns:
-            Xu (ndarray): (m, d); Solution sensor placement locations
-        """
-        sigma0 = 1.0
-        idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)
-        x_init = self.X_train[idx].reshape(-1)
-        cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)
-        xopt, _ = cma.fmin2(cfun, x_init, sigma0,
-                            options={'maxfevals': max_steps,
-                                     'verb_disp': 0,
-                                     'tolfun': tol,
-                                     'seed': 1234},
-                            callback=cfun.update,
-                            restarts=5)
-        return xopt.reshape(-1, self.num_dim)
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|